/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2017 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "memmodel.h"
#include "gimple.h"
#include "cfghooks.h"
#include "cfgloop.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "regs.h"
#include "ira.h"
#include "recog.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "flags.h"
#include "alias.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "explow.h"
#include "expr.h"
#include "output.h"
#include "dbxout.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "builtins.h"
#include "context.h"
#include "tree-pass.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "ppc-auxv.h"

/* This file should be included last.  */
#include "target-def.h"

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))

/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31 */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Whether the instruction chain has been scanned already.  */
  int spe_insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, which we call
   to get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif
/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate.  */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;	/* option name */
  unsigned int mask;	/* mask bits to set */
} recip_options[] = {
  { "all",	RECIP_ALL },
  { "none",	RECIP_NONE },
  { "div",	(RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		 | RECIP_V2DF_DIV) },
  { "divf",	(RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	(RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	(RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	(RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
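
/* Editorial sketch (not from the original source): a -mrecip=<opts> string
   such as "div,rsqrtf" can be turned into a mask by scanning the table
   above and OR-ing in each matching entry, roughly as below.
   parse_recip_string is a hypothetical helper; the real option handling
   (including "!" negation and error reporting) lives later in this file.

     static unsigned int
     parse_recip_string (const char *opts)
     {
       unsigned int mask = 0;
       while (*opts)
	 {
	   size_t len = strcspn (opts, ",");
	   for (size_t i = 0; i < ARRAY_SIZE (recip_options); i++)
	     if (strlen (recip_options[i].string) == len
		 && strncmp (opts, recip_options[i].string, len) == 0)
	       mask |= recip_options[i].mask;
	   opts += len + (opts[len] == ',');
	 }
       return mask;
     }
*/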

/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	  PPC_PLATFORM_POWER9 },
  { "power8",	  PPC_PLATFORM_POWER8 },
  { "power7",	  PPC_PLATFORM_POWER7 },
  { "power6x",	  PPC_PLATFORM_POWER6X },
  { "power6",	  PPC_PLATFORM_POWER6 },
  { "power5+",	  PPC_PLATFORM_POWER5_PLUS },
  { "power5",	  PPC_PLATFORM_POWER5 },
  { "ppc970",	  PPC_PLATFORM_PPC970 },
  { "power4",	  PPC_PLATFORM_POWER4 },
  { "ppca2",	  PPC_PLATFORM_PPCA2 },
  { "ppc476",	  PPC_PLATFORM_PPC476 },
  { "ppc464",	  PPC_PLATFORM_PPC464 },
  { "ppc440",	  PPC_PLATFORM_PPC440 },
  { "ppc405",	  PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
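
/* Editorial note (not from the original source): this table backs the GCC
   builtin __builtin_cpu_is, which compares its argument against the
   AT_PLATFORM value glibc records in the TCB.  Typical user-level usage:

     if (__builtin_cpu_is ("power9"))
       do_power9_path ();	// hypothetical tuned path
     else
       do_generic_path ();	// hypothetical fallback
*/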

/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",	 PPC_FEATURE_HAS_4xxMAC,	0 },
  { "altivec",	 PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05", PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06", PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",	 PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",	 PPC_FEATURE_BOOKE,		0 },
  { "cellbe",	 PPC_FEATURE_CELL_BE,		0 },
  { "dfp",	 PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble", PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle", PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",	 PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",	 PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",	 PPC_FEATURE_HAS_MMU,		0 },
  { "notb",	 PPC_FEATURE_NO_TB,		0 },
  { "pa6t",	 PPC_FEATURE_PA6T,		0 },
  { "power4",	 PPC_FEATURE_POWER4,		0 },
  { "power5",	 PPC_FEATURE_POWER5,		0 },
  { "power5+",	 PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",	 PPC_FEATURE_POWER6_EXT,	0 },
  { "ppc32",	 PPC_FEATURE_32,		0 },
  { "ppc601",	 PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",	 PPC_FEATURE_64,		0 },
  { "ppcle",	 PPC_FEATURE_PPC_LE,		0 },
  { "smt",	 PPC_FEATURE_SMT,		0 },
  { "spe",	 PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",	 PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",	 PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",	 PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07", PPC_FEATURE2_ARCH_2_07,	1 },
  { "dscr",	 PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",	 PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",	 PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",	 PPC_FEATURE2_HTM_NOSC,		1 },
  { "isel",	 PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",	 PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",	 PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00", PPC_FEATURE2_ARCH_3_00,	1 },
  { "ieee128",	 PPC_FEATURE2_HAS_IEEE128,	1 }
};
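
/* Editorial note (not from the original source): these entries back
   __builtin_cpu_supports, which tests one HWCAP/HWCAP2 feature bit at run
   time; the "id" field selects AT_HWCAP (0) versus AT_HWCAP2 (1).  Typical
   user-level usage:

     if (__builtin_cpu_supports ("vsx"))
       copy_with_vsx (dst, src, n);	// hypothetical VSX fast path
     else
       memcpy (dst, src, n);
*/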

/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";

/* True if we have expanded a CPU builtin.  */
bool cpu_builtin_p;

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
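
/* Editorial sketch (not from the original source): since the pointer may be
   null for languages that don't link in rs6000-c.c, callers guard the
   indirect call, roughly:

     if (rs6000_target_modify_macros_ptr)
       rs6000_target_modify_macros_ptr (false, old_flags, old_bu_mask);

   where old_flags and old_bu_mask stand in for the caller's saved ISA and
   builtin masks.  */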

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE,
  SPE_ACC_TYPE,
  SPEFSCR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)


/* Register classes we care about in secondary reload or when checking for a
   legitimate address.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,	/* General purpose registers.  */
  RELOAD_REG_FPR,	/* Traditional floating point regs.  */
  RELOAD_REG_VMX,	/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,	/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;	/* Register class name.  */
  int reg;		/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* Quad offset is limited.  */

/* Valid address masks, based on register type, plus the reload insns needed
   for each mode.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;		/* INSN for fusing gpr ADDIS/loads.  */
					/* INSNs for fusing addi with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
					/* INSNs for fusing addis with loads
					   or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
  bool fused_toc;			/* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_vsx_dform_quad (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}

\f
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
  const int sfdf_convert;	/* cost of SF->DF conversion.  */
};

const struct processor_costs *rs6000_cost;

/* Processor costs (relative to an add).  */

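/* Editorial note (not from the original source): COSTS_N_INSNS, used
   throughout the tables below, comes from rtl.h and scales an instruction
   count into RTX cost units, one instruction being four units:

     #define COSTS_N_INSNS(N) ((N) * 4)

   so, e.g., COSTS_N_INSNS (17) for sdiv means roughly "17 add latencies".  */
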
/* Instruction size costs on 32-bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64-bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* l1 cache */
  0,			/* l2 cache */
  0,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  128,			/* l1 cache */
  2048,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  4,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  8,			/* l1 cache */
  64,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,	/* mulsi */
  COSTS_N_INSNS (6/2),		/* mulsi_const */
  COSTS_N_INSNS (6/2),		/* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,	/* muldi */
  COSTS_N_INSNS (38/2),		/* divsi */
  COSTS_N_INSNS (70/2),		/* divdi */
  COSTS_N_INSNS (10/2),		/* fp */
  COSTS_N_INSNS (10/2),		/* dmul */
  COSTS_N_INSNS (74/2),		/* sdiv */
  COSTS_N_INSNS (74/2),		/* ddiv */
  128,				/* cache line size */
  32,				/* l1 cache */
  512,				/* l2 cache */
  6,				/* streams */
  0,				/* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  1,			/* streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
static const
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  512,			/* l2 cache */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  64,			/* cache line size */
  16,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

\f
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
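
/* Editorial sketch (not from the original source) of the X-macro technique
   used above: rs6000-builtin.def contains one RS6000_BUILTIN_* invocation
   per builtin, and each includer defines those macros to extract the fields
   it needs.  A minimal self-contained analogue, with a hypothetical
   "my-builtins.def" holding lines like MY_BUILTIN (FOO, "foo"):

     struct info { const char *name; };
     #define MY_BUILTIN(ENUM, NAME) { NAME },
     static const struct info table[] = {
     #include "my-builtins.def"
     };
     #undef MY_BUILTIN

   The same .def file can be re-included with MY_BUILTIN redefined to emit
   enum values instead, keeping the table and the enum in sync.  */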

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);

\f
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static void htm_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  machine_mode);
static bool rs6000_cannot_change_mode_class (machine_mode,
					     machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (machine_mode,
						   machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (machine_mode,
					     machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
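
/* Editorial sketch (not from the original source): with these hashers,
   entries are typically interned via the usual GCC hash_table idiom,
   roughly:

     builtin_hash_struct h, **slot;
     // ...fill in h.mode[] and h.uns_p[]...
     slot = builtin_hash_table->find_slot (&h, INSERT);
     if (*slot == NULL)
       {
	 // allocate a GC'ed copy of h and cache the new type in it
       }

   The exact hash and equal implementations appear later in this file.  */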

\f
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "ca",
  /* AltiVec registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr",
  /* SPE High registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr",
  /* SPE High registers.  */
  "%rh0", "%rh1", "%rh2", "%rh3", "%rh4", "%rh5", "%rh6", "%rh7",
  "%rh8", "%rh9", "%rh10", "%rh11", "%rh12", "%rh13", "%rh14", "%rh15",
  "%rh16", "%rh17", "%rh18", "%rh19", "%rh20", "%rh21", "%rh22", "%rh23",
  "%rh24", "%rh25", "%rh26", "%rh27", "%rh28", "%rh29", "%rh30", "%rh31"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",	 1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",	 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,	 0, 0, false, false, false, NULL, false }
};
\f
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
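
/* Editorial example (not from the original source):
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (the %v0 bit), and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x1 (the %v31 bit).  */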
\f
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif

/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif

#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN rs6000_fold_builtin
1636 #undef TARGET_GIMPLE_FOLD_BUILTIN
1637 #define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin
1638
1639 #undef TARGET_EXPAND_BUILTIN
1640 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1641
1642 #undef TARGET_MANGLE_TYPE
1643 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1644
1645 #undef TARGET_INIT_LIBFUNCS
1646 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1647
1648 #if TARGET_MACHO
1649 #undef TARGET_BINDS_LOCAL_P
1650 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1651 #endif
1652
1653 #undef TARGET_MS_BITFIELD_LAYOUT_P
1654 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1655
1656 #undef TARGET_ASM_OUTPUT_MI_THUNK
1657 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1658
1659 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1660 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1661
1662 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1663 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1664
1665 #undef TARGET_REGISTER_MOVE_COST
1666 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1667 #undef TARGET_MEMORY_MOVE_COST
1668 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1669 #undef TARGET_CANNOT_COPY_INSN_P
1670 #define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
1671 #undef TARGET_RTX_COSTS
1672 #define TARGET_RTX_COSTS rs6000_rtx_costs
1673 #undef TARGET_ADDRESS_COST
1674 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1675
1676 #undef TARGET_DWARF_REGISTER_SPAN
1677 #define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span
1678
1679 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1680 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1681
1682 #undef TARGET_MEMBER_TYPE_FORCES_BLK
1683 #define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk
1684
1685 #undef TARGET_PROMOTE_FUNCTION_MODE
1686 #define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode
1687
1688 #undef TARGET_RETURN_IN_MEMORY
1689 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1690
1691 #undef TARGET_RETURN_IN_MSB
1692 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1693
1694 #undef TARGET_SETUP_INCOMING_VARARGS
1695 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1696
1697 /* Always strict argument naming on rs6000. */
1698 #undef TARGET_STRICT_ARGUMENT_NAMING
1699 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1700 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1701 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1702 #undef TARGET_SPLIT_COMPLEX_ARG
1703 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1704 #undef TARGET_MUST_PASS_IN_STACK
1705 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1706 #undef TARGET_PASS_BY_REFERENCE
1707 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1708 #undef TARGET_ARG_PARTIAL_BYTES
1709 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1710 #undef TARGET_FUNCTION_ARG_ADVANCE
1711 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1712 #undef TARGET_FUNCTION_ARG
1713 #define TARGET_FUNCTION_ARG rs6000_function_arg
1714 #undef TARGET_FUNCTION_ARG_BOUNDARY
1715 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1716
1717 #undef TARGET_BUILD_BUILTIN_VA_LIST
1718 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1719
1720 #undef TARGET_EXPAND_BUILTIN_VA_START
1721 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1722
1723 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1724 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1725
1726 #undef TARGET_EH_RETURN_FILTER_MODE
1727 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1728
1729 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1730 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1731
1732 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1733 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1734
1735 #undef TARGET_FLOATN_MODE
1736 #define TARGET_FLOATN_MODE rs6000_floatn_mode
1737
1738 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1739 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1740
1741 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1742 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1743
1744 #undef TARGET_MD_ASM_ADJUST
1745 #define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust
1746
1747 #undef TARGET_OPTION_OVERRIDE
1748 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1749
1750 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1751 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1752 rs6000_builtin_vectorized_function
1753
1754 #undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
1755 #define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
1756 rs6000_builtin_md_vectorized_function
1757
1758 #ifdef TARGET_THREAD_SSP_OFFSET
1759 #undef TARGET_STACK_PROTECT_GUARD
1760 #define TARGET_STACK_PROTECT_GUARD hook_tree_void_null
1761 #endif
1762
1763 #if !TARGET_MACHO
1764 #undef TARGET_STACK_PROTECT_FAIL
1765 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1766 #endif
1767
1768 #ifdef HAVE_AS_TLS
1769 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1770 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1771 #endif
1772
1773 /* Use a 32-bit anchor range. This leads to sequences like:
1774
1775 addis tmp,anchor,high
1776 add dest,tmp,low
1777
1778 where tmp itself acts as an anchor, and can be shared between
1779 accesses to the same 64k page. */
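/* Illustrative arithmetic (hypothetical offset, not from the sources): for
   anchor + 0x1234abcd, the low 16 bits 0xabcd sign-extend to -0x5433, so the
   addis must load 0x1235 rather than 0x1234, since
   (0x1235 << 16) + (-0x5433) == 0x1234abcd.  */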
1780 #undef TARGET_MIN_ANCHOR_OFFSET
1781 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1782 #undef TARGET_MAX_ANCHOR_OFFSET
1783 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
1784 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1785 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1786 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1787 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1788
1789 #undef TARGET_BUILTIN_RECIPROCAL
1790 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1791
1792 #undef TARGET_EXPAND_TO_RTL_HOOK
1793 #define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot
1794
1795 #undef TARGET_INSTANTIATE_DECLS
1796 #define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls
1797
1798 #undef TARGET_SECONDARY_RELOAD
1799 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1800
1801 #undef TARGET_LEGITIMATE_ADDRESS_P
1802 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1803
1804 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1805 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1806
1807 #undef TARGET_LRA_P
1808 #define TARGET_LRA_P rs6000_lra_p
1809
1810 #undef TARGET_COMPUTE_PRESSURE_CLASSES
1811 #define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes
1812
1813 #undef TARGET_CAN_ELIMINATE
1814 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1815
1816 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1817 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1818
1819 #undef TARGET_SCHED_REASSOCIATION_WIDTH
1820 #define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width
1821
1822 #undef TARGET_TRAMPOLINE_INIT
1823 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1824
1825 #undef TARGET_FUNCTION_VALUE
1826 #define TARGET_FUNCTION_VALUE rs6000_function_value
1827
1828 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1829 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1830
1831 #undef TARGET_OPTION_SAVE
1832 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1833
1834 #undef TARGET_OPTION_RESTORE
1835 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1836
1837 #undef TARGET_OPTION_PRINT
1838 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1839
1840 #undef TARGET_CAN_INLINE_P
1841 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1842
1843 #undef TARGET_SET_CURRENT_FUNCTION
1844 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1845
1846 #undef TARGET_LEGITIMATE_CONSTANT_P
1847 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1848
1849 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1850 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1851
1852 #undef TARGET_CAN_USE_DOLOOP_P
1853 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1854
1855 #undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
1856 #define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv
1857
1858 #undef TARGET_LIBGCC_CMP_RETURN_MODE
1859 #define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
1860 #undef TARGET_LIBGCC_SHIFT_COUNT_MODE
1861 #define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
1862 #undef TARGET_UNWIND_WORD_MODE
1863 #define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode
1864
1865 #undef TARGET_OFFLOAD_OPTIONS
1866 #define TARGET_OFFLOAD_OPTIONS rs6000_offload_options
1867
1868 #undef TARGET_C_MODE_FOR_SUFFIX
1869 #define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix
1870
1871 #undef TARGET_INVALID_BINARY_OP
1872 #define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op
1873
1874 #undef TARGET_OPTAB_SUPPORTED_P
1875 #define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p
1876
1877 #undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
1878 #define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1
1879 \f
1880
1881 /* Processor table. */
1882 struct rs6000_ptt
1883 {
1884 const char *const name; /* Canonical processor name. */
1885 const enum processor_type processor; /* Processor type enum value. */
1886 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1887 };
1888
1889 static struct rs6000_ptt const processor_target_table[] =
1890 {
1891 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1892 #include "rs6000-cpus.def"
1893 #undef RS6000_CPU
1894 };
1895
1896 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1897 name is invalid. */
1898
1899 static int
1900 rs6000_cpu_name_lookup (const char *name)
1901 {
1902 size_t i;
1903
1904 if (name != NULL)
1905 {
1906 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
1907 if (! strcmp (name, processor_target_table[i].name))
1908 return (int)i;
1909 }
1910
1911 return -1;
1912 }
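/* A successful lookup indexes processor_target_table directly; e.g.,
   assuming "power8" is listed in rs6000-cpus.def,
   processor_target_table[rs6000_cpu_name_lookup ("power8")].processor
   is the processor_type value behind -mcpu=power8.  */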
1913
1914 \f
1915 /* Return number of consecutive hard regs needed starting at reg REGNO
1916 to hold something of mode MODE.
1917 This is ordinarily the length in words of a value of mode MODE
1918 but can be less for certain modes in special long registers.
1919
1920 For the SPE, GPRs are 64 bits but only 32 bits are visible in
1921 scalar instructions. The upper 32 bits are only available to the
1922 SIMD instructions.
1923
1924 POWER and PowerPC GPRs hold 32 bits worth;
1925 PowerPC64 GPRs and FPRs hold 64 bits worth. */
1926
1927 static int
1928 rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
1929 {
1930 unsigned HOST_WIDE_INT reg_size;
1931
1932 /* 128-bit floating point usually takes 2 registers, unless it is IEEE
1933 128-bit floating point that can go in vector registers, which has VSX
1934 memory addressing. */
1935 if (FP_REGNO_P (regno))
1936 reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
1937 ? UNITS_PER_VSX_WORD
1938 : UNITS_PER_FP_WORD);
1939
1940 else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1941 reg_size = UNITS_PER_SPE_WORD;
1942
1943 else if (ALTIVEC_REGNO_P (regno))
1944 reg_size = UNITS_PER_ALTIVEC_WORD;
1945
1946 /* The value returned for SCmode in the E500 double case is 2 for
1947 ABI compatibility; storing an SCmode value in a single register
1948 would require function_arg and rs6000_spe_function_arg to handle
1949 SCmode so as to pass the value correctly in a pair of
1950 registers. */
1951 else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
1952 && !DECIMAL_FLOAT_MODE_P (mode) && SPE_SIMD_REGNO_P (regno))
1953 reg_size = UNITS_PER_FP_WORD;
1954
1955 else
1956 reg_size = UNITS_PER_WORD;
1957
1958 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
1959 }
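/* For example, with 8-byte FPRs (UNITS_PER_FP_WORD == 8), a 16-byte IBM
   long double needs (16 + 8 - 1) / 8 == 2 consecutive registers, while a
   4-byte SFmode value rounds up to a single register.  */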
1960
1961 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1962 MODE. */
1963 static int
1964 rs6000_hard_regno_mode_ok (int regno, machine_mode mode)
1965 {
1966 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1967
1968 if (COMPLEX_MODE_P (mode))
1969 mode = GET_MODE_INNER (mode);
1970
1971 /* PTImode can only go in GPRs. Quad word memory operations require even/odd
1972 register combinations, and use PTImode where we need to deal with quad
1973 word memory operations. Don't allow quad words in the argument or frame
1974 pointer registers, just registers 0..31. */
1975 if (mode == PTImode)
1976 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1977 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1978 && ((regno & 1) == 0));
1979
1980 /* VSX registers that overlap the FPR registers are larger than for non-VSX
1981 implementations. Don't allow an item to be split between a FP register
1982 and an Altivec register. Allow TImode in all VSX registers if the user
1983 asked for it. */
1984 if (TARGET_VSX && VSX_REGNO_P (regno)
1985 && (VECTOR_MEM_VSX_P (mode)
1986 || FLOAT128_VECTOR_P (mode)
1987 || reg_addr[mode].scalar_in_vmx_p
1988 || (TARGET_VSX_TIMODE && mode == TImode)
1989 || (TARGET_VADDUQM && mode == V1TImode)))
1990 {
1991 if (FP_REGNO_P (regno))
1992 return FP_REGNO_P (last_regno);
1993
1994 if (ALTIVEC_REGNO_P (regno))
1995 {
1996 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
1997 return 0;
1998
1999 return ALTIVEC_REGNO_P (last_regno);
2000 }
2001 }
2002
2003 /* The GPRs can hold any mode, but values bigger than one register
2004 cannot go past R31. */
2005 if (INT_REGNO_P (regno))
2006 return INT_REGNO_P (last_regno);
2007
2008 /* The float registers (except for VSX vector modes) can only hold floating
2009 modes and DImode. */
2010 if (FP_REGNO_P (regno))
2011 {
2012 if (FLOAT128_VECTOR_P (mode))
2013 return false;
2014
2015 if (SCALAR_FLOAT_MODE_P (mode)
2016 && (mode != TDmode || (regno % 2) == 0)
2017 && FP_REGNO_P (last_regno))
2018 return 1;
2019
2020 if (GET_MODE_CLASS (mode) == MODE_INT)
2021 {
2022 if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
2023 return 1;
2024
2025 if (TARGET_VSX_SMALL_INTEGER)
2026 {
2027 if (mode == SImode)
2028 return 1;
2029
2030 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
2031 return 1;
2032 }
2033 }
2034
2035 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
2036 && PAIRED_VECTOR_MODE (mode))
2037 return 1;
2038
2039 return 0;
2040 }
2041
2042 /* The CR register can only hold CC modes. */
2043 if (CR_REGNO_P (regno))
2044 return GET_MODE_CLASS (mode) == MODE_CC;
2045
2046 if (CA_REGNO_P (regno))
2047 return mode == Pmode || mode == SImode;
2048
2049 /* AltiVec only in AltiVec registers. */
2050 if (ALTIVEC_REGNO_P (regno))
2051 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
2052 || mode == V1TImode);
2053
2054 /* ...but GPRs can hold SIMD data on the SPE in one register. */
2055 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
2056 return 1;
2057
2058 /* We cannot put non-VSX TImode or PTImode anywhere except in the general
2059 registers, and the value must be able to fit within the register set. */
2060
2061 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
2062 }
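/* As a concrete case: on a 32-bit target DImode occupies two GPRs, so a
   value starting in r31 would end in register 32 (the first FPR), and the
   INT_REGNO_P (last_regno) check above correctly rejects it.  */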
2063
2064 /* Print interesting facts about registers. */
2065 static void
2066 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
2067 {
2068 int r, m;
2069
2070 for (r = first_regno; r <= last_regno; ++r)
2071 {
2072 const char *comma = "";
2073 int len;
2074
2075 if (first_regno == last_regno)
2076 fprintf (stderr, "%s:\t", reg_name);
2077 else
2078 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
2079
2080 len = 8;
2081 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2082 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
2083 {
2084 if (len > 70)
2085 {
2086 fprintf (stderr, ",\n\t");
2087 len = 8;
2088 comma = "";
2089 }
2090
2091 if (rs6000_hard_regno_nregs[m][r] > 1)
2092 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
2093 rs6000_hard_regno_nregs[m][r]);
2094 else
2095 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
2096
2097 comma = ", ";
2098 }
2099
2100 if (call_used_regs[r])
2101 {
2102 if (len > 70)
2103 {
2104 fprintf (stderr, ",\n\t");
2105 len = 8;
2106 comma = "";
2107 }
2108
2109 len += fprintf (stderr, "%s%s", comma, "call-used");
2110 comma = ", ";
2111 }
2112
2113 if (fixed_regs[r])
2114 {
2115 if (len > 70)
2116 {
2117 fprintf (stderr, ",\n\t");
2118 len = 8;
2119 comma = "";
2120 }
2121
2122 len += fprintf (stderr, "%s%s", comma, "fixed");
2123 comma = ", ";
2124 }
2125
2126 if (len > 70)
2127 {
2128 fprintf (stderr, ",\n\t");
2129 comma = "";
2130 }
2131
2132 len += fprintf (stderr, "%sreg-class = %s", comma,
2133 reg_class_names[(int)rs6000_regno_regclass[r]]);
2134 comma = ", ";
2135
2136 if (len > 70)
2137 {
2138 fprintf (stderr, ",\n\t");
2139 comma = "";
2140 }
2141
2142 fprintf (stderr, "%sregno = %d\n", comma, r);
2143 }
2144 }
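/* The output shape is roughly (illustrative, not captured from a run):
     gr0:    SI, DI/2, ..., call-used, reg-class = GENERAL_REGS, regno = 0
   with a line break inserted whenever a line passes column 70.  */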
2145
2146 static const char *
2147 rs6000_debug_vector_unit (enum rs6000_vector v)
2148 {
2149 const char *ret;
2150
2151 switch (v)
2152 {
2153 case VECTOR_NONE: ret = "none"; break;
2154 case VECTOR_ALTIVEC: ret = "altivec"; break;
2155 case VECTOR_VSX: ret = "vsx"; break;
2156 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
2157 case VECTOR_PAIRED: ret = "paired"; break;
2158 case VECTOR_SPE: ret = "spe"; break;
2159 case VECTOR_OTHER: ret = "other"; break;
2160 default: ret = "unknown"; break;
2161 }
2162
2163 return ret;
2164 }
2165
2166 /* Inner function printing just the address mask for a particular reload
2167 register class. */
2168 DEBUG_FUNCTION char *
2169 rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
2170 {
2171 static char ret[8];
2172 char *p = ret;
2173
2174 if ((mask & RELOAD_REG_VALID) != 0)
2175 *p++ = 'v';
2176 else if (keep_spaces)
2177 *p++ = ' ';
2178
2179 if ((mask & RELOAD_REG_MULTIPLE) != 0)
2180 *p++ = 'm';
2181 else if (keep_spaces)
2182 *p++ = ' ';
2183
2184 if ((mask & RELOAD_REG_INDEXED) != 0)
2185 *p++ = 'i';
2186 else if (keep_spaces)
2187 *p++ = ' ';
2188
2189 if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
2190 *p++ = 'O';
2191 else if ((mask & RELOAD_REG_OFFSET) != 0)
2192 *p++ = 'o';
2193 else if (keep_spaces)
2194 *p++ = ' ';
2195
2196 if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
2197 *p++ = '+';
2198 else if (keep_spaces)
2199 *p++ = ' ';
2200
2201 if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
2202 *p++ = '+';
2203 else if (keep_spaces)
2204 *p++ = ' ';
2205
2206 if ((mask & RELOAD_REG_AND_M16) != 0)
2207 *p++ = '&';
2208 else if (keep_spaces)
2209 *p++ = ' ';
2210
2211 *p = '\0';
2212
2213 return ret;
2214 }
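/* For instance, a mask with just RELOAD_REG_VALID, RELOAD_REG_INDEXED and
   RELOAD_REG_OFFSET set comes back as "vio", or as "v io   " when
   KEEP_SPACES asks for fixed columns.  */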
2215
2216 /* Print the address masks in a human readable fashion. */
2217 DEBUG_FUNCTION void
2218 rs6000_debug_print_mode (ssize_t m)
2219 {
2220 ssize_t rc;
2221 int spaces = 0;
2222 bool fuse_extra_p;
2223
2224 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
2225 for (rc = 0; rc < N_RELOAD_REG; rc++)
2226 fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
2227 rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));
2228
2229 if ((reg_addr[m].reload_store != CODE_FOR_nothing)
2230 || (reg_addr[m].reload_load != CODE_FOR_nothing))
2231 fprintf (stderr, " Reload=%c%c",
2232 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
2233 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
2234 else
2235 spaces += sizeof (" Reload=sl") - 1;
2236
2237 if (reg_addr[m].scalar_in_vmx_p)
2238 {
2239 fprintf (stderr, "%*s Upper=y", spaces, "");
2240 spaces = 0;
2241 }
2242 else
2243 spaces += sizeof (" Upper=y") - 1;
2244
2245 fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2246 || reg_addr[m].fused_toc);
2247 if (!fuse_extra_p)
2248 {
2249 for (rc = 0; rc < N_RELOAD_REG; rc++)
2250 {
2251 if (rc != RELOAD_REG_ANY)
2252 {
2253 if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
2255 || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
2256 || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
2257 || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2258 {
2259 fuse_extra_p = true;
2260 break;
2261 }
2262 }
2263 }
2264 }
2265
2266 if (fuse_extra_p)
2267 {
2268 fprintf (stderr, "%*s Fuse:", spaces, "");
2269 spaces = 0;
2270
2271 for (rc = 0; rc < N_RELOAD_REG; rc++)
2272 {
2273 if (rc != RELOAD_REG_ANY)
2274 {
2275 char load, store;
2276
2277 if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
2278 load = 'l';
2279 else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
2280 load = 'L';
2281 else
2282 load = '-';
2283
2284 if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
2285 store = 's';
2286 else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
2287 store = 'S';
2288 else
2289 store = '-';
2290
2291 if (load == '-' && store == '-')
2292 spaces += 5;
2293 else
2294 {
2295 fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
2296 reload_reg_map[rc].name[0], load, store);
2297 spaces = 0;
2298 }
2299 }
2300 }
2301
2302 if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
2303 {
2304 fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
2305 spaces = 0;
2306 }
2307 else
2308 spaces += sizeof (" P8gpr") - 1;
2309
2310 if (reg_addr[m].fused_toc)
2311 {
2312 fprintf (stderr, "%*sToc", (spaces + 1), "");
2313 spaces = 0;
2314 }
2315 else
2316 spaces += sizeof (" Toc") - 1;
2317 }
2318 else
2319 spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;
2320
2321 if (rs6000_vector_unit[m] != VECTOR_NONE
2322 || rs6000_vector_mem[m] != VECTOR_NONE)
2323 {
2324 fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
2325 spaces, "",
2326 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
2327 rs6000_debug_vector_unit (rs6000_vector_mem[m]));
2328 }
2329
2330 fputs ("\n", stderr);
2331 }
2332
2333 #define DEBUG_FMT_ID "%-32s= "
2334 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
2335 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
2336 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
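/* These rely on string-literal concatenation; e.g. DEBUG_FMT_D expands to
   "%-32s= %d\n", so fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size)
   prints the label left-justified in a 32-column field before the value.  */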
2337
2338 /* Print various interesting information with -mdebug=reg. */
2339 static void
2340 rs6000_debug_reg_global (void)
2341 {
2342 static const char *const tf[2] = { "false", "true" };
2343 const char *nl = (const char *)0;
2344 int m;
2345 size_t m1, m2, v;
2346 char costly_num[20];
2347 char nop_num[20];
2348 char flags_buffer[40];
2349 const char *costly_str;
2350 const char *nop_str;
2351 const char *trace_str;
2352 const char *abi_str;
2353 const char *cmodel_str;
2354 struct cl_target_option cl_opts;
2355
2356 /* Modes we want tieable information on. */
2357 static const machine_mode print_tieable_modes[] = {
2358 QImode,
2359 HImode,
2360 SImode,
2361 DImode,
2362 TImode,
2363 PTImode,
2364 SFmode,
2365 DFmode,
2366 TFmode,
2367 IFmode,
2368 KFmode,
2369 SDmode,
2370 DDmode,
2371 TDmode,
2372 V8QImode,
2373 V4HImode,
2374 V2SImode,
2375 V16QImode,
2376 V8HImode,
2377 V4SImode,
2378 V2DImode,
2379 V1TImode,
2380 V32QImode,
2381 V16HImode,
2382 V8SImode,
2383 V4DImode,
2384 V2TImode,
2385 V2SFmode,
2386 V4SFmode,
2387 V2DFmode,
2388 V8SFmode,
2389 V4DFmode,
2390 CCmode,
2391 CCUNSmode,
2392 CCEQmode,
2393 };
2394
2395 /* Virtual regs we are interested in. */
2396 static const struct {
2397 int regno; /* register number. */
2398 const char *name; /* register name. */
2399 } virtual_regs[] = {
2400 { STACK_POINTER_REGNUM, "stack pointer:" },
2401 { TOC_REGNUM, "toc: " },
2402 { STATIC_CHAIN_REGNUM, "static chain: " },
2403 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2404 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2405 { ARG_POINTER_REGNUM, "arg pointer: " },
2406 { FRAME_POINTER_REGNUM, "frame pointer:" },
2407 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2408 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2409 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2410 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2411 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2412 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2413 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2414 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2415 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2416 };
2417
2418 fputs ("\nHard register information:\n", stderr);
2419 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2420 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2421 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2422 LAST_ALTIVEC_REGNO,
2423 "vs");
2424 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2425 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2426 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2427 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2428 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2429 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2430 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
2431 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
2432
2433 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2434 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2435 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2436
2437 fprintf (stderr,
2438 "\n"
2439 "d reg_class = %s\n"
2440 "f reg_class = %s\n"
2441 "v reg_class = %s\n"
2442 "wa reg_class = %s\n"
2443 "wb reg_class = %s\n"
2444 "wd reg_class = %s\n"
2445 "we reg_class = %s\n"
2446 "wf reg_class = %s\n"
2447 "wg reg_class = %s\n"
2448 "wh reg_class = %s\n"
2449 "wi reg_class = %s\n"
2450 "wj reg_class = %s\n"
2451 "wk reg_class = %s\n"
2452 "wl reg_class = %s\n"
2453 "wm reg_class = %s\n"
2454 "wo reg_class = %s\n"
2455 "wp reg_class = %s\n"
2456 "wq reg_class = %s\n"
2457 "wr reg_class = %s\n"
2458 "ws reg_class = %s\n"
2459 "wt reg_class = %s\n"
2460 "wu reg_class = %s\n"
2461 "wv reg_class = %s\n"
2462 "ww reg_class = %s\n"
2463 "wx reg_class = %s\n"
2464 "wy reg_class = %s\n"
2465 "wz reg_class = %s\n"
2466 "wH reg_class = %s\n"
2467 "wI reg_class = %s\n"
2468 "wJ reg_class = %s\n"
2469 "wK reg_class = %s\n"
2470 "\n",
2471 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2472 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2473 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2474 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2475 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
2476 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2477 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
2478 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2479 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2480 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2481 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2482 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2483 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2484 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2485 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2486 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
2487 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
2488 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
2489 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2490 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2491 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2492 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2493 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2494 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2495 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2496 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2497 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
2498 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
2499 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
2500 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
2501 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);
2502
2503 nl = "\n";
2504 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2505 rs6000_debug_print_mode (m);
2506
2507 fputs ("\n", stderr);
2508
2509 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2510 {
2511 machine_mode mode1 = print_tieable_modes[m1];
2512 bool first_time = true;
2513
2514 nl = (const char *)0;
2515 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2516 {
2517 machine_mode mode2 = print_tieable_modes[m2];
2518 if (mode1 != mode2 && MODES_TIEABLE_P (mode1, mode2))
2519 {
2520 if (first_time)
2521 {
2522 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2523 nl = "\n";
2524 first_time = false;
2525 }
2526
2527 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2528 }
2529 }
2530
2531 if (!first_time)
2532 fputs ("\n", stderr);
2533 }
2534
2535 if (nl)
2536 fputs (nl, stderr);
2537
2538 if (rs6000_recip_control)
2539 {
2540 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2541
2542 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2543 if (rs6000_recip_bits[m])
2544 {
2545 fprintf (stderr,
2546 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2547 GET_MODE_NAME (m),
2548 (RS6000_RECIP_AUTO_RE_P (m)
2549 ? "auto"
2550 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2551 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2552 ? "auto"
2553 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2554 }
2555
2556 fputs ("\n", stderr);
2557 }
2558
2559 if (rs6000_cpu_index >= 0)
2560 {
2561 const char *name = processor_target_table[rs6000_cpu_index].name;
2562 HOST_WIDE_INT flags
2563 = processor_target_table[rs6000_cpu_index].target_enable;
2564
2565 sprintf (flags_buffer, "-mcpu=%s flags", name);
2566 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2567 }
2568 else
2569 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2570
2571 if (rs6000_tune_index >= 0)
2572 {
2573 const char *name = processor_target_table[rs6000_tune_index].name;
2574 HOST_WIDE_INT flags
2575 = processor_target_table[rs6000_tune_index].target_enable;
2576
2577 sprintf (flags_buffer, "-mtune=%s flags", name);
2578 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2579 }
2580 else
2581 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2582
2583 cl_target_option_save (&cl_opts, &global_options);
2584 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2585 rs6000_isa_flags);
2586
2587 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2588 rs6000_isa_flags_explicit);
2589
2590 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2591 rs6000_builtin_mask);
2592
2593 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2594
2595 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2596 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2597
2598 switch (rs6000_sched_costly_dep)
2599 {
2600 case max_dep_latency:
2601 costly_str = "max_dep_latency";
2602 break;
2603
2604 case no_dep_costly:
2605 costly_str = "no_dep_costly";
2606 break;
2607
2608 case all_deps_costly:
2609 costly_str = "all_deps_costly";
2610 break;
2611
2612 case true_store_to_load_dep_costly:
2613 costly_str = "true_store_to_load_dep_costly";
2614 break;
2615
2616 case store_to_load_dep_costly:
2617 costly_str = "store_to_load_dep_costly";
2618 break;
2619
2620 default:
2621 costly_str = costly_num;
2622 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2623 break;
2624 }
2625
2626 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2627
2628 switch (rs6000_sched_insert_nops)
2629 {
2630 case sched_finish_regroup_exact:
2631 nop_str = "sched_finish_regroup_exact";
2632 break;
2633
2634 case sched_finish_pad_groups:
2635 nop_str = "sched_finish_pad_groups";
2636 break;
2637
2638 case sched_finish_none:
2639 nop_str = "sched_finish_none";
2640 break;
2641
2642 default:
2643 nop_str = nop_num;
2644 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2645 break;
2646 }
2647
2648 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2649
2650 switch (rs6000_sdata)
2651 {
2652 default:
2653 case SDATA_NONE:
2654 break;
2655
2656 case SDATA_DATA:
2657 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2658 break;
2659
2660 case SDATA_SYSV:
2661 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2662 break;
2663
2664 case SDATA_EABI:
2665 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2666 break;
2667
2668 }
2669
2670 switch (rs6000_traceback)
2671 {
2672 case traceback_default: trace_str = "default"; break;
2673 case traceback_none: trace_str = "none"; break;
2674 case traceback_part: trace_str = "part"; break;
2675 case traceback_full: trace_str = "full"; break;
2676 default: trace_str = "unknown"; break;
2677 }
2678
2679 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2680
2681 switch (rs6000_current_cmodel)
2682 {
2683 case CMODEL_SMALL: cmodel_str = "small"; break;
2684 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2685 case CMODEL_LARGE: cmodel_str = "large"; break;
2686 default: cmodel_str = "unknown"; break;
2687 }
2688
2689 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2690
2691 switch (rs6000_current_abi)
2692 {
2693 case ABI_NONE: abi_str = "none"; break;
2694 case ABI_AIX: abi_str = "aix"; break;
2695 case ABI_ELFv2: abi_str = "ELFv2"; break;
2696 case ABI_V4: abi_str = "V4"; break;
2697 case ABI_DARWIN: abi_str = "darwin"; break;
2698 default: abi_str = "unknown"; break;
2699 }
2700
2701 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2702
2703 if (rs6000_altivec_abi)
2704 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2705
2706 if (rs6000_spe_abi)
2707 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
2708
2709 if (rs6000_darwin64_abi)
2710 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2711
2712 if (rs6000_float_gprs)
2713 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
2714
2715 fprintf (stderr, DEBUG_FMT_S, "fprs",
2716 (TARGET_FPRS ? "true" : "false"));
2717
2718 fprintf (stderr, DEBUG_FMT_S, "single_float",
2719 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2720
2721 fprintf (stderr, DEBUG_FMT_S, "double_float",
2722 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2723
2724 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2725 (TARGET_SOFT_FLOAT ? "true" : "false"));
2726
2727 fprintf (stderr, DEBUG_FMT_S, "e500_single",
2728 (TARGET_E500_SINGLE ? "true" : "false"));
2729
2730 fprintf (stderr, DEBUG_FMT_S, "e500_double",
2731 (TARGET_E500_DOUBLE ? "true" : "false"));
2732
2733 if (TARGET_LINK_STACK)
2734 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2735
2736 fprintf (stderr, DEBUG_FMT_S, "lra", TARGET_LRA ? "true" : "false");
2737
2738 if (TARGET_P8_FUSION)
2739 {
2740 char options[80];
2741
2742 strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
2743 if (TARGET_TOC_FUSION)
2744 strcat (options, ", toc");
2745
2746 if (TARGET_P8_FUSION_SIGN)
2747 strcat (options, ", sign");
2748
2749 fprintf (stderr, DEBUG_FMT_S, "fusion", options);
2750 }
2751
2752 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2753 TARGET_SECURE_PLT ? "secure" : "bss");
2754 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2755 aix_struct_return ? "aix" : "sysv");
2756 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2757 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2758 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2759 tf[!!rs6000_align_branch_targets]);
2760 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2761 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2762 rs6000_long_double_type_size);
2763 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2764 (int)rs6000_sched_restricted_insns_priority);
2765 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2766 (int)END_BUILTINS);
2767 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2768 (int)RS6000_BUILTIN_COUNT);
2769
2770 fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
2771 (int)TARGET_FLOAT128_ENABLE_TYPE);
2772
2773 if (TARGET_VSX)
2774 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2775 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2776
2777 if (TARGET_DIRECT_MOVE_128)
2778 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
2779 (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
2780 }
2781
2782 \f
2783 /* Update the addr mask bits in reg_addr to help the secondary reload and
2784 legitimate address support figure out the appropriate addressing to
2785 use. */
2786
2787 static void
2788 rs6000_setup_reg_addr_masks (void)
2789 {
2790 ssize_t rc, reg, m, nregs;
2791 addr_mask_type any_addr_mask, addr_mask;
2792
2793 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2794 {
2795 machine_mode m2 = (machine_mode) m;
2796 bool complex_p = false;
2797 bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
2798 size_t msize;
2799
2800 if (COMPLEX_MODE_P (m2))
2801 {
2802 complex_p = true;
2803 m2 = GET_MODE_INNER (m2);
2804 }
2805
2806 msize = GET_MODE_SIZE (m2);
2807
2808 /* SDmode is special in that we want to access it only via REG+REG
2809 addressing on power7 and above, since we want to use the LFIWZX and
2810 STFIWX instructions to load and store it. */
2811 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2812
2813 any_addr_mask = 0;
2814 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2815 {
2816 addr_mask = 0;
2817 reg = reload_reg_map[rc].reg;
2818
2819 /* Can mode values go in the GPR/FPR/Altivec registers? */
2820 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2821 {
2822 bool small_int_vsx_p = (small_int_p
2823 && (rc == RELOAD_REG_FPR
2824 || rc == RELOAD_REG_VMX));
2825
2826 nregs = rs6000_hard_regno_nregs[m][reg];
2827 addr_mask |= RELOAD_REG_VALID;
2828
2829 /* Indicate if the mode takes more than 1 physical register. If
2830 it takes a single register, indicate it can do REG+REG
2831 addressing. Small integers in VSX registers can only do
2832 REG+REG addressing. */
2833 if (small_int_vsx_p)
2834 addr_mask |= RELOAD_REG_INDEXED;
2835 else if (nregs > 1 || m == BLKmode || complex_p)
2836 addr_mask |= RELOAD_REG_MULTIPLE;
2837 else
2838 addr_mask |= RELOAD_REG_INDEXED;
2839
2840 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2841 addressing. Restrict addressing on SPE for 64-bit types
2842 because of the SUBREG hackery used to address 64-bit floats in
2843 '32-bit' GPRs. If we allow scalars into Altivec registers,
2844 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY. */
2845
2846 if (TARGET_UPDATE
2847 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2848 && msize <= 8
2849 && !VECTOR_MODE_P (m2)
2850 && !FLOAT128_VECTOR_P (m2)
2851 && !complex_p
2852 && !small_int_vsx_p
2853 && (m2 != DFmode || !TARGET_UPPER_REGS_DF)
2854 && (m2 != SFmode || !TARGET_UPPER_REGS_SF)
2855 && !(TARGET_E500_DOUBLE && msize == 8))
2856 {
2857 addr_mask |= RELOAD_REG_PRE_INCDEC;
2858
2859 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2860 we don't allow PRE_MODIFY for some multi-register
2861 operations. */
2862 switch (m)
2863 {
2864 default:
2865 addr_mask |= RELOAD_REG_PRE_MODIFY;
2866 break;
2867
2868 case DImode:
2869 if (TARGET_POWERPC64)
2870 addr_mask |= RELOAD_REG_PRE_MODIFY;
2871 break;
2872
2873 case DFmode:
2874 case DDmode:
2875 if (TARGET_DF_INSN)
2876 addr_mask |= RELOAD_REG_PRE_MODIFY;
2877 break;
2878 }
2879 }
2880 }
2881
2882 /* GPR and FPR registers can do REG+OFFSET addressing, except
2883 possibly for SDmode. ISA 3.0 (i.e. power9) adds D-form addressing
2884 for 64-bit scalars and 32-bit SFmode to altivec registers. */
2885 if ((addr_mask != 0) && !indexed_only_p
2886 && msize <= 8
2887 && (rc == RELOAD_REG_GPR
2888 || ((msize == 8 || m2 == SFmode)
2889 && (rc == RELOAD_REG_FPR
2890 || (rc == RELOAD_REG_VMX
2891 && TARGET_P9_DFORM_SCALAR)))))
2892 addr_mask |= RELOAD_REG_OFFSET;
2893
2894 /* VSX registers can do REG+OFFSET addressing if ISA 3.0
2895 instructions are enabled. The offset for 128-bit VSX registers is
2896 only 12 bits. While GPRs can handle the full offset range, VSX
2897 registers can only handle the restricted range. */
2898 else if ((addr_mask != 0) && !indexed_only_p
2899 && msize == 16 && TARGET_P9_DFORM_VECTOR
2900 && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
2901 || (m2 == TImode && TARGET_VSX_TIMODE)))
2902 {
2903 addr_mask |= RELOAD_REG_OFFSET;
2904 if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
2905 addr_mask |= RELOAD_REG_QUAD_OFFSET;
2906 }
2907
2908 /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
2909 addressing on 128-bit types. */
2910 if (rc == RELOAD_REG_VMX && msize == 16
2911 && (addr_mask & RELOAD_REG_VALID) != 0)
2912 addr_mask |= RELOAD_REG_AND_M16;
2913
2914 reg_addr[m].addr_mask[rc] = addr_mask;
2915 any_addr_mask |= addr_mask;
2916 }
2917
2918 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2919 }
2920 }
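/* A sketch of the result for one mode (assuming a 64-bit power8-class target
   with -mupdate in effect): DImode in a GPR is valid, fits in one register
   (hence indexed), allows REG+OFFSET, and permits PRE_INC/PRE_DEC as well as
   PRE_MODIFY, i.e. an addr_mask that rs6000_debug_addr_mask prints as
   "vio++".  */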
2921
2922 \f
2923 /* Initialize the various global tables that are based on register size. */
2924 static void
2925 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2926 {
2927 ssize_t r, m, c;
2928 int align64;
2929 int align32;
2930
2931 /* Precalculate REGNO_REG_CLASS. */
2932 rs6000_regno_regclass[0] = GENERAL_REGS;
2933 for (r = 1; r < 32; ++r)
2934 rs6000_regno_regclass[r] = BASE_REGS;
2935
2936 for (r = 32; r < 64; ++r)
2937 rs6000_regno_regclass[r] = FLOAT_REGS;
2938
2939 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2940 rs6000_regno_regclass[r] = NO_REGS;
2941
2942 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2943 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2944
2945 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2946 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2947 rs6000_regno_regclass[r] = CR_REGS;
2948
2949 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2950 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2951 rs6000_regno_regclass[CA_REGNO] = NO_REGS;
2952 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2953 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2954 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
2955 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
2956 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
2957 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
2958 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
2959 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2960 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2961
2962 /* Precalculate the mapping from register class to simpler reload register
2963 class. We don't need all of the register classes that are combinations of
2964 different classes, just the simple ones that have constraint letters. */
2965 for (c = 0; c < N_REG_CLASSES; c++)
2966 reg_class_to_reg_type[c] = NO_REG_TYPE;
2967
2968 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
2969 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
2970 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
2971 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
2972 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
2973 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
2974 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
2975 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
2976 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
2977 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
2978 reg_class_to_reg_type[(int)SPE_ACC_REGS] = SPE_ACC_TYPE;
2979 reg_class_to_reg_type[(int)SPEFSCR_REGS] = SPEFSCR_REG_TYPE;
2980
2981 if (TARGET_VSX)
2982 {
2983 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
2984 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
2985 }
2986 else
2987 {
2988 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
2989 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
2990 }
2991
2992 /* Precalculate the valid memory formats as well as the vector information;
2993 this must be set up before the rs6000_hard_regno_nregs_internal calls
2994 below. */
2995 gcc_assert ((int)VECTOR_NONE == 0);
2996 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
2997 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));
2998
2999 gcc_assert ((int)CODE_FOR_nothing == 0);
3000 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
3001
3002 gcc_assert ((int)NO_REGS == 0);
3003 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
3004
3005 /* The VSX hardware allows native alignment for vectors; these settings
3006 control whether the compiler believes it can use native alignment or
3007 must still use 128-bit alignment. */
3007 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
3008 {
3009 align64 = 64;
3010 align32 = 32;
3011 }
3012 else
3013 {
3014 align64 = 128;
3015 align32 = 128;
3016 }
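/* Under the first branch (-mvsx without 128-bit alignment), V2DFmode below
   ends up with 64-bit alignment via align64, while V1TImode and the IEEE
   128-bit float modes stay pinned at 128 bits regardless.  */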
3017
3018 /* KF mode (IEEE 128-bit in VSX registers). We do not have arithmetic, so
3019 only set the memory modes. Include TFmode if -mabi=ieeelongdouble. */
3020 if (TARGET_FLOAT128_TYPE)
3021 {
3022 rs6000_vector_mem[KFmode] = VECTOR_VSX;
3023 rs6000_vector_align[KFmode] = 128;
3024
3025 if (FLOAT128_IEEE_P (TFmode))
3026 {
3027 rs6000_vector_mem[TFmode] = VECTOR_VSX;
3028 rs6000_vector_align[TFmode] = 128;
3029 }
3030 }
3031
3032 /* V2DF mode, VSX only. */
3033 if (TARGET_VSX)
3034 {
3035 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
3036 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
3037 rs6000_vector_align[V2DFmode] = align64;
3038 }
3039
3040 /* V4SF mode, either VSX or Altivec. */
3041 if (TARGET_VSX)
3042 {
3043 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
3044 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
3045 rs6000_vector_align[V4SFmode] = align32;
3046 }
3047 else if (TARGET_ALTIVEC)
3048 {
3049 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
3050 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
3051 rs6000_vector_align[V4SFmode] = align32;
3052 }
3053
3054 /* V16QImode, V8HImode, V4SImode have Altivec-only arithmetic, but loads
3055 and stores can possibly use VSX. */
3056 if (TARGET_ALTIVEC)
3057 {
3058 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
3059 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
3060 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
3061 rs6000_vector_align[V4SImode] = align32;
3062 rs6000_vector_align[V8HImode] = align32;
3063 rs6000_vector_align[V16QImode] = align32;
3064
3065 if (TARGET_VSX)
3066 {
3067 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
3068 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
3069 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
3070 }
3071 else
3072 {
3073 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
3074 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
3075 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
3076 }
3077 }
3078
3079 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
3080 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
3081 if (TARGET_VSX)
3082 {
3083 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
3084 rs6000_vector_unit[V2DImode]
3085 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3086 rs6000_vector_align[V2DImode] = align64;
3087
3088 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
3089 rs6000_vector_unit[V1TImode]
3090 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
3091 rs6000_vector_align[V1TImode] = 128;
3092 }
3093
3094 /* DFmode, see if we want to use the VSX unit. Memory is handled
3095 differently, so don't set rs6000_vector_mem. */
3096 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
3097 {
3098 rs6000_vector_unit[DFmode] = VECTOR_VSX;
3099 rs6000_vector_align[DFmode] = 64;
3100 }
3101
3102 /* SFmode, see if we want to use the VSX unit. */
3103 if (TARGET_P8_VECTOR && TARGET_VSX_SCALAR_FLOAT)
3104 {
3105 rs6000_vector_unit[SFmode] = VECTOR_VSX;
3106 rs6000_vector_align[SFmode] = 32;
3107 }
3108
3109 /* Allow TImode in VSX register and set the VSX memory macros. */
3110 if (TARGET_VSX && TARGET_VSX_TIMODE)
3111 {
3112 rs6000_vector_mem[TImode] = VECTOR_VSX;
3113 rs6000_vector_align[TImode] = align64;
3114 }
3115
3116 /* TODO add SPE and paired floating point vector support. */
3117
3118 /* Register class constraints for the constraints that depend on compile
3119 switches. When the VSX code was added, different constraints were added
3120 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
3121 of the VSX registers are used. The register classes for scalar floating
3122 point types is set, based on whether we allow that type into the upper
3123 (Altivec) registers. GCC has register classes to target the Altivec
3124 registers for load/store operations, to select using a VSX memory
3125 operation instead of the traditional floating point operation. The
3126 constraints are:
3127
3128 d - Register class to use with traditional DFmode instructions.
3129 f - Register class to use with traditional SFmode instructions.
3130 v - Altivec register.
3131 wa - Any VSX register.
3132 wc - Reserved to represent individual CR bits (used in LLVM).
3133 wd - Preferred register class for V2DFmode.
3134 wf - Preferred register class for V4SFmode.
3135 wg - Float register for power6x move insns.
3136 wh - FP register for direct move instructions.
3137 wi - FP or VSX register to hold 64-bit integers for VSX insns.
3138 wj - FP or VSX register to hold 64-bit integers for direct moves.
3139 wk - FP or VSX register to hold 64-bit doubles for direct moves.
3140 wl - Float register if we can do 32-bit signed int loads.
3141 wm - VSX register for ISA 2.07 direct move operations.
3142 wn - always NO_REGS.
3143 wr - GPR if 64-bit mode is permitted.
3144 ws - Register class to do ISA 2.06 DF operations.
3145 wt - VSX register for TImode in VSX registers.
3146 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
3147 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
3148 ww - Register class to do SF conversions in with VSX operations.
3149 wx - Float register if we can do 32-bit int stores.
3150 wy - Register class to do ISA 2.07 SF operations.
3151 wz - Float register if we can do 32-bit unsigned int loads.
3152 wH - Altivec register if SImode is allowed in VSX registers.
3153 wI - VSX register if SImode is allowed in VSX registers.
3154 wJ - VSX register if QImode/HImode are allowed in VSX registers.
3155 wK - Altivec register if QImode/HImode are allowed in VSX registers. */
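/* Any letter left unset below stays NO_REGS (cleared by the memset above),
   so insn alternatives using it can never match; e.g. without -mvsx the
   "wa" alternatives in the machine description are effectively disabled.  */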
3156
3157 if (TARGET_HARD_FLOAT && TARGET_FPRS)
3158 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
3159
3160 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
3161 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
3162
3163 if (TARGET_VSX)
3164 {
3165 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
3166 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
3167 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
3168
3169 if (TARGET_VSX_TIMODE)
3170 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
3171
3172 if (TARGET_UPPER_REGS_DF) /* DFmode */
3173 {
3174 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS;
3175 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;
3176 }
3177 else
3178 rs6000_constraints[RS6000_CONSTRAINT_ws] = FLOAT_REGS;
3179
3180 if (TARGET_UPPER_REGS_DF) /* DImode */
3181 rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS;
3182 else
3183 rs6000_constraints[RS6000_CONSTRAINT_wi] = FLOAT_REGS;
3184 }
3185
3186 /* Add conditional constraints based on various options, to allow us to
3187 collapse multiple insn patterns. */
3188 if (TARGET_ALTIVEC)
3189 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
3190
3191 if (TARGET_MFPGPR) /* DFmode */
3192 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
3193
3194 if (TARGET_LFIWAX)
3195 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
3196
3197 if (TARGET_DIRECT_MOVE)
3198 {
3199 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
3200 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
3201 = rs6000_constraints[RS6000_CONSTRAINT_wi];
3202 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
3203 = rs6000_constraints[RS6000_CONSTRAINT_ws];
3204 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
3205 }
3206
3207 if (TARGET_POWERPC64)
3208 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
3209
3210 if (TARGET_P8_VECTOR && TARGET_UPPER_REGS_SF) /* SFmode */
3211 {
3212 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
3213 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
3214 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
3215 }
3216 else if (TARGET_P8_VECTOR)
3217 {
3218 rs6000_constraints[RS6000_CONSTRAINT_wy] = FLOAT_REGS;
3219 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3220 }
3221 else if (TARGET_VSX)
3222 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
3223
3224 if (TARGET_STFIWX)
3225 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
3226
3227 if (TARGET_LFIWZX)
3228 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
3229
3230 if (TARGET_FLOAT128_TYPE)
3231 {
3232 rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS; /* KFmode */
3233 if (FLOAT128_IEEE_P (TFmode))
3234 rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS; /* TFmode */
3235 }
3236
3237 /* Support for new D-form instructions. */
3238 if (TARGET_P9_DFORM_SCALAR)
3239 rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;
3240
3241 /* Support for ISA 3.0 (power9) vectors. */
3242 if (TARGET_P9_VECTOR)
3243 rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
3244
3245 /* Support for new direct moves (ISA 3.0 + 64bit). */
3246 if (TARGET_DIRECT_MOVE_128)
3247 rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;
3248
3249 /* Support small integers in VSX registers. */
3250 if (TARGET_VSX_SMALL_INTEGER)
3251 {
3252 rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
3253 rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
3254 if (TARGET_P9_VECTOR)
3255 {
3256 rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
3257 rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
3258 }
3259 }
3260
3261 /* Set up the reload helper and direct move functions. */
3262 if (TARGET_VSX || TARGET_ALTIVEC)
3263 {
3264 if (TARGET_64BIT)
3265 {
3266 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
3267 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
3268 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
3269 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
3270 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
3271 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
3272 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
3273 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
3274 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
3275 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
3276 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
3277 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
3278 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
3279 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
3280 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
3281 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
3282 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
3283 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
3284 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
3285 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
3286
3287 if (FLOAT128_VECTOR_P (KFmode))
3288 {
3289 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
3290 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_di_load;
3291 }
3292
3293 if (FLOAT128_VECTOR_P (TFmode))
3294 {
3295 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
3296 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_di_load;
3297 }
3298
3299 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3300 available. */
3301 if (TARGET_NO_SDMODE_STACK)
3302 {
3303 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
3304 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
3305 }
3306
3307 if (TARGET_VSX_TIMODE)
3308 {
3309 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
3310 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
3311 }
3312
3313 if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
3314 {
3315 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
3316 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
3317 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
3318 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
3319 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
3320 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
3321 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
3322 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
3323 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
3324
3325 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
3326 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
3327 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
3328 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
3329 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
3330 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
3331 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
3332 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
3333 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
3334
3335 if (FLOAT128_VECTOR_P (KFmode))
3336 {
3337 reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
3338 reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
3339 }
3340
3341 if (FLOAT128_VECTOR_P (TFmode))
3342 {
3343 reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
3344 reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
3345 }
3346 }
3347 }
3348 else
3349 {
3350 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
3351 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
3352 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
3353 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
3354 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
3355 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
3356 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
3357 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
3358 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
3359 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
3360 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
3361 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
3362 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
3363 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
3364 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
3365 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
3366 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
3367 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
3368 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
3369 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
3370
3371 if (FLOAT128_VECTOR_P (KFmode))
3372 {
3373 reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
3374 reg_addr[KFmode].reload_load = CODE_FOR_reload_kf_si_load;
3375 }
3376
3377 if (FLOAT128_IEEE_P (TFmode))
3378 {
3379 reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
3380 reg_addr[TFmode].reload_load = CODE_FOR_reload_tf_si_load;
3381 }
3382
3383 /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
3384 available. */
3385 if (TARGET_NO_SDMODE_STACK)
3386 {
3387 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
3388 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
3389 }
3390
3391 if (TARGET_VSX_TIMODE)
3392 {
3393 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
3394 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
3395 }
3396
3397 if (TARGET_DIRECT_MOVE)
3398 {
3399 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
3400 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
3401 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
3402 }
3403 }
3404
3405 if (TARGET_UPPER_REGS_DF)
3406 reg_addr[DFmode].scalar_in_vmx_p = true;
3407
3408 if (TARGET_UPPER_REGS_DI)
3409 reg_addr[DImode].scalar_in_vmx_p = true;
3410
3411 if (TARGET_UPPER_REGS_SF)
3412 reg_addr[SFmode].scalar_in_vmx_p = true;
3413
3414 if (TARGET_VSX_SMALL_INTEGER)
3415 {
3416 reg_addr[SImode].scalar_in_vmx_p = true;
3417 if (TARGET_P9_VECTOR)
3418 {
3419 reg_addr[HImode].scalar_in_vmx_p = true;
3420 reg_addr[QImode].scalar_in_vmx_p = true;
3421 }
3422 }
3423 }
3424
3425 /* Setup the fusion operations. */
3426 if (TARGET_P8_FUSION)
3427 {
3428 reg_addr[QImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_qi;
3429 reg_addr[HImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_hi;
3430 reg_addr[SImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_si;
3431 if (TARGET_64BIT)
3432 reg_addr[DImode].fusion_gpr_ld = CODE_FOR_fusion_gpr_load_di;
3433 }
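
/* A sketch of the code these fusion_gpr_ld patterns target, assuming the
   medium code model (names illustrative only):

       extern int counter;

       int
       get_counter (void)
       {
         return counter;    // emitted as an addis/lwz pair, e.g.
                            //   addis 9,2,counter@toc@ha
                            //   lwz 3,counter@toc@l(9)
                            // which power8 can fuse when kept adjacent
       }
 */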
3434
3435 if (TARGET_P9_FUSION)
3436 {
3437 struct fuse_insns {
3438 enum machine_mode mode; /* mode of the fused type. */
3439 enum machine_mode pmode; /* pointer mode. */
3440 enum rs6000_reload_reg_type rtype; /* register type. */
3441 enum insn_code load; /* load insn. */
3442 enum insn_code store; /* store insn. */
3443 };
3444
3445 static const struct fuse_insns addis_insns[] = {
3446 { SFmode, DImode, RELOAD_REG_FPR,
3447 CODE_FOR_fusion_vsx_di_sf_load,
3448 CODE_FOR_fusion_vsx_di_sf_store },
3449
3450 { SFmode, SImode, RELOAD_REG_FPR,
3451 CODE_FOR_fusion_vsx_si_sf_load,
3452 CODE_FOR_fusion_vsx_si_sf_store },
3453
3454 { DFmode, DImode, RELOAD_REG_FPR,
3455 CODE_FOR_fusion_vsx_di_df_load,
3456 CODE_FOR_fusion_vsx_di_df_store },
3457
3458 { DFmode, SImode, RELOAD_REG_FPR,
3459 CODE_FOR_fusion_vsx_si_df_load,
3460 CODE_FOR_fusion_vsx_si_df_store },
3461
3462 { DImode, DImode, RELOAD_REG_FPR,
3463 CODE_FOR_fusion_vsx_di_di_load,
3464 CODE_FOR_fusion_vsx_di_di_store },
3465
3466 { DImode, SImode, RELOAD_REG_FPR,
3467 CODE_FOR_fusion_vsx_si_di_load,
3468 CODE_FOR_fusion_vsx_si_di_store },
3469
3470 { QImode, DImode, RELOAD_REG_GPR,
3471 CODE_FOR_fusion_gpr_di_qi_load,
3472 CODE_FOR_fusion_gpr_di_qi_store },
3473
3474 { QImode, SImode, RELOAD_REG_GPR,
3475 CODE_FOR_fusion_gpr_si_qi_load,
3476 CODE_FOR_fusion_gpr_si_qi_store },
3477
3478 { HImode, DImode, RELOAD_REG_GPR,
3479 CODE_FOR_fusion_gpr_di_hi_load,
3480 CODE_FOR_fusion_gpr_di_hi_store },
3481
3482 { HImode, SImode, RELOAD_REG_GPR,
3483 CODE_FOR_fusion_gpr_si_hi_load,
3484 CODE_FOR_fusion_gpr_si_hi_store },
3485
3486 { SImode, DImode, RELOAD_REG_GPR,
3487 CODE_FOR_fusion_gpr_di_si_load,
3488 CODE_FOR_fusion_gpr_di_si_store },
3489
3490 { SImode, SImode, RELOAD_REG_GPR,
3491 CODE_FOR_fusion_gpr_si_si_load,
3492 CODE_FOR_fusion_gpr_si_si_store },
3493
3494 { SFmode, DImode, RELOAD_REG_GPR,
3495 CODE_FOR_fusion_gpr_di_sf_load,
3496 CODE_FOR_fusion_gpr_di_sf_store },
3497
3498 { SFmode, SImode, RELOAD_REG_GPR,
3499 CODE_FOR_fusion_gpr_si_sf_load,
3500 CODE_FOR_fusion_gpr_si_sf_store },
3501
3502 { DImode, DImode, RELOAD_REG_GPR,
3503 CODE_FOR_fusion_gpr_di_di_load,
3504 CODE_FOR_fusion_gpr_di_di_store },
3505
3506 { DFmode, DImode, RELOAD_REG_GPR,
3507 CODE_FOR_fusion_gpr_di_df_load,
3508 CODE_FOR_fusion_gpr_di_df_store },
3509 };
3510
3511 enum machine_mode cur_pmode = Pmode;
3512 size_t i;
3513
3514 for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
3515 {
3516 enum machine_mode xmode = addis_insns[i].mode;
3517 enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;
3518
3519 if (addis_insns[i].pmode != cur_pmode)
3520 continue;
3521
3522 if (rtype == RELOAD_REG_FPR
3523 && (!TARGET_HARD_FLOAT || !TARGET_FPRS))
3524 continue;
3525
3526 reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
3527 reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;
3528
3529 if (rtype == RELOAD_REG_FPR && TARGET_P9_DFORM_SCALAR)
3530 {
3531 reg_addr[xmode].fusion_addis_ld[RELOAD_REG_VMX]
3532 = addis_insns[i].load;
3533 reg_addr[xmode].fusion_addis_st[RELOAD_REG_VMX]
3534 = addis_insns[i].store;
3535 }
3536 }
3537 }
3538
3539 /* Note which types support fusing a TOC setup plus a memory insn. We only
3540 do fused TOCs for medium/large code models. */
3541 if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
3542 && (TARGET_CMODEL != CMODEL_SMALL))
3543 {
3544 reg_addr[QImode].fused_toc = true;
3545 reg_addr[HImode].fused_toc = true;
3546 reg_addr[SImode].fused_toc = true;
3547 reg_addr[DImode].fused_toc = true;
3548 if (TARGET_HARD_FLOAT && TARGET_FPRS)
3549 {
3550 if (TARGET_SINGLE_FLOAT)
3551 reg_addr[SFmode].fused_toc = true;
3552 if (TARGET_DOUBLE_FLOAT)
3553 reg_addr[DFmode].fused_toc = true;
3554 }
3555 }
3556
3557 /* Precalculate HARD_REGNO_NREGS. */
3558 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3559 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3560 rs6000_hard_regno_nregs[m][r]
3561 = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);
3562
3563 /* Precalculate HARD_REGNO_MODE_OK. */
3564 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
3565 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3566 if (rs6000_hard_regno_mode_ok (r, (machine_mode)m))
3567 rs6000_hard_regno_mode_ok_p[m][r] = true;
3568
3569 /* Precalculate CLASS_MAX_NREGS sizes. */
3570 for (c = 0; c < LIM_REG_CLASSES; ++c)
3571 {
3572 int reg_size;
3573
3574 if (TARGET_VSX && VSX_REG_CLASS_P (c))
3575 reg_size = UNITS_PER_VSX_WORD;
3576
3577 else if (c == ALTIVEC_REGS)
3578 reg_size = UNITS_PER_ALTIVEC_WORD;
3579
3580 else if (c == FLOAT_REGS)
3581 reg_size = UNITS_PER_FP_WORD;
3582
3583 else
3584 reg_size = UNITS_PER_WORD;
3585
3586 for (m = 0; m < NUM_MACHINE_MODES; ++m)
3587 {
3588 machine_mode m2 = (machine_mode)m;
3589 int reg_size2 = reg_size;
3590
3591 /* TDmode and IBM 128-bit floating point always take 2 registers, even
3592 in VSX. */
3593 if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
3594 reg_size2 = UNITS_PER_FP_WORD;
3595
3596 rs6000_class_max_nregs[m][c]
3597 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
3598 }
3599 }
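
/* A worked instance of the ceiling division above: V2DFmode is 16 bytes,
   so in FLOAT_REGS (reg_size == UNITS_PER_FP_WORD == 8) it needs
   (16 + 8 - 1) / 8 = 2 registers, while in VSX_REGS (reg_size == 16) it
   needs (16 + 16 - 1) / 16 = 1.  */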
3600
3601 if (TARGET_E500_DOUBLE)
3602 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
3603
3604 /* Calculate which modes to automatically generate code to use the
3605 reciprocal divide and square root instructions. In the future, possibly
3606 automatically generate the instructions even if the user did not specify
3607 -mrecip. The older machines' double precision reciprocal sqrt estimate is
3608 not accurate enough. */
3609 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
3610 if (TARGET_FRES)
3611 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3612 if (TARGET_FRE)
3613 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3614 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3615 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
3616 if (VECTOR_UNIT_VSX_P (V2DFmode))
3617 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
3618
3619 if (TARGET_FRSQRTES)
3620 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3621 if (TARGET_FRSQRTE)
3622 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3623 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
3624 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3625 if (VECTOR_UNIT_VSX_P (V2DFmode))
3626 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
3627
3628 if (rs6000_recip_control)
3629 {
3630 if (!flag_finite_math_only)
3631 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
3632 if (flag_trapping_math)
3633 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
3634 if (!flag_reciprocal_math)
3635 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
3636 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
3637 {
3638 if (RS6000_RECIP_HAVE_RE_P (SFmode)
3639 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
3640 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3641
3642 if (RS6000_RECIP_HAVE_RE_P (DFmode)
3643 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
3644 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3645
3646 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
3647 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
3648 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3649
3650 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
3651 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
3652 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
3653
3654 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
3655 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
3656 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3657
3658 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
3659 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
3660 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3661
3662 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
3663 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
3664 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3665
3666 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
3667 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
3668 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
3669 }
3670 }
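
/* The refinement these AUTO_RE/AUTO_RSQRTE bits enable is classic
   Newton-Raphson; a scalar model of the division case (the initial
   estimate stands in for what the fre/fres instructions would produce):

       double
       recip_div (double a, double d, double estimate)
       {
         double e = estimate;
         e = e * (2.0 - d * e);   // each step roughly doubles the
         e = e * (2.0 - d * e);   // number of correct bits
         return a * e;            // a/d without a divide instruction
       }
 */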
3671
3672 /* Update the addr mask bits in reg_addr to help secondary reload and the
3673 legitimate address support figure out the appropriate addressing to
3674 use. */
3675 rs6000_setup_reg_addr_masks ();
3676
3677 if (global_init_p || TARGET_DEBUG_TARGET)
3678 {
3679 if (TARGET_DEBUG_REG)
3680 rs6000_debug_reg_global ();
3681
3682 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
3683 fprintf (stderr,
3684 "SImode variable mult cost = %d\n"
3685 "SImode constant mult cost = %d\n"
3686 "SImode short constant mult cost = %d\n"
3687 "DImode multipliciation cost = %d\n"
3688 "SImode division cost = %d\n"
3689 "DImode division cost = %d\n"
3690 "Simple fp operation cost = %d\n"
3691 "DFmode multiplication cost = %d\n"
3692 "SFmode division cost = %d\n"
3693 "DFmode division cost = %d\n"
3694 "cache line size = %d\n"
3695 "l1 cache size = %d\n"
3696 "l2 cache size = %d\n"
3697 "simultaneous prefetches = %d\n"
3698 "\n",
3699 rs6000_cost->mulsi,
3700 rs6000_cost->mulsi_const,
3701 rs6000_cost->mulsi_const9,
3702 rs6000_cost->muldi,
3703 rs6000_cost->divsi,
3704 rs6000_cost->divdi,
3705 rs6000_cost->fp,
3706 rs6000_cost->dmul,
3707 rs6000_cost->sdiv,
3708 rs6000_cost->ddiv,
3709 rs6000_cost->cache_line_size,
3710 rs6000_cost->l1_cache_size,
3711 rs6000_cost->l2_cache_size,
3712 rs6000_cost->simultaneous_prefetches);
3713 }
3714 }
3715
3716 #if TARGET_MACHO
3717 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3718
3719 static void
3720 darwin_rs6000_override_options (void)
3721 {
3722 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3723 off. */
3724 rs6000_altivec_abi = 1;
3725 TARGET_ALTIVEC_VRSAVE = 1;
3726 rs6000_current_abi = ABI_DARWIN;
3727
3728 if (DEFAULT_ABI == ABI_DARWIN
3729 && TARGET_64BIT)
3730 darwin_one_byte_bool = 1;
3731
3732 if (TARGET_64BIT && ! TARGET_POWERPC64)
3733 {
3734 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3735 warning (0, "-m64 requires PowerPC64 architecture, enabling");
3736 }
3737 if (flag_mkernel)
3738 {
3739 rs6000_default_long_calls = 1;
3740 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3741 }
3742
3743 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3744 Altivec. */
3745 if (!flag_mkernel && !flag_apple_kext
3746 && TARGET_64BIT
3747 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3748 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3749
3750 /* Unless the user (not the configurer) has explicitly overridden
3751 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to
3752 G4 unless targeting the kernel. */
3753 if (!flag_mkernel
3754 && !flag_apple_kext
3755 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3756 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3757 && ! global_options_set.x_rs6000_cpu_index)
3758 {
3759 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3760 }
3761 }
3762 #endif
3763
3764 /* If not otherwise specified by a target, make 'long double' equivalent to
3765 'double'. */
3766
3767 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3768 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3769 #endif
3770
3771 /* Return the builtin mask of the various options that affect which
3772 builtins are enabled. In the past we used target_flags, but we've run out
3773 of bits, and some options like SPE and PAIRED are no longer in
3774 target_flags. */
3775
3776 HOST_WIDE_INT
3777 rs6000_builtin_mask_calculate (void)
3778 {
3779 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3780 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3781 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
3782 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3783 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3784 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3785 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3786 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3787 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3788 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3789 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3790 | ((TARGET_P9_VECTOR) ? RS6000_BTM_P9_VECTOR : 0)
3791 | ((TARGET_P9_MISC) ? RS6000_BTM_P9_MISC : 0)
3792 | ((TARGET_MODULO) ? RS6000_BTM_MODULO : 0)
3793 | ((TARGET_64BIT) ? RS6000_BTM_64BIT : 0)
3794 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3795 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3796 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3797 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3798 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0)
3799 | ((TARGET_FLOAT128_TYPE) ? RS6000_BTM_FLOAT128 : 0));
3800 }
3801
3802 /* Implement TARGET_MD_ASM_ADJUST. All asm statements are considered
3803 to clobber the XER[CA] bit because clobbering that bit without telling
3804 the compiler worked just fine with versions of GCC before GCC 5, and
3805 breaking a lot of older code in ways that are hard to track down is
3806 not such a great idea. */
3807
3808 static rtx_insn *
3809 rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
3810 vec<const char *> &/*constraints*/,
3811 vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
3812 {
3813 clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
3814 SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
3815 return NULL;
3816 }
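
/* A sketch of the kind of pre-GCC-5 asm this protects, using the carry
   bit without declaring it (a classic "normalize to 0/1" idiom):

       int
       is_nonzero (long x)
       {
         long t;
         __asm__ ("addic %0,%1,-1\n\t"   // sets CA iff x != 0
                  "subfe %0,%0,%1"        // consumes CA: t = CA
                  : "=&r" (t) : "r" (x));
         return t;
       }

   Nothing in the asm tells the compiler that XER[CA] is changed, so the
   hook above conservatively clobbers it for every asm statement.  */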
3817
3818 /* Override command line options. Mostly we process the processor type and
3819 sometimes adjust other TARGET_ options. */
3820
3821 static bool
3822 rs6000_option_override_internal (bool global_init_p)
3823 {
3824 bool ret = true;
3825 bool have_cpu = false;
3826
3827 /* The default cpu requested at configure time, if any. */
3828 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
3829
3830 HOST_WIDE_INT set_masks;
3831 int cpu_index;
3832 int tune_index;
3833 struct cl_target_option *main_target_opt
3834 = ((global_init_p || target_option_default_node == NULL)
3835 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3836
3837 /* Print defaults. */
3838 if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
3839 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
3840
3841 /* Remember the explicit arguments. */
3842 if (global_init_p)
3843 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3844
3845 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3846 library functions, so warn about it. The flag may be useful for
3847 performance studies from time to time though, so don't disable it
3848 entirely. */
3849 if (global_options_set.x_rs6000_alignment_flags
3850 && rs6000_alignment_flags == MASK_ALIGN_POWER
3851 && DEFAULT_ABI == ABI_DARWIN
3852 && TARGET_64BIT)
3853 warning (0, "-malign-power is not supported for 64-bit Darwin;"
3854 " it is incompatible with the installed C and C++ libraries");
3855
3856 /* Numerous experiments show that IRA-based loop pressure
3857 calculation works better for RTL loop invariant motion on targets
3858 with enough (>= 32) registers. It is an expensive optimization,
3859 so it is enabled only for peak performance. */
3860 if (optimize >= 3 && global_init_p
3861 && !global_options_set.x_flag_ira_loop_pressure)
3862 flag_ira_loop_pressure = 1;
3863
3864 /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
3865 for tracebacks to be complete but not if any -fasynchronous-unwind-tables
3866 options were already specified. */
3867 if (flag_sanitize & SANITIZE_USER_ADDRESS
3868 && !global_options_set.x_flag_asynchronous_unwind_tables)
3869 flag_asynchronous_unwind_tables = 1;
3870
3871 /* Set the pointer size. */
3872 if (TARGET_64BIT)
3873 {
3874 rs6000_pmode = (int)DImode;
3875 rs6000_pointer_size = 64;
3876 }
3877 else
3878 {
3879 rs6000_pmode = (int)SImode;
3880 rs6000_pointer_size = 32;
3881 }
3882
3883 /* Some OSs don't support saving the high part of 64-bit registers on context
3884 switch. Other OSs don't support saving Altivec registers. On those OSs,
3885 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3886 if the user wants either, the user must explicitly specify them and we
3887 won't interfere with the user's specification. */
3888
3889 set_masks = POWERPC_MASKS;
3890 #ifdef OS_MISSING_POWERPC64
3891 if (OS_MISSING_POWERPC64)
3892 set_masks &= ~OPTION_MASK_POWERPC64;
3893 #endif
3894 #ifdef OS_MISSING_ALTIVEC
3895 if (OS_MISSING_ALTIVEC)
3896 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX);
3897 #endif
3898
3899 /* Don't override by the processor default if given explicitly. */
3900 set_masks &= ~rs6000_isa_flags_explicit;
3901
3902 /* Process the -mcpu=<xxx> and -mtune=<xxx> arguments. If the user changed
3903 the cpu in a target attribute or pragma, but did not specify a tuning
3904 option, use the cpu for the tuning option rather than the option specified
3905 with -mtune on the command line. Process a '--with-cpu' configuration
3906 request as an implicit -mcpu. */
3907 if (rs6000_cpu_index >= 0)
3908 {
3909 cpu_index = rs6000_cpu_index;
3910 have_cpu = true;
3911 }
3912 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3913 {
3914 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
3915 have_cpu = true;
3916 }
3917 else if (implicit_cpu)
3918 {
3919 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
3920 have_cpu = true;
3921 }
3922 else
3923 {
3924 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3925 const char *default_cpu = ((!TARGET_POWERPC64)
3926 ? "powerpc"
3927 : ((BYTES_BIG_ENDIAN)
3928 ? "powerpc64"
3929 : "powerpc64le"));
3930
3931 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
3932 have_cpu = false;
3933 }
3934
3935 gcc_assert (cpu_index >= 0);
3936
3937 if (have_cpu)
3938 {
3939 #ifndef HAVE_AS_POWER9
3940 if (processor_target_table[rs6000_cpu_index].processor
3941 == PROCESSOR_POWER9)
3942 {
3943 have_cpu = false;
3944 warning (0, "will not generate power9 instructions because "
3945 "assembler lacks power9 support");
3946 }
3947 #endif
3948 #ifndef HAVE_AS_POWER8
3949 if (processor_target_table[rs6000_cpu_index].processor
3950 == PROCESSOR_POWER8)
3951 {
3952 have_cpu = false;
3953 warning (0, "will not generate power8 instructions because "
3954 "assembler lacks power8 support");
3955 }
3956 #endif
3957 #ifndef HAVE_AS_POPCNTD
3958 if (processor_target_table[rs6000_cpu_index].processor
3959 == PROCESSOR_POWER7)
3960 {
3961 have_cpu = false;
3962 warning (0, "will not generate power7 instructions because "
3963 "assembler lacks power7 support");
3964 }
3965 #endif
3966 #ifndef HAVE_AS_DFP
3967 if (processor_target_table[rs6000_cpu_index].processor
3968 == PROCESSOR_POWER6)
3969 {
3970 have_cpu = false;
3971 warning (0, "will not generate power6 instructions because "
3972 "assembler lacks power6 support");
3973 }
3974 #endif
3975 #ifndef HAVE_AS_POPCNTB
3976 if (processor_target_table[rs6000_cpu_index].processor
3977 == PROCESSOR_POWER5)
3978 {
3979 have_cpu = false;
3980 warning (0, "will not generate power5 instructions because "
3981 "assembler lacks power5 support");
3982 }
3983 #endif
3984
3985 if (!have_cpu)
3986 {
3987 /* PowerPC 64-bit LE requires at least ISA 2.07. */
3988 const char *default_cpu = (!TARGET_POWERPC64
3989 ? "powerpc"
3990 : (BYTES_BIG_ENDIAN
3991 ? "powerpc64"
3992 : "powerpc64le"));
3993
3994 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
3995 }
3996 }
3997
3998 /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
3999 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
4000 with those from the cpu, except for options that were explicitly set. If
4001 we don't have a cpu, do not override the target bits set in
4002 TARGET_DEFAULT. */
4003 if (have_cpu)
4004 {
4005 rs6000_isa_flags &= ~set_masks;
4006 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
4007 & set_masks);
4008 }
4009 else
4010 {
4011 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
4012 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
4013 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook. When we switched
4014 to using rs6000_isa_flags, we need to do the initialization here.
4015
4016 If there is a TARGET_DEFAULT, use that. Otherwise fall back to using
4017 -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults. */
4018 HOST_WIDE_INT flags = ((TARGET_DEFAULT) ? TARGET_DEFAULT
4019 : processor_target_table[cpu_index].target_enable);
4020 rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
4021 }
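
/* The masking idiom used in both arms above, as a sketch (the helper
   name is illustrative only): bits the user set explicitly were removed
   from set_masks earlier, so the cpu table can only touch options the
   user left alone.

       static HOST_WIDE_INT
       merge_isa_flags (HOST_WIDE_INT flags, HOST_WIDE_INT cpu_bits,
                        HOST_WIDE_INT editable)
       {
         return (flags & ~editable) | (cpu_bits & editable);
       }

   The have_cpu arm is merge_isa_flags (rs6000_isa_flags,
   processor_target_table[cpu_index].target_enable, set_masks).  */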
4022
4023 if (rs6000_tune_index >= 0)
4024 tune_index = rs6000_tune_index;
4025 else if (have_cpu)
4026 rs6000_tune_index = tune_index = cpu_index;
4027 else
4028 {
4029 size_t i;
4030 enum processor_type tune_proc
4031 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
4032
4033 tune_index = -1;
4034 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
4035 if (processor_target_table[i].processor == tune_proc)
4036 {
4037 rs6000_tune_index = tune_index = i;
4038 break;
4039 }
4040 }
4041
4042 gcc_assert (tune_index >= 0);
4043 rs6000_cpu = processor_target_table[tune_index].processor;
4044
4045 /* Pick defaults for SPE related control flags. Do this early to make sure
4046 that the TARGET_ macros are representative ASAP. */
4047 {
4048 int spe_capable_cpu =
4049 (rs6000_cpu == PROCESSOR_PPC8540
4050 || rs6000_cpu == PROCESSOR_PPC8548);
4051
4052 if (!global_options_set.x_rs6000_spe_abi)
4053 rs6000_spe_abi = spe_capable_cpu;
4054
4055 if (!global_options_set.x_rs6000_spe)
4056 rs6000_spe = spe_capable_cpu;
4057
4058 if (!global_options_set.x_rs6000_float_gprs)
4059 rs6000_float_gprs =
4060 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
4061 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
4062 : 0);
4063 }
4064
4065 if (global_options_set.x_rs6000_spe_abi
4066 && rs6000_spe_abi
4067 && !TARGET_SPE_ABI)
4068 error ("not configured for SPE ABI");
4069
4070 if (global_options_set.x_rs6000_spe
4071 && rs6000_spe
4072 && !TARGET_SPE)
4073 error ("not configured for SPE instruction set");
4074
4075 if (main_target_opt != NULL
4076 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
4077 || (main_target_opt->x_rs6000_spe != rs6000_spe)
4078 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
4079 error ("target attribute or pragma changes SPE ABI");
4080
4081 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
4082 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
4083 || rs6000_cpu == PROCESSOR_PPCE5500)
4084 {
4085 if (TARGET_ALTIVEC)
4086 error ("AltiVec not supported in this target");
4087 if (TARGET_SPE)
4088 error ("SPE not supported in this target");
4089 }
4090 if (rs6000_cpu == PROCESSOR_PPCE6500)
4091 {
4092 if (TARGET_SPE)
4093 error ("SPE not supported in this target");
4094 }
4095
4096 /* Disable Cell microcode if we are optimizing for the Cell
4097 and not optimizing for size. */
4098 if (rs6000_gen_cell_microcode == -1)
4099 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
4100 && !optimize_size);
4101
4102 /* If we are optimizing big endian systems for space and it's OK to
4103 use instructions that would be microcoded on the Cell, use the
4104 load/store multiple and string instructions. */
4105 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
4106 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
4107 | OPTION_MASK_STRING);
4108
4109 /* Don't allow -mmultiple or -mstring on little endian systems
4110 unless the cpu is a 750, because the hardware doesn't support the
4111 instructions used in little endian mode, and using them causes an
4112 alignment trap. The 750 does not cause an alignment trap (except
4113 when the target address is unaligned). */
4114
4115 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
4116 {
4117 if (TARGET_MULTIPLE)
4118 {
4119 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
4120 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
4121 warning (0, "-mmultiple is not supported on little endian systems");
4122 }
4123
4124 if (TARGET_STRING)
4125 {
4126 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4127 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
4128 warning (0, "-mstring is not supported on little endian systems");
4129 }
4130 }
4131
4132 /* If little-endian, default to -mstrict-align on older processors.
4133 Testing for htm matches power8 and later. */
4134 if (!BYTES_BIG_ENDIAN
4135 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
4136 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
4137
4138 /* -maltivec={le,be} implies -maltivec. */
4139 if (rs6000_altivec_element_order != 0)
4140 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
4141
4142 /* Disallow -maltivec=le in big endian mode for now. This is not
4143 known to be useful for anyone. */
4144 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
4145 {
4146 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
4147 rs6000_altivec_element_order = 0;
4148 }
4149
4150 /* Add some warnings for VSX. */
4151 if (TARGET_VSX)
4152 {
4153 const char *msg = NULL;
4154 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
4155 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
4156 {
4157 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4158 msg = N_("-mvsx requires hardware floating point");
4159 else
4160 {
4161 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4162 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4163 }
4164 }
4165 else if (TARGET_PAIRED_FLOAT)
4166 msg = N_("-mvsx and -mpaired are incompatible");
4167 else if (TARGET_AVOID_XFORM > 0)
4168 msg = N_("-mvsx needs indexed addressing");
4169 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
4170 & OPTION_MASK_ALTIVEC))
4171 {
4172 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
4173 msg = N_("-mvsx and -mno-altivec are incompatible");
4174 else
4175 msg = N_("-mno-altivec disables vsx");
4176 }
4177
4178 if (msg)
4179 {
4180 warning (0, msg);
4181 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
4182 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
4183 }
4184 }
4185
4186 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
4187 the -mcpu setting to enable options that conflict. */
4188 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
4189 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
4190 | OPTION_MASK_ALTIVEC
4191 | OPTION_MASK_VSX)) != 0)
4192 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
4193 | OPTION_MASK_DIRECT_MOVE)
4194 & ~rs6000_isa_flags_explicit);
4195
4196 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4197 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
4198
4199 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
4200 unless the user explicitly used the -mno-<option> to disable the code. */
4201 if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_DFORM_SCALAR
4202 || TARGET_P9_DFORM_VECTOR || TARGET_P9_DFORM_BOTH > 0 || TARGET_P9_MINMAX)
4203 rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~rs6000_isa_flags_explicit);
4204 else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
4205 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~rs6000_isa_flags_explicit);
4206 else if (TARGET_VSX)
4207 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~rs6000_isa_flags_explicit);
4208 else if (TARGET_POPCNTD)
4209 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
4210 else if (TARGET_DFP)
4211 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~rs6000_isa_flags_explicit);
4212 else if (TARGET_CMPB)
4213 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
4214 else if (TARGET_FPRND)
4215 rs6000_isa_flags |= (ISA_2_4_MASKS & ~rs6000_isa_flags_explicit);
4216 else if (TARGET_POPCNTB)
4217 rs6000_isa_flags |= (ISA_2_2_MASKS & ~rs6000_isa_flags_explicit);
4218 else if (TARGET_ALTIVEC)
4219 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~rs6000_isa_flags_explicit);
4220
4221 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
4222 {
4223 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
4224 error ("-mcrypto requires -maltivec");
4225 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
4226 }
4227
4228 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
4229 {
4230 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
4231 error ("-mdirect-move requires -mvsx");
4232 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
4233 }
4234
4235 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
4236 {
4237 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4238 error ("-mpower8-vector requires -maltivec");
4239 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4240 }
4241
4242 if (TARGET_P8_VECTOR && !TARGET_VSX)
4243 {
4244 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4245 error ("-mpower8-vector requires -mvsx");
4246 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
4247 }
4248
4249 if (TARGET_VSX_TIMODE && !TARGET_VSX)
4250 {
4251 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE)
4252 error ("-mvsx-timode requires -mvsx");
4253 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
4254 }
4255
4256 if (TARGET_DFP && !TARGET_HARD_FLOAT)
4257 {
4258 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
4259 error ("-mhard-dfp requires -mhard-float");
4260 rs6000_isa_flags &= ~OPTION_MASK_DFP;
4261 }
4262
4263 /* Allow an explicit -mupper-regs to set -mupper-regs-df, -mupper-regs-di,
4264 and -mupper-regs-sf, depending on the cpu, unless the user explicitly also
4265 set the individual option. */
4266 if (TARGET_UPPER_REGS > 0)
4267 {
4268 if (TARGET_VSX
4269 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF))
4270 {
4271 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_DF;
4272 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DF;
4273 }
4274 if (TARGET_VSX
4275 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI))
4276 {
4277 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_DI;
4278 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DI;
4279 }
4280 if (TARGET_P8_VECTOR
4281 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF))
4282 {
4283 rs6000_isa_flags |= OPTION_MASK_UPPER_REGS_SF;
4284 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_SF;
4285 }
4286 }
4287 else if (TARGET_UPPER_REGS == 0)
4288 {
4289 if (TARGET_VSX
4290 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF))
4291 {
4292 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DF;
4293 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DF;
4294 }
4295 if (TARGET_VSX
4296 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI))
4297 {
4298 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DI;
4299 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_DI;
4300 }
4301 if (TARGET_P8_VECTOR
4302 && !(rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF))
4303 {
4304 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_SF;
4305 rs6000_isa_flags_explicit |= OPTION_MASK_UPPER_REGS_SF;
4306 }
4307 }
4308
4309 if (TARGET_UPPER_REGS_DF && !TARGET_VSX)
4310 {
4311 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF)
4312 error ("-mupper-regs-df requires -mvsx");
4313 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DF;
4314 }
4315
4316 if (TARGET_UPPER_REGS_DI && !TARGET_VSX)
4317 {
4318 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DI)
4319 error ("-mupper-regs-di requires -mvsx");
4320 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_DI;
4321 }
4322
4323 if (TARGET_UPPER_REGS_SF && !TARGET_P8_VECTOR)
4324 {
4325 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF)
4326 error ("-mupper-regs-sf requires -mpower8-vector");
4327 rs6000_isa_flags &= ~OPTION_MASK_UPPER_REGS_SF;
4328 }
4329
4330 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
4331 silently turn off quad memory mode. */
4332 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
4333 {
4334 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4335 warning (0, N_("-mquad-memory requires 64-bit mode"));
4336
4337 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
4338 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
4339
4340 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
4341 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
4342 }
4343
4344 /* Non-atomic quad memory loads/stores are disabled for little endian, since
4345 the words are reversed, but atomic operations can still be done by
4346 swapping the words. */
4347 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
4348 {
4349 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
4350 warning (0, N_("-mquad-memory is not available in little endian mode"));
4351
4352 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
4353 }
4354
4355 /* Assume if the user asked for normal quad memory instructions, they want
4356 the atomic versions as well, unless they explicitly told us not to use quad
4357 word atomic instructions. */
4358 if (TARGET_QUAD_MEMORY
4359 && !TARGET_QUAD_MEMORY_ATOMIC
4360 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
4361 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
4362
4363 /* Enable power8 fusion if we are tuning for power8, even if we aren't
4364 generating power8 instructions. */
4365 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
4366 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4367 & OPTION_MASK_P8_FUSION);
4368
4369 /* Setting additional fusion flags turns on base fusion. */
4370 if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
4371 {
4372 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4373 {
4374 if (TARGET_P8_FUSION_SIGN)
4375 error ("-mpower8-fusion-sign requires -mpower8-fusion");
4376
4377 if (TARGET_TOC_FUSION)
4378 error ("-mtoc-fusion requires -mpower8-fusion");
4379
4380 rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
4381 }
4382 else
4383 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4384 }
4385
4386 /* Power9 fusion is a superset of power8 fusion. */
4387 if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
4388 {
4389 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
4390 {
4391 /* We prefer to not mention undocumented options in
4392 error messages. However, if users have managed to select
4393 power9-fusion without selecting power8-fusion, they
4394 already know about undocumented flags. */
4395 error ("-mpower9-fusion requires -mpower8-fusion");
4396 rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
4397 }
4398 else
4399 rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
4400 }
4401
4402 /* Enable power9 fusion if we are tuning for power9, even if we aren't
4403 generating power9 instructions. */
4404 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
4405 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
4406 & OPTION_MASK_P9_FUSION);
4407
4408 /* Power8 does not fuse sign extended loads with the addis. If we are
4409 optimizing at high levels for speed, convert a sign extended load into a
4410 zero extending load, and an explicit sign extension. */
4411 if (TARGET_P8_FUSION
4412 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
4413 && optimize_function_for_speed_p (cfun)
4414 && optimize >= 3)
4415 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
4416
4417 /* TOC fusion requires 64-bit and medium/large code model. */
4418 if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
4419 {
4420 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4421 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4422 warning (0, N_("-mtoc-fusion requires 64-bit"));
4423 }
4424
4425 if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
4426 {
4427 rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
4428 if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
4429 warning (0, N_("-mtoc-fusion requires medium/large code model"));
4430 }
4431
4432 /* Turn on -mtoc-fusion by default if we have p8-fusion and a 64-bit
4433 medium/large code model. */
4434 if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
4435 && (TARGET_CMODEL != CMODEL_SMALL)
4436 && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
4437 rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
4438
4439 /* ISA 3.0 vector instructions include ISA 2.07. */
4440 if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
4441 {
4442 /* We prefer to not mention undocumented options in
4443 error messages. However, if users have managed to select
4444 power9-vector without selecting power8-vector, they
4445 already know about undocumented flags. */
4446 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
4447 error ("-mpower9-vector requires -mpower8-vector");
4448 rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
4449 }
4450
4451 /* -mpower9-dform turns on both -mpower9-dform-scalar and
4452 -mpower9-dform-vector. */
4453 if (TARGET_P9_DFORM_BOTH > 0)
4454 {
4455 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR))
4456 rs6000_isa_flags |= OPTION_MASK_P9_DFORM_VECTOR;
4457
4458 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR))
4459 rs6000_isa_flags |= OPTION_MASK_P9_DFORM_SCALAR;
4460 }
4461 else if (TARGET_P9_DFORM_BOTH == 0)
4462 {
4463 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_VECTOR))
4464 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_VECTOR;
4465
4466 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_DFORM_SCALAR))
4467 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4468 }
4469
4470 /* ISA 3.0 D-form instructions require p9-vector and upper-regs. */
4471 if ((TARGET_P9_DFORM_SCALAR || TARGET_P9_DFORM_VECTOR) && !TARGET_P9_VECTOR)
4472 {
4473 /* We prefer to not mention undocumented options in
4474 error messages. However, if users have managed to select
4475 power9-dform without selecting power9-vector, they
4476 already know about undocumented flags. */
4477 if (rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
4478 error ("-mpower9-dform requires -mpower9-vector");
4479 rs6000_isa_flags &= ~(OPTION_MASK_P9_DFORM_SCALAR
4480 | OPTION_MASK_P9_DFORM_VECTOR);
4481 }
4482
4483 if (TARGET_P9_DFORM_SCALAR && !TARGET_UPPER_REGS_DF)
4484 {
4485 /* We prefer to not mention undocumented options in
4486 error messages. However, if users have managed to select
4487 power9-dform without selecting upper-regs-df, they
4488 already know about undocumented flags. */
4489 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_DF)
4490 error ("-mpower9-dform requires -mupper-regs-df");
4491 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4492 }
4493
4494 if (TARGET_P9_DFORM_SCALAR && !TARGET_UPPER_REGS_SF)
4495 {
4496 if (rs6000_isa_flags_explicit & OPTION_MASK_UPPER_REGS_SF)
4497 error ("-mpower9-dform requires -mupper-regs-sf");
4498 rs6000_isa_flags &= ~OPTION_MASK_P9_DFORM_SCALAR;
4499 }
4500
4501 /* Enable LRA by default. */
4502 if ((rs6000_isa_flags_explicit & OPTION_MASK_LRA) == 0)
4503 rs6000_isa_flags |= OPTION_MASK_LRA;
4504
4505 /* There have been bugs with -mvsx-timode that don't show up with -mlra,
4506 but do show up with -mno-lra. Given -mlra will become the default once
4507 PR 69847 is fixed, turn off the options with problems by default if
4508 -mno-lra was used, and warn if the user explicitly asked for the option.
4509
4510 Enable -mpower9-dform-vector by default if LRA and other power9 options.
4511 Enable -mvsx-timode by default if LRA and VSX. */
4512 if (!TARGET_LRA)
4513 {
4514 if (TARGET_VSX_TIMODE)
4515 {
4516 if ((rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE) != 0)
4517 warning (0, "-mvsx-timode might need -mlra");
4518
4519 else
4520 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
4521 }
4522 }
4523
4524 else
4525 {
4526 if (TARGET_VSX && !TARGET_VSX_TIMODE
4527 && (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE) == 0)
4528 rs6000_isa_flags |= OPTION_MASK_VSX_TIMODE;
4529 }
4530
4531 /* Explicitly enable -mallow-movmisalign if we have full ISA 2.07
4532 support. If we only have ISA 2.06 support, and the user did not specify
4533 the switch, leave it set to -1 so the movmisalign patterns are enabled,
4534 but we don't enable the full vectorization support. */
4535 if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
4536 TARGET_ALLOW_MOVMISALIGN = 1;
4537
4538 else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
4539 {
4540 if (TARGET_ALLOW_MOVMISALIGN > 0
4541 && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
4542 error ("-mallow-movmisalign requires -mvsx");
4543
4544 TARGET_ALLOW_MOVMISALIGN = 0;
4545 }
4546
4547 /* Determine when unaligned vector accesses are permitted, and when
4548 they are preferred over masked Altivec loads. Note that if
4549 TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
4550 TARGET_EFFICIENT_UNALIGNED_VSX must be as well. The converse is
4551 not true. */
4552 if (TARGET_EFFICIENT_UNALIGNED_VSX)
4553 {
4554 if (!TARGET_VSX)
4555 {
4556 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4557 error ("-mefficient-unaligned-vsx requires -mvsx");
4558
4559 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4560 }
4561
4562 else if (!TARGET_ALLOW_MOVMISALIGN)
4563 {
4564 if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
4565 error ("-mefficient-unaligned-vsx requires -mallow-movmisalign");
4566
4567 rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
4568 }
4569 }
4570
4571 /* Check whether we should allow small integers into VSX registers. We
4572 require direct move to prevent the register allocator from having to
4573 move variables through memory. SImode can be used on ISA 2.07,
4574 while HImode and QImode require ISA 3.0. */
4575 if (TARGET_VSX_SMALL_INTEGER
4576 && (!TARGET_DIRECT_MOVE || !TARGET_P8_VECTOR || !TARGET_UPPER_REGS_DI))
4577 {
4578 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_SMALL_INTEGER)
4579 error ("-mvsx-small-integer requires -mpower8-vector, "
4580 "-mupper-regs-di, and -mdirect-move");
4581
4582 rs6000_isa_flags &= ~OPTION_MASK_VSX_SMALL_INTEGER;
4583 }
4584
4585 /* Set long double size before the IEEE 128-bit tests. */
4586 if (!global_options_set.x_rs6000_long_double_type_size)
4587 {
4588 if (main_target_opt != NULL
4589 && (main_target_opt->x_rs6000_long_double_type_size
4590 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
4591 error ("target attribute or pragma changes long double size");
4592 else
4593 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
4594 }
4595
4596 /* Set -mabi=ieeelongdouble on some old targets. Note that AIX and Darwin
4597 explicitly redefine TARGET_IEEEQUAD to 0, so those systems will not
4598 pick up this default. */
4599 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
4600 if (!global_options_set.x_rs6000_ieeequad)
4601 rs6000_ieeequad = 1;
4602 #endif
4603
4604 /* Enable the default support for IEEE 128-bit floating point on Linux VSX
4605 systems, but don't enable the __float128 keyword. */
4606 if (TARGET_VSX && TARGET_LONG_DOUBLE_128
4607 && (TARGET_FLOAT128_ENABLE_TYPE || TARGET_IEEEQUAD)
4608 && ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_TYPE) == 0))
4609 rs6000_isa_flags |= OPTION_MASK_FLOAT128_TYPE;
4610
4611 /* IEEE 128-bit floating point requires VSX support. */
4612 if (!TARGET_VSX)
4613 {
4614 if (TARGET_FLOAT128_KEYWORD)
4615 {
4616 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4617 error ("-mfloat128 requires VSX support");
4618
4619 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4620 | OPTION_MASK_FLOAT128_KEYWORD
4621 | OPTION_MASK_FLOAT128_HW);
4622 }
4623
4624 else if (TARGET_FLOAT128_TYPE)
4625 {
4626 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_TYPE) != 0)
4627 error ("-mfloat128-type requires VSX support");
4628
4629 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4630 | OPTION_MASK_FLOAT128_KEYWORD
4631 | OPTION_MASK_FLOAT128_HW);
4632 }
4633 }
4634
4635 /* -mfloat128 and -mfloat128-hardware internally require the underlying IEEE
4636 128-bit floating point support to be enabled. */
4637 if (!TARGET_FLOAT128_TYPE)
4638 {
4639 if (TARGET_FLOAT128_KEYWORD)
4640 {
4641 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
4642 {
4643 error ("-mfloat128 requires -mfloat128-type");
4644 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4645 | OPTION_MASK_FLOAT128_KEYWORD
4646 | OPTION_MASK_FLOAT128_HW);
4647 }
4648 else
4649 rs6000_isa_flags |= OPTION_MASK_FLOAT128_TYPE;
4650 }
4651
4652 if (TARGET_FLOAT128_HW)
4653 {
4654 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4655 {
4656 error ("-mfloat128-hardware requires -mfloat128-type");
4657 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4658 }
4659 else
4660 rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_TYPE
4661 | OPTION_MASK_FLOAT128_KEYWORD
4662 | OPTION_MASK_FLOAT128_HW);
4663 }
4664 }
4665
4666 /* If we have -mfloat128-type and full ISA 3.0 support, enable
4667 -mfloat128-hardware by default. However, don't enable the __float128
4668 keyword. If the user explicitly turned on -mfloat128-hardware, enable the
4669 -mfloat128 option as well if it was not already set. */
4670 if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW
4671 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
4672 && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
4673 rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;
4674
4675 if (TARGET_FLOAT128_HW
4676 && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
4677 {
4678 if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
4679 error ("-mfloat128-hardware requires full ISA 3.0 support");
4680
4681 rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
4682 }
4683
4684 if (TARGET_FLOAT128_HW && !TARGET_FLOAT128_KEYWORD
4685 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0
4686 && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
4687 rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
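
/* A sketch of what the keyword enables once these defaults settle,
   assuming -mfloat128 on a VSX target (the function name is illustrative
   only):

       __float128
       twice (__float128 x)
       {
         return x + x;   // xsaddqp with -mfloat128-hardware, otherwise a
                         // software IEEE-128 support routine
       }
 */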
4688
4689 /* Print the options after updating the defaults. */
4690 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4691 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
4692
4693 /* E500mc does "better" if we inline more aggressively. Respect the
4694 user's opinion, though. */
4695 if (rs6000_block_move_inline_limit == 0
4696 && (rs6000_cpu == PROCESSOR_PPCE500MC
4697 || rs6000_cpu == PROCESSOR_PPCE500MC64
4698 || rs6000_cpu == PROCESSOR_PPCE5500
4699 || rs6000_cpu == PROCESSOR_PPCE6500))
4700 rs6000_block_move_inline_limit = 128;
4701
4702 /* store_one_arg depends on expand_block_move to handle at least the
4703 size of reg_parm_stack_space. */
4704 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
4705 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
4706
4707 if (global_init_p)
4708 {
4709 /* If the appropriate debug option is enabled, replace the target hooks
4710 with debug versions that call the real version and then print
4711 debugging information. */
4712 if (TARGET_DEBUG_COST)
4713 {
4714 targetm.rtx_costs = rs6000_debug_rtx_costs;
4715 targetm.address_cost = rs6000_debug_address_cost;
4716 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
4717 }
4718
4719 if (TARGET_DEBUG_ADDR)
4720 {
4721 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
4722 targetm.legitimize_address = rs6000_debug_legitimize_address;
4723 rs6000_secondary_reload_class_ptr
4724 = rs6000_debug_secondary_reload_class;
4725 rs6000_secondary_memory_needed_ptr
4726 = rs6000_debug_secondary_memory_needed;
4727 rs6000_cannot_change_mode_class_ptr
4728 = rs6000_debug_cannot_change_mode_class;
4729 rs6000_preferred_reload_class_ptr
4730 = rs6000_debug_preferred_reload_class;
4731 rs6000_legitimize_reload_address_ptr
4732 = rs6000_debug_legitimize_reload_address;
4733 rs6000_mode_dependent_address_ptr
4734 = rs6000_debug_mode_dependent_address;
4735 }
4736
4737 if (rs6000_veclibabi_name)
4738 {
4739 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
4740 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
4741 else
4742 {
4743 error ("unknown vectorization library ABI type (%s) for "
4744 "-mveclibabi= switch", rs6000_veclibabi_name);
4745 ret = false;
4746 }
4747 }
4748 }
4749
4750 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
4751 target attribute or pragma which automatically enables both options,
4752 unless the altivec ABI was set. This is set by default for 64-bit, but
4753 not for 32-bit. */
4754 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4755 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
4756 | OPTION_MASK_FLOAT128_TYPE
4757 | OPTION_MASK_FLOAT128_KEYWORD)
4758 & ~rs6000_isa_flags_explicit);
4759
4760 /* Enable Altivec ABI for AIX -maltivec. */
4761 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
4762 {
4763 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
4764 error ("target attribute or pragma changes AltiVec ABI");
4765 else
4766 rs6000_altivec_abi = 1;
4767 }
4768
4769 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
4770 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
4771 be explicitly overridden in either case. */
4772 if (TARGET_ELF)
4773 {
4774 if (!global_options_set.x_rs6000_altivec_abi
4775 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
4776 {
4777 if (main_target_opt != NULL &&
4778 !main_target_opt->x_rs6000_altivec_abi)
4779 error ("target attribute or pragma changes AltiVec ABI");
4780 else
4781 rs6000_altivec_abi = 1;
4782 }
4783 }
4784
4785 /* Set the Darwin64 ABI as default for 64-bit Darwin.
4786 So far, the only darwin64 targets are also MACH-O. */
4787 if (TARGET_MACHO
4788 && DEFAULT_ABI == ABI_DARWIN
4789 && TARGET_64BIT)
4790 {
4791 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
4792 error ("target attribute or pragma changes darwin64 ABI");
4793 else
4794 {
4795 rs6000_darwin64_abi = 1;
4796 /* Default to natural alignment, for better performance. */
4797 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
4798 }
4799 }
4800
4801 /* Place FP constants in the constant pool instead of TOC
4802 if section anchors are enabled. */
4803 if (flag_section_anchors
4804 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
4805 TARGET_NO_FP_IN_TOC = 1;
4806
4807 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4808 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
4809
4810 #ifdef SUBTARGET_OVERRIDE_OPTIONS
4811 SUBTARGET_OVERRIDE_OPTIONS;
4812 #endif
4813 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
4814 SUBSUBTARGET_OVERRIDE_OPTIONS;
4815 #endif
4816 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
4817 SUB3TARGET_OVERRIDE_OPTIONS;
4818 #endif
4819
4820 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
4821 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
4822
4823 /* For the E500 family of cores, reset the single/double FP flags to let us
4824 check that they remain constant across attributes or pragmas. Also,
4825 clear a possible request for string instructions, which are not supported
4826 and which we might have silently enabled above for -Os.
4827
4828 For other families, clear ISEL in case it was set implicitly.
4829 */
4830
4831 switch (rs6000_cpu)
4832 {
4833 case PROCESSOR_PPC8540:
4834 case PROCESSOR_PPC8548:
4835 case PROCESSOR_PPCE500MC:
4836 case PROCESSOR_PPCE500MC64:
4837 case PROCESSOR_PPCE5500:
4838 case PROCESSOR_PPCE6500:
4839
4840 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
4841 rs6000_double_float = TARGET_E500_DOUBLE;
4842
4843 rs6000_isa_flags &= ~OPTION_MASK_STRING;
4844
4845 break;
4846
4847 default:
4848
4849 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
4850 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
4851
4852 break;
4853 }
4854
4855 if (main_target_opt)
4856 {
4857 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
4858 error ("target attribute or pragma changes single precision floating "
4859 "point");
4860 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
4861 error ("target attribute or pragma changes double precision floating "
4862 "point");
4863 }
4864
4865 /* Detect invalid option combinations with E500. */
4866 CHECK_E500_OPTIONS;
4867
4868 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
4869 && rs6000_cpu != PROCESSOR_POWER5
4870 && rs6000_cpu != PROCESSOR_POWER6
4871 && rs6000_cpu != PROCESSOR_POWER7
4872 && rs6000_cpu != PROCESSOR_POWER8
4873 && rs6000_cpu != PROCESSOR_POWER9
4874 && rs6000_cpu != PROCESSOR_PPCA2
4875 && rs6000_cpu != PROCESSOR_CELL
4876 && rs6000_cpu != PROCESSOR_PPC476);
4877 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
4878 || rs6000_cpu == PROCESSOR_POWER5
4879 || rs6000_cpu == PROCESSOR_POWER7
4880 || rs6000_cpu == PROCESSOR_POWER8);
4881 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
4882 || rs6000_cpu == PROCESSOR_POWER5
4883 || rs6000_cpu == PROCESSOR_POWER6
4884 || rs6000_cpu == PROCESSOR_POWER7
4885 || rs6000_cpu == PROCESSOR_POWER8
4886 || rs6000_cpu == PROCESSOR_POWER9
4887 || rs6000_cpu == PROCESSOR_PPCE500MC
4888 || rs6000_cpu == PROCESSOR_PPCE500MC64
4889 || rs6000_cpu == PROCESSOR_PPCE5500
4890 || rs6000_cpu == PROCESSOR_PPCE6500);
4891
4892 /* Allow debug switches to override the above settings. These are set to -1
4893 in rs6000.opt to indicate the user hasn't directly set the switch. */
4894 if (TARGET_ALWAYS_HINT >= 0)
4895 rs6000_always_hint = TARGET_ALWAYS_HINT;
4896
4897 if (TARGET_SCHED_GROUPS >= 0)
4898 rs6000_sched_groups = TARGET_SCHED_GROUPS;
4899
4900 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
4901 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
4902
4903 rs6000_sched_restricted_insns_priority
4904 = (rs6000_sched_groups ? 1 : 0);
4905
4906 /* Handle -msched-costly-dep option. */
4907 rs6000_sched_costly_dep
4908 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
4909
4910 if (rs6000_sched_costly_dep_str)
4911 {
4912 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
4913 rs6000_sched_costly_dep = no_dep_costly;
4914 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
4915 rs6000_sched_costly_dep = all_deps_costly;
4916 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
4917 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
4918 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
4919 rs6000_sched_costly_dep = store_to_load_dep_costly;
4920 else
4921 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
4922 atoi (rs6000_sched_costly_dep_str));
4923 }
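
/* For illustration: -msched-costly-dep=no and -msched-costly-dep=all map to
   the named enumerators above, while a bare number such as
   -msched-costly-dep=20 goes through the atoi fallback; treating that number
   as a latency threshold is the documented behavior of the option, not
   something visible in the parsing above. */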
4924
4925 /* Handle -minsert-sched-nops option. */
4926 rs6000_sched_insert_nops
4927 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
4928
4929 if (rs6000_sched_insert_nops_str)
4930 {
4931 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
4932 rs6000_sched_insert_nops = sched_finish_none;
4933 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
4934 rs6000_sched_insert_nops = sched_finish_pad_groups;
4935 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
4936 rs6000_sched_insert_nops = sched_finish_regroup_exact;
4937 else
4938 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
4939 atoi (rs6000_sched_insert_nops_str));
4940 }
4941
4942 if (global_init_p)
4943 {
4944 #ifdef TARGET_REGNAMES
4945 /* If the user desires alternate register names, copy in the
4946 alternate names now. */
4947 if (TARGET_REGNAMES)
4948 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
4949 #endif
4950
4951 /* Set aix_struct_return last, after the ABI is determined.
4952 If -maix-struct-return or -msvr4-struct-return was explicitly
4953 used, don't override with the ABI default. */
4954 if (!global_options_set.x_aix_struct_return)
4955 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
4956
4957 #if 0
4958 /* IBM XL compiler defaults to unsigned bitfields. */
4959 if (TARGET_XL_COMPAT)
4960 flag_signed_bitfields = 0;
4961 #endif
4962
4963 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
4964 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
4965
4966 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
4967
4968 /* We can only guarantee the availability of DI pseudo-ops when
4969 assembling for 64-bit targets. */
4970 if (!TARGET_64BIT)
4971 {
4972 targetm.asm_out.aligned_op.di = NULL;
4973 targetm.asm_out.unaligned_op.di = NULL;
4974 }
4975
4976
4977 /* Set branch target alignment, if not optimizing for size. */
4978 if (!optimize_size)
4979 {
4980 /* Cell wants to be aligned to 8 bytes for dual issue.  Titan wants to be
4981 aligned to 8 bytes to avoid misprediction by the branch predictor. */
4982 if (rs6000_cpu == PROCESSOR_TITAN
4983 || rs6000_cpu == PROCESSOR_CELL)
4984 {
4985 if (align_functions <= 0)
4986 align_functions = 8;
4987 if (align_jumps <= 0)
4988 align_jumps = 8;
4989 if (align_loops <= 0)
4990 align_loops = 8;
4991 }
4992 if (rs6000_align_branch_targets)
4993 {
4994 if (align_functions <= 0)
4995 align_functions = 16;
4996 if (align_jumps <= 0)
4997 align_jumps = 16;
4998 if (align_loops <= 0)
4999 {
5000 can_override_loop_align = 1;
5001 align_loops = 16;
5002 }
5003 }
5004 if (align_jumps_max_skip <= 0)
5005 align_jumps_max_skip = 15;
5006 if (align_loops_max_skip <= 0)
5007 align_loops_max_skip = 15;
5008 }
5009
5010 /* Arrange to save and restore machine status around nested functions. */
5011 init_machine_status = rs6000_init_machine_status;
5012
5013 /* We should always be splitting complex arguments, but we can't break
5014 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
5015 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
5016 targetm.calls.split_complex_arg = NULL;
5017
5018 /* The AIX and ELFv1 ABIs define standard function descriptors. */
5019 if (DEFAULT_ABI == ABI_AIX)
5020 targetm.calls.custom_function_descriptors = 0;
5021 }
5022
5023 /* Initialize rs6000_cost with the appropriate target costs. */
5024 if (optimize_size)
5025 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
5026 else
5027 switch (rs6000_cpu)
5028 {
5029 case PROCESSOR_RS64A:
5030 rs6000_cost = &rs64a_cost;
5031 break;
5032
5033 case PROCESSOR_MPCCORE:
5034 rs6000_cost = &mpccore_cost;
5035 break;
5036
5037 case PROCESSOR_PPC403:
5038 rs6000_cost = &ppc403_cost;
5039 break;
5040
5041 case PROCESSOR_PPC405:
5042 rs6000_cost = &ppc405_cost;
5043 break;
5044
5045 case PROCESSOR_PPC440:
5046 rs6000_cost = &ppc440_cost;
5047 break;
5048
5049 case PROCESSOR_PPC476:
5050 rs6000_cost = &ppc476_cost;
5051 break;
5052
5053 case PROCESSOR_PPC601:
5054 rs6000_cost = &ppc601_cost;
5055 break;
5056
5057 case PROCESSOR_PPC603:
5058 rs6000_cost = &ppc603_cost;
5059 break;
5060
5061 case PROCESSOR_PPC604:
5062 rs6000_cost = &ppc604_cost;
5063 break;
5064
5065 case PROCESSOR_PPC604e:
5066 rs6000_cost = &ppc604e_cost;
5067 break;
5068
5069 case PROCESSOR_PPC620:
5070 rs6000_cost = &ppc620_cost;
5071 break;
5072
5073 case PROCESSOR_PPC630:
5074 rs6000_cost = &ppc630_cost;
5075 break;
5076
5077 case PROCESSOR_CELL:
5078 rs6000_cost = &ppccell_cost;
5079 break;
5080
5081 case PROCESSOR_PPC750:
5082 case PROCESSOR_PPC7400:
5083 rs6000_cost = &ppc750_cost;
5084 break;
5085
5086 case PROCESSOR_PPC7450:
5087 rs6000_cost = &ppc7450_cost;
5088 break;
5089
5090 case PROCESSOR_PPC8540:
5091 case PROCESSOR_PPC8548:
5092 rs6000_cost = &ppc8540_cost;
5093 break;
5094
5095 case PROCESSOR_PPCE300C2:
5096 case PROCESSOR_PPCE300C3:
5097 rs6000_cost = &ppce300c2c3_cost;
5098 break;
5099
5100 case PROCESSOR_PPCE500MC:
5101 rs6000_cost = &ppce500mc_cost;
5102 break;
5103
5104 case PROCESSOR_PPCE500MC64:
5105 rs6000_cost = &ppce500mc64_cost;
5106 break;
5107
5108 case PROCESSOR_PPCE5500:
5109 rs6000_cost = &ppce5500_cost;
5110 break;
5111
5112 case PROCESSOR_PPCE6500:
5113 rs6000_cost = &ppce6500_cost;
5114 break;
5115
5116 case PROCESSOR_TITAN:
5117 rs6000_cost = &titan_cost;
5118 break;
5119
5120 case PROCESSOR_POWER4:
5121 case PROCESSOR_POWER5:
5122 rs6000_cost = &power4_cost;
5123 break;
5124
5125 case PROCESSOR_POWER6:
5126 rs6000_cost = &power6_cost;
5127 break;
5128
5129 case PROCESSOR_POWER7:
5130 rs6000_cost = &power7_cost;
5131 break;
5132
5133 case PROCESSOR_POWER8:
5134 rs6000_cost = &power8_cost;
5135 break;
5136
5137 case PROCESSOR_POWER9:
5138 rs6000_cost = &power9_cost;
5139 break;
5140
5141 case PROCESSOR_PPCA2:
5142 rs6000_cost = &ppca2_cost;
5143 break;
5144
5145 default:
5146 gcc_unreachable ();
5147 }
5148
5149 if (global_init_p)
5150 {
5151 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
5152 rs6000_cost->simultaneous_prefetches,
5153 global_options.x_param_values,
5154 global_options_set.x_param_values);
5155 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
5156 global_options.x_param_values,
5157 global_options_set.x_param_values);
5158 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
5159 rs6000_cost->cache_line_size,
5160 global_options.x_param_values,
5161 global_options_set.x_param_values);
5162 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
5163 global_options.x_param_values,
5164 global_options_set.x_param_values);
5165
5166 /* Increase loop peeling limits based on performance analysis. */
5167 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
5168 global_options.x_param_values,
5169 global_options_set.x_param_values);
5170 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
5171 global_options.x_param_values,
5172 global_options_set.x_param_values);
5173
5174 /* Use the 'model' -fsched-pressure algorithm by default. */
5175 maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
5176 SCHED_PRESSURE_MODEL,
5177 global_options.x_param_values,
5178 global_options_set.x_param_values);
5179
5180 /* If using typedef char *va_list, signal that
5181 __builtin_va_start (&ap, 0) can be optimized to
5182 ap = __builtin_next_arg (0). */
5183 if (DEFAULT_ABI != ABI_V4)
5184 targetm.expand_builtin_va_start = NULL;
5185 }
5186
5187 /* Set up single/double float flags.
5188 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
5189 then set both flags. */
5190 if (TARGET_HARD_FLOAT && TARGET_FPRS
5191 && rs6000_single_float == 0 && rs6000_double_float == 0)
5192 rs6000_single_float = rs6000_double_float = 1;
5193
5194 /* If not explicitly specified via option, decide whether to generate indexed
5195 load/store instructions. */
5196 if (TARGET_AVOID_XFORM == -1)
5197 /* Avoid indexed addressing when targeting Power6 in order to avoid the
5198 DERAT mispredict penalty. However the LVE and STVE altivec instructions
5199 need indexed accesses and the type used is the scalar type of the element
5200 being loaded or stored. */
5201 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
5202 && !TARGET_ALTIVEC);
5203
5204 /* Set the -mrecip options. */
5205 if (rs6000_recip_name)
5206 {
5207 char *p = ASTRDUP (rs6000_recip_name);
5208 char *q;
5209 unsigned int mask, i;
5210 bool invert;
5211
5212 while ((q = strtok (p, ",")) != NULL)
5213 {
5214 p = NULL;
5215 if (*q == '!')
5216 {
5217 invert = true;
5218 q++;
5219 }
5220 else
5221 invert = false;
5222
5223 if (!strcmp (q, "default"))
5224 mask = ((TARGET_RECIP_PRECISION)
5225 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
5226 else
5227 {
5228 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
5229 if (!strcmp (q, recip_options[i].string))
5230 {
5231 mask = recip_options[i].mask;
5232 break;
5233 }
5234
5235 if (i == ARRAY_SIZE (recip_options))
5236 {
5237 error ("unknown option for -mrecip=%s", q);
5238 invert = false;
5239 mask = 0;
5240 ret = false;
5241 }
5242 }
5243
5244 if (invert)
5245 rs6000_recip_control &= ~mask;
5246 else
5247 rs6000_recip_control |= mask;
5248 }
5249 }
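
/* For illustration: with the loop above, -mrecip=all,!rsqrtd would first set
   every bit of rs6000_recip_control and then clear the double-precision
   reciprocal square root bit; the leading '!' selects the invert path.  The
   exact option names come from the recip_options table defined earlier in
   this file ("rsqrtd" here follows the documented spelling). */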
5250
5251 /* Set the builtin mask from the various options that could affect which
5252 builtins are available.  In the past we used target_flags, but we've run out
5253 of bits, and some options like SPE and PAIRED are no longer in
5254 target_flags. */
5255 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
5256 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
5257 rs6000_print_builtin_options (stderr, 0, "builtin mask",
5258 rs6000_builtin_mask);
5259
5260 /* Initialize all of the registers. */
5261 rs6000_init_hard_regno_mode_ok (global_init_p);
5262
5263 /* Save the initial options in case the user uses function-specific options. */
5264 if (global_init_p)
5265 target_option_default_node = target_option_current_node
5266 = build_target_option_node (&global_options);
5267
5268 /* If not explicitly specified via option, decide whether to generate the
5269 extra blr's required to preserve the link stack on some cpus (e.g., 476).  */
5270 if (TARGET_LINK_STACK == -1)
5271 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
5272
5273 return ret;
5274 }
5275
5276 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
5277 define the target cpu type. */
5278
5279 static void
5280 rs6000_option_override (void)
5281 {
5282 (void) rs6000_option_override_internal (true);
5283 }
5284
5285 \f
5286 /* Implement targetm.vectorize.builtin_mask_for_load. */
5287 static tree
5288 rs6000_builtin_mask_for_load (void)
5289 {
5290 /* Don't use lvsl/vperm for P8 and similarly efficient machines. */
5291 if ((TARGET_ALTIVEC && !TARGET_VSX)
5292 || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
5293 return altivec_builtin_mask_for_load;
5294 else
5295 return 0;
5296 }
5297
5298 /* Implement LOOP_ALIGN. */
5299 int
5300 rs6000_loop_align (rtx label)
5301 {
5302 basic_block bb;
5303 int ninsns;
5304
5305 /* Don't override loop alignment if -falign-loops was specified. */
5306 if (!can_override_loop_align)
5307 return align_loops_log;
5308
5309 bb = BLOCK_FOR_INSN (label);
5310 ninsns = num_loop_insns (bb->loop_father);
5311
5312 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
5313 if (ninsns > 4 && ninsns <= 8
5314 && (rs6000_cpu == PROCESSOR_POWER4
5315 || rs6000_cpu == PROCESSOR_POWER5
5316 || rs6000_cpu == PROCESSOR_POWER6
5317 || rs6000_cpu == PROCESSOR_POWER7
5318 || rs6000_cpu == PROCESSOR_POWER8
5319 || rs6000_cpu == PROCESSOR_POWER9))
5320 return 5;
5321 else
5322 return align_loops_log;
5323 }
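
/* For example, a loop of 5 to 8 instructions compiled for POWER8 is aligned
   to 2**5 = 32 bytes, while a 12-instruction loop (or any loop on a cpu not
   listed above) keeps the -falign-loops default. */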
5324
5325 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
5326 static int
5327 rs6000_loop_align_max_skip (rtx_insn *label)
5328 {
5329 return (1 << rs6000_loop_align (label)) - 1;
5330 }
5331
5332 /* Return true iff a data reference of TYPE can reach vector alignment (16)
5333 after applying N iterations.  This routine does not determine
5334 how many iterations are required to reach the desired alignment. */
5335
5336 static bool
5337 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
5338 {
5339 if (is_packed)
5340 return false;
5341
5342 if (TARGET_32BIT)
5343 {
5344 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
5345 return true;
5346
5347 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
5348 return true;
5349
5350 return false;
5351 }
5352 else
5353 {
5354 if (TARGET_MACHO)
5355 return false;
5356
5357 /* Assume that all other types are naturally aligned.  CHECKME! */
5358 return true;
5359 }
5360 }
5361
5362 /* Return true if the vector misalignment factor is supported by the
5363 target. */
5364 static bool
5365 rs6000_builtin_support_vector_misalignment (machine_mode mode,
5366 const_tree type,
5367 int misalignment,
5368 bool is_packed)
5369 {
5370 if (TARGET_VSX)
5371 {
5372 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5373 return true;
5374
5375 /* Return false if the movmisalign pattern is not supported for this mode. */
5376 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
5377 return false;
5378
5379 if (misalignment == -1)
5380 {
5381 /* Misalignment factor is unknown at compile time but we know
5382 it's word aligned. */
5383 if (rs6000_vector_alignment_reachable (type, is_packed))
5384 {
5385 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
5386
5387 if (element_size == 64 || element_size == 32)
5388 return true;
5389 }
5390
5391 return false;
5392 }
5393
5394 /* VSX supports word-aligned vectors. */
5395 if (misalignment % 4 == 0)
5396 return true;
5397 }
5398 return false;
5399 }
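
/* For example, with VSX but without efficient unaligned accesses, a vector
   reference with a known misalignment of 8 bytes is supported (8 % 4 == 0),
   while a misalignment of 2 bytes falls through and is rejected. */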
5400
5401 /* Implement targetm.vectorize.builtin_vectorization_cost. */
5402 static int
5403 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
5404 tree vectype, int misalign)
5405 {
5406 unsigned elements;
5407 tree elem_type;
5408
5409 switch (type_of_cost)
5410 {
5411 case scalar_stmt:
5412 case scalar_load:
5413 case scalar_store:
5414 case vector_stmt:
5415 case vector_load:
5416 case vector_store:
5417 case vec_to_scalar:
5418 case scalar_to_vec:
5419 case cond_branch_not_taken:
5420 return 1;
5421
5422 case vec_perm:
5423 if (TARGET_VSX)
5424 return 3;
5425 else
5426 return 1;
5427
5428 case vec_promote_demote:
5429 if (TARGET_VSX)
5430 return 4;
5431 else
5432 return 1;
5433
5434 case cond_branch_taken:
5435 return 3;
5436
5437 case unaligned_load:
5438 if (TARGET_P9_VECTOR)
5439 return 3;
5440
5441 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5442 return 1;
5443
5444 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5445 {
5446 elements = TYPE_VECTOR_SUBPARTS (vectype);
5447 if (elements == 2)
5448 /* Double word aligned. */
5449 return 2;
5450
5451 if (elements == 4)
5452 {
5453 switch (misalign)
5454 {
5455 case 8:
5456 /* Double word aligned. */
5457 return 2;
5458
5459 case -1:
5460 /* Unknown misalignment. */
5461 case 4:
5462 case 12:
5463 /* Word aligned. */
5464 return 22;
5465
5466 default:
5467 gcc_unreachable ();
5468 }
5469 }
5470 }
5471
5472 if (TARGET_ALTIVEC)
5473 /* Misaligned loads are not supported. */
5474 gcc_unreachable ();
5475
5476 return 2;
5477
5478 case unaligned_store:
5479 if (TARGET_EFFICIENT_UNALIGNED_VSX)
5480 return 1;
5481
5482 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
5483 {
5484 elements = TYPE_VECTOR_SUBPARTS (vectype);
5485 if (elements == 2)
5486 /* Double word aligned. */
5487 return 2;
5488
5489 if (elements == 4)
5490 {
5491 switch (misalign)
5492 {
5493 case 8:
5494 /* Double word aligned. */
5495 return 2;
5496
5497 case -1:
5498 /* Unknown misalignment. */
5499 case 4:
5500 case 12:
5501 /* Word aligned. */
5502 return 23;
5503
5504 default:
5505 gcc_unreachable ();
5506 }
5507 }
5508 }
5509
5510 if (TARGET_ALTIVEC)
5511 /* Misaligned stores are not supported. */
5512 gcc_unreachable ();
5513
5514 return 2;
5515
5516 case vec_construct:
5517 /* This is a rough approximation assuming non-constant elements
5518 constructed into a vector via element insertion. FIXME:
5519 vec_construct is not granular enough for uniformly good
5520 decisions. If the initialization is a splat, this is
5521 cheaper than we estimate. Improve this someday. */
5522 elem_type = TREE_TYPE (vectype);
5523 /* 32-bit vectors loaded into registers are stored as double
5524 precision, so we need 2 permutes, 2 converts, and 1 merge
5525 to construct a vector of short floats from them. */
5526 if (SCALAR_FLOAT_TYPE_P (elem_type)
5527 && TYPE_PRECISION (elem_type) == 32)
5528 return 5;
5529 else
5530 return max (2, TYPE_VECTOR_SUBPARTS (vectype) - 1);
5531
5532 default:
5533 gcc_unreachable ();
5534 }
5535 }
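
/* Example costs from the vec_construct case above: building a V4SF vector
   from four scalar floats is costed at 5 (the 32-bit float special case),
   while building a V4SI vector from four ints is costed at
   max (2, 4 - 1) = 3 element insertions. */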
5536
5537 /* Implement targetm.vectorize.preferred_simd_mode. */
5538
5539 static machine_mode
5540 rs6000_preferred_simd_mode (machine_mode mode)
5541 {
5542 if (TARGET_VSX)
5543 switch (mode)
5544 {
5545 case DFmode:
5546 return V2DFmode;
5547 default:;
5548 }
5549 if (TARGET_ALTIVEC || TARGET_VSX)
5550 switch (mode)
5551 {
5552 case SFmode:
5553 return V4SFmode;
5554 case TImode:
5555 return V1TImode;
5556 case DImode:
5557 return V2DImode;
5558 case SImode:
5559 return V4SImode;
5560 case HImode:
5561 return V8HImode;
5562 case QImode:
5563 return V16QImode;
5564 default:;
5565 }
5566 if (TARGET_SPE)
5567 switch (mode)
5568 {
5569 case SFmode:
5570 return V2SFmode;
5571 case SImode:
5572 return V2SImode;
5573 default:;
5574 }
5575 if (TARGET_PAIRED_FLOAT
5576 && mode == SFmode)
5577 return V2SFmode;
5578 return word_mode;
5579 }
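
/* For example, with VSX enabled the vectorizer is steered to V2DFmode for
   DFmode loops and V4SImode for SImode loops; with only AltiVec, SFmode
   still maps to V4SFmode but DFmode falls through to word_mode, i.e. no
   preferred vector mode. */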
5580
5581 typedef struct _rs6000_cost_data
5582 {
5583 struct loop *loop_info;
5584 unsigned cost[3];
5585 } rs6000_cost_data;
5586
5587 /* Test for likely overcommitment of vector hardware resources. If a
5588 loop iteration is relatively large, and too large a percentage of
5589 instructions in the loop are vectorized, the cost model may not
5590 adequately reflect delays from unavailable vector resources.
5591 Penalize the loop body cost for this case. */
5592
5593 static void
5594 rs6000_density_test (rs6000_cost_data *data)
5595 {
5596 const int DENSITY_PCT_THRESHOLD = 85;
5597 const int DENSITY_SIZE_THRESHOLD = 70;
5598 const int DENSITY_PENALTY = 10;
5599 struct loop *loop = data->loop_info;
5600 basic_block *bbs = get_loop_body (loop);
5601 int nbbs = loop->num_nodes;
5602 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
5603 int i, density_pct;
5604
5605 for (i = 0; i < nbbs; i++)
5606 {
5607 basic_block bb = bbs[i];
5608 gimple_stmt_iterator gsi;
5609
5610 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5611 {
5612 gimple *stmt = gsi_stmt (gsi);
5613 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
5614
5615 if (!STMT_VINFO_RELEVANT_P (stmt_info)
5616 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
5617 not_vec_cost++;
5618 }
5619 }
5620
5621 free (bbs);
5622 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
5623
5624 if (density_pct > DENSITY_PCT_THRESHOLD
5625 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
5626 {
5627 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
5628 if (dump_enabled_p ())
5629 dump_printf_loc (MSG_NOTE, vect_location,
5630 "density %d%%, cost %d exceeds threshold, penalizing "
5631 "loop body cost by %d%%", density_pct,
5632 vec_cost + not_vec_cost, DENSITY_PENALTY);
5633 }
5634 }
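
/* Worked example: if the loop body has vec_cost 90 and not_vec_cost 10,
   density_pct is (90 * 100) / 100 = 90.  Since 90 > 85 and the total size
   100 > 70, the vectorized body cost is scaled to 90 * 110 / 100 = 99. */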
5635
5636 /* Implement targetm.vectorize.init_cost. */
5637
5638 static void *
5639 rs6000_init_cost (struct loop *loop_info)
5640 {
5641 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
5642 data->loop_info = loop_info;
5643 data->cost[vect_prologue] = 0;
5644 data->cost[vect_body] = 0;
5645 data->cost[vect_epilogue] = 0;
5646 return data;
5647 }
5648
5649 /* Implement targetm.vectorize.add_stmt_cost. */
5650
5651 static unsigned
5652 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
5653 struct _stmt_vec_info *stmt_info, int misalign,
5654 enum vect_cost_model_location where)
5655 {
5656 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5657 unsigned retval = 0;
5658
5659 if (flag_vect_cost_model)
5660 {
5661 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
5662 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
5663 misalign);
5664 /* Statements in an inner loop relative to the loop being
5665 vectorized are weighted more heavily. The value here is
5666 arbitrary and could potentially be improved with analysis. */
5667 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
5668 count *= 50; /* FIXME. */
5669
5670 retval = (unsigned) (count * stmt_cost);
5671 cost_data->cost[where] += retval;
5672 }
5673
5674 return retval;
5675 }
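
/* Usage sketch: a statement counted once with kind unaligned_load, a V4SF
   vectype and misalign 4 costs 22 on a VSX target that allows movmisalign
   but lacks efficient unaligned support; if that statement sits in an inner
   loop, the count is first scaled by 50. */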
5676
5677 /* Implement targetm.vectorize.finish_cost. */
5678
5679 static void
5680 rs6000_finish_cost (void *data, unsigned *prologue_cost,
5681 unsigned *body_cost, unsigned *epilogue_cost)
5682 {
5683 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
5684
5685 if (cost_data->loop_info)
5686 rs6000_density_test (cost_data);
5687
5688 *prologue_cost = cost_data->cost[vect_prologue];
5689 *body_cost = cost_data->cost[vect_body];
5690 *epilogue_cost = cost_data->cost[vect_epilogue];
5691 }
5692
5693 /* Implement targetm.vectorize.destroy_cost_data. */
5694
5695 static void
5696 rs6000_destroy_cost_data (void *data)
5697 {
5698 free (data);
5699 }
5700
5701 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
5702 library with vectorized intrinsics. */
5703
5704 static tree
5705 rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
5706 tree type_in)
5707 {
5708 char name[32];
5709 const char *suffix = NULL;
5710 tree fntype, new_fndecl, bdecl = NULL_TREE;
5711 int n_args = 1;
5712 const char *bname;
5713 machine_mode el_mode, in_mode;
5714 int n, in_n;
5715
5716 /* Libmass is suitable for unsafe math only, as it does not correctly support
5717 parts of IEEE, such as denormals, with the required precision.  Only support
5718 it if we have VSX to use the simd d2 or f4 functions.
5719 XXX: Add variable length support. */
5720 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
5721 return NULL_TREE;
5722
5723 el_mode = TYPE_MODE (TREE_TYPE (type_out));
5724 n = TYPE_VECTOR_SUBPARTS (type_out);
5725 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5726 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5727 if (el_mode != in_mode
5728 || n != in_n)
5729 return NULL_TREE;
5730
5731 switch (fn)
5732 {
5733 CASE_CFN_ATAN2:
5734 CASE_CFN_HYPOT:
5735 CASE_CFN_POW:
5736 n_args = 2;
5737 gcc_fallthrough ();
5738
5739 CASE_CFN_ACOS:
5740 CASE_CFN_ACOSH:
5741 CASE_CFN_ASIN:
5742 CASE_CFN_ASINH:
5743 CASE_CFN_ATAN:
5744 CASE_CFN_ATANH:
5745 CASE_CFN_CBRT:
5746 CASE_CFN_COS:
5747 CASE_CFN_COSH:
5748 CASE_CFN_ERF:
5749 CASE_CFN_ERFC:
5750 CASE_CFN_EXP2:
5751 CASE_CFN_EXP:
5752 CASE_CFN_EXPM1:
5753 CASE_CFN_LGAMMA:
5754 CASE_CFN_LOG10:
5755 CASE_CFN_LOG1P:
5756 CASE_CFN_LOG2:
5757 CASE_CFN_LOG:
5758 CASE_CFN_SIN:
5759 CASE_CFN_SINH:
5760 CASE_CFN_SQRT:
5761 CASE_CFN_TAN:
5762 CASE_CFN_TANH:
5763 if (el_mode == DFmode && n == 2)
5764 {
5765 bdecl = mathfn_built_in (double_type_node, fn);
5766 suffix = "d2"; /* pow -> powd2 */
5767 }
5768 else if (el_mode == SFmode && n == 4)
5769 {
5770 bdecl = mathfn_built_in (float_type_node, fn);
5771 suffix = "4"; /* powf -> powf4 */
5772 }
5773 else
5774 return NULL_TREE;
5775 if (!bdecl)
5776 return NULL_TREE;
5777 break;
5778
5779 default:
5780 return NULL_TREE;
5781 }
5782
5783 gcc_assert (suffix != NULL);
5784 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
5785 if (!bname)
5786 return NULL_TREE;
5787
5788 strcpy (name, bname + sizeof ("__builtin_") - 1);
5789 strcat (name, suffix);
5790
5791 if (n_args == 1)
5792 fntype = build_function_type_list (type_out, type_in, NULL);
5793 else if (n_args == 2)
5794 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
5795 else
5796 gcc_unreachable ();
5797
5798 /* Build a function declaration for the vectorized function. */
5799 new_fndecl = build_decl (BUILTINS_LOCATION,
5800 FUNCTION_DECL, get_identifier (name), fntype);
5801 TREE_PUBLIC (new_fndecl) = 1;
5802 DECL_EXTERNAL (new_fndecl) = 1;
5803 DECL_IS_NOVOPS (new_fndecl) = 1;
5804 TREE_READONLY (new_fndecl) = 1;
5805
5806 return new_fndecl;
5807 }
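
/* Name construction example: for a V2DFmode pow, BDECL is __builtin_pow,
   so NAME becomes "pow" with the "d2" suffix appended, i.e. "powd2",
   declared as an external function taking and returning the V2DF types. */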
5808
5809 /* Returns a function decl for a vectorized version of the builtin function
5810 with builtin function code FN and the result vector type TYPE, or NULL_TREE
5811 if it is not available. */
5812
5813 static tree
5814 rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
5815 tree type_in)
5816 {
5817 machine_mode in_mode, out_mode;
5818 int in_n, out_n;
5819
5820 if (TARGET_DEBUG_BUILTIN)
5821 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
5822 combined_fn_name (combined_fn (fn)),
5823 GET_MODE_NAME (TYPE_MODE (type_out)),
5824 GET_MODE_NAME (TYPE_MODE (type_in)));
5825
5826 if (TREE_CODE (type_out) != VECTOR_TYPE
5827 || TREE_CODE (type_in) != VECTOR_TYPE
5828 || !TARGET_VECTORIZE_BUILTINS)
5829 return NULL_TREE;
5830
5831 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5832 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5833 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5834 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5835
5836 switch (fn)
5837 {
5838 CASE_CFN_COPYSIGN:
5839 if (VECTOR_UNIT_VSX_P (V2DFmode)
5840 && out_mode == DFmode && out_n == 2
5841 && in_mode == DFmode && in_n == 2)
5842 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
5843 if (VECTOR_UNIT_VSX_P (V4SFmode)
5844 && out_mode == SFmode && out_n == 4
5845 && in_mode == SFmode && in_n == 4)
5846 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
5847 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5848 && out_mode == SFmode && out_n == 4
5849 && in_mode == SFmode && in_n == 4)
5850 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
5851 break;
5852 CASE_CFN_CEIL:
5853 if (VECTOR_UNIT_VSX_P (V2DFmode)
5854 && out_mode == DFmode && out_n == 2
5855 && in_mode == DFmode && in_n == 2)
5856 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
5857 if (VECTOR_UNIT_VSX_P (V4SFmode)
5858 && out_mode == SFmode && out_n == 4
5859 && in_mode == SFmode && in_n == 4)
5860 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
5861 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5862 && out_mode == SFmode && out_n == 4
5863 && in_mode == SFmode && in_n == 4)
5864 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
5865 break;
5866 CASE_CFN_FLOOR:
5867 if (VECTOR_UNIT_VSX_P (V2DFmode)
5868 && out_mode == DFmode && out_n == 2
5869 && in_mode == DFmode && in_n == 2)
5870 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
5871 if (VECTOR_UNIT_VSX_P (V4SFmode)
5872 && out_mode == SFmode && out_n == 4
5873 && in_mode == SFmode && in_n == 4)
5874 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
5875 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5876 && out_mode == SFmode && out_n == 4
5877 && in_mode == SFmode && in_n == 4)
5878 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
5879 break;
5880 CASE_CFN_FMA:
5881 if (VECTOR_UNIT_VSX_P (V2DFmode)
5882 && out_mode == DFmode && out_n == 2
5883 && in_mode == DFmode && in_n == 2)
5884 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
5885 if (VECTOR_UNIT_VSX_P (V4SFmode)
5886 && out_mode == SFmode && out_n == 4
5887 && in_mode == SFmode && in_n == 4)
5888 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
5889 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5890 && out_mode == SFmode && out_n == 4
5891 && in_mode == SFmode && in_n == 4)
5892 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
5893 break;
5894 CASE_CFN_TRUNC:
5895 if (VECTOR_UNIT_VSX_P (V2DFmode)
5896 && out_mode == DFmode && out_n == 2
5897 && in_mode == DFmode && in_n == 2)
5898 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
5899 if (VECTOR_UNIT_VSX_P (V4SFmode)
5900 && out_mode == SFmode && out_n == 4
5901 && in_mode == SFmode && in_n == 4)
5902 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
5903 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
5904 && out_mode == SFmode && out_n == 4
5905 && in_mode == SFmode && in_n == 4)
5906 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
5907 break;
5908 CASE_CFN_NEARBYINT:
5909 if (VECTOR_UNIT_VSX_P (V2DFmode)
5910 && flag_unsafe_math_optimizations
5911 && out_mode == DFmode && out_n == 2
5912 && in_mode == DFmode && in_n == 2)
5913 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
5914 if (VECTOR_UNIT_VSX_P (V4SFmode)
5915 && flag_unsafe_math_optimizations
5916 && out_mode == SFmode && out_n == 4
5917 && in_mode == SFmode && in_n == 4)
5918 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
5919 break;
5920 CASE_CFN_RINT:
5921 if (VECTOR_UNIT_VSX_P (V2DFmode)
5922 && !flag_trapping_math
5923 && out_mode == DFmode && out_n == 2
5924 && in_mode == DFmode && in_n == 2)
5925 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
5926 if (VECTOR_UNIT_VSX_P (V4SFmode)
5927 && !flag_trapping_math
5928 && out_mode == SFmode && out_n == 4
5929 && in_mode == SFmode && in_n == 4)
5930 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
5931 break;
5932 default:
5933 break;
5934 }
5935
5936 /* Generate calls to libmass if appropriate. */
5937 if (rs6000_veclib_handler)
5938 return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);
5939
5940 return NULL_TREE;
5941 }
5942
5943 /* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION. */
5944
5945 static tree
5946 rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
5947 tree type_in)
5948 {
5949 machine_mode in_mode, out_mode;
5950 int in_n, out_n;
5951
5952 if (TARGET_DEBUG_BUILTIN)
5953 fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
5954 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
5955 GET_MODE_NAME (TYPE_MODE (type_out)),
5956 GET_MODE_NAME (TYPE_MODE (type_in)));
5957
5958 if (TREE_CODE (type_out) != VECTOR_TYPE
5959 || TREE_CODE (type_in) != VECTOR_TYPE
5960 || !TARGET_VECTORIZE_BUILTINS)
5961 return NULL_TREE;
5962
5963 out_mode = TYPE_MODE (TREE_TYPE (type_out));
5964 out_n = TYPE_VECTOR_SUBPARTS (type_out);
5965 in_mode = TYPE_MODE (TREE_TYPE (type_in));
5966 in_n = TYPE_VECTOR_SUBPARTS (type_in);
5967
5968 enum rs6000_builtins fn
5969 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
5970 switch (fn)
5971 {
5972 case RS6000_BUILTIN_RSQRTF:
5973 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5974 && out_mode == SFmode && out_n == 4
5975 && in_mode == SFmode && in_n == 4)
5976 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
5977 break;
5978 case RS6000_BUILTIN_RSQRT:
5979 if (VECTOR_UNIT_VSX_P (V2DFmode)
5980 && out_mode == DFmode && out_n == 2
5981 && in_mode == DFmode && in_n == 2)
5982 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
5983 break;
5984 case RS6000_BUILTIN_RECIPF:
5985 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
5986 && out_mode == SFmode && out_n == 4
5987 && in_mode == SFmode && in_n == 4)
5988 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
5989 break;
5990 case RS6000_BUILTIN_RECIP:
5991 if (VECTOR_UNIT_VSX_P (V2DFmode)
5992 && out_mode == DFmode && out_n == 2
5993 && in_mode == DFmode && in_n == 2)
5994 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
5995 break;
5996 default:
5997 break;
5998 }
5999 return NULL_TREE;
6000 }
6001 \f
6002 /* Default CPU string for rs6000*_file_start functions. */
6003 static const char *rs6000_default_cpu;
6004
6005 /* Do anything needed at the start of the asm file. */
6006
6007 static void
6008 rs6000_file_start (void)
6009 {
6010 char buffer[80];
6011 const char *start = buffer;
6012 FILE *file = asm_out_file;
6013
6014 rs6000_default_cpu = TARGET_CPU_DEFAULT;
6015
6016 default_file_start ();
6017
6018 if (flag_verbose_asm)
6019 {
6020 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
6021
6022 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
6023 {
6024 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
6025 start = "";
6026 }
6027
6028 if (global_options_set.x_rs6000_cpu_index)
6029 {
6030 fprintf (file, "%s -mcpu=%s", start,
6031 processor_target_table[rs6000_cpu_index].name);
6032 start = "";
6033 }
6034
6035 if (global_options_set.x_rs6000_tune_index)
6036 {
6037 fprintf (file, "%s -mtune=%s", start,
6038 processor_target_table[rs6000_tune_index].name);
6039 start = "";
6040 }
6041
6042 if (PPC405_ERRATUM77)
6043 {
6044 fprintf (file, "%s PPC405CR_ERRATUM77", start);
6045 start = "";
6046 }
6047
6048 #ifdef USING_ELFOS_H
6049 switch (rs6000_sdata)
6050 {
6051 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
6052 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
6053 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
6054 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
6055 }
6056
6057 if (rs6000_sdata && g_switch_value)
6058 {
6059 fprintf (file, "%s -G %d", start,
6060 g_switch_value);
6061 start = "";
6062 }
6063 #endif
6064
6065 if (*start == '\0')
6066 putc ('\n', file);
6067 }
6068
6069 #ifdef USING_ELFOS_H
6070 if (!(rs6000_default_cpu && rs6000_default_cpu[0])
6071 && !global_options_set.x_rs6000_cpu_index)
6072 {
6073 fputs ("\t.machine ", asm_out_file);
6074 if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
6075 fputs ("power9\n", asm_out_file);
6076 else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
6077 fputs ("power8\n", asm_out_file);
6078 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
6079 fputs ("power7\n", asm_out_file);
6080 else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
6081 fputs ("power6\n", asm_out_file);
6082 else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
6083 fputs ("power5\n", asm_out_file);
6084 else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
6085 fputs ("power4\n", asm_out_file);
6086 else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
6087 fputs ("ppc64\n", asm_out_file);
6088 else
6089 fputs ("ppc\n", asm_out_file);
6090 }
6091 #endif
6092
6093 if (DEFAULT_ABI == ABI_ELFv2)
6094 fprintf (file, "\t.abiversion 2\n");
6095 }
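
/* Sample effect on an ELF target: with -fverbose-asm and -mcpu=power7 the
   preamble gains a "# rs6000/powerpc options: -mcpu=power7" comment.  The
   ".machine" directive above is only emitted when neither a configured
   default cpu nor an explicit -mcpu is in effect; in that case the ISA flag
   bits pick the level, e.g. OPTION_MASK_POPCNTD selects "power7". */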
6096
6097 \f
6098 /* Return nonzero if this function is known to have a null epilogue. */
6099
6100 int
6101 direct_return (void)
6102 {
6103 if (reload_completed)
6104 {
6105 rs6000_stack_t *info = rs6000_stack_info ();
6106
6107 if (info->first_gp_reg_save == 32
6108 && info->first_fp_reg_save == 64
6109 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
6110 && ! info->lr_save_p
6111 && ! info->cr_save_p
6112 && info->vrsave_size == 0
6113 && ! info->push_p)
6114 return 1;
6115 }
6116
6117 return 0;
6118 }
6119
6120 /* Return the number of instructions it takes to form a constant in an
6121 integer register. */
6122
6123 int
6124 num_insns_constant_wide (HOST_WIDE_INT value)
6125 {
6126 /* signed constant loadable with addi */
6127 if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
6128 return 1;
6129
6130 /* constant loadable with addis */
6131 else if ((value & 0xffff) == 0
6132 && (value >> 31 == -1 || value >> 31 == 0))
6133 return 1;
6134
6135 else if (TARGET_POWERPC64)
6136 {
6137 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
6138 HOST_WIDE_INT high = value >> 31;
6139
6140 if (high == 0 || high == -1)
6141 return 2;
6142
6143 high >>= 1;
6144
6145 if (low == 0)
6146 return num_insns_constant_wide (high) + 1;
6147 else if (high == 0)
6148 return num_insns_constant_wide (low) + 1;
6149 else
6150 return (num_insns_constant_wide (high)
6151 + num_insns_constant_wide (low) + 1);
6152 }
6153
6154 else
6155 return 2;
6156 }
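
/* Worked examples: 0x7fff fits in a signed 16-bit immediate, so a single
   li suffices.  0x12345678 takes two instructions, lis 0x1234 followed by
   ori 0x5678.  On a 64-bit target, 0x100000000 has a zero low half, so it
   costs num_insns_constant_wide (high) + 1, i.e. an li plus one shift. */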
6157
6158 int
6159 num_insns_constant (rtx op, machine_mode mode)
6160 {
6161 HOST_WIDE_INT low, high;
6162
6163 switch (GET_CODE (op))
6164 {
6165 case CONST_INT:
6166 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
6167 && rs6000_is_valid_and_mask (op, mode))
6168 return 2;
6169 else
6170 return num_insns_constant_wide (INTVAL (op));
6171
6172 case CONST_WIDE_INT:
6173 {
6174 int i;
6175 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
6176 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
6177 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
6178 return ins;
6179 }
6180
6181 case CONST_DOUBLE:
6182 if (mode == SFmode || mode == SDmode)
6183 {
6184 long l;
6185
6186 if (DECIMAL_FLOAT_MODE_P (mode))
6187 REAL_VALUE_TO_TARGET_DECIMAL32
6188 (*CONST_DOUBLE_REAL_VALUE (op), l);
6189 else
6190 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6191 return num_insns_constant_wide ((HOST_WIDE_INT) l);
6192 }
6193
6194 long l[2];
6195 if (DECIMAL_FLOAT_MODE_P (mode))
6196 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
6197 else
6198 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
6199 high = l[WORDS_BIG_ENDIAN == 0];
6200 low = l[WORDS_BIG_ENDIAN != 0];
6201
6202 if (TARGET_32BIT)
6203 return (num_insns_constant_wide (low)
6204 + num_insns_constant_wide (high));
6205 else
6206 {
6207 if ((high == 0 && low >= 0)
6208 || (high == -1 && low < 0))
6209 return num_insns_constant_wide (low);
6210
6211 else if (rs6000_is_valid_and_mask (op, mode))
6212 return 2;
6213
6214 else if (low == 0)
6215 return num_insns_constant_wide (high) + 1;
6216
6217 else
6218 return (num_insns_constant_wide (high)
6219 + num_insns_constant_wide (low) + 1);
6220 }
6221
6222 default:
6223 gcc_unreachable ();
6224 }
6225 }
6226
6227 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
6228 If the mode of OP is MODE_VECTOR_INT, this simply returns the
6229 corresponding element of the vector, but for V4SFmode and V2SFmode,
6230 the corresponding "float" is interpreted as an SImode integer. */
6231
6232 HOST_WIDE_INT
6233 const_vector_elt_as_int (rtx op, unsigned int elt)
6234 {
6235 rtx tmp;
6236
6237 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
6238 gcc_assert (GET_MODE (op) != V2DImode
6239 && GET_MODE (op) != V2DFmode);
6240
6241 tmp = CONST_VECTOR_ELT (op, elt);
6242 if (GET_MODE (op) == V4SFmode
6243 || GET_MODE (op) == V2SFmode)
6244 tmp = gen_lowpart (SImode, tmp);
6245 return INTVAL (tmp);
6246 }
6247
6248 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
6249 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
6250 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
6251 all items are set to the same value and contain COPIES replicas of the
6252 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
6253 operand and the others are set to the value of the operand's msb. */
6254
6255 static bool
6256 vspltis_constant (rtx op, unsigned step, unsigned copies)
6257 {
6258 machine_mode mode = GET_MODE (op);
6259 machine_mode inner = GET_MODE_INNER (mode);
6260
6261 unsigned i;
6262 unsigned nunits;
6263 unsigned bitsize;
6264 unsigned mask;
6265
6266 HOST_WIDE_INT val;
6267 HOST_WIDE_INT splat_val;
6268 HOST_WIDE_INT msb_val;
6269
6270 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
6271 return false;
6272
6273 nunits = GET_MODE_NUNITS (mode);
6274 bitsize = GET_MODE_BITSIZE (inner);
6275 mask = GET_MODE_MASK (inner);
6276
6277 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6278 splat_val = val;
6279 msb_val = val >= 0 ? 0 : -1;
6280
6281 /* Construct the value to be splatted, if possible. If not, return 0. */
6282 for (i = 2; i <= copies; i *= 2)
6283 {
6284 HOST_WIDE_INT small_val;
6285 bitsize /= 2;
6286 small_val = splat_val >> bitsize;
6287 mask >>= bitsize;
6288 if (splat_val != ((HOST_WIDE_INT)
6289 ((unsigned HOST_WIDE_INT) small_val << bitsize)
6290 | (small_val & mask)))
6291 return false;
6292 splat_val = small_val;
6293 }
6294
6295 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
6296 if (EASY_VECTOR_15 (splat_val))
6297 ;
6298
6299 /* Also check if we can splat, and then add the result to itself. Do so if
6300 the value is positive, or if the splat instruction is using OP's mode;
6301 for splat_val < 0, the splat and the add should use the same mode. */
6302 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
6303 && (splat_val >= 0 || (step == 1 && copies == 1)))
6304 ;
6305
6306 /* Also check if we are loading up the most significant bit, which can be done by
6307 loading up -1 and shifting the value left by -1. */
6308 else if (EASY_VECTOR_MSB (splat_val, inner))
6309 ;
6310
6311 else
6312 return false;
6313
6314 /* Check if VAL is present in every STEP-th element, and the
6315 other elements are filled with its most significant bit. */
6316 for (i = 1; i < nunits; ++i)
6317 {
6318 HOST_WIDE_INT desired_val;
6319 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
6320 if ((i & (step - 1)) == 0)
6321 desired_val = val;
6322 else
6323 desired_val = msb_val;
6324
6325 if (desired_val != const_vector_elt_as_int (op, elt))
6326 return false;
6327 }
6328
6329 return true;
6330 }
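
/* Examples: a V8HImode vector with every element 5 matches with step 1,
   copies 1 and is a plain "vspltish 5".  A V4SImode vector whose words are
   all 0x00030003 matches with copies 2, since splatting the halfword 3
   ("vspltish 3") reproduces each word.  A big-endian V8HImode vector
   {0,7,0,7,0,7,0,7} matches with step 2: it is really the V4SImode splat
   "vspltisw 7". */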
6331
6332 /* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
6333 instruction, filling in the bottom elements with 0 or -1.
6334
6335 Return 0 if the constant cannot be generated with VSLDOI. Return positive
6336 for the number of zeroes to shift in, or negative for the number of 0xff
6337 bytes to shift in.
6338
6339 OP is a CONST_VECTOR. */
6340
6341 int
6342 vspltis_shifted (rtx op)
6343 {
6344 machine_mode mode = GET_MODE (op);
6345 machine_mode inner = GET_MODE_INNER (mode);
6346
6347 unsigned i, j;
6348 unsigned nunits;
6349 unsigned mask;
6350
6351 HOST_WIDE_INT val;
6352
6353 if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
6354 return 0;
6355
6356 /* We need to create pseudo registers to do the shift, so don't recognize
6357 shift vector constants after reload. */
6358 if (!can_create_pseudo_p ())
6359 return 0;
6360
6361 nunits = GET_MODE_NUNITS (mode);
6362 mask = GET_MODE_MASK (inner);
6363
6364 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);
6365
6366 /* Check if the value can really be the operand of a vspltis[bhw]. */
6367 if (EASY_VECTOR_15 (val))
6368 ;
6369
6370 /* Also check if we are loading up the most significant bit which can be done
6371 by loading up -1 and shifting the value left by -1. */
6372 else if (EASY_VECTOR_MSB (val, inner))
6373 ;
6374
6375 else
6376 return 0;
6377
6378 /* Check if VAL is present in every element until we find elements
6379 that are 0 or all 1 bits. */
6380 for (i = 1; i < nunits; ++i)
6381 {
6382 unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
6383 HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);
6384
6385 /* If the value isn't the splat value, check for the remaining elements
6386 being 0/-1. */
6387 if (val != elt_val)
6388 {
6389 if (elt_val == 0)
6390 {
6391 for (j = i+1; j < nunits; ++j)
6392 {
6393 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6394 if (const_vector_elt_as_int (op, elt2) != 0)
6395 return 0;
6396 }
6397
6398 return (nunits - i) * GET_MODE_SIZE (inner);
6399 }
6400
6401 else if ((elt_val & mask) == mask)
6402 {
6403 for (j = i+1; j < nunits; ++j)
6404 {
6405 unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
6406 if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
6407 return 0;
6408 }
6409
6410 return -((nunits - i) * GET_MODE_SIZE (inner));
6411 }
6412
6413 else
6414 return 0;
6415 }
6416 }
6417
6418 /* If all elements are equal, we don't need to do VSLDOI. */
6419 return 0;
6420 }
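
/* Examples (big-endian): the V4SImode vector {12, 0, 0, 0} splats 12 and
   returns (4 - 1) * 4 = 12, i.e. twelve zero bytes shifted in, while
   {-16, -1, -1, -1} returns -12, i.e. twelve 0xff bytes shifted in. */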
6421
6422
6423 /* Return true if OP is of the given MODE and can be synthesized
6424 with a vspltisb, vspltish or vspltisw. */
6425
6426 bool
6427 easy_altivec_constant (rtx op, machine_mode mode)
6428 {
6429 unsigned step, copies;
6430
6431 if (mode == VOIDmode)
6432 mode = GET_MODE (op);
6433 else if (mode != GET_MODE (op))
6434 return false;
6435
6436 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
6437 constants. */
6438 if (mode == V2DFmode)
6439 return zero_constant (op, mode);
6440
6441 else if (mode == V2DImode)
6442 {
6443 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
6444 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
6445 return false;
6446
6447 if (zero_constant (op, mode))
6448 return true;
6449
6450 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
6451 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
6452 return true;
6453
6454 return false;
6455 }
6456
6457 /* V1TImode is a special container for TImode. Ignore for now. */
6458 else if (mode == V1TImode)
6459 return false;
6460
6461 /* Start with a vspltisw. */
6462 step = GET_MODE_NUNITS (mode) / 4;
6463 copies = 1;
6464
6465 if (vspltis_constant (op, step, copies))
6466 return true;
6467
6468 /* Then try with a vspltish. */
6469 if (step == 1)
6470 copies <<= 1;
6471 else
6472 step >>= 1;
6473
6474 if (vspltis_constant (op, step, copies))
6475 return true;
6476
6477 /* And finally a vspltisb. */
6478 if (step == 1)
6479 copies <<= 1;
6480 else
6481 step >>= 1;
6482
6483 if (vspltis_constant (op, step, copies))
6484 return true;
6485
6486 if (vspltis_shifted (op) != 0)
6487 return true;
6488
6489 return false;
6490 }
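
/* The sequence above tries the three splat widths from widest to narrowest:
   for V16QImode that is (step 4, copies 1) for vspltisw, then (step 2,
   copies 1) for vspltish, then (step 1, copies 1) for vspltisb, and finally
   the VSLDOI-shifted form. */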
6491
6492 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
6493 result is OP. Abort if it is not possible. */
6494
6495 rtx
6496 gen_easy_altivec_constant (rtx op)
6497 {
6498 machine_mode mode = GET_MODE (op);
6499 int nunits = GET_MODE_NUNITS (mode);
6500 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
6501 unsigned step = nunits / 4;
6502 unsigned copies = 1;
6503
6504 /* Start with a vspltisw. */
6505 if (vspltis_constant (op, step, copies))
6506 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
6507
6508 /* Then try with a vspltish. */
6509 if (step == 1)
6510 copies <<= 1;
6511 else
6512 step >>= 1;
6513
6514 if (vspltis_constant (op, step, copies))
6515 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
6516
6517 /* And finally a vspltisb. */
6518 if (step == 1)
6519 copies <<= 1;
6520 else
6521 step >>= 1;
6522
6523 if (vspltis_constant (op, step, copies))
6524 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
6525
6526 gcc_unreachable ();
6527 }
6528
6529 /* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
6530 instructions (xxspltib, vupkhsb/vextsb2w/vextsb2d).
6531
6532 Return the number of instructions needed (1 or 2) via the address
6533 pointed to by NUM_INSNS_PTR.
6534
6535 Return the constant byte that is being splatted via CONSTANT_PTR. */
6536
6537 bool
6538 xxspltib_constant_p (rtx op,
6539 machine_mode mode,
6540 int *num_insns_ptr,
6541 int *constant_ptr)
6542 {
6543 size_t nunits = GET_MODE_NUNITS (mode);
6544 size_t i;
6545 HOST_WIDE_INT value;
6546 rtx element;
6547
6548 /* Set the returned values to out-of-bound values. */
6549 *num_insns_ptr = -1;
6550 *constant_ptr = 256;
6551
6552 if (!TARGET_P9_VECTOR)
6553 return false;
6554
6555 if (mode == VOIDmode)
6556 mode = GET_MODE (op);
6557
6558 else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
6559 return false;
6560
6561 /* Handle (vec_duplicate <constant>). */
6562 if (GET_CODE (op) == VEC_DUPLICATE)
6563 {
6564 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6565 && mode != V2DImode)
6566 return false;
6567
6568 element = XEXP (op, 0);
6569 if (!CONST_INT_P (element))
6570 return false;
6571
6572 value = INTVAL (element);
6573 if (!IN_RANGE (value, -128, 127))
6574 return false;
6575 }
6576
6577 /* Handle (const_vector [...]). */
6578 else if (GET_CODE (op) == CONST_VECTOR)
6579 {
6580 if (mode != V16QImode && mode != V8HImode && mode != V4SImode
6581 && mode != V2DImode)
6582 return false;
6583
6584 element = CONST_VECTOR_ELT (op, 0);
6585 if (!CONST_INT_P (element))
6586 return false;
6587
6588 value = INTVAL (element);
6589 if (!IN_RANGE (value, -128, 127))
6590 return false;
6591
6592 for (i = 1; i < nunits; i++)
6593 {
6594 element = CONST_VECTOR_ELT (op, i);
6595 if (!CONST_INT_P (element))
6596 return false;
6597
6598 if (value != INTVAL (element))
6599 return false;
6600 }
6601 }
6602
6603 /* Handle integer constants being loaded into the upper part of the VSX
6604 register as a scalar. If the value isn't 0/-1, only allow it if the mode
6605 can go in Altivec registers.  Prefer VSPLTISW/VUPKHSW over XXSPLTIB. */
6606 else if (CONST_INT_P (op))
6607 {
6608 if (!SCALAR_INT_MODE_P (mode))
6609 return false;
6610
6611 value = INTVAL (op);
6612 if (!IN_RANGE (value, -128, 127))
6613 return false;
6614
6615 if (!IN_RANGE (value, -1, 0))
6616 {
6617 if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
6618 return false;
6619
6620 if (EASY_VECTOR_15 (value))
6621 return false;
6622 }
6623 }
6624
6625 else
6626 return false;
6627
6628 /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
6629 sign extend. Special case 0/-1 to allow getting any VSX register instead
6630 of an Altivec register. */
6631 if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
6632 && EASY_VECTOR_15 (value))
6633 return false;
6634
6635 /* Return # of instructions and the constant byte for XXSPLTIB. */
6636 if (mode == V16QImode)
6637 *num_insns_ptr = 1;
6638
6639 else if (IN_RANGE (value, -1, 0))
6640 *num_insns_ptr = 1;
6641
6642 else
6643 *num_insns_ptr = 2;
6644
6645 *constant_ptr = (int) value;
6646 return true;
6647 }
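
/* Examples: a V16QImode splat of 0x40 needs one instruction (xxspltib).
   A V4SImode splat of 0x40 needs two (xxspltib plus a vextsb2w sign
   extension), while a V4SImode splat of 5 is rejected above because a
   single vspltisw is preferable. */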
6648
6649 const char *
6650 output_vec_const_move (rtx *operands)
6651 {
6652 int cst, cst2, shift;
6653 machine_mode mode;
6654 rtx dest, vec;
6655
6656 dest = operands[0];
6657 vec = operands[1];
6658 mode = GET_MODE (dest);
6659
6660 if (TARGET_VSX)
6661 {
6662 bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
6663 int xxspltib_value = 256;
6664 int num_insns = -1;
6665
6666 if (zero_constant (vec, mode))
6667 {
6668 if (TARGET_P9_VECTOR)
6669 return "xxspltib %x0,0";
6670
6671 else if (dest_vmx_p)
6672 return "vspltisw %0,0";
6673
6674 else
6675 return "xxlxor %x0,%x0,%x0";
6676 }
6677
6678 if (all_ones_constant (vec, mode))
6679 {
6680 if (TARGET_P9_VECTOR)
6681 return "xxspltib %x0,255";
6682
6683 else if (dest_vmx_p)
6684 return "vspltisw %0,-1";
6685
6686 else if (TARGET_P8_VECTOR)
6687 return "xxlorc %x0,%x0,%x0";
6688
6689 else
6690 gcc_unreachable ();
6691 }
6692
6693 if (TARGET_P9_VECTOR
6694 && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
6695 {
6696 if (num_insns == 1)
6697 {
6698 operands[2] = GEN_INT (xxspltib_value & 0xff);
6699 return "xxspltib %x0,%2";
6700 }
6701
6702 return "#";
6703 }
6704 }
6705
6706 if (TARGET_ALTIVEC)
6707 {
6708 rtx splat_vec;
6709
6710 gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
6711 if (zero_constant (vec, mode))
6712 return "vspltisw %0,0";
6713
6714 if (all_ones_constant (vec, mode))
6715 return "vspltisw %0,-1";
6716
6717 /* Do we need to construct a value using VSLDOI? */
6718 shift = vspltis_shifted (vec);
6719 if (shift != 0)
6720 return "#";
6721
6722 splat_vec = gen_easy_altivec_constant (vec);
6723 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
6724 operands[1] = XEXP (splat_vec, 0);
6725 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
6726 return "#";
6727
6728 switch (GET_MODE (splat_vec))
6729 {
6730 case V4SImode:
6731 return "vspltisw %0,%1";
6732
6733 case V8HImode:
6734 return "vspltish %0,%1";
6735
6736 case V16QImode:
6737 return "vspltisb %0,%1";
6738
6739 default:
6740 gcc_unreachable ();
6741 }
6742 }
6743
6744 gcc_assert (TARGET_SPE);
6745
6746 /* Vector constant 0 is handled as a splitter of V2SI, and in the
6747 pattern of V1DI, V4HI, and V2SF.
6748
6749 FIXME: We should probably return # and add post reload
6750 splitters for these, but this way is so easy ;-). */
6751 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
6752 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
6753 operands[1] = CONST_VECTOR_ELT (vec, 0);
6754 operands[2] = CONST_VECTOR_ELT (vec, 1);
6755 if (cst == cst2)
6756 return "li %0,%1\n\tevmergelo %0,%0,%0";
6757 else if (WORDS_BIG_ENDIAN)
6758 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
6759 else
6760 return "li %0,%2\n\tevmergelo %0,%0,%0\n\tli %0,%1";
6761 }
6762
6763 /* Initialize the PAIRED vector TARGET to VALS. */
6764
6765 void
6766 paired_expand_vector_init (rtx target, rtx vals)
6767 {
6768 machine_mode mode = GET_MODE (target);
6769 int n_elts = GET_MODE_NUNITS (mode);
6770 int n_var = 0;
6771 rtx x, new_rtx, tmp, constant_op, op1, op2;
6772 int i;
6773
6774 for (i = 0; i < n_elts; ++i)
6775 {
6776 x = XVECEXP (vals, 0, i);
6777 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6778 ++n_var;
6779 }
6780 if (n_var == 0)
6781 {
6782 /* Load from constant pool. */
6783 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
6784 return;
6785 }
6786
6787 if (n_var == 2)
6788 {
6789 /* The vector is initialized only with non-constants. */
6790 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
6791 XVECEXP (vals, 0, 1));
6792
6793 emit_move_insn (target, new_rtx);
6794 return;
6795 }
6796
6797 /* One field is non-constant and the other one is a constant. Load the
6798 constant from the constant pool and use ps_merge instruction to
6799 construct the whole vector. */
6800 op1 = XVECEXP (vals, 0, 0);
6801 op2 = XVECEXP (vals, 0, 1);
6802
6803 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
6804
6805 tmp = gen_reg_rtx (GET_MODE (constant_op));
6806 emit_move_insn (tmp, constant_op);
6807
6808 if (CONSTANT_P (op1))
6809 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
6810 else
6811 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
6812
6813 emit_move_insn (target, new_rtx);
6814 }
6815
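/* Emit a move for a PAIRED vector; OPERANDS[0] is the destination and
   OPERANDS[1] the source.  */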
6816 void
6817 paired_expand_vector_move (rtx operands[])
6818 {
6819 rtx op0 = operands[0], op1 = operands[1];
6820
6821 emit_move_insn (op0, op1);
6822 }
6823
6824 /* Emit vector compare for code RCODE. DEST is the destination, OP1 and
6825 OP2 are the two VEC_COND_EXPR operands, and CC_OP0 and CC_OP1 are the
6826 two operands of the comparison RCODE. This is a recursive
6827 function. */
6828
6829 static void
6830 paired_emit_vector_compare (enum rtx_code rcode,
6831 rtx dest, rtx op0, rtx op1,
6832 rtx cc_op0, rtx cc_op1)
6833 {
6834 rtx tmp = gen_reg_rtx (V2SFmode);
6835 rtx tmp1, max, min;
6836
6837 gcc_assert (TARGET_PAIRED_FLOAT);
6838 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
6839
6840 switch (rcode)
6841 {
6842 case LT:
6843 case LTU:
6844 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6845 return;
6846 case GE:
6847 case GEU:
6848 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6849 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
6850 return;
6851 case LE:
6852 case LEU:
6853 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
6854 return;
6855 case GT:
6856 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6857 return;
6858 case EQ:
6859 tmp1 = gen_reg_rtx (V2SFmode);
6860 max = gen_reg_rtx (V2SFmode);
6861 min = gen_reg_rtx (V2SFmode);
6863
6864 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
6865 emit_insn (gen_selv2sf4
6866 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6867 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
6868 emit_insn (gen_selv2sf4
6869 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
6870 emit_insn (gen_subv2sf3 (tmp1, min, max));
6871 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
6872 return;
6873 case NE:
6874 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
6875 return;
6876 case UNLE:
6877 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
6878 return;
6879 case UNLT:
6880 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
6881 return;
6882 case UNGE:
6883 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
6884 return;
6885 case UNGT:
6886 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
6887 return;
6888 default:
6889 gcc_unreachable ();
6890 }
6891
6892 return;
6893 }
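
/* As an example of the recursion above: LT is not emitted directly; the
   code recurses as GE with the two select operands swapped, so that the
   value picked when CC_OP0 >= CC_OP1 is the one LT would reject, and
   vice versa.  */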
6894
6895 /* Emit vector conditional expression.
6896 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
6897 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
6898
6899 int
6900 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
6901 rtx cond, rtx cc_op0, rtx cc_op1)
6902 {
6903 enum rtx_code rcode = GET_CODE (cond);
6904
6905 if (!TARGET_PAIRED_FLOAT)
6906 return 0;
6907
6908 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
6909
6910 return 1;
6911 }
6912
6913 /* Initialize vector TARGET to VALS. */
6914
6915 void
6916 rs6000_expand_vector_init (rtx target, rtx vals)
6917 {
6918 machine_mode mode = GET_MODE (target);
6919 machine_mode inner_mode = GET_MODE_INNER (mode);
6920 int n_elts = GET_MODE_NUNITS (mode);
6921 int n_var = 0, one_var = -1;
6922 bool all_same = true, all_const_zero = true;
6923 rtx x, mem;
6924 int i;
6925
6926 for (i = 0; i < n_elts; ++i)
6927 {
6928 x = XVECEXP (vals, 0, i);
6929 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
6930 ++n_var, one_var = i;
6931 else if (x != CONST0_RTX (inner_mode))
6932 all_const_zero = false;
6933
6934 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
6935 all_same = false;
6936 }
6937
6938 if (n_var == 0)
6939 {
6940 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
6941 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
6942 if ((int_vector_p || TARGET_VSX) && all_const_zero)
6943 {
6944 /* Zero register. */
6945 emit_move_insn (target, CONST0_RTX (mode));
6946 return;
6947 }
6948 else if (int_vector_p && easy_vector_constant (const_vec, mode))
6949 {
6950 /* Splat immediate. */
6951 emit_insn (gen_rtx_SET (target, const_vec));
6952 return;
6953 }
6954 else
6955 {
6956 /* Load from constant pool. */
6957 emit_move_insn (target, const_vec);
6958 return;
6959 }
6960 }
6961
6962 /* Double word values on VSX can use xxpermdi or lxvdsx. */
6963 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
6964 {
6965 rtx op[2];
6966 size_t i;
6967 size_t num_elements = all_same ? 1 : 2;
6968 for (i = 0; i < num_elements; i++)
6969 {
6970 op[i] = XVECEXP (vals, 0, i);
6971 /* Just in case there is a SUBREG with a smaller mode, do a
6972 conversion. */
6973 if (GET_MODE (op[i]) != inner_mode)
6974 {
6975 rtx tmp = gen_reg_rtx (inner_mode);
6976 convert_move (tmp, op[i], 0);
6977 op[i] = tmp;
6978 }
6979 /* Allow load with splat double word. */
6980 else if (MEM_P (op[i]))
6981 {
6982 if (!all_same)
6983 op[i] = force_reg (inner_mode, op[i]);
6984 }
6985 else if (!REG_P (op[i]))
6986 op[i] = force_reg (inner_mode, op[i]);
6987 }
6988
6989 if (all_same)
6990 {
6991 if (mode == V2DFmode)
6992 emit_insn (gen_vsx_splat_v2df (target, op[0]));
6993 else
6994 emit_insn (gen_vsx_splat_v2di (target, op[0]));
6995 }
6996 else
6997 {
6998 if (mode == V2DFmode)
6999 emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
7000 else
7001 emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
7002 }
7003 return;
7004 }
7005
7006 /* Special case initializing vector int if we are on 64-bit systems with
7007 direct move, using the ISA 3.0 instructions when available. */
7008 if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
7009 && TARGET_DIRECT_MOVE_64BIT)
7010 {
7011 if (all_same)
7012 {
7013 rtx element0 = XVECEXP (vals, 0, 0);
7014 if (MEM_P (element0))
7015 element0 = rs6000_address_for_fpconvert (element0);
7016 else
7017 element0 = force_reg (SImode, element0);
7018
7019 if (TARGET_P9_VECTOR)
7020 emit_insn (gen_vsx_splat_v4si (target, element0));
7021 else
7022 {
7023 rtx tmp = gen_reg_rtx (DImode);
7024 emit_insn (gen_zero_extendsidi2 (tmp, element0));
7025 emit_insn (gen_vsx_splat_v4si_di (target, tmp));
7026 }
7027 return;
7028 }
7029 else
7030 {
7031 rtx elements[4];
7032 size_t i;
7033
7034 for (i = 0; i < 4; i++)
7035 {
7036 elements[i] = XVECEXP (vals, 0, i);
7037 if (!CONST_INT_P (elements[i]) && !REG_P (elements[i]))
7038 elements[i] = copy_to_mode_reg (SImode, elements[i]);
7039 }
7040
7041 emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
7042 elements[2], elements[3]));
7043 return;
7044 }
7045 }
7046
7047 /* With single precision floating point on VSX, single precision is
7048 internally represented as a double. Either make two V2DF vectors and
7049 convert those vectors to single precision, or do one conversion and
7050 splat the result to the other elements. */
7051 if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
7052 {
7053 if (all_same)
7054 {
7055 rtx element0 = XVECEXP (vals, 0, 0);
7056
7057 if (TARGET_P9_VECTOR)
7058 {
7059 if (MEM_P (element0))
7060 element0 = rs6000_address_for_fpconvert (element0);
7061
7062 emit_insn (gen_vsx_splat_v4sf (target, element0));
7063 }
7064
7065 else
7066 {
7067 rtx freg = gen_reg_rtx (V4SFmode);
7068 rtx sreg = force_reg (SFmode, element0);
7069 rtx cvt = (TARGET_XSCVDPSPN
7070 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
7071 : gen_vsx_xscvdpsp_scalar (freg, sreg));
7072
7073 emit_insn (cvt);
7074 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
7075 const0_rtx));
7076 }
7077 }
7078 else
7079 {
7080 rtx dbl_even = gen_reg_rtx (V2DFmode);
7081 rtx dbl_odd = gen_reg_rtx (V2DFmode);
7082 rtx flt_even = gen_reg_rtx (V4SFmode);
7083 rtx flt_odd = gen_reg_rtx (V4SFmode);
7084 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
7085 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
7086 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
7087 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
7088
7089 /* Use VMRGEW if we can instead of doing a permute. */
7090 if (TARGET_P8_VECTOR)
7091 {
7092 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
7093 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
7094 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7095 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7096 if (BYTES_BIG_ENDIAN)
7097 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
7098 else
7099 emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
7100 }
7101 else
7102 {
7103 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
7104 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
7105 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
7106 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
7107 rs6000_expand_extract_even (target, flt_even, flt_odd);
7108 }
7109 }
7110 return;
7111 }
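
/* Sketch of the non-splat V4SF path above, for elements { a, b, c, d }
   (lanes shown in big-endian order): dbl_even = { a, c } and
   dbl_odd = { b, d } are converted to the single-precision vectors
   { a, ?, c, ? } and { b, ?, d, ? }, and the final vmrgew (or
   extract-even permute) interleaves them back into { a, b, c, d }.  */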
7112
7113 /* Special case initializing vector short/char that are splats if we are on
7114 64-bit systems with direct move. */
7115 if (all_same && TARGET_DIRECT_MOVE_64BIT
7116 && (mode == V16QImode || mode == V8HImode))
7117 {
7118 rtx op0 = XVECEXP (vals, 0, 0);
7119 rtx di_tmp = gen_reg_rtx (DImode);
7120
7121 if (!REG_P (op0))
7122 op0 = force_reg (GET_MODE_INNER (mode), op0);
7123
7124 if (mode == V16QImode)
7125 {
7126 emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
7127 emit_insn (gen_vsx_vspltb_di (target, di_tmp));
7128 return;
7129 }
7130
7131 if (mode == V8HImode)
7132 {
7133 emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
7134 emit_insn (gen_vsx_vsplth_di (target, di_tmp));
7135 return;
7136 }
7137 }
7138
7139 /* Store value to stack temp. Load vector element. Splat. However, splat
7140 of 64-bit items is not supported on Altivec. */
7141 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
7142 {
7143 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7144 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
7145 XVECEXP (vals, 0, 0));
7146 x = gen_rtx_UNSPEC (VOIDmode,
7147 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7148 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7149 gen_rtvec (2,
7150 gen_rtx_SET (target, mem),
7151 x)));
7152 x = gen_rtx_VEC_SELECT (inner_mode, target,
7153 gen_rtx_PARALLEL (VOIDmode,
7154 gen_rtvec (1, const0_rtx)));
7155 emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
7156 return;
7157 }
7158
7159 /* One field is non-constant. Load constant then overwrite
7160 varying field. */
7161 if (n_var == 1)
7162 {
7163 rtx copy = copy_rtx (vals);
7164
7165 /* Load constant part of vector, substitute neighboring value for
7166 varying element. */
7167 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
7168 rs6000_expand_vector_init (target, copy);
7169
7170 /* Insert variable. */
7171 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
7172 return;
7173 }
7174
7175 /* Construct the vector in memory one field at a time
7176 and load the whole vector. */
7177 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7178 for (i = 0; i < n_elts; i++)
7179 emit_move_insn (adjust_address_nv (mem, inner_mode,
7180 i * GET_MODE_SIZE (inner_mode)),
7181 XVECEXP (vals, 0, i));
7182 emit_move_insn (target, mem);
7183 }
7184
7185 /* Set field ELT of TARGET to VAL. */
7186
7187 void
7188 rs6000_expand_vector_set (rtx target, rtx val, int elt)
7189 {
7190 machine_mode mode = GET_MODE (target);
7191 machine_mode inner_mode = GET_MODE_INNER (mode);
7192 rtx reg = gen_reg_rtx (mode);
7193 rtx mask, mem, x;
7194 int width = GET_MODE_SIZE (inner_mode);
7195 int i;
7196
7197 val = force_reg (GET_MODE (val), val);
7198
7199 if (VECTOR_MEM_VSX_P (mode))
7200 {
7201 rtx insn = NULL_RTX;
7202 rtx elt_rtx = GEN_INT (elt);
7203
7204 if (mode == V2DFmode)
7205 insn = gen_vsx_set_v2df (target, target, val, elt_rtx);
7206
7207 else if (mode == V2DImode)
7208 insn = gen_vsx_set_v2di (target, target, val, elt_rtx);
7209
7210 else if (TARGET_P9_VECTOR && TARGET_VSX_SMALL_INTEGER
7211 && TARGET_UPPER_REGS_DI && TARGET_POWERPC64)
7212 {
7213 if (mode == V4SImode)
7214 insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
7215 else if (mode == V8HImode)
7216 insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
7217 else if (mode == V16QImode)
7218 insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
7219 }
7220
7221 if (insn)
7222 {
7223 emit_insn (insn);
7224 return;
7225 }
7226 }
7227
7228 /* Simplify setting single element vectors like V1TImode. */
7229 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
7230 {
7231 emit_move_insn (target, gen_lowpart (mode, val));
7232 return;
7233 }
7234
7235 /* Load single variable value. */
7236 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
7237 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
7238 x = gen_rtx_UNSPEC (VOIDmode,
7239 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
7240 emit_insn (gen_rtx_PARALLEL (VOIDmode,
7241 gen_rtvec (2,
7242 gen_rtx_SET (reg, mem),
7243 x)));
7244
7245 /* Linear sequence. */
7246 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
7247 for (i = 0; i < 16; ++i)
7248 XVECEXP (mask, 0, i) = GEN_INT (i);
7249
7250 /* Set permute mask to insert element into target. */
7251 for (i = 0; i < width; ++i)
7252 XVECEXP (mask, 0, elt*width + i)
7253 = GEN_INT (i + 0x10);
7254 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
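/* For example, inserting into element 2 of a V4SImode vector (width 4)
   builds the byte selector { 0,1,2,3, 4,5,6,7, 16,17,18,19, 12,13,14,15 }:
   in the big-endian formulation entries below 0x10 keep bytes of TARGET,
   while the 0x10-biased entries pull the inserted value from REG.  */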
7255
7256 if (BYTES_BIG_ENDIAN)
7257 x = gen_rtx_UNSPEC (mode,
7258 gen_rtvec (3, target, reg,
7259 force_reg (V16QImode, x)),
7260 UNSPEC_VPERM);
7261 else
7262 {
7263 if (TARGET_P9_VECTOR)
7264 x = gen_rtx_UNSPEC (mode,
7265 gen_rtvec (3, target, reg,
7266 force_reg (V16QImode, x)),
7267 UNSPEC_VPERMR);
7268 else
7269 {
7270 /* Invert selector. We prefer to generate VNAND on P8 so
7271 that future fusion opportunities can kick in, but must
7272 generate VNOR elsewhere. */
7273 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
7274 rtx iorx = (TARGET_P8_VECTOR
7275 ? gen_rtx_IOR (V16QImode, notx, notx)
7276 : gen_rtx_AND (V16QImode, notx, notx));
7277 rtx tmp = gen_reg_rtx (V16QImode);
7278 emit_insn (gen_rtx_SET (tmp, iorx));
7279
7280 /* Permute with operands reversed and adjusted selector. */
7281 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
7282 UNSPEC_VPERM);
7283 }
7284 }
7285
7286 emit_insn (gen_rtx_SET (target, x));
7287 }
7288
7289 /* Extract field ELT from VEC into TARGET. */
7290
7291 void
7292 rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
7293 {
7294 machine_mode mode = GET_MODE (vec);
7295 machine_mode inner_mode = GET_MODE_INNER (mode);
7296 rtx mem;
7297
7298 if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
7299 {
7300 switch (mode)
7301 {
7302 default:
7303 break;
7304 case V1TImode:
7305 gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
7306 emit_move_insn (target, gen_lowpart (TImode, vec));
7307 return;
7308 case V2DFmode:
7309 emit_insn (gen_vsx_extract_v2df (target, vec, elt));
7310 return;
7311 case V2DImode:
7312 emit_insn (gen_vsx_extract_v2di (target, vec, elt));
7313 return;
7314 case V4SFmode:
7315 emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
7316 return;
7317 case V16QImode:
7318 if (TARGET_DIRECT_MOVE_64BIT)
7319 {
7320 emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
7321 return;
7322 }
7323 else
7324 break;
7325 case V8HImode:
7326 if (TARGET_DIRECT_MOVE_64BIT)
7327 {
7328 emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
7329 return;
7330 }
7331 else
7332 break;
7333 case V4SImode:
7334 if (TARGET_DIRECT_MOVE_64BIT)
7335 {
7336 emit_insn (gen_vsx_extract_v4si (target, vec, elt));
7337 return;
7338 }
7339 break;
7340 }
7341 }
7342 else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
7343 && TARGET_DIRECT_MOVE_64BIT)
7344 {
7345 if (GET_MODE (elt) != DImode)
7346 {
7347 rtx tmp = gen_reg_rtx (DImode);
7348 convert_move (tmp, elt, 0);
7349 elt = tmp;
7350 }
7351 else if (!REG_P (elt))
7352 elt = force_reg (DImode, elt);
7353
7354 switch (mode)
7355 {
7356 case V2DFmode:
7357 emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
7358 return;
7359
7360 case V2DImode:
7361 emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
7362 return;
7363
7364 case V4SFmode:
7365 if (TARGET_UPPER_REGS_SF)
7366 {
7367 emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
7368 return;
7369 }
7370 break;
7371
7372 case V4SImode:
7373 emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
7374 return;
7375
7376 case V8HImode:
7377 emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
7378 return;
7379
7380 case V16QImode:
7381 emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
7382 return;
7383
7384 default:
7385 gcc_unreachable ();
7386 }
7387 }
7388
7389 gcc_assert (CONST_INT_P (elt));
7390
7391 /* Allocate mode-sized buffer. */
7392 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
7393
7394 emit_move_insn (mem, vec);
7395
7396 /* Add offset to field within buffer matching vector element. */
7397 mem = adjust_address_nv (mem, inner_mode,
7398 INTVAL (elt) * GET_MODE_SIZE (inner_mode));
7399
7400 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
7401 }
7402
7403 /* Helper function to return the register number of an RTX. */
7404 static inline int
7405 regno_or_subregno (rtx op)
7406 {
7407 if (REG_P (op))
7408 return REGNO (op);
7409 else if (SUBREG_P (op))
7410 return subreg_regno (op);
7411 else
7412 gcc_unreachable ();
7413 }
7414
7415 /* Adjust a memory address (MEM) of a vector type to point to a scalar field
7416 within the vector (ELEMENT) with a mode (SCALAR_MODE). Use a base register
7417 temporary (BASE_TMP) to fix up the address. Return the new memory address
7418 that is valid for reads or writes to a given register (SCALAR_REG). */
7419
7420 rtx
7421 rs6000_adjust_vec_address (rtx scalar_reg,
7422 rtx mem,
7423 rtx element,
7424 rtx base_tmp,
7425 machine_mode scalar_mode)
7426 {
7427 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7428 rtx addr = XEXP (mem, 0);
7429 rtx element_offset;
7430 rtx new_addr;
7431 bool valid_addr_p;
7432
7433 /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY. */
7434 gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);
7435
7436 /* Calculate what we need to add to the address to get the element
7437 address. */
7438 if (CONST_INT_P (element))
7439 element_offset = GEN_INT (INTVAL (element) * scalar_size);
7440 else
7441 {
7442 int byte_shift = exact_log2 (scalar_size);
7443 gcc_assert (byte_shift >= 0);
7444
7445 if (byte_shift == 0)
7446 element_offset = element;
7447
7448 else
7449 {
7450 if (TARGET_POWERPC64)
7451 emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
7452 else
7453 emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));
7454
7455 element_offset = base_tmp;
7456 }
7457 }
7458
7459 /* Create the new address pointing to the element within the vector. If we
7460 are adding 0, we don't have to change the address. */
7461 if (element_offset == const0_rtx)
7462 new_addr = addr;
7463
7464 /* A simple indirect address can be converted into a reg + offset
7465 address. */
7466 else if (REG_P (addr) || SUBREG_P (addr))
7467 new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);
7468
7469 /* Optimize D-FORM addresses that have a constant offset and a constant
7470 element, folding the element offset into the address directly. */
7471 else if (GET_CODE (addr) == PLUS)
7472 {
7473 rtx op0 = XEXP (addr, 0);
7474 rtx op1 = XEXP (addr, 1);
7475 rtx insn;
7476
7477 gcc_assert (REG_P (op0) || SUBREG_P (op0));
7478 if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
7479 {
7480 HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
7481 rtx offset_rtx = GEN_INT (offset);
7482
7483 if (IN_RANGE (offset, -32768, 32767)
7484 && (scalar_size < 8 || (offset & 0x3) == 0))
7485 new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
7486 else
7487 {
7488 emit_move_insn (base_tmp, offset_rtx);
7489 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7490 }
7491 }
7492 else
7493 {
7494 bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
7495 bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));
7496
7497 /* Note, ADDI requires the register being added to be a base
7498 register. If the register was R0, load it up into the temporary
7499 and do the add. */
7500 if (op1_reg_p
7501 && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
7502 {
7503 insn = gen_add3_insn (base_tmp, op1, element_offset);
7504 gcc_assert (insn != NULL_RTX);
7505 emit_insn (insn);
7506 }
7507
7508 else if (ele_reg_p
7509 && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
7510 {
7511 insn = gen_add3_insn (base_tmp, element_offset, op1);
7512 gcc_assert (insn != NULL_RTX);
7513 emit_insn (insn);
7514 }
7515
7516 else
7517 {
7518 emit_move_insn (base_tmp, op1);
7519 emit_insn (gen_add2_insn (base_tmp, element_offset));
7520 }
7521
7522 new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
7523 }
7524 }
7525
7526 else
7527 {
7528 emit_move_insn (base_tmp, addr);
7529 new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
7530 }
7531
7532 /* If we have a PLUS, we need to see whether the particular register class
7533 allows for D-FORM or X-FORM addressing. */
7534 if (GET_CODE (new_addr) == PLUS)
7535 {
7536 rtx op1 = XEXP (new_addr, 1);
7537 addr_mask_type addr_mask;
7538 int scalar_regno = regno_or_subregno (scalar_reg);
7539
7540 gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
7541 if (INT_REGNO_P (scalar_regno))
7542 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];
7543
7544 else if (FP_REGNO_P (scalar_regno))
7545 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];
7546
7547 else if (ALTIVEC_REGNO_P (scalar_regno))
7548 addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];
7549
7550 else
7551 gcc_unreachable ();
7552
7553 if (REG_P (op1) || SUBREG_P (op1))
7554 valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
7555 else
7556 valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
7557 }
7558
7559 else if (REG_P (new_addr) || SUBREG_P (new_addr))
7560 valid_addr_p = true;
7561
7562 else
7563 valid_addr_p = false;
7564
7565 if (!valid_addr_p)
7566 {
7567 emit_move_insn (base_tmp, new_addr);
7568 new_addr = base_tmp;
7569 }
7570
7571 return change_address (mem, scalar_mode, new_addr);
7572 }
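
/* Worked example (registers invented for illustration): a vector in
   memory at (plus r30 16), a constant ELEMENT of 3 and a 4-byte
   SCALAR_MODE fold to the single address (plus r30 28), since
   16 + 3*4 = 28 passes the signed 16-bit offset test above.  */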
7573
7574 /* Split a variable vec_extract operation into the component instructions. */
7575
7576 void
7577 rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
7578 rtx tmp_altivec)
7579 {
7580 machine_mode mode = GET_MODE (src);
7581 machine_mode scalar_mode = GET_MODE (dest);
7582 unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
7583 int byte_shift = exact_log2 (scalar_size);
7584
7585 gcc_assert (byte_shift >= 0);
7586
7587 /* If we are given a memory address, optimize to load just the element. We
7588 don't have to adjust the vector element number on little endian
7589 systems. */
7590 if (MEM_P (src))
7591 {
7592 gcc_assert (REG_P (tmp_gpr));
7593 emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
7594 tmp_gpr, scalar_mode));
7595 return;
7596 }
7597
7598 else if (REG_P (src) || SUBREG_P (src))
7599 {
7600 int bit_shift = byte_shift + 3;
7601 rtx element2;
7602 int dest_regno = regno_or_subregno (dest);
7603 int src_regno = regno_or_subregno (src);
7604 int element_regno = regno_or_subregno (element);
7605
7606 gcc_assert (REG_P (tmp_gpr));
7607
7608 /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
7609 a general purpose register. */
7610 if (TARGET_P9_VECTOR
7611 && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
7612 && INT_REGNO_P (dest_regno)
7613 && ALTIVEC_REGNO_P (src_regno)
7614 && INT_REGNO_P (element_regno))
7615 {
7616 rtx dest_si = gen_rtx_REG (SImode, dest_regno);
7617 rtx element_si = gen_rtx_REG (SImode, element_regno);
7618
7619 if (mode == V16QImode)
7620 emit_insn (VECTOR_ELT_ORDER_BIG
7621 ? gen_vextublx (dest_si, element_si, src)
7622 : gen_vextubrx (dest_si, element_si, src));
7623
7624 else if (mode == V8HImode)
7625 {
7626 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7627 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
7628 emit_insn (VECTOR_ELT_ORDER_BIG
7629 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
7630 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
7631 }
7632
7634 else
7635 {
7636 rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
7637 emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
7638 emit_insn (VECTOR_ELT_ORDER_BIG
7639 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
7640 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
7641 }
7642
7643 return;
7644 }
7645
7647 gcc_assert (REG_P (tmp_altivec));
7648
7649 /* For little endian, adjust element ordering. For V2DI/V2DF, we can use
7650 an XOR, otherwise we need to subtract. The shift amount is so VSLO
7651 will shift the element into the upper position (adding 3 to convert a
7652 byte shift into a bit shift). */
7653 if (scalar_size == 8)
7654 {
7655 if (!VECTOR_ELT_ORDER_BIG)
7656 {
7657 emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
7658 element2 = tmp_gpr;
7659 }
7660 else
7661 element2 = element;
7662
7663 /* Generate RLDIC directly to shift left 6 bits and retrieve 1
7664 bit. */
7665 emit_insn (gen_rtx_SET (tmp_gpr,
7666 gen_rtx_AND (DImode,
7667 gen_rtx_ASHIFT (DImode,
7668 element2,
7669 GEN_INT (6)),
7670 GEN_INT (64))));
7671 }
7672 else
7673 {
7674 if (!VECTOR_ELT_ORDER_BIG)
7675 {
7676 rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);
7677
7678 emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
7679 emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
7680 element2 = tmp_gpr;
7681 }
7682 else
7683 element2 = element;
7684
7685 emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
7686 }
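
/* E.g. for a V4SImode extract on little endian, a variable element N
   becomes the shift count (3 - (N & 3)) << 5 computed above; on big
   endian it is simply N << 5.  */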
7687
7688 /* Get the value into the lower byte of the Altivec register where VSLO
7689 expects it. */
7690 if (TARGET_P9_VECTOR)
7691 emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
7692 else if (can_create_pseudo_p ())
7693 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
7694 else
7695 {
7696 rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7697 emit_move_insn (tmp_di, tmp_gpr);
7698 emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
7699 }
7700
7701 /* Do the VSLO to get the value into the final location. */
7702 switch (mode)
7703 {
7704 case V2DFmode:
7705 emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
7706 return;
7707
7708 case V2DImode:
7709 emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
7710 return;
7711
7712 case V4SFmode:
7713 {
7714 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7715 rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
7716 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7717 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7718 tmp_altivec));
7719
7720 emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
7721 return;
7722 }
7723
7724 case V4SImode:
7725 case V8HImode:
7726 case V16QImode:
7727 {
7728 rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
7729 rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
7730 rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
7731 emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
7732 tmp_altivec));
7733 emit_move_insn (tmp_gpr_di, tmp_altivec_di);
7734 emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
7735 GEN_INT (64 - (8 * scalar_size))));
7736 return;
7737 }
7738
7739 default:
7740 gcc_unreachable ();
7741 }
7742
7743 return;
7744 }
7745 else
7746 gcc_unreachable ();
7747 }
7748
7749 /* Helper function for rs6000_split_v4si_init to build up a DImode value from
7750 two SImode values. */
7751
7752 static void
7753 rs6000_split_v4si_init_di_reg (rtx dest, rtx si1, rtx si2, rtx tmp)
7754 {
7755 const unsigned HOST_WIDE_INT mask_32bit = HOST_WIDE_INT_C (0xffffffff);
7756
7757 if (CONST_INT_P (si1) && CONST_INT_P (si2))
7758 {
7759 unsigned HOST_WIDE_INT const1 = (UINTVAL (si1) & mask_32bit) << 32;
7760 unsigned HOST_WIDE_INT const2 = UINTVAL (si2) & mask_32bit;
7761
7762 emit_move_insn (dest, GEN_INT (const1 | const2));
7763 return;
7764 }
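
/* For example, SI1 = 3 and SI2 = 4 combine into the single constant
   0x300000004, loaded with one move instead of a shift/mask/or
   sequence.  */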
7765
7766 /* Put si1 into upper 32-bits of dest. */
7767 if (CONST_INT_P (si1))
7768 emit_move_insn (dest, GEN_INT ((UINTVAL (si1) & mask_32bit) << 32));
7769 else
7770 {
7771 /* Generate RLDIC. */
7772 rtx si1_di = gen_rtx_REG (DImode, regno_or_subregno (si1));
7773 rtx shift_rtx = gen_rtx_ASHIFT (DImode, si1_di, GEN_INT (32));
7774 rtx mask_rtx = GEN_INT (mask_32bit << 32);
7775 rtx and_rtx = gen_rtx_AND (DImode, shift_rtx, mask_rtx);
7776 gcc_assert (!reg_overlap_mentioned_p (dest, si1));
7777 emit_insn (gen_rtx_SET (dest, and_rtx));
7778 }
7779
7780 /* Put si2 into the temporary. */
7781 gcc_assert (!reg_overlap_mentioned_p (dest, tmp));
7782 if (CONST_INT_P (si2))
7783 emit_move_insn (tmp, GEN_INT (UINTVAL (si2) & mask_32bit));
7784 else
7785 emit_insn (gen_zero_extendsidi2 (tmp, si2));
7786
7787 /* Combine the two parts. */
7788 emit_insn (gen_iordi3 (dest, dest, tmp));
7789 return;
7790 }
7791
7792 /* Split a V4SI initialization. */
7793
7794 void
7795 rs6000_split_v4si_init (rtx operands[])
7796 {
7797 rtx dest = operands[0];
7798
7799 /* Destination is a GPR, build up the two DImode parts in place. */
7800 if (REG_P (dest) || SUBREG_P (dest))
7801 {
7802 int d_regno = regno_or_subregno (dest);
7803 rtx scalar1 = operands[1];
7804 rtx scalar2 = operands[2];
7805 rtx scalar3 = operands[3];
7806 rtx scalar4 = operands[4];
7807 rtx tmp1 = operands[5];
7808 rtx tmp2 = operands[6];
7809
7810 /* Even though we only need one temporary (plus the destination, which
7811 has an early clobber constraint), try to use two temporaries, one for
7812 each double word created. That way the second insn scheduling pass can
7813 rearrange things so the two parts are done in parallel. */
7814 if (BYTES_BIG_ENDIAN)
7815 {
7816 rtx di_lo = gen_rtx_REG (DImode, d_regno);
7817 rtx di_hi = gen_rtx_REG (DImode, d_regno + 1);
7818 rs6000_split_v4si_init_di_reg (di_lo, scalar1, scalar2, tmp1);
7819 rs6000_split_v4si_init_di_reg (di_hi, scalar3, scalar4, tmp2);
7820 }
7821 else
7822 {
7823 rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
7824 rtx di_hi = gen_rtx_REG (DImode, d_regno);
7825 gcc_assert (!VECTOR_ELT_ORDER_BIG);
7826 rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
7827 rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
7828 }
7829 return;
7830 }
7831
7832 else
7833 gcc_unreachable ();
7834 }
7835
7836 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
7837
7838 bool
7839 invalid_e500_subreg (rtx op, machine_mode mode)
7840 {
7841 if (TARGET_E500_DOUBLE)
7842 {
7843 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
7844 subreg:TI and reg:TF. Decimal float modes are like integer
7845 modes (only low part of each register used) for this
7846 purpose. */
7847 if (GET_CODE (op) == SUBREG
7848 && (mode == SImode || mode == DImode || mode == TImode
7849 || mode == DDmode || mode == TDmode || mode == PTImode)
7850 && REG_P (SUBREG_REG (op))
7851 && (GET_MODE (SUBREG_REG (op)) == DFmode
7852 || GET_MODE (SUBREG_REG (op)) == TFmode
7853 || GET_MODE (SUBREG_REG (op)) == IFmode
7854 || GET_MODE (SUBREG_REG (op)) == KFmode))
7855 return true;
7856
7857 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
7858 reg:TI. */
7859 if (GET_CODE (op) == SUBREG
7860 && (mode == DFmode || mode == TFmode || mode == IFmode
7861 || mode == KFmode)
7862 && REG_P (SUBREG_REG (op))
7863 && (GET_MODE (SUBREG_REG (op)) == DImode
7864 || GET_MODE (SUBREG_REG (op)) == TImode
7865 || GET_MODE (SUBREG_REG (op)) == PTImode
7866 || GET_MODE (SUBREG_REG (op)) == DDmode
7867 || GET_MODE (SUBREG_REG (op)) == TDmode))
7868 return true;
7869 }
7870
7871 if (TARGET_SPE
7872 && GET_CODE (op) == SUBREG
7873 && mode == SImode
7874 && REG_P (SUBREG_REG (op))
7875 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
7876 return true;
7877
7878 return false;
7879 }
7880
7881 /* Return the alignment of TYPE. The existing alignment is ALIGN. HOW
7882 selects whether the alignment is ABI-mandated, optional, or
7883 both. */
7884
7885 unsigned int
7886 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
7887 {
7888 if (how != align_opt)
7889 {
7890 if (TREE_CODE (type) == VECTOR_TYPE)
7891 {
7892 if ((TARGET_SPE && SPE_VECTOR_MODE (TYPE_MODE (type)))
7893 || (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type))))
7894 {
7895 if (align < 64)
7896 align = 64;
7897 }
7898 else if (align < 128)
7899 align = 128;
7900 }
7901 else if (TARGET_E500_DOUBLE
7902 && TREE_CODE (type) == REAL_TYPE
7903 && TYPE_MODE (type) == DFmode)
7904 {
7905 if (align < 64)
7906 align = 64;
7907 }
7908 }
7909
7910 if (how != align_abi)
7911 {
7912 if (TREE_CODE (type) == ARRAY_TYPE
7913 && TYPE_MODE (TREE_TYPE (type)) == QImode)
7914 {
7915 if (align < BITS_PER_WORD)
7916 align = BITS_PER_WORD;
7917 }
7918 }
7919
7920 return align;
7921 }
7922
7923 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
7924
7925 bool
7926 rs6000_special_adjust_field_align_p (tree field, unsigned int computed)
7927 {
7928 if (TARGET_ALTIVEC && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
7929 {
7930 if (computed != 128)
7931 {
7932 static bool warned;
7933 if (!warned && warn_psabi)
7934 {
7935 warned = true;
7936 inform (input_location,
7937 "the layout of aggregates containing vectors with"
7938 " %d-byte alignment has changed in GCC 5",
7939 computed / BITS_PER_UNIT);
7940 }
7941 }
7942 /* In current GCC there is no special case. */
7943 return false;
7944 }
7945
7946 return false;
7947 }
7948
7949 /* AIX increases natural record alignment to doubleword if the first
7950 field is an FP double while the FP fields remain word aligned. */
7951
7952 unsigned int
7953 rs6000_special_round_type_align (tree type, unsigned int computed,
7954 unsigned int specified)
7955 {
7956 unsigned int align = MAX (computed, specified);
7957 tree field = TYPE_FIELDS (type);
7958
7959 /* Skip all non-field decls. */
7960 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7961 field = DECL_CHAIN (field);
7962
7963 if (field != NULL && field != type)
7964 {
7965 type = TREE_TYPE (field);
7966 while (TREE_CODE (type) == ARRAY_TYPE)
7967 type = TREE_TYPE (type);
7968
7969 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
7970 align = MAX (align, 64);
7971 }
7972
7973 return align;
7974 }
7975
7976 /* Darwin increases record alignment to the natural alignment of
7977 the first field. */
7978
7979 unsigned int
7980 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
7981 unsigned int specified)
7982 {
7983 unsigned int align = MAX (computed, specified);
7984
7985 if (TYPE_PACKED (type))
7986 return align;
7987
7988 /* Find the first field, looking down into aggregates. */
7989 do {
7990 tree field = TYPE_FIELDS (type);
7991 /* Skip all non-field decls. */
7992 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
7993 field = DECL_CHAIN (field);
7994 if (! field)
7995 break;
7996 /* A packed field does not contribute any extra alignment. */
7997 if (DECL_PACKED (field))
7998 return align;
7999 type = TREE_TYPE (field);
8000 while (TREE_CODE (type) == ARRAY_TYPE)
8001 type = TREE_TYPE (type);
8002 } while (AGGREGATE_TYPE_P (type));
8003
8004 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
8005 align = MAX (align, TYPE_ALIGN (type));
8006
8007 return align;
8008 }
8009
8010 /* Return 1 for an operand in small memory on V.4/eabi. */
8011
8012 int
8013 small_data_operand (rtx op ATTRIBUTE_UNUSED,
8014 machine_mode mode ATTRIBUTE_UNUSED)
8015 {
8016 #if TARGET_ELF
8017 rtx sym_ref;
8018
8019 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
8020 return 0;
8021
8022 if (DEFAULT_ABI != ABI_V4)
8023 return 0;
8024
8025 /* Vector and float memory instructions have a limited offset on the
8026 SPE, so using a vector or float variable directly as an operand is
8027 not useful. */
8028 if (TARGET_SPE
8029 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
8030 return 0;
8031
8032 if (GET_CODE (op) == SYMBOL_REF)
8033 sym_ref = op;
8034
8035 else if (GET_CODE (op) != CONST
8036 || GET_CODE (XEXP (op, 0)) != PLUS
8037 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
8038 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
8039 return 0;
8040
8041 else
8042 {
8043 rtx sum = XEXP (op, 0);
8044 HOST_WIDE_INT summand;
8045
8046 /* We have to be careful here, because it is the referenced address
8047 that must be within 32k of _SDA_BASE_, not just the symbol. */
8048 summand = INTVAL (XEXP (sum, 1));
8049 if (summand < 0 || summand > g_switch_value)
8050 return 0;
8051
8052 sym_ref = XEXP (sum, 0);
8053 }
8054
8055 return SYMBOL_REF_SMALL_P (sym_ref);
8056 #else
8057 return 0;
8058 #endif
8059 }
8060
8061 /* Return true if either operand is a general purpose register. */
8062
8063 bool
8064 gpr_or_gpr_p (rtx op0, rtx op1)
8065 {
8066 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
8067 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
8068 }
8069
8070 /* Return true if this is a move direct operation between GPR registers and
8071 floating point/VSX registers. */
8072
8073 bool
8074 direct_move_p (rtx op0, rtx op1)
8075 {
8076 int regno0, regno1;
8077
8078 if (!REG_P (op0) || !REG_P (op1))
8079 return false;
8080
8081 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
8082 return false;
8083
8084 regno0 = REGNO (op0);
8085 regno1 = REGNO (op1);
8086 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
8087 return false;
8088
8089 if (INT_REGNO_P (regno0))
8090 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
8091
8092 else if (INT_REGNO_P (regno1))
8093 {
8094 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
8095 return true;
8096
8097 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
8098 return true;
8099 }
8100
8101 return false;
8102 }
8103
8104 /* Return true if the OFFSET is valid for the quad address instructions that
8105 use d-form (register + offset) addressing. */
8106
8107 static inline bool
8108 quad_address_offset_p (HOST_WIDE_INT offset)
8109 {
8110 return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
8111 }
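
/* E.g. offsets 0, 16 and -32768 qualify; 8 fails the low-nibble test,
   and 32768 is outside the signed 16-bit range.  */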
8112
8113 /* Return true if ADDR is an acceptable address for a quad memory
8114 operation of mode MODE (either LQ/STQ for general purpose registers,
8115 or LXV/STXV for vector registers under ISA 3.0). If STRICT, accept
8116 only addresses that are valid under strict register checking. */
8118
8119 bool
8120 quad_address_p (rtx addr, machine_mode mode, bool strict)
8121 {
8122 rtx op0, op1;
8123
8124 if (GET_MODE_SIZE (mode) != 16)
8125 return false;
8126
8127 if (legitimate_indirect_address_p (addr, strict))
8128 return true;
8129
8130 if (VECTOR_MODE_P (mode) && !mode_supports_vsx_dform_quad (mode))
8131 return false;
8132
8133 if (GET_CODE (addr) != PLUS)
8134 return false;
8135
8136 op0 = XEXP (addr, 0);
8137 if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
8138 return false;
8139
8140 op1 = XEXP (addr, 1);
8141 if (!CONST_INT_P (op1))
8142 return false;
8143
8144 return quad_address_offset_p (INTVAL (op1));
8145 }
8146
8147 /* Return true if this is a load or store quad operation. This function does
8148 not handle the atomic quad memory instructions. */
8149
8150 bool
8151 quad_load_store_p (rtx op0, rtx op1)
8152 {
8153 bool ret;
8154
8155 if (!TARGET_QUAD_MEMORY)
8156 ret = false;
8157
8158 else if (REG_P (op0) && MEM_P (op1))
8159 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
8160 && quad_memory_operand (op1, GET_MODE (op1))
8161 && !reg_overlap_mentioned_p (op0, op1));
8162
8163 else if (MEM_P (op0) && REG_P (op1))
8164 ret = (quad_memory_operand (op0, GET_MODE (op0))
8165 && quad_int_reg_operand (op1, GET_MODE (op1)));
8166
8167 else
8168 ret = false;
8169
8170 if (TARGET_DEBUG_ADDR)
8171 {
8172 fprintf (stderr, "\n========== quad_load_store, return %s\n",
8173 ret ? "true" : "false");
8174 debug_rtx (gen_rtx_SET (op0, op1));
8175 }
8176
8177 return ret;
8178 }
8179
8180 /* Given an address, return a constant offset term if one exists. */
8181
8182 static rtx
8183 address_offset (rtx op)
8184 {
8185 if (GET_CODE (op) == PRE_INC
8186 || GET_CODE (op) == PRE_DEC)
8187 op = XEXP (op, 0);
8188 else if (GET_CODE (op) == PRE_MODIFY
8189 || GET_CODE (op) == LO_SUM)
8190 op = XEXP (op, 1);
8191
8192 if (GET_CODE (op) == CONST)
8193 op = XEXP (op, 0);
8194
8195 if (GET_CODE (op) == PLUS)
8196 op = XEXP (op, 1);
8197
8198 if (CONST_INT_P (op))
8199 return op;
8200
8201 return NULL_RTX;
8202 }
8203
8204 /* Return true if the MEM operand is a memory operand suitable for use
8205 with a (full width, possibly multiple) gpr load/store. On
8206 powerpc64 this means the offset must be divisible by 4.
8207 Implements 'Y' constraint.
8208
8209 Accept direct, indexed, offset, lo_sum and tocref. Since this is
8210 a constraint function, we know the operand has satisfied a suitable
8211 memory predicate. Also accept some odd rtl generated by reload
8212 (see rs6000_legitimize_reload_address for various forms). It is
8213 important that reload rtl be accepted by appropriate constraints
8214 but not by the operand predicate.
8215
8216 Offsetting a lo_sum should not be allowed, except where we know by
8217 alignment that a 32k boundary is not crossed, but see the ???
8218 comment in rs6000_legitimize_reload_address. Note that by
8219 "offsetting" here we mean a further offset to access parts of the
8220 MEM. It's fine to have a lo_sum where the inner address is offset
8221 from a sym, since the same sym+offset will appear in the high part
8222 of the address calculation. */
8223
8224 bool
8225 mem_operand_gpr (rtx op, machine_mode mode)
8226 {
8227 unsigned HOST_WIDE_INT offset;
8228 int extra;
8229 rtx addr = XEXP (op, 0);
8230
8231 op = address_offset (addr);
8232 if (op == NULL_RTX)
8233 return true;
8234
8235 offset = INTVAL (op);
8236 if (TARGET_POWERPC64 && (offset & 3) != 0)
8237 return false;
8238
8239 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8240 if (extra < 0)
8241 extra = 0;
8242
8243 if (GET_CODE (addr) == LO_SUM)
8244 /* For lo_sum addresses, we must allow any offset except one that
8245 causes a wrap, so test only the low 16 bits. */
8246 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8247
8248 return offset + 0x8000 < 0x10000u - extra;
8249 }
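
/* For example, a TImode access on powerpc64 (EXTRA = 8) accepts
   word-aligned offsets in [-32768, 32756]: the second doubleword at
   offset+8 then still fits the signed 16-bit displacement field.  */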
8250
8251 /* As above, but for DS-FORM VSX insns. Unlike mem_operand_gpr,
8252 enforce an offset divisible by 4 even for 32-bit. */
8253
8254 bool
8255 mem_operand_ds_form (rtx op, machine_mode mode)
8256 {
8257 unsigned HOST_WIDE_INT offset;
8258 int extra;
8259 rtx addr = XEXP (op, 0);
8260
8261 if (!offsettable_address_p (false, mode, addr))
8262 return false;
8263
8264 op = address_offset (addr);
8265 if (op == NULL_RTX)
8266 return true;
8267
8268 offset = INTVAL (op);
8269 if ((offset & 3) != 0)
8270 return false;
8271
8272 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
8273 if (extra < 0)
8274 extra = 0;
8275
8276 if (GET_CODE (addr) == LO_SUM)
8277 /* For lo_sum addresses, we must allow any offset except one that
8278 causes a wrap, so test only the low 16 bits. */
8279 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
8280
8281 return offset + 0x8000 < 0x10000u - extra;
8282 }
8283 \f
8284 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
8285
8286 static bool
8287 reg_offset_addressing_ok_p (machine_mode mode)
8288 {
8289 switch (mode)
8290 {
8291 case V16QImode:
8292 case V8HImode:
8293 case V4SFmode:
8294 case V4SImode:
8295 case V2DFmode:
8296 case V2DImode:
8297 case V1TImode:
8298 case TImode:
8299 case TFmode:
8300 case KFmode:
8301 /* AltiVec/VSX vector modes. Only reg+reg addressing was valid until the
8302 ISA 3.0 vector d-form addressing mode was added. While TImode is not
8303 a vector mode, if we want to use the VSX registers to move it around,
8304 we need to restrict ourselves to reg+reg addressing. Similarly for
8305 IEEE 128-bit floating point that is passed in a single vector
8306 register. */
8307 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
8308 return mode_supports_vsx_dform_quad (mode);
8309 break;
8310
8311 case V4HImode:
8312 case V2SImode:
8313 case V1DImode:
8314 case V2SFmode:
8315 /* Paired vector modes. Only reg+reg addressing is valid. */
8316 if (TARGET_PAIRED_FLOAT)
8317 return false;
8318 break;
8319
8320 case SDmode:
8321 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
8322 addressing for the LFIWZX and STFIWX instructions. */
8323 if (TARGET_NO_SDMODE_STACK)
8324 return false;
8325 break;
8326
8327 default:
8328 break;
8329 }
8330
8331 return true;
8332 }
8333
8334 static bool
8335 virtual_stack_registers_memory_p (rtx op)
8336 {
8337 int regnum;
8338
8339 if (GET_CODE (op) == REG)
8340 regnum = REGNO (op);
8341
8342 else if (GET_CODE (op) == PLUS
8343 && GET_CODE (XEXP (op, 0)) == REG
8344 && GET_CODE (XEXP (op, 1)) == CONST_INT)
8345 regnum = REGNO (XEXP (op, 0));
8346
8347 else
8348 return false;
8349
8350 return (regnum >= FIRST_VIRTUAL_REGISTER
8351 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
8352 }
8353
8354 /* Return true if a MODE-sized memory access to OP plus OFFSET
8355 is known not to straddle a 32k boundary. This function is used
8356 to determine whether -mcmodel=medium code can use TOC pointer
8357 relative addressing for OP. This means the alignment of the TOC
8358 pointer must also be taken into account, and unfortunately that is
8359 only 8 bytes. */
8360
8361 #ifndef POWERPC64_TOC_POINTER_ALIGNMENT
8362 #define POWERPC64_TOC_POINTER_ALIGNMENT 8
8363 #endif
8364
8365 static bool
8366 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
8367 machine_mode mode)
8368 {
8369 tree decl;
8370 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
8371
8372 if (GET_CODE (op) != SYMBOL_REF)
8373 return false;
8374
8375 /* ISA 3.0 vector d-form addressing is restricted; don't allow
8376 SYMBOL_REF. */
8377 if (mode_supports_vsx_dform_quad (mode))
8378 return false;
8379
8380 dsize = GET_MODE_SIZE (mode);
8381 decl = SYMBOL_REF_DECL (op);
8382 if (!decl)
8383 {
8384 if (dsize == 0)
8385 return false;
8386
8387 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
8388 replacing memory addresses with an anchor plus offset. We
8389 could find the decl by rummaging around in the block->objects
8390 VEC for the given offset but that seems like too much work. */
8391 dalign = BITS_PER_UNIT;
8392 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
8393 && SYMBOL_REF_ANCHOR_P (op)
8394 && SYMBOL_REF_BLOCK (op) != NULL)
8395 {
8396 struct object_block *block = SYMBOL_REF_BLOCK (op);
8397
8398 dalign = block->alignment;
8399 offset += SYMBOL_REF_BLOCK_OFFSET (op);
8400 }
8401 else if (CONSTANT_POOL_ADDRESS_P (op))
8402 {
8403 /* It would be nice to have get_pool_align (). */
8404 machine_mode cmode = get_pool_mode (op);
8405
8406 dalign = GET_MODE_ALIGNMENT (cmode);
8407 }
8408 }
8409 else if (DECL_P (decl))
8410 {
8411 dalign = DECL_ALIGN (decl);
8412
8413 if (dsize == 0)
8414 {
8415 /* Allow BLKmode when the entire object is known to not
8416 cross a 32k boundary. */
8417 if (!DECL_SIZE_UNIT (decl))
8418 return false;
8419
8420 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
8421 return false;
8422
8423 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
8424 if (dsize > 32768)
8425 return false;
8426
8427 dalign /= BITS_PER_UNIT;
8428 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8429 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8430 return dalign >= dsize;
8431 }
8432 }
8433 else
8434 gcc_unreachable ();
8435
8436 /* Find how many bits of the alignment we know for this access. */
8437 dalign /= BITS_PER_UNIT;
8438 if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
8439 dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
8440 mask = dalign - 1;
8441 lsb = offset & -offset;
8442 mask &= lsb - 1;
8443 dalign = mask + 1;
8444
8445 return dalign >= dsize;
8446 }
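
/* Worked example: a symbol aligned to 8 bytes accessed at OFFSET 4
   gives mask = 7 and lsb = 4, so the known alignment drops to 4 bytes
   and only accesses with DSIZE <= 4 pass the test.  */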
8447
8448 static bool
8449 constant_pool_expr_p (rtx op)
8450 {
8451 rtx base, offset;
8452
8453 split_const (op, &base, &offset);
8454 return (GET_CODE (base) == SYMBOL_REF
8455 && CONSTANT_POOL_ADDRESS_P (base)
8456 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
8457 }
8458
8459 static const_rtx tocrel_base, tocrel_offset;
8460
8461 /* Return true if OP is a toc pointer relative address (the output
8462 of create_TOC_reference). If STRICT, do not match non-split
8463 -mcmodel=large/medium toc pointer relative addresses. */
8464
8465 bool
8466 toc_relative_expr_p (const_rtx op, bool strict)
8467 {
8468 if (!TARGET_TOC)
8469 return false;
8470
8471 if (TARGET_CMODEL != CMODEL_SMALL)
8472 {
8473 /* When strict ensure we have everything tidy. */
8474 if (strict
8475 && !(GET_CODE (op) == LO_SUM
8476 && REG_P (XEXP (op, 0))
8477 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
8478 return false;
8479
8480 /* When not strict, allow non-split TOC addresses and also allow
8481 (lo_sum (high ..)) TOC addresses created during reload. */
8482 if (GET_CODE (op) == LO_SUM)
8483 op = XEXP (op, 1);
8484 }
8485
8486 tocrel_base = op;
8487 tocrel_offset = const0_rtx;
8488 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
8489 {
8490 tocrel_base = XEXP (op, 0);
8491 tocrel_offset = XEXP (op, 1);
8492 }
8493
8494 return (GET_CODE (tocrel_base) == UNSPEC
8495 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
8496 }
8497
8498 /* Return true if X is a constant pool address, and also for cmodel=medium
8499 if X is a toc-relative address known to be offsettable within MODE. */
8500
8501 bool
8502 legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
8503 bool strict)
8504 {
8505 return (toc_relative_expr_p (x, strict)
8506 && (TARGET_CMODEL != CMODEL_MEDIUM
8507 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
8508 || mode == QImode
8509 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
8510 INTVAL (tocrel_offset), mode)));
8511 }
8512
8513 static bool
8514 legitimate_small_data_p (machine_mode mode, rtx x)
8515 {
8516 return (DEFAULT_ABI == ABI_V4
8517 && !flag_pic && !TARGET_TOC
8518 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
8519 && small_data_operand (x, mode));
8520 }
8521
8522 /* SPE offset addressing is limited to 5 bits' worth of double words. */
8523 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
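
/* I.e. byte offsets that are multiples of 8 between 0 and 248: 240
   passes, while 244 (not a multiple of 8) and 256 (too large) both
   fail the mask test.  */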
8524
8525 bool
8526 rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
8527 bool strict, bool worst_case)
8528 {
8529 unsigned HOST_WIDE_INT offset;
8530 unsigned int extra;
8531
8532 if (GET_CODE (x) != PLUS)
8533 return false;
8534 if (!REG_P (XEXP (x, 0)))
8535 return false;
8536 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8537 return false;
8538 if (mode_supports_vsx_dform_quad (mode))
8539 return quad_address_p (x, mode, strict);
8540 if (!reg_offset_addressing_ok_p (mode))
8541 return virtual_stack_registers_memory_p (x);
8542 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
8543 return true;
8544 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
8545 return false;
8546
8547 offset = INTVAL (XEXP (x, 1));
8548 extra = 0;
8549 switch (mode)
8550 {
8551 case V4HImode:
8552 case V2SImode:
8553 case V1DImode:
8554 case V2SFmode:
8555 /* SPE vector modes. */
8556 return SPE_CONST_OFFSET_OK (offset);
8557
8558 case DFmode:
8559 case DDmode:
8560 case DImode:
8561 /* On e500v2, we may have:
8562
8563 (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
8564
8565 Which gets addressed with evldd instructions. */
8566 if (TARGET_E500_DOUBLE)
8567 return SPE_CONST_OFFSET_OK (offset);
8568
8569 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
8570 addressing. */
8571 if (VECTOR_MEM_VSX_P (mode))
8572 return false;
8573
8574 if (!worst_case)
8575 break;
8576 if (!TARGET_POWERPC64)
8577 extra = 4;
8578 else if (offset & 3)
8579 return false;
8580 break;
8581
8582 case TFmode:
8583 case IFmode:
8584 case KFmode:
8585 case TDmode:
8586 case TImode:
8587 case PTImode:
8588 if (TARGET_E500_DOUBLE)
8589 return (SPE_CONST_OFFSET_OK (offset)
8590 && SPE_CONST_OFFSET_OK (offset + 8));
8591
8592 extra = 8;
8593 if (!worst_case)
8594 break;
8595 if (!TARGET_POWERPC64)
8596 extra = 12;
8597 else if (offset & 3)
8598 return false;
8599 break;
8600
8601 default:
8602 break;
8603 }
8604
8605 offset += 0x8000;
8606 return offset < 0x10000 - extra;
8607 }
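
/* For example, a worst-case TFmode access on 32-bit (EXTRA = 12, four
   word loads) accepts offsets up to 32755, so that the final word at
   offset+12 still fits the signed 16-bit displacement.  */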
8608
8609 bool
8610 legitimate_indexed_address_p (rtx x, int strict)
8611 {
8612 rtx op0, op1;
8613
8614 if (GET_CODE (x) != PLUS)
8615 return false;
8616
8617 op0 = XEXP (x, 0);
8618 op1 = XEXP (x, 1);
8619
8620 /* Recognize the rtl generated by reload which we know will later be
8621 replaced with proper base and index regs. */
8622 if (!strict
8623 && reload_in_progress
8624 && (REG_P (op0) || GET_CODE (op0) == PLUS)
8625 && REG_P (op1))
8626 return true;
8627
8628 return (REG_P (op0) && REG_P (op1)
8629 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
8630 && INT_REG_OK_FOR_INDEX_P (op1, strict))
8631 || (INT_REG_OK_FOR_BASE_P (op1, strict)
8632 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
8633 }
8634
8635 bool
8636 avoiding_indexed_address_p (machine_mode mode)
8637 {
8638 /* Avoid indexed addressing for modes that have non-indexed
8639 load/store instruction forms. */
8640 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
8641 }
8642
8643 bool
8644 legitimate_indirect_address_p (rtx x, int strict)
8645 {
8646 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
8647 }
8648
8649 bool
8650 macho_lo_sum_memory_operand (rtx x, machine_mode mode)
8651 {
8652 if (!TARGET_MACHO || !flag_pic
8653 || mode != SImode || GET_CODE (x) != MEM)
8654 return false;
8655 x = XEXP (x, 0);
8656
8657 if (GET_CODE (x) != LO_SUM)
8658 return false;
8659 if (GET_CODE (XEXP (x, 0)) != REG)
8660 return false;
8661 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
8662 return false;
8663 x = XEXP (x, 1);
8664
8665 return CONSTANT_P (x);
8666 }
8667
8668 static bool
8669 legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
8670 {
8671 if (GET_CODE (x) != LO_SUM)
8672 return false;
8673 if (GET_CODE (XEXP (x, 0)) != REG)
8674 return false;
8675 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
8676 return false;
8677 /* Quad word addresses are restricted, and we can't use LO_SUM. */
8678 if (mode_supports_vsx_dform_quad (mode))
8679 return false;
8680 /* Restrict addressing for DI because of our SUBREG hackery. */
8681 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
8682 return false;
8683 x = XEXP (x, 1);
8684
8685 if (TARGET_ELF || TARGET_MACHO)
8686 {
8687 bool large_toc_ok;
8688
8689 if (DEFAULT_ABI == ABI_V4 && flag_pic)
8690 return false;
8691 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, as it usually calls
8692 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
8693 recognizes some LO_SUM addresses as valid although this
8694 function says the opposite. In most cases LRA can generate
8695 correct code for address reloads through its own transformations;
8696 only some LO_SUM cases are beyond it. So we need to add code
8697 here, analogous to that in rs6000_legitimize_reload_address for
8698 LO_SUM, saying that some addresses are still valid. */
8699 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
8700 && small_toc_ref (x, VOIDmode));
8701 if (TARGET_TOC && ! large_toc_ok)
8702 return false;
8703 if (GET_MODE_NUNITS (mode) != 1)
8704 return false;
8705 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
8706 && !(/* ??? Assume floating point reg based on mode? */
8707 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
8708 && (mode == DFmode || mode == DDmode)))
8709 return false;
8710
8711 return CONSTANT_P (x) || large_toc_ok;
8712 }
8713
8714 return false;
8715 }
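/* A minimal standalone sketch (not part of GCC; variable names are
   invented) of the HIGH/LO_SUM split this predicate validates: the
   adjusted high half (@ha) pre-compensates for the sign-extension of
   the low 16 bits, so (ha << 16) + lo reconstructs the original
   address exactly.  */
#include <stdio.h>
#include <stdint.h>

int
main (void)
{
  uint32_t addr = 0x1000abcd;
  int16_t lo = (int16_t) (addr & 0xffff);         /* sign-extended low */
  uint16_t ha = (addr - (uint32_t) (int32_t) lo) >> 16; /* adjusted high */
  uint32_t sum = ((uint32_t) ha << 16) + (uint32_t) (int32_t) lo;
  printf ("ha=0x%04x lo=%d sum=0x%08x\n", (unsigned int) ha, (int) lo, sum);
  return 0;
}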
8716
8717
8718 /* Try machine-dependent ways of modifying an illegitimate address
8719 to be legitimate. If we find one, return the new, valid address.
8720 This is used from only one place: `memory_address' in explow.c.
8721
8722 OLDX is the address as it was before break_out_memory_refs was
8723 called. In some cases it is useful to look at this to decide what
8724 needs to be done.
8725
8726 It is always safe for this function to do nothing. It exists to
8727 recognize opportunities to optimize the output.
8728
8729 On RS/6000, first check for the sum of a register with a constant
8730 integer that is out of range. If so, generate code to add the
8731 constant with the low-order 16 bits masked to the register and force
8732 this result into another register (this can be done with `cau').
8733 Then generate an address of REG+(CONST&0xffff), allowing for the
8734 possibility of bit 16 being a one.
8735
8736 Then check for the sum of a register and something that is not constant;
8737 try to load the non-constant part into a register and return the sum. */
8738
8739 static rtx
8740 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
8741 machine_mode mode)
8742 {
8743 unsigned int extra;
8744
8745 if (!reg_offset_addressing_ok_p (mode)
8746 || mode_supports_vsx_dform_quad (mode))
8747 {
8748 if (virtual_stack_registers_memory_p (x))
8749 return x;
8750
8751 /* In theory we should not be seeing addresses of the form reg+0,
8752 but just in case it is generated, optimize it away. */
8753 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
8754 return force_reg (Pmode, XEXP (x, 0));
8755
8756 /* For TImode with load/store quad, restrict addresses to just a single
8757 pointer, so it works with both GPRs and VSX registers. */
8758 /* Make sure both operands are registers. */
8759 else if (GET_CODE (x) == PLUS
8760 && (mode != TImode || !TARGET_VSX_TIMODE))
8761 return gen_rtx_PLUS (Pmode,
8762 force_reg (Pmode, XEXP (x, 0)),
8763 force_reg (Pmode, XEXP (x, 1)));
8764 else
8765 return force_reg (Pmode, x);
8766 }
8767 if (GET_CODE (x) == SYMBOL_REF)
8768 {
8769 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
8770 if (model != 0)
8771 return rs6000_legitimize_tls_address (x, model);
8772 }
8773
8774 extra = 0;
8775 switch (mode)
8776 {
8777 case TFmode:
8778 case TDmode:
8779 case TImode:
8780 case PTImode:
8781 case IFmode:
8782 case KFmode:
8783 /* As in legitimate_offset_address_p we do not assume
8784 worst-case. The mode here is just a hint as to the registers
8785 used. A TImode is usually in gprs, but may actually be in
8786 fprs. Leave worst-case scenario for reload to handle via
8787 insn constraints. PTImode is only GPRs. */
8788 extra = 8;
8789 break;
8790 default:
8791 break;
8792 }
8793
8794 if (GET_CODE (x) == PLUS
8795 && GET_CODE (XEXP (x, 0)) == REG
8796 && GET_CODE (XEXP (x, 1)) == CONST_INT
8797 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
8798 >= 0x10000 - extra)
8799 && !(SPE_VECTOR_MODE (mode)
8800 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
8801 {
8802 HOST_WIDE_INT high_int, low_int;
8803 rtx sum;
8804 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
8805 if (low_int >= 0x8000 - extra)
8806 low_int = 0;
8807 high_int = INTVAL (XEXP (x, 1)) - low_int;
8808 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
8809 GEN_INT (high_int)), 0);
8810 return plus_constant (Pmode, sum, low_int);
8811 }
8812 else if (GET_CODE (x) == PLUS
8813 && GET_CODE (XEXP (x, 0)) == REG
8814 && GET_CODE (XEXP (x, 1)) != CONST_INT
8815 && GET_MODE_NUNITS (mode) == 1
8816 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8817 || (/* ??? Assume floating point reg based on mode? */
8818 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
8819 && (mode == DFmode || mode == DDmode)))
8820 && !avoiding_indexed_address_p (mode))
8821 {
8822 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
8823 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
8824 }
8825 else if (SPE_VECTOR_MODE (mode)
8826 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
8827 {
8828 if (mode == DImode)
8829 return x;
8830 /* We accept [reg + reg] and [reg + OFFSET]. */
8831
8832 if (GET_CODE (x) == PLUS)
8833 {
8834 rtx op1 = XEXP (x, 0);
8835 rtx op2 = XEXP (x, 1);
8836 rtx y;
8837
8838 op1 = force_reg (Pmode, op1);
8839
8840 if (GET_CODE (op2) != REG
8841 && (GET_CODE (op2) != CONST_INT
8842 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
8843 || (GET_MODE_SIZE (mode) > 8
8844 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
8845 op2 = force_reg (Pmode, op2);
8846
8847 /* We can't always do [reg + reg] for these, because [reg +
8848 reg + offset] is not a legitimate addressing mode. */
8849 y = gen_rtx_PLUS (Pmode, op1, op2);
8850
8851 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
8852 return force_reg (Pmode, y);
8853 else
8854 return y;
8855 }
8856
8857 return force_reg (Pmode, x);
8858 }
8859 else if ((TARGET_ELF
8860 #if TARGET_MACHO
8861 || !MACHO_DYNAMIC_NO_PIC_P
8862 #endif
8863 )
8864 && TARGET_32BIT
8865 && TARGET_NO_TOC
8866 && ! flag_pic
8867 && GET_CODE (x) != CONST_INT
8868 && GET_CODE (x) != CONST_WIDE_INT
8869 && GET_CODE (x) != CONST_DOUBLE
8870 && CONSTANT_P (x)
8871 && GET_MODE_NUNITS (mode) == 1
8872 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
8873 || (/* ??? Assume floating point reg based on mode? */
8874 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
8875 && (mode == DFmode || mode == DDmode))))
8876 {
8877 rtx reg = gen_reg_rtx (Pmode);
8878 if (TARGET_ELF)
8879 emit_insn (gen_elf_high (reg, x));
8880 else
8881 emit_insn (gen_macho_high (reg, x));
8882 return gen_rtx_LO_SUM (Pmode, reg, x);
8883 }
8884 else if (TARGET_TOC
8885 && GET_CODE (x) == SYMBOL_REF
8886 && constant_pool_expr_p (x)
8887 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
8888 return create_TOC_reference (x, NULL_RTX);
8889 else
8890 return x;
8891 }
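/* A standalone sketch (not part of GCC; variable names are invented)
   of the high/low split performed above for reg + <large constant>:
   the low part becomes the signed 16-bit displacement of the memory
   access and the high part is materialized first (with addis/"cau").  */
#include <stdio.h>

int
main (void)
{
  long long val = 0x12345;      /* displacement too big for 16 bits */
  long long low = ((val & 0xffff) ^ 0x8000) - 0x8000;
  long long high = val - low;   /* always a multiple of 0x10000 */
  printf ("high=0x%llx low=0x%llx sum=0x%llx\n",
          (unsigned long long) high, (unsigned long long) low,
          (unsigned long long) (high + low));  /* 0x10000 + 0x2345 */
  return 0;
}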
8892
8893 /* Debug version of rs6000_legitimize_address. */
8894 static rtx
8895 rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
8896 {
8897 rtx ret;
8898 rtx_insn *insns;
8899
8900 start_sequence ();
8901 ret = rs6000_legitimize_address (x, oldx, mode);
8902 insns = get_insns ();
8903 end_sequence ();
8904
8905 if (ret != x)
8906 {
8907 fprintf (stderr,
8908 "\nrs6000_legitimize_address: mode %s, old code %s, "
8909 "new code %s, modified\n",
8910 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
8911 GET_RTX_NAME (GET_CODE (ret)));
8912
8913 fprintf (stderr, "Original address:\n");
8914 debug_rtx (x);
8915
8916 fprintf (stderr, "oldx:\n");
8917 debug_rtx (oldx);
8918
8919 fprintf (stderr, "New address:\n");
8920 debug_rtx (ret);
8921
8922 if (insns)
8923 {
8924 fprintf (stderr, "Insns added:\n");
8925 debug_rtx_list (insns, 20);
8926 }
8927 }
8928 else
8929 {
8930 fprintf (stderr,
8931 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
8932 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
8933
8934 debug_rtx (x);
8935 }
8936
8937 if (insns)
8938 emit_insn (insns);
8939
8940 return ret;
8941 }
8942
8943 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
8944 We need to emit DTP-relative relocations. */
8945
8946 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
8947 static void
8948 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
8949 {
8950 switch (size)
8951 {
8952 case 4:
8953 fputs ("\t.long\t", file);
8954 break;
8955 case 8:
8956 fputs (DOUBLE_INT_ASM_OP, file);
8957 break;
8958 default:
8959 gcc_unreachable ();
8960 }
8961 output_addr_const (file, x);
8962 if (TARGET_ELF)
8963 fputs ("@dtprel+0x8000", file);
8964 else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
8965 {
8966 switch (SYMBOL_REF_TLS_MODEL (x))
8967 {
8968 case 0:
8969 break;
8970 case TLS_MODEL_LOCAL_EXEC:
8971 fputs ("@le", file);
8972 break;
8973 case TLS_MODEL_INITIAL_EXEC:
8974 fputs ("@ie", file);
8975 break;
8976 case TLS_MODEL_GLOBAL_DYNAMIC:
8977 case TLS_MODEL_LOCAL_DYNAMIC:
8978 fputs ("@m", file);
8979 break;
8980 default:
8981 gcc_unreachable ();
8982 }
8983 }
8984 }
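/* For instance, with SIZE == 4 on an ELF target and X a SYMBOL_REF
   for "var", the function above emits a line of the form

        .long   var@dtprel+0x8000

   (illustrative; the 0x8000 bias matches the PowerPC convention of
   biasing DTP-relative offsets by 0x8000).  */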
8985
8986 /* Return true if X is a symbol that refers to real (rather than emulated)
8987 TLS. */
8988
8989 static bool
8990 rs6000_real_tls_symbol_ref_p (rtx x)
8991 {
8992 return (GET_CODE (x) == SYMBOL_REF
8993 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
8994 }
8995
8996 /* In the name of slightly smaller debug output, and to cater to
8997 general assembler lossage, recognize various UNSPEC sequences
8998 and turn them back into a direct symbol reference. */
8999
9000 static rtx
9001 rs6000_delegitimize_address (rtx orig_x)
9002 {
9003 rtx x, y, offset;
9004
9005 orig_x = delegitimize_mem_from_attrs (orig_x);
9006 x = orig_x;
9007 if (MEM_P (x))
9008 x = XEXP (x, 0);
9009
9010 y = x;
9011 if (TARGET_CMODEL != CMODEL_SMALL
9012 && GET_CODE (y) == LO_SUM)
9013 y = XEXP (y, 1);
9014
9015 offset = NULL_RTX;
9016 if (GET_CODE (y) == PLUS
9017 && GET_MODE (y) == Pmode
9018 && CONST_INT_P (XEXP (y, 1)))
9019 {
9020 offset = XEXP (y, 1);
9021 y = XEXP (y, 0);
9022 }
9023
9024 if (GET_CODE (y) == UNSPEC
9025 && XINT (y, 1) == UNSPEC_TOCREL)
9026 {
9027 y = XVECEXP (y, 0, 0);
9028
9029 #ifdef HAVE_AS_TLS
9030 /* Do not associate thread-local symbols with the original
9031 constant pool symbol. */
9032 if (TARGET_XCOFF
9033 && GET_CODE (y) == SYMBOL_REF
9034 && CONSTANT_POOL_ADDRESS_P (y)
9035 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
9036 return orig_x;
9037 #endif
9038
9039 if (offset != NULL_RTX)
9040 y = gen_rtx_PLUS (Pmode, y, offset);
9041 if (!MEM_P (orig_x))
9042 return y;
9043 else
9044 return replace_equiv_address_nv (orig_x, y);
9045 }
9046
9047 if (TARGET_MACHO
9048 && GET_CODE (orig_x) == LO_SUM
9049 && GET_CODE (XEXP (orig_x, 1)) == CONST)
9050 {
9051 y = XEXP (XEXP (orig_x, 1), 0);
9052 if (GET_CODE (y) == UNSPEC
9053 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
9054 return XVECEXP (y, 0, 0);
9055 }
9056
9057 return orig_x;
9058 }
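/* Schematic example (illustrative RTL only): a TOC-relative reference

     (mem (plus (unspec [(symbol_ref "x")] UNSPEC_TOCREL)
                (const_int 8)))

   is rewritten by the function above into

     (mem (plus (symbol_ref "x") (const_int 8)))

   restoring a direct symbol reference for debug output.  */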
9059
9060 /* Return true if X shouldn't be emitted into the debug info.
9061 The linker doesn't like .toc section references from
9062 .debug_* sections, so reject .toc section symbols. */
9063
9064 static bool
9065 rs6000_const_not_ok_for_debug_p (rtx x)
9066 {
9067 if (GET_CODE (x) == SYMBOL_REF
9068 && CONSTANT_POOL_ADDRESS_P (x))
9069 {
9070 rtx c = get_pool_constant (x);
9071 machine_mode cmode = get_pool_mode (x);
9072 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
9073 return true;
9074 }
9075
9076 return false;
9077 }
9078
9079 /* Construct the SYMBOL_REF for the tls_get_addr function. */
9080
9081 static GTY(()) rtx rs6000_tls_symbol;
9082 static rtx
9083 rs6000_tls_get_addr (void)
9084 {
9085 if (!rs6000_tls_symbol)
9086 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
9087
9088 return rs6000_tls_symbol;
9089 }
9090
9091 /* Construct the SYMBOL_REF for TLS GOT references. */
9092
9093 static GTY(()) rtx rs6000_got_symbol;
9094 static rtx
9095 rs6000_got_sym (void)
9096 {
9097 if (!rs6000_got_symbol)
9098 {
9099 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
9100 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
9101 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
9102 }
9103
9104 return rs6000_got_symbol;
9105 }
9106
9107 /* AIX Thread-Local Address support. */
9108
9109 static rtx
9110 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
9111 {
9112 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
9113 const char *name;
9114 char *tlsname;
9115
9116 name = XSTR (addr, 0);
9117 /* Append the TLS CSECT qualifier, unless the symbol is already qualified
9118 or will be placed in the TLS private data section. */
9119 if (name[strlen (name) - 1] != ']'
9120 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
9121 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
9122 {
9123 tlsname = XALLOCAVEC (char, strlen (name) + 5); /* name + "[TL]" + NUL. */
9124 strcpy (tlsname, name);
9125 strcat (tlsname,
9126 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
9127 tlsaddr = copy_rtx (addr);
9128 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
9129 }
9130 else
9131 tlsaddr = addr;
9132
9133 /* Place addr into TOC constant pool. */
9134 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
9135
9136 /* Output the TOC entry and create the MEM referencing the value. */
9137 if (constant_pool_expr_p (XEXP (sym, 0))
9138 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
9139 {
9140 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
9141 mem = gen_const_mem (Pmode, tocref);
9142 set_mem_alias_set (mem, get_TOC_alias_set ());
9143 }
9144 else
9145 return sym;
9146
9147 /* Use global-dynamic for local-dynamic. */
9148 if (model == TLS_MODEL_GLOBAL_DYNAMIC
9149 || model == TLS_MODEL_LOCAL_DYNAMIC)
9150 {
9151 /* Create new TOC reference for @m symbol. */
9152 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
9153 tlsname = XALLOCAVEC (char, strlen (name) + 2); /* "*LCM" is one char longer than "*LC". */
9154 strcpy (tlsname, "*LCM");
9155 strcat (tlsname, name + 3);
9156 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
9157 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
9158 tocref = create_TOC_reference (modaddr, NULL_RTX);
9159 rtx modmem = gen_const_mem (Pmode, tocref);
9160 set_mem_alias_set (modmem, get_TOC_alias_set ());
9161
9162 rtx modreg = gen_reg_rtx (Pmode);
9163 emit_insn (gen_rtx_SET (modreg, modmem));
9164
9165 tmpreg = gen_reg_rtx (Pmode);
9166 emit_insn (gen_rtx_SET (tmpreg, mem));
9167
9168 dest = gen_reg_rtx (Pmode);
9169 if (TARGET_32BIT)
9170 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
9171 else
9172 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
9173 return dest;
9174 }
9175 /* Obtain the TLS pointer: via a call on 32-bit, from GPR 13 on 64-bit. */
9176 else if (TARGET_32BIT)
9177 {
9178 tlsreg = gen_reg_rtx (SImode);
9179 emit_insn (gen_tls_get_tpointer (tlsreg));
9180 }
9181 else
9182 tlsreg = gen_rtx_REG (DImode, 13);
9183
9184 /* Load the TOC value into temporary register. */
9185 tmpreg = gen_reg_rtx (Pmode);
9186 emit_insn (gen_rtx_SET (tmpreg, mem));
9187 set_unique_reg_note (get_last_insn (), REG_EQUAL,
9188 gen_rtx_MINUS (Pmode, addr, tlsreg));
9189
9190 /* Add TOC symbol value to TLS pointer. */
9191 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
9192
9193 return dest;
9194 }
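/* Illustrative examples of the AIX mangling above (symbol names are
   invented): a public symbol "foo" gains a CSECT qualifier and becomes
   "foo[TL]" ("foo[UL]" for a BSS initializer), and for the dynamic
   models the module-handle TOC entry replaces the "*LC" prefix of the
   constant-pool label with "*LCM", e.g. "*LC..0" -> "*LCM..0".  */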
9195
9196 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
9197 this (thread-local) address. */
9198
9199 static rtx
9200 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
9201 {
9202 rtx dest, insn;
9203
9204 if (TARGET_XCOFF)
9205 return rs6000_legitimize_tls_address_aix (addr, model);
9206
9207 dest = gen_reg_rtx (Pmode);
9208 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
9209 {
9210 rtx tlsreg;
9211
9212 if (TARGET_64BIT)
9213 {
9214 tlsreg = gen_rtx_REG (Pmode, 13);
9215 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
9216 }
9217 else
9218 {
9219 tlsreg = gen_rtx_REG (Pmode, 2);
9220 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
9221 }
9222 emit_insn (insn);
9223 }
9224 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
9225 {
9226 rtx tlsreg, tmp;
9227
9228 tmp = gen_reg_rtx (Pmode);
9229 if (TARGET_64BIT)
9230 {
9231 tlsreg = gen_rtx_REG (Pmode, 13);
9232 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
9233 }
9234 else
9235 {
9236 tlsreg = gen_rtx_REG (Pmode, 2);
9237 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
9238 }
9239 emit_insn (insn);
9240 if (TARGET_64BIT)
9241 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
9242 else
9243 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
9244 emit_insn (insn);
9245 }
9246 else
9247 {
9248 rtx r3, got, tga, tmp1, tmp2, call_insn;
9249
9250 /* We currently use relocations like @got@tlsgd for tls, which
9251 means the linker will handle allocation of tls entries, placing
9252 them in the .got section. So use a pointer to the .got section,
9253 not one to secondary TOC sections used by 64-bit -mminimal-toc,
9254 or to secondary GOT sections used by 32-bit -fPIC. */
9255 if (TARGET_64BIT)
9256 got = gen_rtx_REG (Pmode, 2);
9257 else
9258 {
9259 if (flag_pic == 1)
9260 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
9261 else
9262 {
9263 rtx gsym = rs6000_got_sym ();
9264 got = gen_reg_rtx (Pmode);
9265 if (flag_pic == 0)
9266 rs6000_emit_move (got, gsym, Pmode);
9267 else
9268 {
9269 rtx mem, lab;
9270
9271 tmp1 = gen_reg_rtx (Pmode);
9272 tmp2 = gen_reg_rtx (Pmode);
9273 mem = gen_const_mem (Pmode, tmp1);
9274 lab = gen_label_rtx ();
9275 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
9276 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
9277 if (TARGET_LINK_STACK)
9278 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
9279 emit_move_insn (tmp2, mem);
9280 rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
9281 set_unique_reg_note (last, REG_EQUAL, gsym);
9282 }
9283 }
9284 }
9285
9286 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
9287 {
9288 tga = rs6000_tls_get_addr ();
9289 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
9290 1, const0_rtx, Pmode);
9291
9292 r3 = gen_rtx_REG (Pmode, 3);
9293 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9294 {
9295 if (TARGET_64BIT)
9296 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
9297 else
9298 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
9299 }
9300 else if (DEFAULT_ABI == ABI_V4)
9301 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
9302 else
9303 gcc_unreachable ();
9304 call_insn = last_call_insn ();
9305 PATTERN (call_insn) = insn;
9306 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9307 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9308 pic_offset_table_rtx);
9309 }
9310 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
9311 {
9312 tga = rs6000_tls_get_addr ();
9313 tmp1 = gen_reg_rtx (Pmode);
9314 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
9315 1, const0_rtx, Pmode);
9316
9317 r3 = gen_rtx_REG (Pmode, 3);
9318 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9319 {
9320 if (TARGET_64BIT)
9321 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
9322 else
9323 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
9324 }
9325 else if (DEFAULT_ABI == ABI_V4)
9326 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
9327 else
9328 gcc_unreachable ();
9329 call_insn = last_call_insn ();
9330 PATTERN (call_insn) = insn;
9331 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
9332 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
9333 pic_offset_table_rtx);
9334
9335 if (rs6000_tls_size == 16)
9336 {
9337 if (TARGET_64BIT)
9338 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
9339 else
9340 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
9341 }
9342 else if (rs6000_tls_size == 32)
9343 {
9344 tmp2 = gen_reg_rtx (Pmode);
9345 if (TARGET_64BIT)
9346 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
9347 else
9348 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
9349 emit_insn (insn);
9350 if (TARGET_64BIT)
9351 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
9352 else
9353 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
9354 }
9355 else
9356 {
9357 tmp2 = gen_reg_rtx (Pmode);
9358 if (TARGET_64BIT)
9359 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
9360 else
9361 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
9362 emit_insn (insn);
9363 insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
9364 }
9365 emit_insn (insn);
9366 }
9367 else
9368 {
9369 /* IE, or 64-bit offset LE. */
9370 tmp2 = gen_reg_rtx (Pmode);
9371 if (TARGET_64BIT)
9372 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
9373 else
9374 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
9375 emit_insn (insn);
9376 if (TARGET_64BIT)
9377 insn = gen_tls_tls_64 (dest, tmp2, addr);
9378 else
9379 insn = gen_tls_tls_32 (dest, tmp2, addr);
9380 emit_insn (insn);
9381 }
9382 }
9383
9384 return dest;
9385 }
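/* Schematically (illustrative only; 64-bit shown), the cases above
   correspond to the usual PowerPC ELF TLS sequences:

     local-exec, -mtls-size=16:
        addi  rD,r13,sym@tprel
     initial-exec:
        ld    rT,sym@got@tprel(r2)
        add   rD,rT,sym@tls
     global-dynamic:
        addi  r3,r2,sym@got@tlsgd
        bl    __tls_get_addr(sym@tlsgd)

   where r13 is the thread pointer and r2 the TOC pointer.  */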
9386
9387 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
9388
9389 static bool
9390 rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
9391 {
9392 if (GET_CODE (x) == HIGH
9393 && GET_CODE (XEXP (x, 0)) == UNSPEC)
9394 return true;
9395
9396 /* A TLS symbol in the TOC cannot contain a sum. */
9397 if (GET_CODE (x) == CONST
9398 && GET_CODE (XEXP (x, 0)) == PLUS
9399 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
9400 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
9401 return true;
9402
9403 /* Do not place an ELF TLS symbol in the constant pool. */
9404 return TARGET_ELF && tls_referenced_p (x);
9405 }
9406
9407 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
9408 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
9409 can be addressed relative to the toc pointer. */
9410
9411 static bool
9412 use_toc_relative_ref (rtx sym, machine_mode mode)
9413 {
9414 return ((constant_pool_expr_p (sym)
9415 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
9416 get_pool_mode (sym)))
9417 || (TARGET_CMODEL == CMODEL_MEDIUM
9418 && SYMBOL_REF_LOCAL_P (sym)
9419 && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
9420 }
9421
9422 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
9423 replace the input X, or the original X if no replacement is called for.
9424 The output parameter *WIN is 1 if the calling macro should goto WIN,
9425 0 if it should not.
9426
9427 For RS/6000, we wish to handle large displacements off a base
9428 register by splitting the addend across an addis and the mem insn.
9429 This cuts the number of extra insns needed from 3 to 1.
9430
9431 On Darwin, we use this to generate code for floating point constants.
9432 A movsf_low is generated so we wind up with 2 instructions rather than 3.
9433 The Darwin code is inside #if TARGET_MACHO because only then are the
9434 machopic_* functions defined. */
9435 static rtx
9436 rs6000_legitimize_reload_address (rtx x, machine_mode mode,
9437 int opnum, int type,
9438 int ind_levels ATTRIBUTE_UNUSED, int *win)
9439 {
9440 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9441 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9442
9443 /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
9444 DFmode/DImode MEM. Ditto for ISA 3.0 vsx_splat_v4sf/v4si. */
9445 if (reg_offset_p
9446 && opnum == 1
9447 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
9448 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
9449 || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
9450 && TARGET_P9_VECTOR)
9451 || (mode == SImode && recog_data.operand_mode[0] == V4SImode
9452 && TARGET_P9_VECTOR)))
9453 reg_offset_p = false;
9454
9455 /* We must recognize output that we have already generated ourselves. */
9456 if (GET_CODE (x) == PLUS
9457 && GET_CODE (XEXP (x, 0)) == PLUS
9458 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9459 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9460 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9461 {
9462 if (TARGET_DEBUG_ADDR)
9463 {
9464 fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
9465 debug_rtx (x);
9466 }
9467 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9468 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9469 opnum, (enum reload_type) type);
9470 *win = 1;
9471 return x;
9472 }
9473
9474 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
9475 if (GET_CODE (x) == LO_SUM
9476 && GET_CODE (XEXP (x, 0)) == HIGH)
9477 {
9478 if (TARGET_DEBUG_ADDR)
9479 {
9480 fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
9481 debug_rtx (x);
9482 }
9483 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9484 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9485 opnum, (enum reload_type) type);
9486 *win = 1;
9487 return x;
9488 }
9489
9490 #if TARGET_MACHO
9491 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
9492 && GET_CODE (x) == LO_SUM
9493 && GET_CODE (XEXP (x, 0)) == PLUS
9494 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
9495 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
9496 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
9497 && machopic_operand_p (XEXP (x, 1)))
9498 {
9499 /* Result of previous invocation of this function on Darwin
9500 floating point constant. */
9501 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9502 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9503 opnum, (enum reload_type) type);
9504 *win = 1;
9505 return x;
9506 }
9507 #endif
9508
9509 if (TARGET_CMODEL != CMODEL_SMALL
9510 && reg_offset_p
9511 && !quad_offset_p
9512 && small_toc_ref (x, VOIDmode))
9513 {
9514 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
9515 x = gen_rtx_LO_SUM (Pmode, hi, x);
9516 if (TARGET_DEBUG_ADDR)
9517 {
9518 fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
9519 debug_rtx (x);
9520 }
9521 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9522 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9523 opnum, (enum reload_type) type);
9524 *win = 1;
9525 return x;
9526 }
9527
9528 if (GET_CODE (x) == PLUS
9529 && REG_P (XEXP (x, 0))
9530 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
9531 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
9532 && CONST_INT_P (XEXP (x, 1))
9533 && reg_offset_p
9534 && !SPE_VECTOR_MODE (mode)
9535 && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
9536 && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
9537 {
9538 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
9539 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
9540 HOST_WIDE_INT high
9541 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
9542
9543 /* Check for 32-bit overflow or quad addresses with one of the
9544 four least significant bits set. */
9545 if (high + low != val
9546 || (quad_offset_p && (low & 0xf)))
9547 {
9548 *win = 0;
9549 return x;
9550 }
9551
9552 /* Reload the high part into a base reg; leave the low part
9553 in the mem directly. */
9554
9555 x = gen_rtx_PLUS (GET_MODE (x),
9556 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
9557 GEN_INT (high)),
9558 GEN_INT (low));
9559
9560 if (TARGET_DEBUG_ADDR)
9561 {
9562 fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
9563 debug_rtx (x);
9564 }
9565 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9566 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
9567 opnum, (enum reload_type) type);
9568 *win = 1;
9569 return x;
9570 }
9571
9572 if (GET_CODE (x) == SYMBOL_REF
9573 && reg_offset_p
9574 && !quad_offset_p
9575 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
9576 && !SPE_VECTOR_MODE (mode)
9577 #if TARGET_MACHO
9578 && DEFAULT_ABI == ABI_DARWIN
9579 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
9580 && machopic_symbol_defined_p (x)
9581 #else
9582 && DEFAULT_ABI == ABI_V4
9583 && !flag_pic
9584 #endif
9585 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
9586 The same goes for DImode without 64-bit gprs and DFmode and DDmode
9587 without fprs.
9588 ??? Assume floating point reg based on mode? This assumption is
9589 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
9590 where reload ends up doing a DFmode load of a constant from
9591 mem using two gprs. Unfortunately, at this point reload
9592 hasn't yet selected regs so poking around in reload data
9593 won't help and even if we could figure out the regs reliably,
9594 we'd still want to allow this transformation when the mem is
9595 naturally aligned. Since we say the address is good here, we
9596 can't disable offsets from LO_SUMs in mem_operand_gpr.
9597 FIXME: Allow offset from lo_sum for other modes too, when
9598 mem is sufficiently aligned.
9599
9600 Also disallow this if the type can go in VMX/Altivec registers, since
9601 those registers do not have d-form (reg+offset) address modes. */
9602 && !reg_addr[mode].scalar_in_vmx_p
9603 && mode != TFmode
9604 && mode != TDmode
9605 && mode != IFmode
9606 && mode != KFmode
9607 && (mode != TImode || !TARGET_VSX_TIMODE)
9608 && mode != PTImode
9609 && (mode != DImode || TARGET_POWERPC64)
9610 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
9611 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
9612 {
9613 #if TARGET_MACHO
9614 if (flag_pic)
9615 {
9616 rtx offset = machopic_gen_offset (x);
9617 x = gen_rtx_LO_SUM (GET_MODE (x),
9618 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
9619 gen_rtx_HIGH (Pmode, offset)), offset);
9620 }
9621 else
9622 #endif
9623 x = gen_rtx_LO_SUM (GET_MODE (x),
9624 gen_rtx_HIGH (Pmode, x), x);
9625
9626 if (TARGET_DEBUG_ADDR)
9627 {
9628 fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
9629 debug_rtx (x);
9630 }
9631 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9632 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9633 opnum, (enum reload_type) type);
9634 *win = 1;
9635 return x;
9636 }
9637
9638 /* Reload an offset address wrapped by an AND that represents the
9639 masking of the lower bits. Strip the outer AND and let reload
9640 convert the offset address into an indirect address. For VSX,
9641 force reload to create the address with an AND in a separate
9642 register, because we can't guarantee an altivec register will
9643 be used. */
9644 if (VECTOR_MEM_ALTIVEC_P (mode)
9645 && GET_CODE (x) == AND
9646 && GET_CODE (XEXP (x, 0)) == PLUS
9647 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
9648 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
9649 && GET_CODE (XEXP (x, 1)) == CONST_INT
9650 && INTVAL (XEXP (x, 1)) == -16)
9651 {
9652 x = XEXP (x, 0);
9653 *win = 1;
9654 return x;
9655 }
9656
9657 if (TARGET_TOC
9658 && reg_offset_p
9659 && !quad_offset_p
9660 && GET_CODE (x) == SYMBOL_REF
9661 && use_toc_relative_ref (x, mode))
9662 {
9663 x = create_TOC_reference (x, NULL_RTX);
9664 if (TARGET_CMODEL != CMODEL_SMALL)
9665 {
9666 if (TARGET_DEBUG_ADDR)
9667 {
9668 fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
9669 debug_rtx (x);
9670 }
9671 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
9672 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
9673 opnum, (enum reload_type) type);
9674 }
9675 *win = 1;
9676 return x;
9677 }
9678 *win = 0;
9679 return x;
9680 }
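/* A standalone sketch (not part of GCC; variable names are invented)
   of the displacement split in push_reload case #4 above, including
   the check that the 32-bit high/low decomposition reproduces the
   original value.  */
#include <stdio.h>

int
main (void)
{
  long long val = 0x80001234LL;  /* bit 31 set: the split must be refused */
  long long low = ((val & 0xffff) ^ 0x8000) - 0x8000;
  long long high = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
  printf ("high+low == val: %s\n", high + low == val ? "yes" : "no");
  return 0;
}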
9681
9682 /* Debug version of rs6000_legitimize_reload_address. */
9683 static rtx
9684 rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
9685 int opnum, int type,
9686 int ind_levels, int *win)
9687 {
9688 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
9689 ind_levels, win);
9690 fprintf (stderr,
9691 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
9692 "type = %d, ind_levels = %d, win = %d, original addr:\n",
9693 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
9694 debug_rtx (x);
9695
9696 if (x == ret)
9697 fprintf (stderr, "Same address returned\n");
9698 else if (!ret)
9699 fprintf (stderr, "NULL returned\n");
9700 else
9701 {
9702 fprintf (stderr, "New address:\n");
9703 debug_rtx (ret);
9704 }
9705
9706 return ret;
9707 }
9708
9709 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
9710 that is a valid memory address for an instruction.
9711 The MODE argument is the machine mode for the MEM expression
9712 that wants to use this address.
9713
9714 On the RS/6000, there are four valid address forms: a SYMBOL_REF that
9715 refers to a constant pool entry of an address (or the sum of it
9716 plus a constant), a short (16-bit signed) constant plus a register,
9717 the sum of two registers, or a register indirect, possibly with an
9718 auto-increment. For DFmode, DDmode and DImode with a constant plus
9719 register, we must ensure that both words are addressable or PowerPC64
9720 with offset word aligned.
9721
9722 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
9723 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
9724 because adjacent memory cells are accessed by adding word-sized offsets
9725 during assembly output. */
9726 static bool
9727 rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
9728 {
9729 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
9730 bool quad_offset_p = mode_supports_vsx_dform_quad (mode);
9731
9732 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
9733 if (VECTOR_MEM_ALTIVEC_P (mode)
9734 && GET_CODE (x) == AND
9735 && GET_CODE (XEXP (x, 1)) == CONST_INT
9736 && INTVAL (XEXP (x, 1)) == -16)
9737 x = XEXP (x, 0);
9738
9739 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
9740 return 0;
9741 if (legitimate_indirect_address_p (x, reg_ok_strict))
9742 return 1;
9743 if (TARGET_UPDATE
9744 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
9745 && mode_supports_pre_incdec_p (mode)
9746 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
9747 return 1;
9748 /* Handle restricted vector d-form offsets in ISA 3.0. */
9749 if (quad_offset_p)
9750 {
9751 if (quad_address_p (x, mode, reg_ok_strict))
9752 return 1;
9753 }
9754 else if (virtual_stack_registers_memory_p (x))
9755 return 1;
9756
9757 else if (reg_offset_p)
9758 {
9759 if (legitimate_small_data_p (mode, x))
9760 return 1;
9761 if (legitimate_constant_pool_address_p (x, mode,
9762 reg_ok_strict || lra_in_progress))
9763 return 1;
9764 if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
9765 && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
9766 return 1;
9767 }
9768
9769 /* For TImode, if we have TImode in VSX registers, only allow register
9770 indirect addresses. This will allow the values to go in either GPRs
9771 or VSX registers without reloading. The vector types would tend to
9772 go into VSX registers, so we allow REG+REG, while TImode seems
9773 somewhat split, in that some uses are GPR based, and some VSX based. */
9774 /* FIXME: We could loosen this by changing the following to
9775 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX_TIMODE)
9776 but currently we cannot allow REG+REG addressing for TImode. See
9777 PR72827 for complete details on how this ends up hoodwinking DSE. */
9778 if (mode == TImode && TARGET_VSX_TIMODE)
9779 return 0;
9780 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
9781 if (! reg_ok_strict
9782 && reg_offset_p
9783 && GET_CODE (x) == PLUS
9784 && GET_CODE (XEXP (x, 0)) == REG
9785 && (XEXP (x, 0) == virtual_stack_vars_rtx
9786 || XEXP (x, 0) == arg_pointer_rtx)
9787 && GET_CODE (XEXP (x, 1)) == CONST_INT)
9788 return 1;
9789 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
9790 return 1;
9791 if (!FLOAT128_2REG_P (mode)
9792 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
9793 || TARGET_POWERPC64
9794 || (mode != DFmode && mode != DDmode)
9795 || (TARGET_E500_DOUBLE && mode != DDmode))
9796 && (TARGET_POWERPC64 || mode != DImode)
9797 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
9798 && mode != PTImode
9799 && !avoiding_indexed_address_p (mode)
9800 && legitimate_indexed_address_p (x, reg_ok_strict))
9801 return 1;
9802 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
9803 && mode_supports_pre_modify_p (mode)
9804 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
9805 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
9806 reg_ok_strict, false)
9807 || (!avoiding_indexed_address_p (mode)
9808 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
9809 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
9810 return 1;
9811 if (reg_offset_p && !quad_offset_p
9812 && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
9813 return 1;
9814 return 0;
9815 }
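/* Illustrative RTL shapes accepted above (schematic only):

     (reg 9)                                register indirect
     (plus (reg 9) (const_int -4))          reg + 16-bit signed offset
     (plus (reg 9) (reg 10))                indexed (x-form)
     (pre_inc (reg 9)), (pre_modify ...)    update forms
     (lo_sum (reg 9) (symbol_ref "x"))      subject to
                                            legitimate_lo_sum_address_p  */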
9816
9817 /* Debug version of rs6000_legitimate_address_p. */
9818 static bool
9819 rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
9820 bool reg_ok_strict)
9821 {
9822 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
9823 fprintf (stderr,
9824 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
9825 "strict = %d, reload = %s, code = %s\n",
9826 ret ? "true" : "false",
9827 GET_MODE_NAME (mode),
9828 reg_ok_strict,
9829 (reload_completed
9830 ? "after"
9831 : (reload_in_progress ? "progress" : "before")),
9832 GET_RTX_NAME (GET_CODE (x)));
9833 debug_rtx (x);
9834
9835 return ret;
9836 }
9837
9838 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
9839
9840 static bool
9841 rs6000_mode_dependent_address_p (const_rtx addr,
9842 addr_space_t as ATTRIBUTE_UNUSED)
9843 {
9844 return rs6000_mode_dependent_address_ptr (addr);
9845 }
9846
9847 /* Go to LABEL if ADDR (a legitimate address expression)
9848 has an effect that depends on the machine mode it is used for.
9849
9850 On the RS/6000 this is true of all integral offsets (since AltiVec
9851 and VSX modes don't allow them) or is a pre-increment or decrement.
9852
9853 ??? Except that due to conceptual problems in offsettable_address_p
9854 we can't really report the problems of integral offsets. So leave
9855 this assuming that the adjustable offset must be valid for the
9856 sub-words of a TFmode operand, which is what we had before. */
9857
9858 static bool
9859 rs6000_mode_dependent_address (const_rtx addr)
9860 {
9861 switch (GET_CODE (addr))
9862 {
9863 case PLUS:
9864 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
9865 is considered a legitimate address before reload, so there
9866 are no offset restrictions in that case. Note that this
9867 condition is safe in strict mode because any address involving
9868 virtual_stack_vars_rtx or arg_pointer_rtx would already have
9869 been rejected as illegitimate. */
9870 if (XEXP (addr, 0) != virtual_stack_vars_rtx
9871 && XEXP (addr, 0) != arg_pointer_rtx
9872 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
9873 {
9874 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
9875 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
9876 }
9877 break;
9878
9879 case LO_SUM:
9880 /* Anything in the constant pool is sufficiently aligned that
9881 all bytes have the same high part address. */
9882 return !legitimate_constant_pool_address_p (addr, QImode, false);
9883
9884 /* Auto-increment cases are now treated generically in recog.c. */
9885 case PRE_MODIFY:
9886 return TARGET_UPDATE;
9887
9888 /* AND is only allowed in Altivec loads. */
9889 case AND:
9890 return true;
9891
9892 default:
9893 break;
9894 }
9895
9896 return false;
9897 }
9898
9899 /* Debug version of rs6000_mode_dependent_address. */
9900 static bool
9901 rs6000_debug_mode_dependent_address (const_rtx addr)
9902 {
9903 bool ret = rs6000_mode_dependent_address (addr);
9904
9905 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
9906 ret ? "true" : "false");
9907 debug_rtx (addr);
9908
9909 return ret;
9910 }
9911
9912 /* Implement FIND_BASE_TERM. */
9913
9914 rtx
9915 rs6000_find_base_term (rtx op)
9916 {
9917 rtx base;
9918
9919 base = op;
9920 if (GET_CODE (base) == CONST)
9921 base = XEXP (base, 0);
9922 if (GET_CODE (base) == PLUS)
9923 base = XEXP (base, 0);
9924 if (GET_CODE (base) == UNSPEC)
9925 switch (XINT (base, 1))
9926 {
9927 case UNSPEC_TOCREL:
9928 case UNSPEC_MACHOPIC_OFFSET:
9929 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
9930 for aliasing purposes. */
9931 return XVECEXP (base, 0, 0);
9932 }
9933
9934 return op;
9935 }
9936
9937 /* More elaborate version of recog's offsettable_memref_p predicate
9938 that works around the ??? note of rs6000_mode_dependent_address.
9939 In particular it accepts
9940
9941 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
9942
9943 in 32-bit mode, which the recog predicate rejects. */
9944
9945 static bool
9946 rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode)
9947 {
9948 bool worst_case;
9949
9950 if (!MEM_P (op))
9951 return false;
9952
9953 /* First mimic offsettable_memref_p. */
9954 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
9955 return true;
9956
9957 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
9958 the latter predicate knows nothing about the mode of the memory
9959 reference and, therefore, assumes that it is the largest supported
9960 mode (TFmode). As a consequence, legitimate offsettable memory
9961 references are rejected. rs6000_legitimate_offset_address_p contains
9962 the correct logic for the PLUS case of rs6000_mode_dependent_address,
9963 at least with a little bit of help here given that we know the
9964 actual registers used. */
9965 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
9966 || GET_MODE_SIZE (reg_mode) == 4);
9967 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
9968 true, worst_case);
9969 }
9970
9971 /* Determine the reassociation width to be used in reassociate_bb.
9972 This takes into account how many parallel operations we
9973 can actually do of a given type, and also the latency.
9974 P8:
9975 int add/sub 6/cycle
9976 mul 2/cycle
9977 vect add/sub/mul 2/cycle
9978 fp add/sub/mul 2/cycle
9979 dfp 1/cycle
9980 */
9981
9982 static int
9983 rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
9984 enum machine_mode mode)
9985 {
9986 switch (rs6000_cpu)
9987 {
9988 case PROCESSOR_POWER8:
9989 case PROCESSOR_POWER9:
9990 if (DECIMAL_FLOAT_MODE_P (mode))
9991 return 1;
9992 if (VECTOR_MODE_P (mode))
9993 return 4;
9994 if (INTEGRAL_MODE_P (mode))
9995 return opc == MULT_EXPR ? 4 : 6;
9996 if (FLOAT_MODE_P (mode))
9997 return 4;
9998 break;
9999 default:
10000 break;
10001 }
10002 return 1;
10003 }
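/* Illustrative example (source level; names are invented): with a
   reassociation width of 4, a chain such as

     s = a + b + c + d + e + f + g + h;

   can be rebalanced by reassociate_bb into independent partial sums,

     s = ((a + b) + (c + d)) + ((e + f) + (g + h));

   so the additions issue in parallel rather than forming one serial
   dependence chain.  */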
10004
10005 /* Change register usage conditional on target flags. */
10006 static void
10007 rs6000_conditional_register_usage (void)
10008 {
10009 int i;
10010
10011 if (TARGET_DEBUG_TARGET)
10012 fprintf (stderr, "rs6000_conditional_register_usage called\n");
10013
10014 /* Set MQ register fixed (already call_used) so that it will not be
10015 allocated. */
10016 fixed_regs[64] = 1;
10017
10018 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
10019 if (TARGET_64BIT)
10020 fixed_regs[13] = call_used_regs[13]
10021 = call_really_used_regs[13] = 1;
10022
10023 /* Conditionally disable FPRs. */
10024 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
10025 for (i = 32; i < 64; i++)
10026 fixed_regs[i] = call_used_regs[i]
10027 = call_really_used_regs[i] = 1;
10028
10029 /* The TOC register is not killed across calls in a way that is
10030 visible to the compiler. */
10031 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10032 call_really_used_regs[2] = 0;
10033
10034 if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
10035 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10036
10037 if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
10038 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10039 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10040 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10041
10042 if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
10043 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10044 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10045 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10046
10047 if (TARGET_TOC && TARGET_MINIMAL_TOC)
10048 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
10049 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
10050
10051 if (TARGET_SPE)
10052 {
10053 global_regs[SPEFSCR_REGNO] = 1;
10054 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
10055 registers in prologues and epilogues. We no longer use r14
10056 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
10057 pool for link-compatibility with older versions of GCC. Once
10058 "old" code has died out, we can return r14 to the allocation
10059 pool. */
10060 fixed_regs[14]
10061 = call_used_regs[14]
10062 = call_really_used_regs[14] = 1;
10063 }
10064
10065 if (!TARGET_ALTIVEC && !TARGET_VSX)
10066 {
10067 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
10068 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10069 call_really_used_regs[VRSAVE_REGNO] = 1;
10070 }
10071
10072 if (TARGET_ALTIVEC || TARGET_VSX)
10073 global_regs[VSCR_REGNO] = 1;
10074
10075 if (TARGET_ALTIVEC_ABI)
10076 {
10077 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
10078 call_used_regs[i] = call_really_used_regs[i] = 1;
10079
10080 /* AIX reserves VR20:31 in non-extended ABI mode. */
10081 if (TARGET_XCOFF)
10082 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
10083 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
10084 }
10085 }
10086
10087 \f
10088 /* Output insns to set DEST equal to the constant SOURCE as a series of
10089 lis, ori and shl instructions and return TRUE. */
10090
10091 bool
10092 rs6000_emit_set_const (rtx dest, rtx source)
10093 {
10094 machine_mode mode = GET_MODE (dest);
10095 rtx temp, set;
10096 rtx_insn *insn;
10097 HOST_WIDE_INT c;
10098
10099 gcc_checking_assert (CONST_INT_P (source));
10100 c = INTVAL (source);
10101 switch (mode)
10102 {
10103 case QImode:
10104 case HImode:
10105 emit_insn (gen_rtx_SET (dest, source));
10106 return true;
10107
10108 case SImode:
10109 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
10110
10111 emit_insn (gen_rtx_SET (copy_rtx (temp),
10112 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
10113 emit_insn (gen_rtx_SET (dest,
10114 gen_rtx_IOR (SImode, copy_rtx (temp),
10115 GEN_INT (c & 0xffff))));
10116 break;
10117
10118 case DImode:
10119 if (!TARGET_POWERPC64)
10120 {
10121 rtx hi, lo;
10122
10123 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
10124 DImode);
10125 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
10126 DImode);
10127 emit_move_insn (hi, GEN_INT (c >> 32));
10128 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
10129 emit_move_insn (lo, GEN_INT (c));
10130 }
10131 else
10132 rs6000_emit_set_long_const (dest, c);
10133 break;
10134
10135 default:
10136 gcc_unreachable ();
10137 }
10138
10139 insn = get_last_insn ();
10140 set = single_set (insn);
10141 if (! CONSTANT_P (SET_SRC (set)))
10142 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
10143
10144 return true;
10145 }
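/* A standalone sketch (not part of GCC; names are invented) of the
   SImode case above: a 32-bit constant is built with lis (high 16
   bits) followed by ori (low 16 bits).  */
#include <stdio.h>

int
main (void)
{
  unsigned int c = 0x12345678u;
  unsigned int hi = c & ~0xffffu;  /* lis r9,0x1234     */
  unsigned int lo = c & 0xffffu;   /* ori r9,r9,0x5678  */
  printf ("0x%08x\n", hi | lo);    /* prints 0x12345678 */
  return 0;
}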
10146
10147 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
10148 Output insns to set DEST equal to the constant C as a series of
10149 lis, ori and shl instructions. */
10150
10151 static void
10152 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
10153 {
10154 rtx temp;
10155 HOST_WIDE_INT ud1, ud2, ud3, ud4;
10156
10157 ud1 = c & 0xffff;
10158 c = c >> 16;
10159 ud2 = c & 0xffff;
10160 c = c >> 16;
10161 ud3 = c & 0xffff;
10162 c = c >> 16;
10163 ud4 = c & 0xffff;
10164
10165 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
10166 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
10167 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
10168
10169 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
10170 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
10171 {
10172 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10173
10174 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10175 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10176 if (ud1 != 0)
10177 emit_move_insn (dest,
10178 gen_rtx_IOR (DImode, copy_rtx (temp),
10179 GEN_INT (ud1)));
10180 }
10181 else if (ud3 == 0 && ud4 == 0)
10182 {
10183 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10184
10185 gcc_assert (ud2 & 0x8000);
10186 emit_move_insn (copy_rtx (temp),
10187 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
10188 if (ud1 != 0)
10189 emit_move_insn (copy_rtx (temp),
10190 gen_rtx_IOR (DImode, copy_rtx (temp),
10191 GEN_INT (ud1)));
10192 emit_move_insn (dest,
10193 gen_rtx_ZERO_EXTEND (DImode,
10194 gen_lowpart (SImode,
10195 copy_rtx (temp))));
10196 }
10197 else if ((ud4 == 0xffff && (ud3 & 0x8000))
10198 || (ud4 == 0 && ! (ud3 & 0x8000)))
10199 {
10200 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10201
10202 emit_move_insn (copy_rtx (temp),
10203 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
10204 if (ud2 != 0)
10205 emit_move_insn (copy_rtx (temp),
10206 gen_rtx_IOR (DImode, copy_rtx (temp),
10207 GEN_INT (ud2)));
10208 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10209 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10210 GEN_INT (16)));
10211 if (ud1 != 0)
10212 emit_move_insn (dest,
10213 gen_rtx_IOR (DImode, copy_rtx (temp),
10214 GEN_INT (ud1)));
10215 }
10216 else
10217 {
10218 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
10219
10220 emit_move_insn (copy_rtx (temp),
10221 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
10222 if (ud3 != 0)
10223 emit_move_insn (copy_rtx (temp),
10224 gen_rtx_IOR (DImode, copy_rtx (temp),
10225 GEN_INT (ud3)));
10226
10227 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
10228 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
10229 GEN_INT (32)));
10230 if (ud2 != 0)
10231 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
10232 gen_rtx_IOR (DImode, copy_rtx (temp),
10233 GEN_INT (ud2 << 16)));
10234 if (ud1 != 0)
10235 emit_move_insn (dest,
10236 gen_rtx_IOR (DImode, copy_rtx (temp),
10237 GEN_INT (ud1)));
10238 }
10239 }
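/* A standalone sketch (not part of GCC; names are invented) of the
   general five-instruction case above: lis/ori build the high 32 bits,
   sldi shifts them into place, and oris/ori fill in the low 32 bits.  */
#include <stdio.h>

int
main (void)
{
  unsigned long long c = 0x123456789abcdef0ULL;
  unsigned long long ud1 = c & 0xffff;           /* 0xdef0 */
  unsigned long long ud2 = (c >> 16) & 0xffff;   /* 0x9abc */
  unsigned long long ud3 = (c >> 32) & 0xffff;   /* 0x5678 */
  unsigned long long ud4 = (c >> 48) & 0xffff;   /* 0x1234 */
  unsigned long long r = ud4 << 16;  /* lis  r9,0x1234     */
  r |= ud3;                          /* ori  r9,r9,0x5678  */
  r <<= 32;                          /* sldi r9,r9,32      */
  r |= ud2 << 16;                    /* oris r9,r9,0x9abc  */
  r |= ud1;                          /* ori  r9,r9,0xdef0  */
  printf ("%s\n", r == c ? "match" : "mismatch");  /* prints "match" */
  return 0;
}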
10240
10241 /* Helper for the following. Get rid of [r+r] memory refs
10242 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
10243
10244 static void
10245 rs6000_eliminate_indexed_memrefs (rtx operands[2])
10246 {
10247 if (reload_in_progress)
10248 return;
10249
10250 if (GET_CODE (operands[0]) == MEM
10251 && GET_CODE (XEXP (operands[0], 0)) != REG
10252 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
10253 GET_MODE (operands[0]), false))
10254 operands[0]
10255 = replace_equiv_address (operands[0],
10256 copy_addr_to_reg (XEXP (operands[0], 0)));
10257
10258 if (GET_CODE (operands[1]) == MEM
10259 && GET_CODE (XEXP (operands[1], 0)) != REG
10260 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
10261 GET_MODE (operands[1]), false))
10262 operands[1]
10263 = replace_equiv_address (operands[1],
10264 copy_addr_to_reg (XEXP (operands[1], 0)));
10265 }
10266
10267 /* Generate a vector of constants to permute MODE for a little-endian
10268 storage operation by swapping the two halves of a vector. */
10269 static rtvec
10270 rs6000_const_vec (machine_mode mode)
10271 {
10272 int i, subparts;
10273 rtvec v;
10274
10275 switch (mode)
10276 {
10277 case V1TImode:
10278 subparts = 1;
10279 break;
10280 case V2DFmode:
10281 case V2DImode:
10282 subparts = 2;
10283 break;
10284 case V4SFmode:
10285 case V4SImode:
10286 subparts = 4;
10287 break;
10288 case V8HImode:
10289 subparts = 8;
10290 break;
10291 case V16QImode:
10292 subparts = 16;
10293 break;
10294 default:
10295 gcc_unreachable ();
10296 }
10297
10298 v = rtvec_alloc (subparts);
10299
10300 for (i = 0; i < subparts / 2; ++i)
10301 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
10302 for (i = subparts / 2; i < subparts; ++i)
10303 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
10304
10305 return v;
10306 }
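/* For example, for V4SImode (subparts == 4) this returns the selector
   { 2, 3, 0, 1 }, i.e. the permutation that swaps the two 64-bit
   halves of the vector.  */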
10307
10308 /* Generate a permute rtx that represents an lxvd2x, stxvd2x, or xxpermdi
10309 for a VSX load or store operation. */
10310 rtx
10311 rs6000_gen_le_vsx_permute (rtx source, machine_mode mode)
10312 {
10313 /* Use ROTATE instead of VEC_SELECT on IEEE 128-bit floating point, and
10314 128-bit integers if they are allowed in VSX registers. */
10315 if (FLOAT128_VECTOR_P (mode) || mode == TImode)
10316 return gen_rtx_ROTATE (mode, source, GEN_INT (64));
10317 else
10318 {
10319 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
10320 return gen_rtx_VEC_SELECT (mode, source, par);
10321 }
10322 }
10323
10324 /* Emit a little-endian load from vector memory location SOURCE to VSX
10325 register DEST in mode MODE. The load is done with two permuting
10326 insns that represent an lxvd2x and an xxpermdi. */
10327 void
10328 rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
10329 {
10330 rtx tmp, permute_mem, permute_reg;
10331
10332 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10333 V1TImode). */
10334 if (mode == TImode || mode == V1TImode)
10335 {
10336 mode = V2DImode;
10337 dest = gen_lowpart (V2DImode, dest);
10338 source = adjust_address (source, V2DImode, 0);
10339 }
10340
10341 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
10342 permute_mem = rs6000_gen_le_vsx_permute (source, mode);
10343 permute_reg = rs6000_gen_le_vsx_permute (tmp, mode);
10344 emit_insn (gen_rtx_SET (tmp, permute_mem));
10345 emit_insn (gen_rtx_SET (dest, permute_reg));
10346 }
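/* On little-endian hardware lxvd2x loads the two doublewords swapped
   relative to their order in memory, and xxpermdi swaps them back;
   the two permuting SETs emitted above model exactly that pair.
   Generating both at expand time lets later passes cancel permutes
   that turn out to be redundant (see rs6000_emit_le_vsx_move).  */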
10347
10348 /* Emit a little-endian store to vector memory location DEST from VSX
10349 register SOURCE in mode MODE. The store is done with two permuting
10350 insns that represent an xxpermdi and an stxvd2x. */
10351 void
10352 rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
10353 {
10354 rtx tmp, permute_src, permute_tmp;
10355
10356 /* This should never be called during or after reload, because it does
10357 not re-permute the source register. It is intended only for use
10358 during expand. */
10359 gcc_assert (!reload_in_progress && !lra_in_progress && !reload_completed);
10360
10361 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
10362 V1TImode). */
10363 if (mode == TImode || mode == V1TImode)
10364 {
10365 mode = V2DImode;
10366 dest = adjust_address (dest, V2DImode, 0);
10367 source = gen_lowpart (V2DImode, source);
10368 }
10369
10370 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
10371 permute_src = rs6000_gen_le_vsx_permute (source, mode);
10372 permute_tmp = rs6000_gen_le_vsx_permute (tmp, mode);
10373 emit_insn (gen_rtx_SET (tmp, permute_src));
10374 emit_insn (gen_rtx_SET (dest, permute_tmp));
10375 }
10376
10377 /* Emit a sequence representing a little-endian VSX load or store,
10378 moving data from SOURCE to DEST in mode MODE. This is done
10379 separately from rs6000_emit_move to ensure it is called only
10380 during expand. LE VSX loads and stores introduced later are
10381 handled with a split. The expand-time RTL generation allows
10382 us to optimize away redundant pairs of register-permutes. */
10383 void
10384 rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
10385 {
10386 gcc_assert (!BYTES_BIG_ENDIAN
10387 && VECTOR_MEM_VSX_P (mode)
10388 && !TARGET_P9_VECTOR
10389 && !gpr_or_gpr_p (dest, source)
10390 && (MEM_P (source) ^ MEM_P (dest)));
10391
10392 if (MEM_P (source))
10393 {
10394 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
10395 rs6000_emit_le_vsx_load (dest, source, mode);
10396 }
10397 else
10398 {
10399 if (!REG_P (source))
10400 source = force_reg (mode, source);
10401 rs6000_emit_le_vsx_store (dest, source, mode);
10402 }
10403 }
10404
10405 /* Return whether an SFmode or SImode move can be done without converting
10406 one mode to another. This arises when we have:
10407
10408 (SUBREG:SF (REG:SI ...))
10409 (SUBREG:SI (REG:SF ...))
10410
10411 and one of the values is in a floating point/vector register, where SFmode
10412 scalars are stored in DFmode format. */
10413
10414 bool
10415 valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
10416 {
10417 if (TARGET_ALLOW_SF_SUBREG)
10418 return true;
10419
10420 if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
10421 return true;
10422
10423 if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
10424 return true;
10425
10426 /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))). */
10427 if (SUBREG_P (dest))
10428 {
10429 rtx dest_subreg = SUBREG_REG (dest);
10430 rtx src_subreg = SUBREG_REG (src);
10431 return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
10432 }
10433
10434 return false;
10435 }
10436
10437
10438 /* Helper function to change moves with:
10439
10440 (SUBREG:SF (REG:SI)) and
10441 (SUBREG:SI (REG:SF))
10442
10443 into separate UNSPEC insns. In the PowerPC architecture, scalar SFmode
10444 values are stored as DFmode values in the VSX registers. We need to convert
10445 the bits before we can use a direct move or operate on the bits in the
10446 vector register as an integer type.
10447
10448 Skip things like (set (SUBREG:SI (...)) (SUBREG:SI (...))). */
10449
10450 static bool
10451 rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
10452 {
10453 if (TARGET_DIRECT_MOVE_64BIT && !reload_in_progress && !reload_completed
10454 && !lra_in_progress
10455 && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
10456 && SUBREG_P (source) && sf_subreg_operand (source, mode))
10457 {
10458 rtx inner_source = SUBREG_REG (source);
10459 machine_mode inner_mode = GET_MODE (inner_source);
10460
10461 if (mode == SImode && inner_mode == SFmode)
10462 {
10463 emit_insn (gen_movsi_from_sf (dest, inner_source));
10464 return true;
10465 }
10466
10467 if (mode == SFmode && inner_mode == SImode)
10468 {
10469 emit_insn (gen_movsf_from_si (dest, inner_source));
10470 return true;
10471 }
10472 }
10473
10474 return false;
10475 }
10476
10477 /* Emit a move from SOURCE to DEST in mode MODE. */
10478 void
10479 rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
10480 {
10481 rtx operands[2];
10482 operands[0] = dest;
10483 operands[1] = source;
10484
10485 if (TARGET_DEBUG_ADDR)
10486 {
10487 fprintf (stderr,
10488 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
10489 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
10490 GET_MODE_NAME (mode),
10491 reload_in_progress,
10492 reload_completed,
10493 can_create_pseudo_p ());
10494 debug_rtx (dest);
10495 fprintf (stderr, "source:\n");
10496 debug_rtx (source);
10497 }
10498
10499 /* Sanity checks. Check that we get CONST_WIDE_INT only when we should. */
10500 if (CONST_WIDE_INT_P (operands[1])
10501 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
10502 {
10503 /* This should no longer happen now that CONST_WIDE_INT exists. */
10504 gcc_unreachable ();
10505 }
10506
10507 /* See if we need to special case SImode/SFmode SUBREG moves. */
10508 if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
10509 && rs6000_emit_move_si_sf_subreg (dest, source, mode))
10510 return;
10511
10512 /* Check if GCC is setting up a block move that will end up using FP
10513 registers as temporaries. We must make sure this is acceptable. */
10514 if (GET_CODE (operands[0]) == MEM
10515 && GET_CODE (operands[1]) == MEM
10516 && mode == DImode
10517 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
10518 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
10519 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
10520 ? 32 : MEM_ALIGN (operands[0])))
10521 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
10522 ? 32
10523 : MEM_ALIGN (operands[1]))))
10524 && ! MEM_VOLATILE_P (operands [0])
10525 && ! MEM_VOLATILE_P (operands [1]))
10526 {
10527 emit_move_insn (adjust_address (operands[0], SImode, 0),
10528 adjust_address (operands[1], SImode, 0));
10529 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
10530 adjust_address (copy_rtx (operands[1]), SImode, 4));
10531 return;
10532 }
10533
10534 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
10535 && !gpc_reg_operand (operands[1], mode))
10536 operands[1] = force_reg (mode, operands[1]);
10537
10538 /* Recognize the case where operand[1] is a reference to thread-local
10539 data and load its address to a register. */
10540 if (tls_referenced_p (operands[1]))
10541 {
10542 enum tls_model model;
10543 rtx tmp = operands[1];
10544 rtx addend = NULL;
10545
10546 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
10547 {
10548 addend = XEXP (XEXP (tmp, 0), 1);
10549 tmp = XEXP (XEXP (tmp, 0), 0);
10550 }
10551
10552 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
10553 model = SYMBOL_REF_TLS_MODEL (tmp);
10554 gcc_assert (model != 0);
10555
10556 tmp = rs6000_legitimize_tls_address (tmp, model);
10557 if (addend)
10558 {
10559 tmp = gen_rtx_PLUS (mode, tmp, addend);
10560 tmp = force_operand (tmp, operands[0]);
10561 }
10562 operands[1] = tmp;
10563 }
10564
10565 /* Handle the case where reload calls us with an invalid address. */
10566 if (reload_in_progress && mode == Pmode
10567 && (! general_operand (operands[1], mode)
10568 || ! nonimmediate_operand (operands[0], mode)))
10569 goto emit_set;
10570
10571 /* 128-bit constant floating-point values on Darwin should really be loaded
10572 as two parts. However, this premature splitting is a problem when DFmode
10573 values can go into Altivec registers. */
10574 if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
10575 && GET_CODE (operands[1]) == CONST_DOUBLE)
10576 {
10577 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
10578 simplify_gen_subreg (DFmode, operands[1], mode, 0),
10579 DFmode);
10580 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
10581 GET_MODE_SIZE (DFmode)),
10582 simplify_gen_subreg (DFmode, operands[1], mode,
10583 GET_MODE_SIZE (DFmode)),
10584 DFmode);
10585 return;
10586 }
10587
10588 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
10589 cfun->machine->sdmode_stack_slot =
10590 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
10591
10592
10593 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
10594 p1:SD) if p1 is not of floating point class and p0 is spilled,
10595 since we have no analogous movsd_store for this case. */
10596 if (lra_in_progress && mode == DDmode
10597 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10598 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10599 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
10600 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
10601 {
10602 enum reg_class cl;
10603 int regno = REGNO (SUBREG_REG (operands[1]));
10604
10605 if (regno >= FIRST_PSEUDO_REGISTER)
10606 {
10607 cl = reg_preferred_class (regno);
10608 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
10609 }
10610 if (regno >= 0 && ! FP_REGNO_P (regno))
10611 {
10612 mode = SDmode;
10613 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
10614 operands[1] = SUBREG_REG (operands[1]);
10615 }
10616 }
10617 if (lra_in_progress
10618 && mode == SDmode
10619 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
10620 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
10621 && (REG_P (operands[1])
10622 || (GET_CODE (operands[1]) == SUBREG
10623 && REG_P (SUBREG_REG (operands[1])))))
10624 {
10625 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
10626 ? SUBREG_REG (operands[1]) : operands[1]);
10627 enum reg_class cl;
10628
10629 if (regno >= FIRST_PSEUDO_REGISTER)
10630 {
10631 cl = reg_preferred_class (regno);
10632 gcc_assert (cl != NO_REGS);
10633 regno = ira_class_hard_regs[cl][0];
10634 }
10635 if (FP_REGNO_P (regno))
10636 {
10637 if (GET_MODE (operands[0]) != DDmode)
10638 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
10639 emit_insn (gen_movsd_store (operands[0], operands[1]));
10640 }
10641 else if (INT_REGNO_P (regno))
10642 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10643 else
10644 gcc_unreachable ();
10645 return;
10646 }
10647 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
10648 p1:DD)) if p0 is not of floating point class and p1 is spilled,
10649 since we have no analogous movsd_load for this case. */
10650 if (lra_in_progress && mode == DDmode
10651 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
10652 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
10653 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10654 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10655 {
10656 enum reg_class cl;
10657 int regno = REGNO (SUBREG_REG (operands[0]));
10658
10659 if (regno >= FIRST_PSEUDO_REGISTER)
10660 {
10661 cl = reg_preferred_class (regno);
10662 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
10663 }
10664 if (regno >= 0 && ! FP_REGNO_P (regno))
10665 {
10666 mode = SDmode;
10667 operands[0] = SUBREG_REG (operands[0]);
10668 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
10669 }
10670 }
10671 if (lra_in_progress
10672 && mode == SDmode
10673 && (REG_P (operands[0])
10674 || (GET_CODE (operands[0]) == SUBREG
10675 && REG_P (SUBREG_REG (operands[0]))))
10676 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
10677 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
10678 {
10679 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
10680 ? SUBREG_REG (operands[0]) : operands[0]);
10681 enum reg_class cl;
10682
10683 if (regno >= FIRST_PSEUDO_REGISTER)
10684 {
10685 cl = reg_preferred_class (regno);
10686 gcc_assert (cl != NO_REGS);
10687 regno = ira_class_hard_regs[cl][0];
10688 }
10689 if (FP_REGNO_P (regno))
10690 {
10691 if (GET_MODE (operands[1]) != DDmode)
10692 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
10693 emit_insn (gen_movsd_load (operands[0], operands[1]));
10694 }
10695 else if (INT_REGNO_P (regno))
10696 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
10697 else
10698 gcc_unreachable ();
10699 return;
10700 }
10701
10702 if (reload_in_progress
10703 && mode == SDmode
10704 && cfun->machine->sdmode_stack_slot != NULL_RTX
10705 && MEM_P (operands[0])
10706 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
10707 && REG_P (operands[1]))
10708 {
10709 if (FP_REGNO_P (REGNO (operands[1])))
10710 {
10711 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
10712 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
10713 emit_insn (gen_movsd_store (mem, operands[1]));
10714 }
10715 else if (INT_REGNO_P (REGNO (operands[1])))
10716 {
10717 rtx mem = operands[0];
10718 if (BYTES_BIG_ENDIAN)
10719 mem = adjust_address_nv (mem, mode, 4);
10720 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
10721 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
10722 }
10723 else
10724 gcc_unreachable ();
10725 return;
10726 }
10727 if (reload_in_progress
10728 && mode == SDmode
10729 && REG_P (operands[0])
10730 && MEM_P (operands[1])
10731 && cfun->machine->sdmode_stack_slot != NULL_RTX
10732 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
10733 {
10734 if (FP_REGNO_P (REGNO (operands[0])))
10735 {
10736 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
10737 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
10738 emit_insn (gen_movsd_load (operands[0], mem));
10739 }
10740 else if (INT_REGNO_P (REGNO (operands[0])))
10741 {
10742 rtx mem = operands[1];
10743 if (BYTES_BIG_ENDIAN)
10744 mem = adjust_address_nv (mem, mode, 4);
10745 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
10746 emit_insn (gen_movsd_hardfloat (operands[0], mem));
10747 }
10748 else
10749 gcc_unreachable ();
10750 return;
10751 }
10752
10753 /* FIXME: In the long term, this switch statement should go away
10754 and be replaced by a sequence of tests based on things like
10755 mode == Pmode. */
10756 switch (mode)
10757 {
10758 case HImode:
10759 case QImode:
10760 if (CONSTANT_P (operands[1])
10761 && GET_CODE (operands[1]) != CONST_INT)
10762 operands[1] = force_const_mem (mode, operands[1]);
10763 break;
10764
10765 case TFmode:
10766 case TDmode:
10767 case IFmode:
10768 case KFmode:
10769 if (FLOAT128_2REG_P (mode))
10770 rs6000_eliminate_indexed_memrefs (operands);
10771 /* fall through */
10772
10773 case DFmode:
10774 case DDmode:
10775 case SFmode:
10776 case SDmode:
10777 if (CONSTANT_P (operands[1])
10778 && ! easy_fp_constant (operands[1], mode))
10779 operands[1] = force_const_mem (mode, operands[1]);
10780 break;
10781
10782 case V16QImode:
10783 case V8HImode:
10784 case V4SFmode:
10785 case V4SImode:
10786 case V4HImode:
10787 case V2SFmode:
10788 case V2SImode:
10789 case V1DImode:
10790 case V2DFmode:
10791 case V2DImode:
10792 case V1TImode:
10793 if (CONSTANT_P (operands[1])
10794 && !easy_vector_constant (operands[1], mode))
10795 operands[1] = force_const_mem (mode, operands[1]);
10796 break;
10797
10798 case SImode:
10799 case DImode:
10800 /* Use the default pattern for the address of ELF small data. */
10801 if (TARGET_ELF
10802 && mode == Pmode
10803 && DEFAULT_ABI == ABI_V4
10804 && (GET_CODE (operands[1]) == SYMBOL_REF
10805 || GET_CODE (operands[1]) == CONST)
10806 && small_data_operand (operands[1], mode))
10807 {
10808 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10809 return;
10810 }
10811
10812 if (DEFAULT_ABI == ABI_V4
10813 && mode == Pmode && mode == SImode
10814 && flag_pic == 1 && got_operand (operands[1], mode))
10815 {
10816 emit_insn (gen_movsi_got (operands[0], operands[1]));
10817 return;
10818 }
10819
10820 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
10821 && TARGET_NO_TOC
10822 && ! flag_pic
10823 && mode == Pmode
10824 && CONSTANT_P (operands[1])
10825 && GET_CODE (operands[1]) != HIGH
10826 && GET_CODE (operands[1]) != CONST_INT)
10827 {
10828 rtx target = (!can_create_pseudo_p ()
10829 ? operands[0]
10830 : gen_reg_rtx (mode));
10831
10832 /* If this is a function address on -mcall-aixdesc,
10833 convert it to the address of the descriptor. */
10834 if (DEFAULT_ABI == ABI_AIX
10835 && GET_CODE (operands[1]) == SYMBOL_REF
10836 && XSTR (operands[1], 0)[0] == '.')
10837 {
10838 const char *name = XSTR (operands[1], 0);
10839 rtx new_ref;
10840 while (*name == '.')
10841 name++;
10842 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
10843 CONSTANT_POOL_ADDRESS_P (new_ref)
10844 = CONSTANT_POOL_ADDRESS_P (operands[1]);
10845 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
10846 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
10847 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
10848 operands[1] = new_ref;
10849 }
10850
10851 if (DEFAULT_ABI == ABI_DARWIN)
10852 {
10853 #if TARGET_MACHO
10854 if (MACHO_DYNAMIC_NO_PIC_P)
10855 {
10856 /* Take care of any required data indirection. */
10857 operands[1] = rs6000_machopic_legitimize_pic_address (
10858 operands[1], mode, operands[0]);
10859 if (operands[0] != operands[1])
10860 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10861 return;
10862 }
10863 #endif
10864 emit_insn (gen_macho_high (target, operands[1]));
10865 emit_insn (gen_macho_low (operands[0], target, operands[1]));
10866 return;
10867 }
10868
10869 emit_insn (gen_elf_high (target, operands[1]));
10870 emit_insn (gen_elf_low (operands[0], target, operands[1]));
10871 return;
10872 }
10873
10874 /* If this is a SYMBOL_REF that refers to a constant pool entry,
10875 and we have put it in the TOC, we just need to make a TOC-relative
10876 reference to it. */
10877 if (TARGET_TOC
10878 && GET_CODE (operands[1]) == SYMBOL_REF
10879 && use_toc_relative_ref (operands[1], mode))
10880 operands[1] = create_TOC_reference (operands[1], operands[0]);
10881 else if (mode == Pmode
10882 && CONSTANT_P (operands[1])
10883 && GET_CODE (operands[1]) != HIGH
10884 && ((GET_CODE (operands[1]) != CONST_INT
10885 && ! easy_fp_constant (operands[1], mode))
10886 || (GET_CODE (operands[1]) == CONST_INT
10887 && (num_insns_constant (operands[1], mode)
10888 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
10889 || (GET_CODE (operands[0]) == REG
10890 && FP_REGNO_P (REGNO (operands[0]))))
10891 && !toc_relative_expr_p (operands[1], false)
10892 && (TARGET_CMODEL == CMODEL_SMALL
10893 || can_create_pseudo_p ()
10894 || (REG_P (operands[0])
10895 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
10896 {
10897
10898 #if TARGET_MACHO
10899 /* Darwin uses a special PIC legitimizer. */
10900 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
10901 {
10902 operands[1] =
10903 rs6000_machopic_legitimize_pic_address (operands[1], mode,
10904 operands[0]);
10905 if (operands[0] != operands[1])
10906 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10907 return;
10908 }
10909 #endif
10910
10911 /* If we are to limit the number of things we put in the TOC and
10912 this is a symbol plus a constant we can add in one insn,
10913 just put the symbol in the TOC and add the constant. Don't do
10914 this if reload is in progress. */
10915 if (GET_CODE (operands[1]) == CONST
10916 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
10917 && GET_CODE (XEXP (operands[1], 0)) == PLUS
10918 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
10919 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
10920 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
10921 && ! side_effects_p (operands[0]))
10922 {
10923 rtx sym =
10924 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
10925 rtx other = XEXP (XEXP (operands[1], 0), 1);
10926
10927 sym = force_reg (mode, sym);
10928 emit_insn (gen_add3_insn (operands[0], sym, other));
10929 return;
10930 }
10931
10932 operands[1] = force_const_mem (mode, operands[1]);
10933
10934 if (TARGET_TOC
10935 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
10936 && constant_pool_expr_p (XEXP (operands[1], 0))
10937 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
10938 get_pool_constant (XEXP (operands[1], 0)),
10939 get_pool_mode (XEXP (operands[1], 0))))
10940 {
10941 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
10942 operands[0]);
10943 operands[1] = gen_const_mem (mode, tocref);
10944 set_mem_alias_set (operands[1], get_TOC_alias_set ());
10945 }
10946 }
10947 break;
10948
10949 case TImode:
10950 if (!VECTOR_MEM_VSX_P (TImode))
10951 rs6000_eliminate_indexed_memrefs (operands);
10952 break;
10953
10954 case PTImode:
10955 rs6000_eliminate_indexed_memrefs (operands);
10956 break;
10957
10958 default:
10959 fatal_insn ("bad move", gen_rtx_SET (dest, source));
10960 }
10961
10962 /* Above, we may have called force_const_mem which may have returned
10963 an invalid address. If we can, fix this up; otherwise, reload will
10964 have to deal with it. */
10965 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
10966 operands[1] = validize_mem (operands[1]);
10967
10968 emit_set:
10969 emit_insn (gen_rtx_SET (operands[0], operands[1]));
10970 }
10971
10972 /* Return true if a structure, union or array containing FIELD should be
10973 accessed using `BLKMODE'.
10974
10975 For the SPE, simd types are V2SI, and gcc can be tempted to put the
10976 entire thing in a DI and use subregs to access the internals.
10977 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
10978 back-end. Because a single GPR can hold a V2SI, but not a DI, the
10979 best thing to do is set structs to BLKmode and avoid Severe Tire
10980 Damage.
10981
10982 On e500 v2, DF and DI modes suffer from the same anomaly. DF can
10983 fit into one GPR, whereas DI still needs two. */
10984
10985 static bool
10986 rs6000_member_type_forces_blk (const_tree field, machine_mode mode)
10987 {
10988 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
10989 || (TARGET_E500_DOUBLE && mode == DFmode));
10990 }
10991 \f
10992 /* Nonzero if we can use a floating-point register to pass this arg. */
10993 #define USE_FP_FOR_ARG_P(CUM,MODE) \
10994 (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE) \
10995 && (CUM)->fregno <= FP_ARG_MAX_REG \
10996 && TARGET_HARD_FLOAT && TARGET_FPRS)
10997
10998 /* Nonzero if we can use an AltiVec register to pass this arg. */
10999 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
11000 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
11001 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
11002 && TARGET_ALTIVEC_ABI \
11003 && (NAMED))
11004
11005 /* Walk down the type tree of TYPE counting consecutive base elements.
11006 If *MODEP is VOIDmode, then set it to the first valid floating point
11007 or vector type. If a non-floating point or vector type is found, or
11008 if a floating point or vector type that doesn't match a non-VOIDmode
11009 *MODEP is found, then return -1, otherwise return the count in the
11010 sub-tree. */
11011
11012 static int
11013 rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
11014 {
11015 machine_mode mode;
11016 HOST_WIDE_INT size;
11017
11018 switch (TREE_CODE (type))
11019 {
11020 case REAL_TYPE:
11021 mode = TYPE_MODE (type);
11022 if (!SCALAR_FLOAT_MODE_P (mode))
11023 return -1;
11024
11025 if (*modep == VOIDmode)
11026 *modep = mode;
11027
11028 if (*modep == mode)
11029 return 1;
11030
11031 break;
11032
11033 case COMPLEX_TYPE:
11034 mode = TYPE_MODE (TREE_TYPE (type));
11035 if (!SCALAR_FLOAT_MODE_P (mode))
11036 return -1;
11037
11038 if (*modep == VOIDmode)
11039 *modep = mode;
11040
11041 if (*modep == mode)
11042 return 2;
11043
11044 break;
11045
11046 case VECTOR_TYPE:
11047 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
11048 return -1;
11049
11050 /* Use V4SImode as representative of all 128-bit vector types. */
11051 size = int_size_in_bytes (type);
11052 switch (size)
11053 {
11054 case 16:
11055 mode = V4SImode;
11056 break;
11057 default:
11058 return -1;
11059 }
11060
11061 if (*modep == VOIDmode)
11062 *modep = mode;
11063
11064 /* Vector modes are considered to be opaque: two vectors are
11065 equivalent for the purposes of being homogeneous aggregates
11066 if they are the same size. */
11067 if (*modep == mode)
11068 return 1;
11069
11070 break;
11071
11072 case ARRAY_TYPE:
11073 {
11074 int count;
11075 tree index = TYPE_DOMAIN (type);
11076
11077 /* Can't handle incomplete types nor sizes that are not
11078 fixed. */
11079 if (!COMPLETE_TYPE_P (type)
11080 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11081 return -1;
11082
11083 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
11084 if (count == -1
11085 || !index
11086 || !TYPE_MAX_VALUE (index)
11087 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
11088 || !TYPE_MIN_VALUE (index)
11089 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
11090 || count < 0)
11091 return -1;
11092
11093 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
11094 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
11095
11096 /* There must be no padding. */
11097 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11098 return -1;
11099
11100 return count;
11101 }
11102
11103 case RECORD_TYPE:
11104 {
11105 int count = 0;
11106 int sub_count;
11107 tree field;
11108
11109 /* Can't handle incomplete types nor sizes that are not
11110 fixed. */
11111 if (!COMPLETE_TYPE_P (type)
11112 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11113 return -1;
11114
11115 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11116 {
11117 if (TREE_CODE (field) != FIELD_DECL)
11118 continue;
11119
11120 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11121 if (sub_count < 0)
11122 return -1;
11123 count += sub_count;
11124 }
11125
11126 /* There must be no padding. */
11127 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11128 return -1;
11129
11130 return count;
11131 }
11132
11133 case UNION_TYPE:
11134 case QUAL_UNION_TYPE:
11135 {
11136 /* These aren't very interesting except in a degenerate case. */
11137 int count = 0;
11138 int sub_count;
11139 tree field;
11140
11141 /* Can't handle incomplete types nor sizes that are not
11142 fixed. */
11143 if (!COMPLETE_TYPE_P (type)
11144 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
11145 return -1;
11146
11147 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
11148 {
11149 if (TREE_CODE (field) != FIELD_DECL)
11150 continue;
11151
11152 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
11153 if (sub_count < 0)
11154 return -1;
11155 count = count > sub_count ? count : sub_count;
11156 }
11157
11158 /* There must be no padding. */
11159 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
11160 return -1;
11161
11162 return count;
11163 }
11164
11165 default:
11166 break;
11167 }
11168
11169 return -1;
11170 }
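/* Worked examples of the recursion above: struct { double d[2]; } goes
   through REAL_TYPE (count 1, *modep set to DFmode) and ARRAY_TYPE
   (count scaled by the two elements), returning 2. By contrast,
   struct { double d; float f; } returns -1, because the SFmode field
   does not match the DFmode already recorded in *modep. */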
11171
11172 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
11173 float or vector aggregate that shall be passed in FP/vector registers
11174 according to the ELFv2 ABI, return the homogeneous element mode in
11175 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
11176
11177 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
11178
11179 static bool
11180 rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
11181 machine_mode *elt_mode,
11182 int *n_elts)
11183 {
11184 /* Note that we do not accept complex types at the top level as
11185 homogeneous aggregates; these types are handled via the
11186 targetm.calls.split_complex_arg mechanism. Complex types
11187 can be elements of homogeneous aggregates, however. */
11188 if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
11189 {
11190 machine_mode field_mode = VOIDmode;
11191 int field_count = rs6000_aggregate_candidate (type, &field_mode);
11192
11193 if (field_count > 0)
11194 {
11195 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
11196 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
11197
11198 /* The ELFv2 ABI allows homogeneous aggregates to occupy
11199 up to AGGR_ARG_NUM_REG registers. */
11200 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
11201 {
11202 if (elt_mode)
11203 *elt_mode = field_mode;
11204 if (n_elts)
11205 *n_elts = field_count;
11206 return true;
11207 }
11208 }
11209 }
11210
11211 if (elt_mode)
11212 *elt_mode = mode;
11213 if (n_elts)
11214 *n_elts = 1;
11215 return false;
11216 }
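/* Example: under ELFv2, struct { double x, y, z; } is discovered as a
   homogeneous aggregate with *elt_mode == DFmode and *n_elts == 3,
   since three single-register elements fit within AGGR_ARG_NUM_REG;
   it can therefore be passed in three consecutive FPRs. */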
11217
11218 /* Return a nonzero value to say to return the function value in
11219 memory, just as large structures are always returned. TYPE will be
11220 the data type of the value, and FNTYPE will be the type of the
11221 function doing the returning, or @code{NULL} for libcalls.
11222
11223 The AIX ABI for the RS/6000 specifies that all structures are
11224 returned in memory. The Darwin ABI does the same.
11225
11226 For the Darwin 64 Bit ABI, a function result can be returned in
11227 registers or in memory, depending on the size of the return data
11228 type. If it is returned in registers, the value occupies the same
11229 registers as it would if it were the first and only function
11230 argument. Otherwise, the function places its result in memory at
11231 the location pointed to by GPR3.
11232
11233 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
11234 but a draft put them in memory, and GCC used to implement the draft
11235 instead of the final standard. Therefore, aix_struct_return
11236 controls this instead of DEFAULT_ABI; V.4 targets needing backward
11237 compatibility can change DRAFT_V4_STRUCT_RET to override the
11238 default, and -m switches get the final word. See
11239 rs6000_option_override_internal for more details.
11240
11241 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
11242 long double support is enabled. These values are returned in memory.
11243
11244 int_size_in_bytes returns -1 for variable size objects, which go in
11245 memory always. The cast to unsigned makes -1 > 8. */
11246
11247 static bool
11248 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
11249 {
11250 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
11251 if (TARGET_MACHO
11252 && rs6000_darwin64_abi
11253 && TREE_CODE (type) == RECORD_TYPE
11254 && int_size_in_bytes (type) > 0)
11255 {
11256 CUMULATIVE_ARGS valcum;
11257 rtx valret;
11258
11259 valcum.words = 0;
11260 valcum.fregno = FP_ARG_MIN_REG;
11261 valcum.vregno = ALTIVEC_ARG_MIN_REG;
11262 /* Do a trial code generation as if this were going to be passed
11263 as an argument; if any part goes in memory, we return NULL. */
11264 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
11265 if (valret)
11266 return false;
11267 /* Otherwise fall through to more conventional ABI rules. */
11268 }
11269
11270 /* The ELFv2 ABI returns homogeneous float/vector aggregates in registers. */
11271 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
11272 NULL, NULL))
11273 return false;
11274
11275 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
11276 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
11277 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
11278 return false;
11279
11280 if (AGGREGATE_TYPE_P (type)
11281 && (aix_struct_return
11282 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
11283 return true;
11284
11285 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
11286 modes only exist for GCC vector types if -maltivec. */
11287 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
11288 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
11289 return false;
11290
11291 /* Return synthetic vectors in memory. */
11292 if (TREE_CODE (type) == VECTOR_TYPE
11293 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
11294 {
11295 static bool warned_for_return_big_vectors = false;
11296 if (!warned_for_return_big_vectors)
11297 {
11298 warning (OPT_Wpsabi, "GCC vector returned by reference: "
11299 "non-standard ABI extension with no compatibility guarantee");
11300 warned_for_return_big_vectors = true;
11301 }
11302 return true;
11303 }
11304
11305 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
11306 && FLOAT128_IEEE_P (TYPE_MODE (type)))
11307 return true;
11308
11309 return false;
11310 }
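/* Example: on ELFv2, a 12-byte struct of three ints is returned in
   registers (size <= 16 bytes), while a 24-byte struct goes to memory
   unless it was already accepted above as a homogeneous float/vector
   aggregate, e.g. struct { double x, y, z; }. */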
11311
11312 /* Specify whether values returned in registers should be at the most
11313 significant end of a register. We want aggregates returned by
11314 value to match the way aggregates are passed to functions. */
11315
11316 static bool
11317 rs6000_return_in_msb (const_tree valtype)
11318 {
11319 return (DEFAULT_ABI == ABI_ELFv2
11320 && BYTES_BIG_ENDIAN
11321 && AGGREGATE_TYPE_P (valtype)
11322 && FUNCTION_ARG_PADDING (TYPE_MODE (valtype), valtype) == upward);
11323 }
11324
11325 #ifdef HAVE_AS_GNU_ATTRIBUTE
11326 /* Return TRUE if a call to function FNDECL may be one that
11327 potentially affects the function calling ABI of the object file. */
11328
11329 static bool
11330 call_ABI_of_interest (tree fndecl)
11331 {
11332 if (rs6000_gnu_attr && symtab->state == EXPANSION)
11333 {
11334 struct cgraph_node *c_node;
11335
11336 /* Libcalls are always interesting. */
11337 if (fndecl == NULL_TREE)
11338 return true;
11339
11340 /* Any call to an external function is interesting. */
11341 if (DECL_EXTERNAL (fndecl))
11342 return true;
11343
11344 /* Interesting functions that we are emitting in this object file. */
11345 c_node = cgraph_node::get (fndecl);
11346 c_node = c_node->ultimate_alias_target ();
11347 return !c_node->only_called_directly_p ();
11348 }
11349 return false;
11350 }
11351 #endif
11352
11353 /* Initialize a variable CUM of type CUMULATIVE_ARGS
11354 for a call to a function whose data type is FNTYPE.
11355 For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
11356
11357 For incoming args we set the number of prototyped arguments to a
11358 large value, so we never return a PARALLEL. */
11359
11360 void
11361 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
11362 rtx libname ATTRIBUTE_UNUSED, int incoming,
11363 int libcall, int n_named_args,
11364 tree fndecl ATTRIBUTE_UNUSED,
11365 machine_mode return_mode ATTRIBUTE_UNUSED)
11366 {
11367 static CUMULATIVE_ARGS zero_cumulative;
11368
11369 *cum = zero_cumulative;
11370 cum->words = 0;
11371 cum->fregno = FP_ARG_MIN_REG;
11372 cum->vregno = ALTIVEC_ARG_MIN_REG;
11373 cum->prototype = (fntype && prototype_p (fntype));
11374 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
11375 ? CALL_LIBCALL : CALL_NORMAL);
11376 cum->sysv_gregno = GP_ARG_MIN_REG;
11377 cum->stdarg = stdarg_p (fntype);
11378 cum->libcall = libcall;
11379
11380 cum->nargs_prototype = 0;
11381 if (incoming || cum->prototype)
11382 cum->nargs_prototype = n_named_args;
11383
11384 /* Check for a longcall attribute. */
11385 if ((!fntype && rs6000_default_long_calls)
11386 || (fntype
11387 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
11388 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
11389 cum->call_cookie |= CALL_LONG;
11390
11391 if (TARGET_DEBUG_ARG)
11392 {
11393 fprintf (stderr, "\ninit_cumulative_args:");
11394 if (fntype)
11395 {
11396 tree ret_type = TREE_TYPE (fntype);
11397 fprintf (stderr, " ret code = %s,",
11398 get_tree_code_name (TREE_CODE (ret_type)));
11399 }
11400
11401 if (cum->call_cookie & CALL_LONG)
11402 fprintf (stderr, " longcall,");
11403
11404 fprintf (stderr, " proto = %d, nargs = %d\n",
11405 cum->prototype, cum->nargs_prototype);
11406 }
11407
11408 #ifdef HAVE_AS_GNU_ATTRIBUTE
11409 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
11410 {
11411 cum->escapes = call_ABI_of_interest (fndecl);
11412 if (cum->escapes)
11413 {
11414 tree return_type;
11415
11416 if (fntype)
11417 {
11418 return_type = TREE_TYPE (fntype);
11419 return_mode = TYPE_MODE (return_type);
11420 }
11421 else
11422 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
11423
11424 if (return_type != NULL)
11425 {
11426 if (TREE_CODE (return_type) == RECORD_TYPE
11427 && TYPE_TRANSPARENT_AGGR (return_type))
11428 {
11429 return_type = TREE_TYPE (first_field (return_type));
11430 return_mode = TYPE_MODE (return_type);
11431 }
11432 if (AGGREGATE_TYPE_P (return_type)
11433 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
11434 <= 8))
11435 rs6000_returns_struct = true;
11436 }
11437 if (SCALAR_FLOAT_MODE_P (return_mode))
11438 {
11439 rs6000_passes_float = true;
11440 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11441 && (FLOAT128_IBM_P (return_mode)
11442 || FLOAT128_IEEE_P (return_mode)
11443 || (return_type != NULL
11444 && (TYPE_MAIN_VARIANT (return_type)
11445 == long_double_type_node))))
11446 rs6000_passes_long_double = true;
11447 }
11448 if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
11449 || SPE_VECTOR_MODE (return_mode))
11450 rs6000_passes_vector = true;
11451 }
11452 }
11453 #endif
11454
11455 if (fntype
11456 && !TARGET_ALTIVEC
11457 && TARGET_ALTIVEC_ABI
11458 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
11459 {
11460 error ("cannot return value in vector register because"
11461 " altivec instructions are disabled, use -maltivec"
11462 " to enable them");
11463 }
11464 }
11465 \f
11466 /* The mode the ABI uses for a word. This is not the same as word_mode
11467 for -m32 -mpowerpc64. This is used to implement various target hooks. */
11468
11469 static machine_mode
11470 rs6000_abi_word_mode (void)
11471 {
11472 return TARGET_32BIT ? SImode : DImode;
11473 }
11474
11475 /* Implement the TARGET_OFFLOAD_OPTIONS hook. */
11476 static char *
11477 rs6000_offload_options (void)
11478 {
11479 if (TARGET_64BIT)
11480 return xstrdup ("-foffload-abi=lp64");
11481 else
11482 return xstrdup ("-foffload-abi=ilp32");
11483 }
11484
11485 /* On rs6000, function arguments are promoted, as are function return
11486 values. */
11487
11488 static machine_mode
11489 rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
11490 machine_mode mode,
11491 int *punsignedp ATTRIBUTE_UNUSED,
11492 const_tree, int)
11493 {
11494 PROMOTE_MODE (mode, *punsignedp, type);
11495
11496 return mode;
11497 }
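/* Example, assuming the PROMOTE_MODE definition in rs6000.h: a char or
   short argument is widened to SImode on 32-bit targets and to DImode
   on 64-bit targets before being passed or returned. */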
11498
11499 /* Return true if TYPE must be passed on the stack and not in registers. */
11500
11501 static bool
11502 rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
11503 {
11504 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
11505 return must_pass_in_stack_var_size (mode, type);
11506 else
11507 return must_pass_in_stack_var_size_or_pad (mode, type);
11508 }
11509
11510 static inline bool
11511 is_complex_IBM_long_double (machine_mode mode)
11512 {
11513 return mode == ICmode || (!TARGET_IEEEQUAD && mode == TCmode);
11514 }
11515
11516 /* Whether ABI_V4 passes MODE args to a function in floating point
11517 registers. */
11518
11519 static bool
11520 abi_v4_pass_in_fpr (machine_mode mode)
11521 {
11522 if (!TARGET_FPRS || !TARGET_HARD_FLOAT)
11523 return false;
11524 if (TARGET_SINGLE_FLOAT && mode == SFmode)
11525 return true;
11526 if (TARGET_DOUBLE_FLOAT && mode == DFmode)
11527 return true;
11528 /* ABI_V4 passes complex IBM long double in 8 gprs.
11529 Stupid, but we can't change the ABI now. */
11530 if (is_complex_IBM_long_double (mode))
11531 return false;
11532 if (FLOAT128_2REG_P (mode))
11533 return true;
11534 if (DECIMAL_FLOAT_MODE_P (mode))
11535 return true;
11536 return false;
11537 }
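/* Example: under ABI_V4 with hard float, a plain double (DFmode)
   argument is passed in an FPR, and so is a _Decimal64, but a complex
   IBM-format long double (TCmode) falls through to the 8-GPR
   convention noted above. */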
11538
11539 /* If defined, a C expression which determines whether, and in which
11540 direction, to pad out an argument with extra space. The value
11541 should be of type `enum direction': either `upward' to pad above
11542 the argument, `downward' to pad below, or `none' to inhibit
11543 padding.
11544
11545 For the AIX ABI structs are always stored left shifted in their
11546 argument slot. */
11547
11548 enum direction
11549 function_arg_padding (machine_mode mode, const_tree type)
11550 {
11551 #ifndef AGGREGATE_PADDING_FIXED
11552 #define AGGREGATE_PADDING_FIXED 0
11553 #endif
11554 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
11555 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
11556 #endif
11557
11558 if (!AGGREGATE_PADDING_FIXED)
11559 {
11560 /* GCC used to pass structures of the same size as integer types as
11561 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
11562 That is, structures of size 1 or 2 (or 4 when TARGET_64BIT) were
11563 passed padded downward, except that -mstrict-align further
11564 muddied the water in that multi-component structures of 2 and 4
11565 bytes in size were passed padded upward.
11566
11567 The following arranges for best compatibility with previous
11568 versions of gcc, but removes the -mstrict-align dependency. */
11569 if (BYTES_BIG_ENDIAN)
11570 {
11571 HOST_WIDE_INT size = 0;
11572
11573 if (mode == BLKmode)
11574 {
11575 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
11576 size = int_size_in_bytes (type);
11577 }
11578 else
11579 size = GET_MODE_SIZE (mode);
11580
11581 if (size == 1 || size == 2 || size == 4)
11582 return downward;
11583 }
11584 return upward;
11585 }
11586
11587 if (AGGREGATES_PAD_UPWARD_ALWAYS)
11588 {
11589 if (type != 0 && AGGREGATE_TYPE_P (type))
11590 return upward;
11591 }
11592
11593 /* Fall back to the default. */
11594 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
11595 }
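/* Example: on a big-endian powerpc*-linux target, where
   AGGREGATE_PADDING_FIXED is 0, a 2-byte struct passed by value is
   padded downward in its slot, while a 3-byte struct is padded upward;
   AIX instead fixes aggregate padding upward. */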
11596
11597 /* If defined, a C expression that gives the alignment boundary, in bits,
11598 of an argument with the specified mode and type. If it is not defined,
11599 PARM_BOUNDARY is used for all arguments.
11600
11601 V.4 wants long longs and doubles to be doubleword aligned. Just
11602 testing the mode size is a boneheaded way to do this as it means
11603 that other types such as complex int are also double word aligned.
11604 However, we're stuck with this because changing the ABI might break
11605 existing library interfaces.
11606
11607 Doubleword align SPE vectors.
11608 Quadword align Altivec/VSX vectors.
11609 Quadword align large synthetic vector types. */
11610
11611 static unsigned int
11612 rs6000_function_arg_boundary (machine_mode mode, const_tree type)
11613 {
11614 machine_mode elt_mode;
11615 int n_elts;
11616
11617 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11618
11619 if (DEFAULT_ABI == ABI_V4
11620 && (GET_MODE_SIZE (mode) == 8
11621 || (TARGET_HARD_FLOAT
11622 && TARGET_FPRS
11623 && !is_complex_IBM_long_double (mode)
11624 && FLOAT128_2REG_P (mode))))
11625 return 64;
11626 else if (FLOAT128_VECTOR_P (mode))
11627 return 128;
11628 else if (SPE_VECTOR_MODE (mode)
11629 || (type && TREE_CODE (type) == VECTOR_TYPE
11630 && int_size_in_bytes (type) >= 8
11631 && int_size_in_bytes (type) < 16))
11632 return 64;
11633 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11634 || (type && TREE_CODE (type) == VECTOR_TYPE
11635 && int_size_in_bytes (type) >= 16))
11636 return 128;
11637
11638 /* Aggregate types that need > 8 byte alignment are quadword-aligned
11639 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
11640 -mcompat-align-parm is used. */
11641 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
11642 || DEFAULT_ABI == ABI_ELFv2)
11643 && type && TYPE_ALIGN (type) > 64)
11644 {
11645 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
11646 or homogeneous float/vector aggregates here. We already handled
11647 vector aggregates above, but still need to check for float here. */
11648 bool aggregate_p = (AGGREGATE_TYPE_P (type)
11649 && !SCALAR_FLOAT_MODE_P (elt_mode));
11650
11651 /* We used to check for BLKmode instead of the above aggregate type
11652 check. Warn when this results in any difference to the ABI. */
11653 if (aggregate_p != (mode == BLKmode))
11654 {
11655 static bool warned;
11656 if (!warned && warn_psabi)
11657 {
11658 warned = true;
11659 inform (input_location,
11660 "the ABI of passing aggregates with %d-byte alignment"
11661 " has changed in GCC 5",
11662 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
11663 }
11664 }
11665
11666 if (aggregate_p)
11667 return 128;
11668 }
11669
11670 /* Similar for the Darwin64 ABI. Note that for historical reasons we
11671 implement the "aggregate type" check as a BLKmode check here; this
11672 means certain aggregate types are in fact not aligned. */
11673 if (TARGET_MACHO && rs6000_darwin64_abi
11674 && mode == BLKmode
11675 && type && TYPE_ALIGN (type) > 64)
11676 return 128;
11677
11678 return PARM_BOUNDARY;
11679 }
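/* Examples: under ABI_V4 a long long (8 bytes) gets 64-bit alignment;
   an AltiVec/VSX vector such as vector double gets 128-bit alignment;
   anything else falls back to PARM_BOUNDARY. */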
11680
11681 /* The offset in words to the start of the parameter save area. */
11682
11683 static unsigned int
11684 rs6000_parm_offset (void)
11685 {
11686 return (DEFAULT_ABI == ABI_V4 ? 2
11687 : DEFAULT_ABI == ABI_ELFv2 ? 4
11688 : 6);
11689 }
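/* These counts correspond to the fixed link areas at the base of the
   frame: 2 words for V.4 (back chain and LR save), 4 for ELFv2 (back
   chain, CR, LR, TOC), and 6 for AIX/ELFv1 (back chain, CR, LR, two
   reserved words, TOC). */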
11690
11691 /* For a function parm of MODE and TYPE, return the starting word in
11692 the parameter area. NWORDS of the parameter area are already used. */
11693
11694 static unsigned int
11695 rs6000_parm_start (machine_mode mode, const_tree type,
11696 unsigned int nwords)
11697 {
11698 unsigned int align;
11699
11700 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
11701 return nwords + (-(rs6000_parm_offset () + nwords) & align);
11702 }
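/* Worked example, assuming 64-bit ELFv2 (parm offset 4, PARM_BOUNDARY
   of 64): for a 16-byte-aligned arg, align is 128/64 - 1 = 1, and with
   nwords == 3 already used the start is 3 + (-(4 + 3) & 1) = 4, making
   offset + start even and hence 16-byte aligned. */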
11703
11704 /* Compute the size (in words) of a function argument. */
11705
11706 static unsigned long
11707 rs6000_arg_size (machine_mode mode, const_tree type)
11708 {
11709 unsigned long size;
11710
11711 if (mode != BLKmode)
11712 size = GET_MODE_SIZE (mode);
11713 else
11714 size = int_size_in_bytes (type);
11715
11716 if (TARGET_32BIT)
11717 return (size + 3) >> 2;
11718 else
11719 return (size + 7) >> 3;
11720 }
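/* For example, a 20-byte BLKmode argument occupies (20 + 7) >> 3 = 3
   doublewords on a 64-bit target and (20 + 3) >> 2 = 5 words on a
   32-bit target. */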
11721 \f
11722 /* Use this to flush pending int fields. */
11723
11724 static void
11725 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
11726 HOST_WIDE_INT bitpos, int final)
11727 {
11728 unsigned int startbit, endbit;
11729 int intregs, intoffset;
11730 machine_mode mode;
11731
11732 /* Handle the situations where a float is taking up the first half
11733 of the GPR, and the other half is empty (typically due to
11734 alignment restrictions). We can detect this by an 8-byte-aligned
11735 int field, or by seeing that this is the final flush for this
11736 argument. Count the word and continue on. */
11737 if (cum->floats_in_gpr == 1
11738 && (cum->intoffset % 64 == 0
11739 || (cum->intoffset == -1 && final)))
11740 {
11741 cum->words++;
11742 cum->floats_in_gpr = 0;
11743 }
11744
11745 if (cum->intoffset == -1)
11746 return;
11747
11748 intoffset = cum->intoffset;
11749 cum->intoffset = -1;
11750 cum->floats_in_gpr = 0;
11751
11752 if (intoffset % BITS_PER_WORD != 0)
11753 {
11754 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
11755 MODE_INT, 0);
11756 if (mode == BLKmode)
11757 {
11758 /* We couldn't find an appropriate mode, which happens,
11759 e.g., in packed structs when there are 3 bytes to load.
11760 Move intoffset back to the beginning of the word in this
11761 case. */
11762 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
11763 }
11764 }
11765
11766 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
11767 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
11768 intregs = (endbit - startbit) / BITS_PER_WORD;
11769 cum->words += intregs;
11770 /* words should be unsigned. */
11771 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
11772 {
11773 int pad = (endbit/BITS_PER_WORD) - cum->words;
11774 cum->words += pad;
11775 }
11776 }
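/* Example, assuming 64-bit words: an int run recorded from bit 32 and
   flushed at bitpos 128 is rounded out to bits 0..128, so intregs = 2
   words are added to cum->words. */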
11777
11778 /* The darwin64 ABI calls for us to recurse down through structs,
11779 looking for elements passed in registers. Unfortunately, we have
11780 to track int register count here also because of misalignments
11781 in powerpc alignment mode. */
11782
11783 static void
11784 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
11785 const_tree type,
11786 HOST_WIDE_INT startbitpos)
11787 {
11788 tree f;
11789
11790 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
11791 if (TREE_CODE (f) == FIELD_DECL)
11792 {
11793 HOST_WIDE_INT bitpos = startbitpos;
11794 tree ftype = TREE_TYPE (f);
11795 machine_mode mode;
11796 if (ftype == error_mark_node)
11797 continue;
11798 mode = TYPE_MODE (ftype);
11799
11800 if (DECL_SIZE (f) != 0
11801 && tree_fits_uhwi_p (bit_position (f)))
11802 bitpos += int_bit_position (f);
11803
11804 /* ??? FIXME: else assume zero offset. */
11805
11806 if (TREE_CODE (ftype) == RECORD_TYPE)
11807 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
11808 else if (USE_FP_FOR_ARG_P (cum, mode))
11809 {
11810 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
11811 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11812 cum->fregno += n_fpregs;
11813 /* Single-precision floats present a special problem for
11814 us, because they are smaller than an 8-byte GPR, and so
11815 the structure-packing rules combined with the standard
11816 varargs behavior mean that we want to pack float/float
11817 and float/int combinations into a single register's
11818 space. This is complicated by the arg advance flushing,
11819 which works on arbitrarily large groups of int-type
11820 fields. */
11821 if (mode == SFmode)
11822 {
11823 if (cum->floats_in_gpr == 1)
11824 {
11825 /* Two floats in a word; count the word and reset
11826 the float count. */
11827 cum->words++;
11828 cum->floats_in_gpr = 0;
11829 }
11830 else if (bitpos % 64 == 0)
11831 {
11832 /* A float at the beginning of an 8-byte word;
11833 count it and put off adjusting cum->words until
11834 we see if an arg advance flush is going to do it
11835 for us. */
11836 cum->floats_in_gpr++;
11837 }
11838 else
11839 {
11840 /* The float is at the end of a word, preceded
11841 by integer fields, so the arg advance flush
11842 just above has already set cum->words and
11843 everything is taken care of. */
11844 }
11845 }
11846 else
11847 cum->words += n_fpregs;
11848 }
11849 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
11850 {
11851 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
11852 cum->vregno++;
11853 cum->words += 2;
11854 }
11855 else if (cum->intoffset == -1)
11856 cum->intoffset = bitpos;
11857 }
11858 }
11859
11860 /* Check for an item that needs to be considered specially under the Darwin
11861 64-bit ABI. These are record types where the mode is BLKmode or the
11862 structure is 8 bytes in size. */
11863 static int
11864 rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
11865 {
11866 return rs6000_darwin64_abi
11867 && ((mode == BLKmode
11868 && TREE_CODE (type) == RECORD_TYPE
11869 && int_size_in_bytes (type) > 0)
11870 || (type && TREE_CODE (type) == RECORD_TYPE
11871 && int_size_in_bytes (type) == 8)) ? 1 : 0;
11872 }
11873
11874 /* Update the data in CUM to advance over an argument
11875 of mode MODE and data type TYPE.
11876 (TYPE is null for libcalls where that information may not be available.)
11877
11878 Note that for args passed by reference, function_arg will be called
11879 with MODE and TYPE set to that of the pointer to the arg, not the arg
11880 itself. */
11881
11882 static void
11883 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
11884 const_tree type, bool named, int depth)
11885 {
11886 machine_mode elt_mode;
11887 int n_elts;
11888
11889 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
11890
11891 /* Only tick off an argument if we're not recursing. */
11892 if (depth == 0)
11893 cum->nargs_prototype--;
11894
11895 #ifdef HAVE_AS_GNU_ATTRIBUTE
11896 if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
11897 && cum->escapes)
11898 {
11899 if (SCALAR_FLOAT_MODE_P (mode))
11900 {
11901 rs6000_passes_float = true;
11902 if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
11903 && (FLOAT128_IBM_P (mode)
11904 || FLOAT128_IEEE_P (mode)
11905 || (type != NULL
11906 && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
11907 rs6000_passes_long_double = true;
11908 }
11909 if ((named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
11910 || (SPE_VECTOR_MODE (mode)
11911 && !cum->stdarg
11912 && cum->sysv_gregno <= GP_ARG_MAX_REG))
11913 rs6000_passes_vector = true;
11914 }
11915 #endif
11916
11917 if (TARGET_ALTIVEC_ABI
11918 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
11919 || (type && TREE_CODE (type) == VECTOR_TYPE
11920 && int_size_in_bytes (type) == 16)))
11921 {
11922 bool stack = false;
11923
11924 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
11925 {
11926 cum->vregno += n_elts;
11927
11928 if (!TARGET_ALTIVEC)
11929 error ("cannot pass argument in vector register because"
11930 " altivec instructions are disabled, use -maltivec"
11931 " to enable them");
11932
11933 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
11934 even if it is going to be passed in a vector register.
11935 Darwin does the same for variable-argument functions. */
11936 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
11937 && TARGET_64BIT)
11938 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
11939 stack = true;
11940 }
11941 else
11942 stack = true;
11943
11944 if (stack)
11945 {
11946 int align;
11947
11948 /* Vector parameters must be 16-byte aligned. In 32-bit
11949 mode this means we need to take into account the offset
11950 to the parameter save area. In 64-bit mode, they just
11951 have to start on an even word, since the parameter save
11952 area is 16-byte aligned. */
11953 if (TARGET_32BIT)
11954 align = -(rs6000_parm_offset () + cum->words) & 3;
11955 else
11956 align = cum->words & 1;
11957 cum->words += align + rs6000_arg_size (mode, type);
11958
11959 if (TARGET_DEBUG_ARG)
11960 {
11961 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
11962 cum->words, align);
11963 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
11964 cum->nargs_prototype, cum->prototype,
11965 GET_MODE_NAME (mode));
11966 }
11967 }
11968 }
11969 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
11970 && !cum->stdarg
11971 && cum->sysv_gregno <= GP_ARG_MAX_REG)
11972 cum->sysv_gregno++;
11973
11974 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
11975 {
11976 int size = int_size_in_bytes (type);
11977 /* Variable sized types have size == -1 and are
11978 treated as if consisting entirely of ints.
11979 Pad to a 16-byte boundary if needed. */
11980 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
11981 && (cum->words % 2) != 0)
11982 cum->words++;
11983 /* For varargs, we can just go up by the size of the struct. */
11984 if (!named)
11985 cum->words += (size + 7) / 8;
11986 else
11987 {
11988 /* It is tempting to say int register count just goes up by
11989 sizeof(type)/8, but this is wrong in a case such as
11990 { int; double; int; } [powerpc alignment]. We have to
11991 grovel through the fields for these too. */
11992 cum->intoffset = 0;
11993 cum->floats_in_gpr = 0;
11994 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
11995 rs6000_darwin64_record_arg_advance_flush (cum,
11996 size * BITS_PER_UNIT, 1);
11997 }
11998 if (TARGET_DEBUG_ARG)
11999 {
12000 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d, ",
12001 cum->words, TYPE_ALIGN (type), size);
12002 fprintf (stderr,
12003 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
12004 cum->nargs_prototype, cum->prototype,
12005 GET_MODE_NAME (mode));
12006 }
12007 }
12008 else if (DEFAULT_ABI == ABI_V4)
12009 {
12010 if (abi_v4_pass_in_fpr (mode))
12011 {
12012 /* _Decimal128 must use an even/odd register pair. This assumes
12013 that the register number is odd when fregno is odd. */
12014 if (mode == TDmode && (cum->fregno % 2) == 1)
12015 cum->fregno++;
12016
12017 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
12018 <= FP_ARG_V4_MAX_REG)
12019 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
12020 else
12021 {
12022 cum->fregno = FP_ARG_V4_MAX_REG + 1;
12023 if (mode == DFmode || FLOAT128_IBM_P (mode)
12024 || mode == DDmode || mode == TDmode)
12025 cum->words += cum->words & 1;
12026 cum->words += rs6000_arg_size (mode, type);
12027 }
12028 }
12029 else
12030 {
12031 int n_words = rs6000_arg_size (mode, type);
12032 int gregno = cum->sysv_gregno;
12033
12034 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
12035 (r7,r8) or (r9,r10). So is any other 2-word item such
12036 as complex int, due to a historical mistake. */
12037 if (n_words == 2)
12038 gregno += (1 - gregno) & 1;
12039
12040 /* Multi-reg args are not split between registers and stack. */
12041 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12042 {
12043 /* Long long and SPE vectors are aligned on the stack.
12044 So are other 2-word items such as complex int due to
12045 a historical mistake. */
12046 if (n_words == 2)
12047 cum->words += cum->words & 1;
12048 cum->words += n_words;
12049 }
12050
12051 /* Note: we keep accumulating gregno even after it passes
12052 GP_ARG_MAX_REG; this tells expand_builtin_saveregs that we
12053 have started spilling to the stack. */
12054 cum->sysv_gregno = gregno + n_words;
12055 }
12056
12057 if (TARGET_DEBUG_ARG)
12058 {
12059 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
12060 cum->words, cum->fregno);
12061 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
12062 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
12063 fprintf (stderr, "mode = %4s, named = %d\n",
12064 GET_MODE_NAME (mode), named);
12065 }
12066 }
12067 else
12068 {
12069 int n_words = rs6000_arg_size (mode, type);
12070 int start_words = cum->words;
12071 int align_words = rs6000_parm_start (mode, type, start_words);
12072
12073 cum->words = align_words + n_words;
12074
12075 if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
12076 {
12077 /* _Decimal128 must be passed in an even/odd float register pair.
12078 This assumes that the register number is odd when fregno is
12079 odd. */
12080 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12081 cum->fregno++;
12082 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
12083 }
12084
12085 if (TARGET_DEBUG_ARG)
12086 {
12087 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
12088 cum->words, cum->fregno);
12089 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
12090 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
12091 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
12092 named, align_words - start_words, depth);
12093 }
12094 }
12095 }
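/* A rough example: under 32-bit ABI_V4, for f (int a, long long b),
   `a' consumes r3; `b' needs 2 words, so gregno is bumped from r4 to
   the next odd register and `b' occupies the (r5,r6) pair, leaving r4
   unused, as the pairing rule above requires. */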
12096
12097 static void
12098 rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
12099 const_tree type, bool named)
12100 {
12101 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
12102 0);
12103 }
12104
12105 static rtx
12106 spe_build_register_parallel (machine_mode mode, int gregno)
12107 {
12108 rtx r1, r3, r5, r7;
12109
12110 switch (mode)
12111 {
12112 case DFmode:
12113 r1 = gen_rtx_REG (DImode, gregno);
12114 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
12115 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
12116
12117 case DCmode:
12118 case TFmode:
12119 r1 = gen_rtx_REG (DImode, gregno);
12120 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
12121 r3 = gen_rtx_REG (DImode, gregno + 2);
12122 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
12123 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
12124
12125 case TCmode:
12126 r1 = gen_rtx_REG (DImode, gregno);
12127 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
12128 r3 = gen_rtx_REG (DImode, gregno + 2);
12129 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
12130 r5 = gen_rtx_REG (DImode, gregno + 4);
12131 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
12132 r7 = gen_rtx_REG (DImode, gregno + 6);
12133 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
12134 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
12135
12136 default:
12137 gcc_unreachable ();
12138 }
12139 }
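/* A rough illustration: a DCmode argument starting at r5 yields
     (parallel:DC [(expr_list (reg:DI 5) (const_int 0))
                   (expr_list (reg:DI 7) (const_int 8))])
   i.e. two 64-bit pieces at byte offsets 0 and 8; each piece spans a
   GPR pair in ABI terms, hence the register stride of 2. */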
12140
12141 /* Determine where to put a SIMD argument on the SPE. */
12142 static rtx
12143 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, machine_mode mode,
12144 const_tree type)
12145 {
12146 int gregno = cum->sysv_gregno;
12147
12148 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
12149 are passed and returned in a pair of GPRs for ABI compatibility. */
12150 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
12151 || mode == DCmode || mode == TCmode))
12152 {
12153 int n_words = rs6000_arg_size (mode, type);
12154
12155 /* Doubles go in an odd/even register pair (r5/r6, etc). */
12156 if (mode == DFmode)
12157 gregno += (1 - gregno) & 1;
12158
12159 /* Multi-reg args are not split between registers and stack. */
12160 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12161 return NULL_RTX;
12162
12163 return spe_build_register_parallel (mode, gregno);
12164 }
12165 if (cum->stdarg)
12166 {
12167 int n_words = rs6000_arg_size (mode, type);
12168
12169 /* SPE vectors are put in odd registers. */
12170 if (n_words == 2 && (gregno & 1) == 0)
12171 gregno += 1;
12172
12173 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
12174 {
12175 rtx r1, r2;
12176 machine_mode m = SImode;
12177
12178 r1 = gen_rtx_REG (m, gregno);
12179 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
12180 r2 = gen_rtx_REG (m, gregno + 1);
12181 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
12182 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
12183 }
12184 else
12185 return NULL_RTX;
12186 }
12187 else
12188 {
12189 if (gregno <= GP_ARG_MAX_REG)
12190 return gen_rtx_REG (mode, gregno);
12191 else
12192 return NULL_RTX;
12193 }
12194 }
12195
12196 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
12197 structure between cum->intoffset and bitpos to integer registers. */
12198
12199 static void
12200 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
12201 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
12202 {
12203 machine_mode mode;
12204 unsigned int regno;
12205 unsigned int startbit, endbit;
12206 int this_regno, intregs, intoffset;
12207 rtx reg;
12208
12209 if (cum->intoffset == -1)
12210 return;
12211
12212 intoffset = cum->intoffset;
12213 cum->intoffset = -1;
12214
12215 /* If this is the trailing part of a word, try to load only that
12216 much into the register. Otherwise load the whole register. Note
12217 that in the latter case we may pick up unwanted bits. It's not a
12218 problem at the moment, but we may wish to revisit this. */
12219
12220 if (intoffset % BITS_PER_WORD != 0)
12221 {
12222 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
12223 MODE_INT, 0);
12224 if (mode == BLKmode)
12225 {
12226 /* We couldn't find an appropriate mode, which happens,
12227 e.g., in packed structs when there are 3 bytes to load.
12228 Move intoffset back to the beginning of the word in this
12229 case. */
12230 intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
12231 mode = word_mode;
12232 }
12233 }
12234 else
12235 mode = word_mode;
12236
12237 startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
12238 endbit = ROUND_UP (bitpos, BITS_PER_WORD);
12239 intregs = (endbit - startbit) / BITS_PER_WORD;
12240 this_regno = cum->words + intoffset / BITS_PER_WORD;
12241
12242 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
12243 cum->use_stack = 1;
12244
12245 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
12246 if (intregs <= 0)
12247 return;
12248
12249 intoffset /= BITS_PER_UNIT;
12250 do
12251 {
12252 regno = GP_ARG_MIN_REG + this_regno;
12253 reg = gen_rtx_REG (mode, regno);
12254 rvec[(*k)++] =
12255 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
12256
12257 this_regno += 1;
12258 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
12259 mode = word_mode;
12260 intregs -= 1;
12261 }
12262 while (intregs > 0);
12263 }
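
/* Worked example (a sketch, not from the original source): with 64-bit
   words, cum->intoffset == 0 and BITPOS == 96 (i.e. 12 bytes of integer
   fields), we get startbit == 0, endbit == 128, hence intregs == 2, and
   two word_mode registers are emitted at byte offsets 0 and 8.  The
   second register picks up 4 bytes beyond the fields, which is the
   "unwanted bits" situation noted above.  */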
12264
12265 /* Recursive workhorse for rs6000_darwin64_record_arg. */
12266
12267 static void
12268 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
12269 HOST_WIDE_INT startbitpos, rtx rvec[],
12270 int *k)
12271 {
12272 tree f;
12273
12274 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
12275 if (TREE_CODE (f) == FIELD_DECL)
12276 {
12277 HOST_WIDE_INT bitpos = startbitpos;
12278 tree ftype = TREE_TYPE (f);
12279 machine_mode mode;
12280 if (ftype == error_mark_node)
12281 continue;
12282 mode = TYPE_MODE (ftype);
12283
12284 if (DECL_SIZE (f) != 0
12285 && tree_fits_uhwi_p (bit_position (f)))
12286 bitpos += int_bit_position (f);
12287
12288 /* ??? FIXME: else assume zero offset. */
12289
12290 if (TREE_CODE (ftype) == RECORD_TYPE)
12291 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
12292 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
12293 {
12294 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
12295 #if 0
12296 switch (mode)
12297 {
12298 case SCmode: mode = SFmode; break;
12299 case DCmode: mode = DFmode; break;
12300 case TCmode: mode = TFmode; break;
12301 default: break;
12302 }
12303 #endif
12304 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12305 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
12306 {
12307 gcc_assert (cum->fregno == FP_ARG_MAX_REG
12308 && (mode == TFmode || mode == TDmode));
12309 /* Long double or _Decimal128 split over regs and memory. */
12310 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
12311 cum->use_stack = 1;
12312 }
12313 rvec[(*k)++]
12314 = gen_rtx_EXPR_LIST (VOIDmode,
12315 gen_rtx_REG (mode, cum->fregno++),
12316 GEN_INT (bitpos / BITS_PER_UNIT));
12317 if (FLOAT128_2REG_P (mode))
12318 cum->fregno++;
12319 }
12320 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
12321 {
12322 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
12323 rvec[(*k)++]
12324 = gen_rtx_EXPR_LIST (VOIDmode,
12325 gen_rtx_REG (mode, cum->vregno++),
12326 GEN_INT (bitpos / BITS_PER_UNIT));
12327 }
12328 else if (cum->intoffset == -1)
12329 cum->intoffset = bitpos;
12330 }
12331 }
12332
12333 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
12334 the register(s) to be used for each field and subfield of a struct
12335 being passed by value, along with the offset of where the
12336 register's value may be found in the block. FP fields go in FP
12337 registers, vector fields go in vector registers, and everything
12338 else goes in int registers, packed as in memory.
12339
12340 This code is also used for function return values. RETVAL indicates
12341 whether this is the case.
12342
12343 Much of this is taken from the SPARC V9 port, which has a similar
12344 calling convention. */
12345
12346 static rtx
12347 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
12348 bool named, bool retval)
12349 {
12350 rtx rvec[FIRST_PSEUDO_REGISTER];
12351 int k = 1, kbase = 1;
12352 HOST_WIDE_INT typesize = int_size_in_bytes (type);
12353 /* This is a copy; modifications are not visible to our caller. */
12354 CUMULATIVE_ARGS copy_cum = *orig_cum;
12355 CUMULATIVE_ARGS *cum = &copy_cum;
12356
12357 /* Pad to 16 byte boundary if needed. */
12358 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
12359 && (cum->words % 2) != 0)
12360 cum->words++;
12361
12362 cum->intoffset = 0;
12363 cum->use_stack = 0;
12364 cum->named = named;
12365
12366 /* Put entries into rvec[] for individual FP and vector fields, and
12367 for the chunks of memory that go in int regs. Note we start at
12368 element 1; 0 is reserved for an indication of using memory, and
12369 may or may not be filled in below. */
12370 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
12371 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
12372
12373 /* If any part of the struct went on the stack put all of it there.
12374 This hack is because the generic code for
12375 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
12376 parts of the struct are not at the beginning. */
12377 if (cum->use_stack)
12378 {
12379 if (retval)
12380 return NULL_RTX; /* doesn't go in registers at all */
12381 kbase = 0;
12382 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12383 }
12384 if (k > 1 || cum->use_stack)
12385 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
12386 else
12387 return NULL_RTX;
12388 }
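
/* Example sketch (hypothetical struct, not from the original source):
   under darwin64, passing

     struct { double d; int i; int j; };

   by value lets the recursion place D in the next free FPR, while the
   flush routine packs I and J into a single GPR, giving roughly

     (parallel:BLK [(expr_list (reg:DF fN) (const_int 0))
                    (expr_list (reg:DI rM) (const_int 8))])

   where fN and rM depend on how many arguments preceded this one.  */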
12389
12390 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
12391
12392 static rtx
12393 rs6000_mixed_function_arg (machine_mode mode, const_tree type,
12394 int align_words)
12395 {
12396 int n_units;
12397 int i, k;
12398 rtx rvec[GP_ARG_NUM_REG + 1];
12399
12400 if (align_words >= GP_ARG_NUM_REG)
12401 return NULL_RTX;
12402
12403 n_units = rs6000_arg_size (mode, type);
12404
12405 /* Optimize the simple case where the arg fits in one gpr, except in
12406 the case of BLKmode due to assign_parms assuming that registers are
12407 BITS_PER_WORD wide. */
12408 if (n_units == 0
12409 || (n_units == 1 && mode != BLKmode))
12410 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12411
12412 k = 0;
12413 if (align_words + n_units > GP_ARG_NUM_REG)
12414 /* Not all of the arg fits in gprs. Say that it goes in memory too,
12415 using a magic NULL_RTX component.
12416 This is not strictly correct. Only some of the arg belongs in
12417 memory, not all of it. However, the normal scheme using
12418 function_arg_partial_nregs can result in unusual subregs, eg.
12419 (subreg:SI (reg:DF) 4), which are not handled well. The code to
12420 store the whole arg to memory is often more efficient than code
12421 to store pieces, and we know that space is available in the right
12422 place for the whole arg. */
12423 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12424
12425 i = 0;
12426 do
12427 {
12428 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
12429 rtx off = GEN_INT (i++ * 4);
12430 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12431 }
12432 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
12433
12434 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12435 }
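
/* Sketch (not from the original source): for a DFmode argument with
   align_words == 7, only one of the two SImode units still fits in the
   eight argument GPRs, so the result is

     (parallel:DF [(expr_list (nil) (const_int 0))
                   (expr_list (reg:SI r10) (const_int 0))])

   i.e. the magic NULL_RTX element announcing a memory part, plus the
   single register piece that does fit.  */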
12436
12437 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
12438 but must also be copied into the parameter save area starting at
12439 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
12440 to the GPRs and/or memory. Return the number of elements used. */
12441
12442 static int
12443 rs6000_psave_function_arg (machine_mode mode, const_tree type,
12444 int align_words, rtx *rvec)
12445 {
12446 int k = 0;
12447
12448 if (align_words < GP_ARG_NUM_REG)
12449 {
12450 int n_words = rs6000_arg_size (mode, type);
12451
12452 if (align_words + n_words > GP_ARG_NUM_REG
12453 || mode == BLKmode
12454 || (TARGET_32BIT && TARGET_POWERPC64))
12455 {
12456 /* If this is partially on the stack, then we only
12457 include the portion actually in registers here. */
12458 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12459 int i = 0;
12460
12461 if (align_words + n_words > GP_ARG_NUM_REG)
12462 {
12463 /* Not all of the arg fits in gprs. Say that it goes in memory
12464 too, using a magic NULL_RTX component. Also see comment in
12465 rs6000_mixed_function_arg for why the normal
12466 function_arg_partial_nregs scheme doesn't work in this case. */
12467 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12468 }
12469
12470 do
12471 {
12472 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12473 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
12474 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12475 }
12476 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12477 }
12478 else
12479 {
12480 /* The whole arg fits in gprs. */
12481 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12482 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
12483 }
12484 }
12485 else
12486 {
12487 /* It's entirely in memory. */
12488 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
12489 }
12490
12491 return k;
12492 }
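
/* Sketch (not from the original source): on a 64-bit target, a 16-byte
   BLKmode argument at align_words == 6 still fits entirely, so RVEC
   receives

     (expr_list (reg:DI r9)  (const_int 0))
     (expr_list (reg:DI r10) (const_int 8))

   and the function returns 2.  At align_words == 7 the NULL_RTX marker
   would be emitted first, followed by the single r10 piece.  */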
12493
12494 /* RVEC is a vector of K components of an argument of mode MODE.
12495 Construct the final function_arg return value from it. */
12496
12497 static rtx
12498 rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
12499 {
12500 gcc_assert (k >= 1);
12501
12502 /* Avoid returning a PARALLEL in the trivial cases. */
12503 if (k == 1)
12504 {
12505 if (XEXP (rvec[0], 0) == NULL_RTX)
12506 return NULL_RTX;
12507
12508 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
12509 return XEXP (rvec[0], 0);
12510 }
12511
12512 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
12513 }
12514
12515 /* Determine where to put an argument to a function.
12516 Value is zero to push the argument on the stack,
12517 or a hard register in which to store the argument.
12518
12519 MODE is the argument's machine mode.
12520 TYPE is the data type of the argument (as a tree).
12521 This is null for libcalls where that information may
12522 not be available.
12523 CUM is a variable of type CUMULATIVE_ARGS which gives info about
12524 the preceding args and about the function being called. It is
12525 not modified in this routine.
12526 NAMED is nonzero if this argument is a named parameter
12527 (otherwise it is an extra parameter matching an ellipsis).
12528
12529 On RS/6000 the first eight words of non-FP are normally in registers
12530 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
12531 Under V.4, the first 8 FP args are in registers.
12532
12533 If this is floating-point and no prototype is specified, we use
12534 both an FP and integer register (or possibly FP reg and stack). Library
12535 functions (when CALL_LIBCALL is set) always have the proper types for args,
12536 so we can pass the FP value just in one register. emit_library_function
12537 doesn't support PARALLEL anyway.
12538
12539 Note that for args passed by reference, function_arg will be called
12540 with MODE and TYPE set to that of the pointer to the arg, not the arg
12541 itself. */
12542
12543 static rtx
12544 rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
12545 const_tree type, bool named)
12546 {
12547 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12548 enum rs6000_abi abi = DEFAULT_ABI;
12549 machine_mode elt_mode;
12550 int n_elts;
12551
12552 /* Return a marker to indicate whether we need to set or clear the
12553 bit in CR1 that V.4 uses to say fp args were passed in registers.
12554 Assume that we don't need the marker for software floating point,
12555 or compiler-generated library calls. */
12556 if (mode == VOIDmode)
12557 {
12558 if (abi == ABI_V4
12559 && (cum->call_cookie & CALL_LIBCALL) == 0
12560 && (cum->stdarg
12561 || (cum->nargs_prototype < 0
12562 && (cum->prototype || TARGET_NO_PROTOTYPE))))
12563 {
12564 /* For the SPE, we need to crxor CR6 always. */
12565 if (TARGET_SPE_ABI)
12566 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
12567 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
12568 return GEN_INT (cum->call_cookie
12569 | ((cum->fregno == FP_ARG_MIN_REG)
12570 ? CALL_V4_SET_FP_ARGS
12571 : CALL_V4_CLEAR_FP_ARGS));
12572 }
12573
12574 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
12575 }
12576
12577 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12578
12579 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12580 {
12581 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
12582 if (rslt != NULL_RTX)
12583 return rslt;
12584 /* Else fall through to usual handling. */
12585 }
12586
12587 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12588 {
12589 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12590 rtx r, off;
12591 int i, k = 0;
12592
12593 /* Do we also need to pass this argument in the parameter save area?
12594 Library support functions for IEEE 128-bit are assumed to not need the
12595 value passed both in GPRs and in vector registers. */
12596 if (TARGET_64BIT && !cum->prototype
12597 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12598 {
12599 int align_words = ROUND_UP (cum->words, 2);
12600 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12601 }
12602
12603 /* Describe where this argument goes in the vector registers. */
12604 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
12605 {
12606 r = gen_rtx_REG (elt_mode, cum->vregno + i);
12607 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12608 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12609 }
12610
12611 return rs6000_finish_function_arg (mode, rvec, k);
12612 }
12613 else if (TARGET_ALTIVEC_ABI
12614 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
12615 || (type && TREE_CODE (type) == VECTOR_TYPE
12616 && int_size_in_bytes (type) == 16)))
12617 {
12618 if (named || abi == ABI_V4)
12619 return NULL_RTX;
12620 else
12621 {
12622 /* Vector parameters to varargs functions under AIX or Darwin
12623 get passed in memory and possibly also in GPRs. */
12624 int align, align_words, n_words;
12625 machine_mode part_mode;
12626
12627 /* Vector parameters must be 16-byte aligned. In 32-bit
12628 mode this means we need to take into account the offset
12629 to the parameter save area. In 64-bit mode, they just
12630 have to start on an even word, since the parameter save
12631 area is 16-byte aligned. */
12632 if (TARGET_32BIT)
12633 align = -(rs6000_parm_offset () + cum->words) & 3;
12634 else
12635 align = cum->words & 1;
12636 align_words = cum->words + align;
12637
12638 /* Out of registers? Memory, then. */
12639 if (align_words >= GP_ARG_NUM_REG)
12640 return NULL_RTX;
12641
12642 if (TARGET_32BIT && TARGET_POWERPC64)
12643 return rs6000_mixed_function_arg (mode, type, align_words);
12644
12645 /* The vector value goes in GPRs. Only the part of the
12646 value in GPRs is reported here. */
12647 part_mode = mode;
12648 n_words = rs6000_arg_size (mode, type);
12649 if (align_words + n_words > GP_ARG_NUM_REG)
12650 /* Fortunately, there are only two possibilities, the value
12651 is either wholly in GPRs or half in GPRs and half not. */
12652 part_mode = DImode;
12653
12654 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
12655 }
12656 }
12657 else if (TARGET_SPE_ABI && TARGET_SPE
12658 && (SPE_VECTOR_MODE (mode)
12659 || (TARGET_E500_DOUBLE && (mode == DFmode
12660 || mode == DCmode
12661 || mode == TFmode
12662 || mode == TCmode))))
12663 return rs6000_spe_function_arg (cum, mode, type);
12664
12665 else if (abi == ABI_V4)
12666 {
12667 if (abi_v4_pass_in_fpr (mode))
12668 {
12669 /* _Decimal128 must use an even/odd register pair. This assumes
12670 that the register number is odd when fregno is odd. */
12671 if (mode == TDmode && (cum->fregno % 2) == 1)
12672 cum->fregno++;
12673
12674 if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
12675 <= FP_ARG_V4_MAX_REG)
12676 return gen_rtx_REG (mode, cum->fregno);
12677 else
12678 return NULL_RTX;
12679 }
12680 else
12681 {
12682 int n_words = rs6000_arg_size (mode, type);
12683 int gregno = cum->sysv_gregno;
12684
12685 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
12686 (r7,r8) or (r9,r10), as is any other 2-word item such
12687 as complex int, due to a historical mistake. */
12688 if (n_words == 2)
12689 gregno += (1 - gregno) & 1;
12690
12691 /* Multi-reg args are not split between registers and stack. */
12692 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
12693 return NULL_RTX;
12694
12695 if (TARGET_32BIT && TARGET_POWERPC64)
12696 return rs6000_mixed_function_arg (mode, type,
12697 gregno - GP_ARG_MIN_REG);
12698 return gen_rtx_REG (mode, gregno);
12699 }
12700 }
12701 else
12702 {
12703 int align_words = rs6000_parm_start (mode, type, cum->words);
12704
12705 /* _Decimal128 must be passed in an even/odd float register pair.
12706 This assumes that the register number is odd when fregno is odd. */
12707 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
12708 cum->fregno++;
12709
12710 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12711 {
12712 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
12713 rtx r, off;
12714 int i, k = 0;
12715 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12716 int fpr_words;
12717
12718 /* Do we also need to pass this argument in the parameter
12719 save area? */
12720 if (type && (cum->nargs_prototype <= 0
12721 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12722 && TARGET_XL_COMPAT
12723 && align_words >= GP_ARG_NUM_REG)))
12724 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
12725
12726 /* Describe where this argument goes in the fprs. */
12727 for (i = 0; i < n_elts
12728 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
12729 {
12730 /* Check if the argument is split over registers and memory.
12731 This can only ever happen for long double or _Decimal128;
12732 complex types are handled via split_complex_arg. */
12733 machine_mode fmode = elt_mode;
12734 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
12735 {
12736 gcc_assert (FLOAT128_2REG_P (fmode));
12737 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
12738 }
12739
12740 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
12741 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
12742 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12743 }
12744
12745 /* If there were not enough FPRs to hold the argument, the rest
12746 usually goes into memory. However, if the current position
12747 is still within the register parameter area, a portion may
12748 actually have to go into GPRs.
12749
12750 Note that it may happen that the portion of the argument
12751 passed in the first "half" of the first GPR was already
12752 passed in the last FPR as well.
12753
12754 For unnamed arguments, we already set up GPRs to cover the
12755 whole argument in rs6000_psave_function_arg, so there is
12756 nothing further to do at this point. */
12757 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
12758 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
12759 && cum->nargs_prototype > 0)
12760 {
12761 static bool warned;
12762
12763 machine_mode rmode = TARGET_32BIT ? SImode : DImode;
12764 int n_words = rs6000_arg_size (mode, type);
12765
12766 align_words += fpr_words;
12767 n_words -= fpr_words;
12768
12769 do
12770 {
12771 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
12772 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
12773 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
12774 }
12775 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
12776
12777 if (!warned && warn_psabi)
12778 {
12779 warned = true;
12780 inform (input_location,
12781 "the ABI of passing homogeneous float aggregates"
12782 " has changed in GCC 5");
12783 }
12784 }
12785
12786 return rs6000_finish_function_arg (mode, rvec, k);
12787 }
12788 else if (align_words < GP_ARG_NUM_REG)
12789 {
12790 if (TARGET_32BIT && TARGET_POWERPC64)
12791 return rs6000_mixed_function_arg (mode, type, align_words);
12792
12793 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
12794 }
12795 else
12796 return NULL_RTX;
12797 }
12798 }
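
/* Sketch (hypothetical type, not from the original source): under
   ELFv2, a homogeneous aggregate such as

     struct { float a, b, c, d; };

   is discovered as n_elts == 4 with elt_mode == SFmode, and, given free
   FPRs and a prototype in scope, the FPR loop above yields a PARALLEL
   of four SFmode registers at byte offsets 0, 4, 8 and 12.  */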
12799 \f
12800 /* For an arg passed partly in registers and partly in memory, this is
12801 the number of bytes passed in registers. For args passed entirely in
12802 registers or entirely in memory, zero. When an arg is described by a
12803 PARALLEL, perhaps using more than one register type, this function
12804 returns the number of bytes used by the first element of the PARALLEL. */
12805
12806 static int
12807 rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
12808 tree type, bool named)
12809 {
12810 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
12811 bool passed_in_gprs = true;
12812 int ret = 0;
12813 int align_words;
12814 machine_mode elt_mode;
12815 int n_elts;
12816
12817 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
12818
12819 if (DEFAULT_ABI == ABI_V4)
12820 return 0;
12821
12822 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
12823 {
12824 /* If we are passing this arg in the fixed parameter save area (gprs or
12825 memory) as well as VRs, we do not use the partial bytes mechanism;
12826 instead, rs6000_function_arg will return a PARALLEL including a memory
12827 element as necessary. Library support functions for IEEE 128-bit are
12828 assumed to not need the value passed both in GPRs and in vector
12829 registers. */
12830 if (TARGET_64BIT && !cum->prototype
12831 && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
12832 return 0;
12833
12834 /* Otherwise, we pass in VRs only. Check for partial copies. */
12835 passed_in_gprs = false;
12836 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
12837 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
12838 }
12839
12840 /* In this complicated case we just disable the partial_nregs code. */
12841 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
12842 return 0;
12843
12844 align_words = rs6000_parm_start (mode, type, cum->words);
12845
12846 if (USE_FP_FOR_ARG_P (cum, elt_mode))
12847 {
12848 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
12849
12850 /* If we are passing this arg in the fixed parameter save area
12851 (gprs or memory) as well as FPRs, we do not use the partial
12852 bytes mechanism; instead, rs6000_function_arg will return a
12853 PARALLEL including a memory element as necessary. */
12854 if (type
12855 && (cum->nargs_prototype <= 0
12856 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
12857 && TARGET_XL_COMPAT
12858 && align_words >= GP_ARG_NUM_REG)))
12859 return 0;
12860
12861 /* Otherwise, we pass in FPRs only. Check for partial copies. */
12862 passed_in_gprs = false;
12863 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
12864 {
12865 /* Compute number of bytes / words passed in FPRs. If there
12866 is still space available in the register parameter area
12867 *after* that amount, a part of the argument will be passed
12868 in GPRs. In that case, the total amount passed in any
12869 registers is equal to the amount that would have been passed
12870 in GPRs if everything were passed there, so we fall back to
12871 the GPR code below to compute the appropriate value. */
12872 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
12873 * MIN (8, GET_MODE_SIZE (elt_mode)));
12874 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
12875
12876 if (align_words + fpr_words < GP_ARG_NUM_REG)
12877 passed_in_gprs = true;
12878 else
12879 ret = fpr;
12880 }
12881 }
12882
12883 if (passed_in_gprs
12884 && align_words < GP_ARG_NUM_REG
12885 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
12886 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
12887
12888 if (ret != 0 && TARGET_DEBUG_ARG)
12889 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
12890
12891 return ret;
12892 }
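
/* Numeric sketch (not from the original source): on a 64-bit target
   with one argument GPR left (align_words == 7) and a 16-byte aggregate
   that is not passed in FPRs or VRs, passed_in_gprs holds and
   7 < 8 < 7 + 2, so the function returns (8 - 7) * 8 == 8 bytes passed
   in registers; the other 8 bytes go to memory.  */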
12893 \f
12894 /* A C expression that indicates when an argument must be passed by
12895 reference. If nonzero for an argument, a copy of that argument is
12896 made in memory and a pointer to the argument is passed instead of
12897 the argument itself. The pointer is passed in whatever way is
12898 appropriate for passing a pointer to that type.
12899
12900 Under V.4, aggregates and long double are passed by reference.
12901
12902 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
12903 reference unless the AltiVec vector extension ABI is in force.
12904
12905 As an extension to all ABIs, variable sized types are passed by
12906 reference. */
12907
12908 static bool
12909 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
12910 machine_mode mode, const_tree type,
12911 bool named ATTRIBUTE_UNUSED)
12912 {
12913 if (!type)
12914 return 0;
12915
12916 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
12917 && FLOAT128_IEEE_P (TYPE_MODE (type)))
12918 {
12919 if (TARGET_DEBUG_ARG)
12920 fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
12921 return 1;
12922 }
12923
12924 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
12925 {
12926 if (TARGET_DEBUG_ARG)
12927 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
12928 return 1;
12929 }
12930
12931 if (int_size_in_bytes (type) < 0)
12932 {
12933 if (TARGET_DEBUG_ARG)
12934 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
12935 return 1;
12936 }
12937
12938 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
12939 modes only exist for GCC vector types if -maltivec. */
12940 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
12941 {
12942 if (TARGET_DEBUG_ARG)
12943 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
12944 return 1;
12945 }
12946
12947 /* Pass synthetic vectors in memory. */
12948 if (TREE_CODE (type) == VECTOR_TYPE
12949 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
12950 {
12951 static bool warned_for_pass_big_vectors = false;
12952 if (TARGET_DEBUG_ARG)
12953 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
12954 if (!warned_for_pass_big_vectors)
12955 {
12956 warning (OPT_Wpsabi, "GCC vector passed by reference: "
12957 "non-standard ABI extension with no compatibility guarantee");
12958 warned_for_pass_big_vectors = true;
12959 }
12960 return 1;
12961 }
12962
12963 return 0;
12964 }
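
/* Sketch (hypothetical code, not from the original source): under the
   V.4 ABI,

     struct S { int x, y; };
     void f (struct S s);   // aggregate: this hook returns 1

   makes the caller build a temporary copy and pass its address, while
   the same struct is passed by value under AIX or ELFv2.  */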
12965
12966 /* Process a parameter of type TYPE after ARGS_SO_FAR parameters were
12967 already processed. Return true if the parameter must be passed
12968 (fully or partially) on the stack. */
12969
12970 static bool
12971 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
12972 {
12973 machine_mode mode;
12974 int unsignedp;
12975 rtx entry_parm;
12976
12977 /* Catch errors. */
12978 if (type == NULL || type == error_mark_node)
12979 return true;
12980
12981 /* Handle types with no storage requirement. */
12982 if (TYPE_MODE (type) == VOIDmode)
12983 return false;
12984
12985 /* Handle complex types. */
12986 if (TREE_CODE (type) == COMPLEX_TYPE)
12987 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
12988 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
12989
12990 /* Handle transparent aggregates. */
12991 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
12992 && TYPE_TRANSPARENT_AGGR (type))
12993 type = TREE_TYPE (first_field (type));
12994
12995 /* See if this arg was passed by invisible reference. */
12996 if (pass_by_reference (get_cumulative_args (args_so_far),
12997 TYPE_MODE (type), type, true))
12998 type = build_pointer_type (type);
12999
13000 /* Find mode as it is passed by the ABI. */
13001 unsignedp = TYPE_UNSIGNED (type);
13002 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
13003
13004 /* If we must pass in stack, we need a stack. */
13005 if (rs6000_must_pass_in_stack (mode, type))
13006 return true;
13007
13008 /* If there is no incoming register, we need a stack. */
13009 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
13010 if (entry_parm == NULL)
13011 return true;
13012
13013 /* Likewise if we need to pass both in registers and on the stack. */
13014 if (GET_CODE (entry_parm) == PARALLEL
13015 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
13016 return true;
13017
13018 /* Also true if we're partially in registers and partially not. */
13019 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
13020 return true;
13021
13022 /* Update info on where next arg arrives in registers. */
13023 rs6000_function_arg_advance (args_so_far, mode, type, true);
13024 return false;
13025 }
13026
13027 /* Return true if FUN has no prototype, has a variable argument
13028 list, or passes any parameter in memory. */
13029
13030 static bool
13031 rs6000_function_parms_need_stack (tree fun, bool incoming)
13032 {
13033 tree fntype, result;
13034 CUMULATIVE_ARGS args_so_far_v;
13035 cumulative_args_t args_so_far;
13036
13037 if (!fun)
13038 /* Must be a libcall, all of which only use reg parms. */
13039 return false;
13040
13041 fntype = fun;
13042 if (!TYPE_P (fun))
13043 fntype = TREE_TYPE (fun);
13044
13045 /* Varargs functions need the parameter save area. */
13046 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
13047 return true;
13048
13049 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
13050 args_so_far = pack_cumulative_args (&args_so_far_v);
13051
13052 /* When incoming, we will have been passed the function decl.
13053 It is necessary to use the decl to handle K&R style functions,
13054 where TYPE_ARG_TYPES may not be available. */
13055 if (incoming)
13056 {
13057 gcc_assert (DECL_P (fun));
13058 result = DECL_RESULT (fun);
13059 }
13060 else
13061 result = TREE_TYPE (fntype);
13062
13063 if (result && aggregate_value_p (result, fntype))
13064 {
13065 if (!TYPE_P (result))
13066 result = TREE_TYPE (result);
13067 result = build_pointer_type (result);
13068 rs6000_parm_needs_stack (args_so_far, result);
13069 }
13070
13071 if (incoming)
13072 {
13073 tree parm;
13074
13075 for (parm = DECL_ARGUMENTS (fun);
13076 parm && parm != void_list_node;
13077 parm = TREE_CHAIN (parm))
13078 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
13079 return true;
13080 }
13081 else
13082 {
13083 function_args_iterator args_iter;
13084 tree arg_type;
13085
13086 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
13087 if (rs6000_parm_needs_stack (args_so_far, arg_type))
13088 return true;
13089 }
13090
13091 return false;
13092 }
13093
13094 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
13095 usually a constant depending on the ABI. However, in the ELFv2 ABI
13096 the register parameter area is optional when calling a function that
13097 has a prototype in scope, has no variable argument list, and passes
13098 all parameters in registers. */
13099
13100 int
13101 rs6000_reg_parm_stack_space (tree fun, bool incoming)
13102 {
13103 int reg_parm_stack_space;
13104
13105 switch (DEFAULT_ABI)
13106 {
13107 default:
13108 reg_parm_stack_space = 0;
13109 break;
13110
13111 case ABI_AIX:
13112 case ABI_DARWIN:
13113 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
13114 break;
13115
13116 case ABI_ELFv2:
13117 /* ??? Recomputing this every time is a bit expensive. Is there
13118 a place to cache this information? */
13119 if (rs6000_function_parms_need_stack (fun, incoming))
13120 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
13121 else
13122 reg_parm_stack_space = 0;
13123 break;
13124 }
13125
13126 return reg_parm_stack_space;
13127 }
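
/* Sketch (not from the original source): on 64-bit AIX this always
   yields 64 bytes.  Under ELFv2,

     int f (int a, int b);  // prototyped, everything fits in GPRs

   needs no parameter save area, so the function returns 0, whereas a
   varargs or unprototyped callee still gets the full 64 bytes.  */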
13128
13129 static void
13130 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
13131 {
13132 int i;
13133 machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
13134
13135 if (nregs == 0)
13136 return;
13137
13138 for (i = 0; i < nregs; i++)
13139 {
13140 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
13141 if (reload_completed)
13142 {
13143 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
13144 tem = NULL_RTX;
13145 else
13146 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
13147 i * GET_MODE_SIZE (reg_mode));
13148 }
13149 else
13150 tem = replace_equiv_address (tem, XEXP (tem, 0));
13151
13152 gcc_assert (tem);
13153
13154 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
13155 }
13156 }
13157 \f
13158 /* Perform any actions needed for a function that is receiving a
13159 variable number of arguments.
13160
13161 CUM is as above.
13162
13163 MODE and TYPE are the mode and type of the current parameter.
13164
13165 PRETEND_SIZE is a variable that should be set to the amount of stack
13166 that must be pushed by the prolog to pretend that our caller pushed
13167 it.
13168
13169 Normally, this macro will push all remaining incoming registers on the
13170 stack and set PRETEND_SIZE to the length of the registers pushed. */
13171
13172 static void
13173 setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
13174 tree type, int *pretend_size ATTRIBUTE_UNUSED,
13175 int no_rtl)
13176 {
13177 CUMULATIVE_ARGS next_cum;
13178 int reg_size = TARGET_32BIT ? 4 : 8;
13179 rtx save_area = NULL_RTX, mem;
13180 int first_reg_offset;
13181 alias_set_type set;
13182
13183 /* Skip the last named argument. */
13184 next_cum = *get_cumulative_args (cum);
13185 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
13186
13187 if (DEFAULT_ABI == ABI_V4)
13188 {
13189 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
13190
13191 if (! no_rtl)
13192 {
13193 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
13194 HOST_WIDE_INT offset = 0;
13195
13196 /* Try to optimize the size of the varargs save area.
13197 The ABI requires that ap.reg_save_area is doubleword
13198 aligned, but we don't need to allocate space for all
13199 the bytes, only those into which we will actually save
13200 anything. */
13201 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
13202 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
13203 if (TARGET_HARD_FLOAT && TARGET_FPRS
13204 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13205 && cfun->va_list_fpr_size)
13206 {
13207 if (gpr_reg_num)
13208 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
13209 * UNITS_PER_FP_WORD;
13210 if (cfun->va_list_fpr_size
13211 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13212 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
13213 else
13214 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
13215 * UNITS_PER_FP_WORD;
13216 }
13217 if (gpr_reg_num)
13218 {
13219 offset = -((first_reg_offset * reg_size) & ~7);
13220 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
13221 {
13222 gpr_reg_num = cfun->va_list_gpr_size;
13223 if (reg_size == 4 && (first_reg_offset & 1))
13224 gpr_reg_num++;
13225 }
13226 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
13227 }
13228 else if (fpr_size)
13229 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
13230 * UNITS_PER_FP_WORD
13231 - (int) (GP_ARG_NUM_REG * reg_size);
13232
13233 if (gpr_size + fpr_size)
13234 {
13235 rtx reg_save_area
13236 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
13237 gcc_assert (GET_CODE (reg_save_area) == MEM);
13238 reg_save_area = XEXP (reg_save_area, 0);
13239 if (GET_CODE (reg_save_area) == PLUS)
13240 {
13241 gcc_assert (XEXP (reg_save_area, 0)
13242 == virtual_stack_vars_rtx);
13243 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
13244 offset += INTVAL (XEXP (reg_save_area, 1));
13245 }
13246 else
13247 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
13248 }
13249
13250 cfun->machine->varargs_save_offset = offset;
13251 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
13252 }
13253 }
13254 else
13255 {
13256 first_reg_offset = next_cum.words;
13257 save_area = crtl->args.internal_arg_pointer;
13258
13259 if (targetm.calls.must_pass_in_stack (mode, type))
13260 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
13261 }
13262
13263 set = get_varargs_alias_set ();
13264 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
13265 && cfun->va_list_gpr_size)
13266 {
13267 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
13268
13269 if (va_list_gpr_counter_field)
13270 /* V4 va_list_gpr_size counts number of registers needed. */
13271 n_gpr = cfun->va_list_gpr_size;
13272 else
13273 /* char * va_list instead counts number of bytes needed. */
13274 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
13275
13276 if (nregs > n_gpr)
13277 nregs = n_gpr;
13278
13279 mem = gen_rtx_MEM (BLKmode,
13280 plus_constant (Pmode, save_area,
13281 first_reg_offset * reg_size));
13282 MEM_NOTRAP_P (mem) = 1;
13283 set_mem_alias_set (mem, set);
13284 set_mem_align (mem, BITS_PER_WORD);
13285
13286 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
13287 nregs);
13288 }
13289
13290 /* Save FP registers if needed. */
13291 if (DEFAULT_ABI == ABI_V4
13292 && TARGET_HARD_FLOAT && TARGET_FPRS
13293 && ! no_rtl
13294 && next_cum.fregno <= FP_ARG_V4_MAX_REG
13295 && cfun->va_list_fpr_size)
13296 {
13297 int fregno = next_cum.fregno, nregs;
13298 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
13299 rtx lab = gen_label_rtx ();
13300 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
13301 * UNITS_PER_FP_WORD);
13302
13303 emit_jump_insn
13304 (gen_rtx_SET (pc_rtx,
13305 gen_rtx_IF_THEN_ELSE (VOIDmode,
13306 gen_rtx_NE (VOIDmode, cr1,
13307 const0_rtx),
13308 gen_rtx_LABEL_REF (VOIDmode, lab),
13309 pc_rtx)));
13310
13311 for (nregs = 0;
13312 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
13313 fregno++, off += UNITS_PER_FP_WORD, nregs++)
13314 {
13315 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13316 ? DFmode : SFmode,
13317 plus_constant (Pmode, save_area, off));
13318 MEM_NOTRAP_P (mem) = 1;
13319 set_mem_alias_set (mem, set);
13320 set_mem_align (mem, GET_MODE_ALIGNMENT (
13321 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13322 ? DFmode : SFmode));
13323 emit_move_insn (mem, gen_rtx_REG (
13324 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
13325 ? DFmode : SFmode, fregno));
13326 }
13327
13328 emit_label (lab);
13329 }
13330 }
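
/* Layout sketch (a 32-bit V.4 example, not from the original source)
   of the register save area built above, located varargs_save_offset
   bytes from the virtual stack vars base:

     bytes  0..31   r3..r10  (8 GPRs * 4 bytes)
     bytes 32..95   f1..f8   (8 FPRs * 8 bytes)

   with only the slots that va_arg can actually reach being allocated,
   per the size optimization above.  */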
13331
13332 /* Create the va_list data type. */
13333
13334 static tree
13335 rs6000_build_builtin_va_list (void)
13336 {
13337 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
13338
13339 /* For AIX, prefer 'char *' because that's what the system
13340 header files like. */
13341 if (DEFAULT_ABI != ABI_V4)
13342 return build_pointer_type (char_type_node);
13343
13344 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
13345 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
13346 get_identifier ("__va_list_tag"), record);
13347
13348 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
13349 unsigned_char_type_node);
13350 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
13351 unsigned_char_type_node);
13352 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
13353 every user file. */
13354 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13355 get_identifier ("reserved"), short_unsigned_type_node);
13356 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13357 get_identifier ("overflow_arg_area"),
13358 ptr_type_node);
13359 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
13360 get_identifier ("reg_save_area"),
13361 ptr_type_node);
13362
13363 va_list_gpr_counter_field = f_gpr;
13364 va_list_fpr_counter_field = f_fpr;
13365
13366 DECL_FIELD_CONTEXT (f_gpr) = record;
13367 DECL_FIELD_CONTEXT (f_fpr) = record;
13368 DECL_FIELD_CONTEXT (f_res) = record;
13369 DECL_FIELD_CONTEXT (f_ovf) = record;
13370 DECL_FIELD_CONTEXT (f_sav) = record;
13371
13372 TYPE_STUB_DECL (record) = type_decl;
13373 TYPE_NAME (record) = type_decl;
13374 TYPE_FIELDS (record) = f_gpr;
13375 DECL_CHAIN (f_gpr) = f_fpr;
13376 DECL_CHAIN (f_fpr) = f_res;
13377 DECL_CHAIN (f_res) = f_ovf;
13378 DECL_CHAIN (f_ovf) = f_sav;
13379
13380 layout_type (record);
13381
13382 /* The correct type is an array type of one element. */
13383 return build_array_type (record, build_index_type (size_zero_node));
13384 }
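
/* For reference (a sketch, not from the original source), the record
   built above corresponds to this C layout under the V.4 ABI:

     typedef struct __va_list_tag {
       unsigned char gpr;        // index of next GPR argument (0..8)
       unsigned char fpr;        // index of next FPR argument (0..8)
       unsigned short reserved;  // the named padding
       void *overflow_arg_area;  // next argument on the stack
       void *reg_save_area;      // base of the register save block
     } __gnuc_va_list[1];        // array type of one element
*/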
13385
13386 /* Implement va_start. */
13387
13388 static void
13389 rs6000_va_start (tree valist, rtx nextarg)
13390 {
13391 HOST_WIDE_INT words, n_gpr, n_fpr;
13392 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13393 tree gpr, fpr, ovf, sav, t;
13394
13395 /* Only SVR4 needs something special. */
13396 if (DEFAULT_ABI != ABI_V4)
13397 {
13398 std_expand_builtin_va_start (valist, nextarg);
13399 return;
13400 }
13401
13402 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13403 f_fpr = DECL_CHAIN (f_gpr);
13404 f_res = DECL_CHAIN (f_fpr);
13405 f_ovf = DECL_CHAIN (f_res);
13406 f_sav = DECL_CHAIN (f_ovf);
13407
13408 valist = build_simple_mem_ref (valist);
13409 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13410 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13411 f_fpr, NULL_TREE);
13412 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13413 f_ovf, NULL_TREE);
13414 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13415 f_sav, NULL_TREE);
13416
13417 /* Count number of gp and fp argument registers used. */
13418 words = crtl->args.info.words;
13419 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
13420 GP_ARG_NUM_REG);
13421 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
13422 FP_ARG_NUM_REG);
13423
13424 if (TARGET_DEBUG_ARG)
13425 fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
13426 HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
13427 words, n_gpr, n_fpr);
13428
13429 if (cfun->va_list_gpr_size)
13430 {
13431 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
13432 build_int_cst (NULL_TREE, n_gpr));
13433 TREE_SIDE_EFFECTS (t) = 1;
13434 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13435 }
13436
13437 if (cfun->va_list_fpr_size)
13438 {
13439 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
13440 build_int_cst (NULL_TREE, n_fpr));
13441 TREE_SIDE_EFFECTS (t) = 1;
13442 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13443
13444 #ifdef HAVE_AS_GNU_ATTRIBUTE
13445 if (call_ABI_of_interest (cfun->decl))
13446 rs6000_passes_float = true;
13447 #endif
13448 }
13449
13450 /* Find the overflow area. */
13451 t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
13452 if (words != 0)
13453 t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
13454 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
13455 TREE_SIDE_EFFECTS (t) = 1;
13456 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13457
13458 /* If there were no va_arg invocations, don't set up the register
13459 save area. */
13460 if (!cfun->va_list_gpr_size
13461 && !cfun->va_list_fpr_size
13462 && n_gpr < GP_ARG_NUM_REG
13463 && n_fpr < FP_ARG_V4_MAX_REG)
13464 return;
13465
13466 /* Find the register save area. */
13467 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
13468 if (cfun->machine->varargs_save_offset)
13469 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
13470 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
13471 TREE_SIDE_EFFECTS (t) = 1;
13472 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
13473 }
13474
13475 /* Implement va_arg. */
13476
13477 static tree
13478 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
13479 gimple_seq *post_p)
13480 {
13481 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
13482 tree gpr, fpr, ovf, sav, reg, t, u;
13483 int size, rsize, n_reg, sav_ofs, sav_scale;
13484 tree lab_false, lab_over, addr;
13485 int align;
13486 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
13487 int regalign = 0;
13488 gimple *stmt;
13489
13490 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
13491 {
13492 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
13493 return build_va_arg_indirect_ref (t);
13494 }
13495
13496 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
13497 earlier version of gcc, with the property that it always applied alignment
13498 adjustments to the va-args (even for zero-sized types). The cheapest way
13499 to deal with this is to replicate the effect of the part of
13500 std_gimplify_va_arg_expr that carries out the align adjust, for the case
13501 of relevance.
13502 We don't need to check for pass-by-reference because of the test above.
13503 We can return a simplified answer, since we know there's no offset to add. */
13504
13505 if (((TARGET_MACHO
13506 && rs6000_darwin64_abi)
13507 || DEFAULT_ABI == ABI_ELFv2
13508 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
13509 && integer_zerop (TYPE_SIZE (type)))
13510 {
13511 unsigned HOST_WIDE_INT align, boundary;
13512 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
13513 align = PARM_BOUNDARY / BITS_PER_UNIT;
13514 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
13515 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
13516 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
13517 boundary /= BITS_PER_UNIT;
13518 if (boundary > align)
13519 {
13520 tree t;
13521 /* This updates arg ptr by the amount that would be necessary
13522 to align the zero-sized (but not zero-alignment) item. */
13523 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13524 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
13525 gimplify_and_add (t, pre_p);
13526
13527 t = fold_convert (sizetype, valist_tmp);
13528 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
13529 fold_convert (TREE_TYPE (valist),
13530 fold_build2 (BIT_AND_EXPR, sizetype, t,
13531 size_int (-boundary))));
13532 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
13533 gimplify_and_add (t, pre_p);
13534 }
13535 /* Since it is zero-sized there's no increment for the item itself. */
13536 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
13537 return build_va_arg_indirect_ref (valist_tmp);
13538 }
13539
13540 if (DEFAULT_ABI != ABI_V4)
13541 {
13542 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
13543 {
13544 tree elem_type = TREE_TYPE (type);
13545 machine_mode elem_mode = TYPE_MODE (elem_type);
13546 int elem_size = GET_MODE_SIZE (elem_mode);
13547
13548 if (elem_size < UNITS_PER_WORD)
13549 {
13550 tree real_part, imag_part;
13551 gimple_seq post = NULL;
13552
13553 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13554 &post);
13555 /* Copy the value into a temporary, lest the formal temporary
13556 be reused out from under us. */
13557 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
13558 gimple_seq_add_seq (pre_p, post);
13559
13560 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
13561 post_p);
13562
13563 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
13564 }
13565 }
13566
13567 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
13568 }
13569
13570 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
13571 f_fpr = DECL_CHAIN (f_gpr);
13572 f_res = DECL_CHAIN (f_fpr);
13573 f_ovf = DECL_CHAIN (f_res);
13574 f_sav = DECL_CHAIN (f_ovf);
13575
13576 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
13577 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
13578 f_fpr, NULL_TREE);
13579 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
13580 f_ovf, NULL_TREE);
13581 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
13582 f_sav, NULL_TREE);
13583
13584 size = int_size_in_bytes (type);
13585 rsize = (size + 3) / 4;
13586 align = 1;
13587
13588 machine_mode mode = TYPE_MODE (type);
13589 if (abi_v4_pass_in_fpr (mode))
13590 {
13591 /* FP args go in FP registers, if present. */
13592 reg = fpr;
13593 n_reg = (size + 7) / 8;
13594 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
13595 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
13596 if (mode != SFmode && mode != SDmode)
13597 align = 8;
13598 }
13599 else
13600 {
13601 /* Otherwise into GP registers. */
13602 reg = gpr;
13603 n_reg = rsize;
13604 sav_ofs = 0;
13605 sav_scale = 4;
13606 if (n_reg == 2)
13607 align = 8;
13608 }
13609
13610 /* Pull the value out of the saved registers.... */
13611
13612 lab_over = NULL;
13613 addr = create_tmp_var (ptr_type_node, "addr");
13614
13615 /* AltiVec vectors never go in registers when -mabi=altivec. */
13616 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
13617 align = 16;
13618 else
13619 {
13620 lab_false = create_artificial_label (input_location);
13621 lab_over = create_artificial_label (input_location);
13622
13623 /* Long long and SPE vectors are aligned in the registers,
13624 as is any other 2-gpr item such as complex int, due to a
13625 historical mistake. */
13626 u = reg;
13627 if (n_reg == 2 && reg == gpr)
13628 {
13629 regalign = 1;
13630 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13631 build_int_cst (TREE_TYPE (reg), n_reg - 1));
13632 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
13633 unshare_expr (reg), u);
13634 }
13635 /* _Decimal128 is passed in even/odd fpr pairs; the stored
13636 reg number is 0 for f1, so we want to make it odd. */
13637 else if (reg == fpr && mode == TDmode)
13638 {
13639 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13640 build_int_cst (TREE_TYPE (reg), 1));
13641 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
13642 }
13643
13644 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
13645 t = build2 (GE_EXPR, boolean_type_node, u, t);
13646 u = build1 (GOTO_EXPR, void_type_node, lab_false);
13647 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
13648 gimplify_and_add (t, pre_p);
13649
13650 t = sav;
13651 if (sav_ofs)
13652 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
13653
13654 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
13655 build_int_cst (TREE_TYPE (reg), n_reg));
13656 u = fold_convert (sizetype, u);
13657 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
13658 t = fold_build_pointer_plus (t, u);
13659
13660 /* _Decimal32 varargs are located in the second word of the 64-bit
13661 FP register for 32-bit binaries. */
13662 if (TARGET_32BIT
13663 && TARGET_HARD_FLOAT && TARGET_FPRS
13664 && mode == SDmode)
13665 t = fold_build_pointer_plus_hwi (t, size);
13666
13667 gimplify_assign (addr, t, pre_p);
13668
13669 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
13670
13671 stmt = gimple_build_label (lab_false);
13672 gimple_seq_add_stmt (pre_p, stmt);
13673
13674 if ((n_reg == 2 && !regalign) || n_reg > 2)
13675 {
13676 /* Ensure that we don't find any more args in regs.
13677 Alignment has been taken care of for the special cases. */
13678 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
13679 }
13680 }
13681
13682 /* ... otherwise out of the overflow area. */
13683
13684 /* Care for on-stack alignment if needed. */
13685 t = ovf;
13686 if (align != 1)
13687 {
13688 t = fold_build_pointer_plus_hwi (t, align - 1);
13689 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
13690 build_int_cst (TREE_TYPE (t), -align));
13691 }
13692 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
13693
13694 gimplify_assign (unshare_expr (addr), t, pre_p);
13695
13696 t = fold_build_pointer_plus_hwi (t, size);
13697 gimplify_assign (unshare_expr (ovf), t, pre_p);
13698
13699 if (lab_over)
13700 {
13701 stmt = gimple_build_label (lab_over);
13702 gimple_seq_add_stmt (pre_p, stmt);
13703 }
13704
13705 if (STRICT_ALIGNMENT
13706 && (TYPE_ALIGN (type)
13707 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
13708 {
13709 /* The value (of type complex double, for example) may not be
13710 aligned in memory in the saved registers, so copy via a
13711 temporary. (This is the same code as used for SPARC.) */
13712 tree tmp = create_tmp_var (type, "va_arg_tmp");
13713 tree dest_addr = build_fold_addr_expr (tmp);
13714
13715 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
13716 3, dest_addr, addr, size_int (rsize * 4));
13717
13718 gimplify_and_add (copy, pre_p);
13719 addr = dest_addr;
13720 }
13721
13722 addr = fold_convert (ptrtype, addr);
13723 return build_va_arg_indirect_ref (addr);
13724 }
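
/* Pseudo-gimple sketch (not from the original source) of what the V.4
   path above emits for a plain 4-byte int (reg == gpr, n_reg == 1,
   sav_scale == 4, align == 1):

     if (gpr >= 8) goto lab_false;
     addr = sav + gpr * 4;
     gpr = gpr + 1;
     goto lab_over;
   lab_false:
     addr = ovf;                // align == 1, so no rounding needed
     ovf = addr + 4;
   lab_over:
     result = *(int *) addr;
*/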
13725
13726 /* Builtins. */
13727
13728 static void
13729 def_builtin (const char *name, tree type, enum rs6000_builtins code)
13730 {
13731 tree t;
13732 unsigned classify = rs6000_builtin_info[(int)code].attr;
13733 const char *attr_string = "";
13734
13735 gcc_assert (name != NULL);
13736 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
13737
13738 if (rs6000_builtin_decls[(int)code])
13739 fatal_error (input_location,
13740 "internal error: builtin function %s already processed", name);
13741
13742 rs6000_builtin_decls[(int)code] = t =
13743 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
13744
13745 /* Set any special attributes. */
13746 if ((classify & RS6000_BTC_CONST) != 0)
13747 {
13748 /* const function, function only depends on the inputs. */
13749 TREE_READONLY (t) = 1;
13750 TREE_NOTHROW (t) = 1;
13751 attr_string = ", const";
13752 }
13753 else if ((classify & RS6000_BTC_PURE) != 0)
13754 {
13755 /* pure function, function can read global memory, but does not set any
13756 external state. */
13757 DECL_PURE_P (t) = 1;
13758 TREE_NOTHROW (t) = 1;
13759 attr_string = ", pure";
13760 }
13761 else if ((classify & RS6000_BTC_FP) != 0)
13762 {
13763 /* Function is a math function. If rounding mode is on, then treat the
13764 function as not reading global memory, but it can have arbitrary side
13765 effects. If it is off, then assume the function is a const function.
13766 This mimics the ATTR_MATHFN_FPROUNDING attribute in
13767 builtin-attribute.def that is used for the math functions. */
13768 TREE_NOTHROW (t) = 1;
13769 if (flag_rounding_math)
13770 {
13771 DECL_PURE_P (t) = 1;
13772 DECL_IS_NOVOPS (t) = 1;
13773 attr_string = ", fp, pure";
13774 }
13775 else
13776 {
13777 TREE_READONLY (t) = 1;
13778 attr_string = ", fp, const";
13779 }
13780 }
13781 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
13782 gcc_unreachable ();
13783
13784 if (TARGET_DEBUG_BUILTIN)
13785 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
13786 (int)code, name, attr_string);
13787 }
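
/* Usage sketch (hypothetical builtin name and type node, not from the
   original source; the real calls are driven by rs6000-builtin.def):

     def_builtin ("__builtin_example_vadd", v4si_ftype_v4si_v4si,
                  EXAMPLE_BUILTIN_VADD);

   An entry classified RS6000_BTC_CONST gets TREE_READONLY and
   TREE_NOTHROW set, and with TARGET_DEBUG_BUILTIN the registration is
   logged with the ", const" suffix.  */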
13788
13789 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
13790
13791 #undef RS6000_BUILTIN_0
13792 #undef RS6000_BUILTIN_1
13793 #undef RS6000_BUILTIN_2
13794 #undef RS6000_BUILTIN_3
13795 #undef RS6000_BUILTIN_A
13796 #undef RS6000_BUILTIN_D
13797 #undef RS6000_BUILTIN_E
13798 #undef RS6000_BUILTIN_H
13799 #undef RS6000_BUILTIN_P
13800 #undef RS6000_BUILTIN_Q
13801 #undef RS6000_BUILTIN_S
13802 #undef RS6000_BUILTIN_X
13803
13804 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13805 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13806 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13807 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
13808 { MASK, ICODE, NAME, ENUM },
13809
13810 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13811 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13812 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13813 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13814 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13815 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13816 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13817 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13818
13819 static const struct builtin_description bdesc_3arg[] =
13820 {
13821 #include "rs6000-builtin.def"
13822 };
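
/* Editorial note: the #undef/#define sequence above is an X-macro.
   rs6000-builtin.def invokes RS6000_BUILTIN_<kind> once per builtin;
   defining only RS6000_BUILTIN_3 to emit an initializer (and the rest
   to emit nothing) makes the #include produce a table containing just
   the ternary builtins.  A hypothetical .def entry

     RS6000_BUILTIN_3 (FOO, "foo", MASK, ATTR, CODE_FOR_foo)

   expands here to

     { MASK, CODE_FOR_foo, "foo", FOO },

   and to nothing in the tables built for the other arities.  */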
13823
13824 /* DST operations: void foo (void *, const int, const char). */
13825
13826 #undef RS6000_BUILTIN_0
13827 #undef RS6000_BUILTIN_1
13828 #undef RS6000_BUILTIN_2
13829 #undef RS6000_BUILTIN_3
13830 #undef RS6000_BUILTIN_A
13831 #undef RS6000_BUILTIN_D
13832 #undef RS6000_BUILTIN_E
13833 #undef RS6000_BUILTIN_H
13834 #undef RS6000_BUILTIN_P
13835 #undef RS6000_BUILTIN_Q
13836 #undef RS6000_BUILTIN_S
13837 #undef RS6000_BUILTIN_X
13838
13839 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13840 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13841 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13842 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13843 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13844 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
13845 { MASK, ICODE, NAME, ENUM },
13846
13847 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13848 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13849 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13850 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13851 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13852 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13853
13854 static const struct builtin_description bdesc_dst[] =
13855 {
13856 #include "rs6000-builtin.def"
13857 };
13858
13859 /* Simple binary operations: VECc = foo (VECa, VECb). */
13860
13861 #undef RS6000_BUILTIN_0
13862 #undef RS6000_BUILTIN_1
13863 #undef RS6000_BUILTIN_2
13864 #undef RS6000_BUILTIN_3
13865 #undef RS6000_BUILTIN_A
13866 #undef RS6000_BUILTIN_D
13867 #undef RS6000_BUILTIN_E
13868 #undef RS6000_BUILTIN_H
13869 #undef RS6000_BUILTIN_P
13870 #undef RS6000_BUILTIN_Q
13871 #undef RS6000_BUILTIN_S
13872 #undef RS6000_BUILTIN_X
13873
13874 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13875 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13876 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
13877 { MASK, ICODE, NAME, ENUM },
13878
13879 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13880 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13881 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13882 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13883 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13884 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13885 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13886 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13887 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13888
13889 static const struct builtin_description bdesc_2arg[] =
13890 {
13891 #include "rs6000-builtin.def"
13892 };
13893
13894 #undef RS6000_BUILTIN_0
13895 #undef RS6000_BUILTIN_1
13896 #undef RS6000_BUILTIN_2
13897 #undef RS6000_BUILTIN_3
13898 #undef RS6000_BUILTIN_A
13899 #undef RS6000_BUILTIN_D
13900 #undef RS6000_BUILTIN_E
13901 #undef RS6000_BUILTIN_H
13902 #undef RS6000_BUILTIN_P
13903 #undef RS6000_BUILTIN_Q
13904 #undef RS6000_BUILTIN_S
13905 #undef RS6000_BUILTIN_X
13906
13907 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13908 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13909 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13910 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13911 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13912 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13913 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13914 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13915 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
13916 { MASK, ICODE, NAME, ENUM },
13917
13918 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13919 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13920 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13921
13922 /* AltiVec predicates. */
13923
13924 static const struct builtin_description bdesc_altivec_preds[] =
13925 {
13926 #include "rs6000-builtin.def"
13927 };
13928
13929 /* SPE predicates. */
13930 #undef RS6000_BUILTIN_0
13931 #undef RS6000_BUILTIN_1
13932 #undef RS6000_BUILTIN_2
13933 #undef RS6000_BUILTIN_3
13934 #undef RS6000_BUILTIN_A
13935 #undef RS6000_BUILTIN_D
13936 #undef RS6000_BUILTIN_E
13937 #undef RS6000_BUILTIN_H
13938 #undef RS6000_BUILTIN_P
13939 #undef RS6000_BUILTIN_Q
13940 #undef RS6000_BUILTIN_S
13941 #undef RS6000_BUILTIN_X
13942
13943 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13944 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13945 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13946 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13947 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13948 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13949 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
13950 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13951 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13952 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13953 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
13954 { MASK, ICODE, NAME, ENUM },
13955
13956 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13957
13958 static const struct builtin_description bdesc_spe_predicates[] =
13959 {
13960 #include "rs6000-builtin.def"
13961 };
13962
13963 /* SPE evsel predicates. */
13964 #undef RS6000_BUILTIN_0
13965 #undef RS6000_BUILTIN_1
13966 #undef RS6000_BUILTIN_2
13967 #undef RS6000_BUILTIN_3
13968 #undef RS6000_BUILTIN_A
13969 #undef RS6000_BUILTIN_D
13970 #undef RS6000_BUILTIN_E
13971 #undef RS6000_BUILTIN_H
13972 #undef RS6000_BUILTIN_P
13973 #undef RS6000_BUILTIN_Q
13974 #undef RS6000_BUILTIN_S
13975 #undef RS6000_BUILTIN_X
13976
13977 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
13978 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
13979 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
13980 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
13981 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
13982 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
13983 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
13984 { MASK, ICODE, NAME, ENUM },
13985
13986 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
13987 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
13988 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
13989 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
13990 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
13991
13992 static const struct builtin_description bdesc_spe_evsel[] =
13993 {
13994 #include "rs6000-builtin.def"
13995 };
13996
13997 /* PAIRED predicates. */
13998 #undef RS6000_BUILTIN_0
13999 #undef RS6000_BUILTIN_1
14000 #undef RS6000_BUILTIN_2
14001 #undef RS6000_BUILTIN_3
14002 #undef RS6000_BUILTIN_A
14003 #undef RS6000_BUILTIN_D
14004 #undef RS6000_BUILTIN_E
14005 #undef RS6000_BUILTIN_H
14006 #undef RS6000_BUILTIN_P
14007 #undef RS6000_BUILTIN_Q
14008 #undef RS6000_BUILTIN_S
14009 #undef RS6000_BUILTIN_X
14010
14011 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14012 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14013 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14014 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14015 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14016 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14017 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14018 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14019 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14020 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
14021 { MASK, ICODE, NAME, ENUM },
14022
14023 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14024 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14025
14026 static const struct builtin_description bdesc_paired_preds[] =
14027 {
14028 #include "rs6000-builtin.def"
14029 };
14030
14031 /* ABS* operations. */
14032
14033 #undef RS6000_BUILTIN_0
14034 #undef RS6000_BUILTIN_1
14035 #undef RS6000_BUILTIN_2
14036 #undef RS6000_BUILTIN_3
14037 #undef RS6000_BUILTIN_A
14038 #undef RS6000_BUILTIN_D
14039 #undef RS6000_BUILTIN_E
14040 #undef RS6000_BUILTIN_H
14041 #undef RS6000_BUILTIN_P
14042 #undef RS6000_BUILTIN_Q
14043 #undef RS6000_BUILTIN_S
14044 #undef RS6000_BUILTIN_X
14045
14046 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14047 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14048 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14049 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14050 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
14051 { MASK, ICODE, NAME, ENUM },
14052
14053 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14054 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14055 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14056 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14057 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14058 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14059 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14060
14061 static const struct builtin_description bdesc_abs[] =
14062 {
14063 #include "rs6000-builtin.def"
14064 };
14065
14066 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
14067 foo (VECa). */
14068
14069 #undef RS6000_BUILTIN_0
14070 #undef RS6000_BUILTIN_1
14071 #undef RS6000_BUILTIN_2
14072 #undef RS6000_BUILTIN_3
14073 #undef RS6000_BUILTIN_A
14074 #undef RS6000_BUILTIN_D
14075 #undef RS6000_BUILTIN_E
14076 #undef RS6000_BUILTIN_H
14077 #undef RS6000_BUILTIN_P
14078 #undef RS6000_BUILTIN_Q
14079 #undef RS6000_BUILTIN_S
14080 #undef RS6000_BUILTIN_X
14081
14082 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14083 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
14084 { MASK, ICODE, NAME, ENUM },
14085
14086 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14087 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14088 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14089 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14090 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14091 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14092 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14093 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14094 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14095 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14096
14097 static const struct builtin_description bdesc_1arg[] =
14098 {
14099 #include "rs6000-builtin.def"
14100 };
14101
14102 /* Simple no-argument operations: result = __builtin_darn_32 ().  */
14103
14104 #undef RS6000_BUILTIN_0
14105 #undef RS6000_BUILTIN_1
14106 #undef RS6000_BUILTIN_2
14107 #undef RS6000_BUILTIN_3
14108 #undef RS6000_BUILTIN_A
14109 #undef RS6000_BUILTIN_D
14110 #undef RS6000_BUILTIN_E
14111 #undef RS6000_BUILTIN_H
14112 #undef RS6000_BUILTIN_P
14113 #undef RS6000_BUILTIN_Q
14114 #undef RS6000_BUILTIN_S
14115 #undef RS6000_BUILTIN_X
14116
14117 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
14118 { MASK, ICODE, NAME, ENUM },
14119
14120 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14121 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14122 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14123 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14124 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14125 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14126 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
14127 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14128 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14129 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14130 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14131
14132 static const struct builtin_description bdesc_0arg[] =
14133 {
14134 #include "rs6000-builtin.def"
14135 };
14136
14137 /* HTM builtins. */
14138 #undef RS6000_BUILTIN_0
14139 #undef RS6000_BUILTIN_1
14140 #undef RS6000_BUILTIN_2
14141 #undef RS6000_BUILTIN_3
14142 #undef RS6000_BUILTIN_A
14143 #undef RS6000_BUILTIN_D
14144 #undef RS6000_BUILTIN_E
14145 #undef RS6000_BUILTIN_H
14146 #undef RS6000_BUILTIN_P
14147 #undef RS6000_BUILTIN_Q
14148 #undef RS6000_BUILTIN_S
14149 #undef RS6000_BUILTIN_X
14150
14151 #define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
14152 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
14153 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
14154 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
14155 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
14156 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
14157 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
14158 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
14159 { MASK, ICODE, NAME, ENUM },
14160
14161 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
14162 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
14163 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
14164 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
14165
14166 static const struct builtin_description bdesc_htm[] =
14167 {
14168 #include "rs6000-builtin.def"
14169 };
14170
14171 #undef RS6000_BUILTIN_0
14172 #undef RS6000_BUILTIN_1
14173 #undef RS6000_BUILTIN_2
14174 #undef RS6000_BUILTIN_3
14175 #undef RS6000_BUILTIN_A
14176 #undef RS6000_BUILTIN_D
14177 #undef RS6000_BUILTIN_E
14178 #undef RS6000_BUILTIN_H
14179 #undef RS6000_BUILTIN_P
14180 #undef RS6000_BUILTIN_Q
14181 #undef RS6000_BUILTIN_S
14182
14183 /* Return true if a builtin function is overloaded. */
14184 bool
14185 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
14186 {
14187 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
14188 }
14189
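/* Return the name of builtin function FNCODE from the builtin info
   table; used for diagnostics on overloaded builtins.  */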
14190 const char *
14191 rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
14192 {
14193 return rs6000_builtin_info[(int)fncode].name;
14194 }
14195
14196 /* Expand an expression EXP that calls a builtin without arguments. */
14197 static rtx
14198 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
14199 {
14200 rtx pat;
14201 machine_mode tmode = insn_data[icode].operand[0].mode;
14202
14203 if (icode == CODE_FOR_nothing)
14204 /* Builtin not supported on this processor. */
14205 return 0;
14206
14207 if (target == 0
14208 || GET_MODE (target) != tmode
14209 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14210 target = gen_reg_rtx (tmode);
14211
14212 pat = GEN_FCN (icode) (target);
14213 if (! pat)
14214 return 0;
14215 emit_insn (pat);
14216
14217 return target;
14218 }
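
/* Editorial example (a sketch, not part of the original source): a
   zero-operand builtin handled by the expander above.  __builtin_darn_32
   maps to the POWER9 "darn" instruction, so the expander only needs to
   allocate a target register in the insn's output mode:

     unsigned int
     random32 (void)
     {
       return __builtin_darn_32 ();   // needs -mcpu=power9
     }
*/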
14219
14220
14221 static rtx
14222 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
14223 {
14224 rtx pat;
14225 tree arg0 = CALL_EXPR_ARG (exp, 0);
14226 tree arg1 = CALL_EXPR_ARG (exp, 1);
14227 rtx op0 = expand_normal (arg0);
14228 rtx op1 = expand_normal (arg1);
14229 machine_mode mode0 = insn_data[icode].operand[0].mode;
14230 machine_mode mode1 = insn_data[icode].operand[1].mode;
14231
14232 if (icode == CODE_FOR_nothing)
14233 /* Builtin not supported on this processor. */
14234 return 0;
14235
14236 /* If we got invalid arguments bail out before generating bad rtl. */
14237 if (arg0 == error_mark_node || arg1 == error_mark_node)
14238 return const0_rtx;
14239
14240 if (GET_CODE (op0) != CONST_INT
14241 || INTVAL (op0) > 255
14242 || INTVAL (op0) < 0)
14243 {
14244 error ("argument 1 must be an 8-bit field value");
14245 return const0_rtx;
14246 }
14247
14248 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
14249 op0 = copy_to_mode_reg (mode0, op0);
14250
14251 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
14252 op1 = copy_to_mode_reg (mode1, op1);
14253
14254 pat = GEN_FCN (icode) (op0, op1);
14255 if (! pat)
14256 return const0_rtx;
14257 emit_insn (pat);
14258
14259 return NULL_RTX;
14260 }
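
/* Editorial example (a sketch, not part of the original source): the
   mtfsf expander above requires argument 1 to be a constant 8-bit field
   mask, so a valid call looks like:

     void
     set_fpscr (double image)
     {
       __builtin_mtfsf (0xff, image);   // mask must be a literal 0..255
     }
*/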
14261
14262 static rtx
14263 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
14264 {
14265 rtx pat;
14266 tree arg0 = CALL_EXPR_ARG (exp, 0);
14267 rtx op0 = expand_normal (arg0);
14268 machine_mode tmode = insn_data[icode].operand[0].mode;
14269 machine_mode mode0 = insn_data[icode].operand[1].mode;
14270
14271 if (icode == CODE_FOR_nothing)
14272 /* Builtin not supported on this processor. */
14273 return 0;
14274
14275 /* If we got invalid arguments bail out before generating bad rtl. */
14276 if (arg0 == error_mark_node)
14277 return const0_rtx;
14278
14279 if (icode == CODE_FOR_altivec_vspltisb
14280 || icode == CODE_FOR_altivec_vspltish
14281 || icode == CODE_FOR_altivec_vspltisw
14282 || icode == CODE_FOR_spe_evsplatfi
14283 || icode == CODE_FOR_spe_evsplati)
14284 {
14285 /* Only allow 5-bit *signed* literals. */
14286 if (GET_CODE (op0) != CONST_INT
14287 || INTVAL (op0) > 15
14288 || INTVAL (op0) < -16)
14289 {
14290 error ("argument 1 must be a 5-bit signed literal");
14291 return const0_rtx;
14292 }
14293 }
14294
14295 if (target == 0
14296 || GET_MODE (target) != tmode
14297 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14298 target = gen_reg_rtx (tmode);
14299
14300 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14301 op0 = copy_to_mode_reg (mode0, op0);
14302
14303 pat = GEN_FCN (icode) (target, op0);
14304 if (! pat)
14305 return 0;
14306 emit_insn (pat);
14307
14308 return target;
14309 }
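
/* Editorial example (a sketch, not part of the original source): the
   vspltis* cases above restrict the operand to a 5-bit signed literal,
   matching the SIMM field of the underlying instruction:

     vector signed char
     splat_minus_one (void)
     {
       return vec_splat_s8 (-1);   // literal must be in -16 .. 15
     }
*/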
14310
14311 static rtx
14312 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
14313 {
14314 rtx pat, scratch1, scratch2;
14315 tree arg0 = CALL_EXPR_ARG (exp, 0);
14316 rtx op0 = expand_normal (arg0);
14317 machine_mode tmode = insn_data[icode].operand[0].mode;
14318 machine_mode mode0 = insn_data[icode].operand[1].mode;
14319
14320 /* If we have invalid arguments, bail out before generating bad rtl. */
14321 if (arg0 == error_mark_node)
14322 return const0_rtx;
14323
14324 if (target == 0
14325 || GET_MODE (target) != tmode
14326 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14327 target = gen_reg_rtx (tmode);
14328
14329 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14330 op0 = copy_to_mode_reg (mode0, op0);
14331
14332 scratch1 = gen_reg_rtx (mode0);
14333 scratch2 = gen_reg_rtx (mode0);
14334
14335 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
14336 if (! pat)
14337 return 0;
14338 emit_insn (pat);
14339
14340 return target;
14341 }
14342
14343 static rtx
14344 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
14345 {
14346 rtx pat;
14347 tree arg0 = CALL_EXPR_ARG (exp, 0);
14348 tree arg1 = CALL_EXPR_ARG (exp, 1);
14349 rtx op0 = expand_normal (arg0);
14350 rtx op1 = expand_normal (arg1);
14351 machine_mode tmode = insn_data[icode].operand[0].mode;
14352 machine_mode mode0 = insn_data[icode].operand[1].mode;
14353 machine_mode mode1 = insn_data[icode].operand[2].mode;
14354
14355 if (icode == CODE_FOR_nothing)
14356 /* Builtin not supported on this processor. */
14357 return 0;
14358
14359 /* If we got invalid arguments bail out before generating bad rtl. */
14360 if (arg0 == error_mark_node || arg1 == error_mark_node)
14361 return const0_rtx;
14362
14363 if (icode == CODE_FOR_altivec_vcfux
14364 || icode == CODE_FOR_altivec_vcfsx
14365 || icode == CODE_FOR_altivec_vctsxs
14366 || icode == CODE_FOR_altivec_vctuxs
14367 || icode == CODE_FOR_altivec_vspltb
14368 || icode == CODE_FOR_altivec_vsplth
14369 || icode == CODE_FOR_altivec_vspltw
14370 || icode == CODE_FOR_spe_evaddiw
14371 || icode == CODE_FOR_spe_evldd
14372 || icode == CODE_FOR_spe_evldh
14373 || icode == CODE_FOR_spe_evldw
14374 || icode == CODE_FOR_spe_evlhhesplat
14375 || icode == CODE_FOR_spe_evlhhossplat
14376 || icode == CODE_FOR_spe_evlhhousplat
14377 || icode == CODE_FOR_spe_evlwhe
14378 || icode == CODE_FOR_spe_evlwhos
14379 || icode == CODE_FOR_spe_evlwhou
14380 || icode == CODE_FOR_spe_evlwhsplat
14381 || icode == CODE_FOR_spe_evlwwsplat
14382 || icode == CODE_FOR_spe_evrlwi
14383 || icode == CODE_FOR_spe_evslwi
14384 || icode == CODE_FOR_spe_evsrwis
14385 || icode == CODE_FOR_spe_evsubifw
14386 || icode == CODE_FOR_spe_evsrwiu)
14387 {
14388 /* Only allow 5-bit unsigned literals. */
14389 STRIP_NOPS (arg1);
14390 if (TREE_CODE (arg1) != INTEGER_CST
14391 || TREE_INT_CST_LOW (arg1) & ~0x1f)
14392 {
14393 error ("argument 2 must be a 5-bit unsigned literal");
14394 return const0_rtx;
14395 }
14396 }
14397 else if (icode == CODE_FOR_dfptstsfi_eq_dd
14398 || icode == CODE_FOR_dfptstsfi_lt_dd
14399 || icode == CODE_FOR_dfptstsfi_gt_dd
14400 || icode == CODE_FOR_dfptstsfi_unordered_dd
14401 || icode == CODE_FOR_dfptstsfi_eq_td
14402 || icode == CODE_FOR_dfptstsfi_lt_td
14403 || icode == CODE_FOR_dfptstsfi_gt_td
14404 || icode == CODE_FOR_dfptstsfi_unordered_td)
14405 {
14406 /* Only allow 6-bit unsigned literals. */
14407 STRIP_NOPS (arg0);
14408 if (TREE_CODE (arg0) != INTEGER_CST
14409 || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
14410 {
14411 error ("argument 1 must be a 6-bit unsigned literal");
14412 return CONST0_RTX (tmode);
14413 }
14414 }
14415 else if (icode == CODE_FOR_xststdcdp
14416 || icode == CODE_FOR_xststdcsp
14417 || icode == CODE_FOR_xvtstdcdp
14418 || icode == CODE_FOR_xvtstdcsp)
14419 {
14420 /* Only allow 7-bit unsigned literals. */
14421 STRIP_NOPS (arg1);
14422 if (TREE_CODE (arg1) != INTEGER_CST
14423 || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
14424 {
14425 error ("argument 2 must be a 7-bit unsigned literal");
14426 return CONST0_RTX (tmode);
14427 }
14428 }
14429
14430 if (target == 0
14431 || GET_MODE (target) != tmode
14432 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14433 target = gen_reg_rtx (tmode);
14434
14435 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14436 op0 = copy_to_mode_reg (mode0, op0);
14437 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14438 op1 = copy_to_mode_reg (mode1, op1);
14439
14440 pat = GEN_FCN (icode) (target, op0, op1);
14441 if (! pat)
14442 return 0;
14443 emit_insn (pat);
14444
14445 return target;
14446 }
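
/* Editorial example (a sketch, not part of the original source): for
   the splat builtins listed above, argument 2 is an element selector
   and must be a 5-bit unsigned literal:

     vector int
     broadcast_elt2 (vector int v)
     {
       return vec_splat (v, 2);   // selector must be a constant literal
     }
*/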
14447
14448 static rtx
14449 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
14450 {
14451 rtx pat, scratch;
14452 tree cr6_form = CALL_EXPR_ARG (exp, 0);
14453 tree arg0 = CALL_EXPR_ARG (exp, 1);
14454 tree arg1 = CALL_EXPR_ARG (exp, 2);
14455 rtx op0 = expand_normal (arg0);
14456 rtx op1 = expand_normal (arg1);
14457 machine_mode tmode = SImode;
14458 machine_mode mode0 = insn_data[icode].operand[1].mode;
14459 machine_mode mode1 = insn_data[icode].operand[2].mode;
14460 int cr6_form_int;
14461
14462 if (TREE_CODE (cr6_form) != INTEGER_CST)
14463 {
14464 error ("argument 1 of __builtin_altivec_predicate must be a constant");
14465 return const0_rtx;
14466 }
14467 else
14468 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
14469
14470 gcc_assert (mode0 == mode1);
14471
14472 /* If we have invalid arguments, bail out before generating bad rtl. */
14473 if (arg0 == error_mark_node || arg1 == error_mark_node)
14474 return const0_rtx;
14475
14476 if (target == 0
14477 || GET_MODE (target) != tmode
14478 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14479 target = gen_reg_rtx (tmode);
14480
14481 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14482 op0 = copy_to_mode_reg (mode0, op0);
14483 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14484 op1 = copy_to_mode_reg (mode1, op1);
14485
14486 /* Note that for many of the relevant operations (e.g. cmpne or
14487 cmpeq) with float or double operands, it would make more sense for
14488 the mode of the allocated scratch register to be a vector integer
14489 mode.  But the choice to copy the mode of operand 0 was made long
14490 ago and there are no plans to change it. */
14491 scratch = gen_reg_rtx (mode0);
14492
14493 pat = GEN_FCN (icode) (scratch, op0, op1);
14494 if (! pat)
14495 return 0;
14496 emit_insn (pat);
14497
14498 /* The vec_any* and vec_all* predicates use the same opcodes for two
14499 different operations, but the bits in CR6 will be different
14500 depending on what information we want. So we have to play tricks
14501 with CR6 to get the right bits out.
14502
14503 If you think this is disgusting, look at the specs for the
14504 AltiVec predicates. */
14505
14506 switch (cr6_form_int)
14507 {
14508 case 0:
14509 emit_insn (gen_cr6_test_for_zero (target));
14510 break;
14511 case 1:
14512 emit_insn (gen_cr6_test_for_zero_reverse (target));
14513 break;
14514 case 2:
14515 emit_insn (gen_cr6_test_for_lt (target));
14516 break;
14517 case 3:
14518 emit_insn (gen_cr6_test_for_lt_reverse (target));
14519 break;
14520 default:
14521 error ("argument 1 of __builtin_altivec_predicate is out of range");
14522 break;
14523 }
14524
14525 return target;
14526 }
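
/* Editorial example (a sketch, not part of the original source): the
   vec_all_* and vec_any_* forms share one compare opcode; only the CR6
   test emitted above differs:

     int
     all_equal (vector int a, vector int b)
     {
       return vec_all_eq (a, b);   // vcmpequw. plus one CR6 test
     }

     int
     any_equal (vector int a, vector int b)
     {
       return vec_any_eq (a, b);   // same opcode, different CR6 test
     }
*/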
14527
14528 static rtx
14529 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
14530 {
14531 rtx pat, addr;
14532 tree arg0 = CALL_EXPR_ARG (exp, 0);
14533 tree arg1 = CALL_EXPR_ARG (exp, 1);
14534 machine_mode tmode = insn_data[icode].operand[0].mode;
14535 machine_mode mode0 = Pmode;
14536 machine_mode mode1 = Pmode;
14537 rtx op0 = expand_normal (arg0);
14538 rtx op1 = expand_normal (arg1);
14539
14540 if (icode == CODE_FOR_nothing)
14541 /* Builtin not supported on this processor. */
14542 return 0;
14543
14544 /* If we got invalid arguments bail out before generating bad rtl. */
14545 if (arg0 == error_mark_node || arg1 == error_mark_node)
14546 return const0_rtx;
14547
14548 if (target == 0
14549 || GET_MODE (target) != tmode
14550 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14551 target = gen_reg_rtx (tmode);
14552
14553 op1 = copy_to_mode_reg (mode1, op1);
14554
14555 if (op0 == const0_rtx)
14556 {
14557 addr = gen_rtx_MEM (tmode, op1);
14558 }
14559 else
14560 {
14561 op0 = copy_to_mode_reg (mode0, op0);
14562 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
14563 }
14564
14565 pat = GEN_FCN (icode) (target, addr);
14566
14567 if (! pat)
14568 return 0;
14569 emit_insn (pat);
14570
14571 return target;
14572 }
14573
14574 /* Return a constant vector for use as a little-endian permute control vector
14575 to reverse the order of elements of the given vector mode. */
14576 static rtx
14577 swap_selector_for_mode (machine_mode mode)
14578 {
14579 /* These are little endian vectors, so their elements are reversed
14580 from what you would normally expect for a permute control vector. */
14581 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
14582 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
14583 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
14584 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
14585 unsigned int *swaparray, i;
14586 rtx perm[16];
14587
14588 switch (mode)
14589 {
14590 case V2DFmode:
14591 case V2DImode:
14592 swaparray = swap2;
14593 break;
14594 case V4SFmode:
14595 case V4SImode:
14596 swaparray = swap4;
14597 break;
14598 case V8HImode:
14599 swaparray = swap8;
14600 break;
14601 case V16QImode:
14602 swaparray = swap16;
14603 break;
14604 default:
14605 gcc_unreachable ();
14606 }
14607
14608 for (i = 0; i < 16; ++i)
14609 perm[i] = GEN_INT (swaparray[i]);
14610
14611 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
14612 }
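
/* Worked example (editorial): for V4SImode the selector built above is
   the byte sequence {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}.  Fed to
   vperm on a little-endian target it reverses the four 32-bit elements,
   mapping {e0, e1, e2, e3} to {e3, e2, e1, e0} while keeping each
   element's bytes intact (per the comment above, the byte numbering
   here is already the little-endian one).  */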
14613
14614 /* Generate code for an "lvxl" or "lve*x" built-in for a little endian
14615 target with -maltivec=be specified.  Issue the load followed by an
14616 element-reversing permute. */
14617 void
14618 altivec_expand_lvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14619 {
14620 rtx tmp = gen_reg_rtx (mode);
14621 rtx load = gen_rtx_SET (tmp, op1);
14622 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14623 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
14624 rtx sel = swap_selector_for_mode (mode);
14625 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
14626
14627 gcc_assert (REG_P (op0));
14628 emit_insn (par);
14629 emit_insn (gen_rtx_SET (op0, vperm));
14630 }
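
/* Editorial note: schematically, the pair of insns emitted above is

     tmp = lvxl (addr)              -- raw load, LE element order
     op0 = vperm (tmp, tmp, sel)    -- element-reversing permute

   so that with -maltivec=be user code observes big-endian element
   numbering even on a little-endian target.  */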
14631
14632 /* Generate code for a "stvxl" built-in for a little endian target with
14633 -maltivec=be specified. Issue the store preceded by an element-reversing
14634 permute. */
14635 void
14636 altivec_expand_stvx_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14637 {
14638 rtx tmp = gen_reg_rtx (mode);
14639 rtx store = gen_rtx_SET (op0, tmp);
14640 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
14641 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
14642 rtx sel = swap_selector_for_mode (mode);
14643 rtx vperm;
14644
14645 gcc_assert (REG_P (op1));
14646 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14647 emit_insn (gen_rtx_SET (tmp, vperm));
14648 emit_insn (par);
14649 }
14650
14651 /* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
14652 specified. Issue the store preceded by an element-reversing permute. */
14653 void
14654 altivec_expand_stvex_be (rtx op0, rtx op1, machine_mode mode, unsigned unspec)
14655 {
14656 machine_mode inner_mode = GET_MODE_INNER (mode);
14657 rtx tmp = gen_reg_rtx (mode);
14658 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
14659 rtx sel = swap_selector_for_mode (mode);
14660 rtx vperm;
14661
14662 gcc_assert (REG_P (op1));
14663 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
14664 emit_insn (gen_rtx_SET (tmp, vperm));
14665 emit_insn (gen_rtx_SET (op0, stvx));
14666 }
14667
14668 static rtx
14669 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
14670 {
14671 rtx pat, addr;
14672 tree arg0 = CALL_EXPR_ARG (exp, 0);
14673 tree arg1 = CALL_EXPR_ARG (exp, 1);
14674 machine_mode tmode = insn_data[icode].operand[0].mode;
14675 machine_mode mode0 = Pmode;
14676 machine_mode mode1 = Pmode;
14677 rtx op0 = expand_normal (arg0);
14678 rtx op1 = expand_normal (arg1);
14679
14680 if (icode == CODE_FOR_nothing)
14681 /* Builtin not supported on this processor. */
14682 return 0;
14683
14684 /* If we got invalid arguments bail out before generating bad rtl. */
14685 if (arg0 == error_mark_node || arg1 == error_mark_node)
14686 return const0_rtx;
14687
14688 if (target == 0
14689 || GET_MODE (target) != tmode
14690 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
14691 target = gen_reg_rtx (tmode);
14692
14693 op1 = copy_to_mode_reg (mode1, op1);
14694
14695 /* For LVX, express the RTL accurately by ANDing the address with -16.
14696 LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
14697 so the raw address is fine. */
14698 if (icode == CODE_FOR_altivec_lvx_v2df_2op
14699 || icode == CODE_FOR_altivec_lvx_v2di_2op
14700 || icode == CODE_FOR_altivec_lvx_v4sf_2op
14701 || icode == CODE_FOR_altivec_lvx_v4si_2op
14702 || icode == CODE_FOR_altivec_lvx_v8hi_2op
14703 || icode == CODE_FOR_altivec_lvx_v16qi_2op)
14704 {
14705 rtx rawaddr;
14706 if (op0 == const0_rtx)
14707 rawaddr = op1;
14708 else
14709 {
14710 op0 = copy_to_mode_reg (mode0, op0);
14711 rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
14712 }
14713 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14714 addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);
14715
14716 /* For -maltivec=be, emit the load and follow it up with a
14717 permute to swap the elements. */
14718 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14719 {
14720 rtx temp = gen_reg_rtx (tmode);
14721 emit_insn (gen_rtx_SET (temp, addr));
14722
14723 rtx sel = swap_selector_for_mode (tmode);
14724 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, temp, temp, sel),
14725 UNSPEC_VPERM);
14726 emit_insn (gen_rtx_SET (target, vperm));
14727 }
14728 else
14729 emit_insn (gen_rtx_SET (target, addr));
14730 }
14731 else
14732 {
14733 if (op0 == const0_rtx)
14734 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
14735 else
14736 {
14737 op0 = copy_to_mode_reg (mode0, op0);
14738 addr = gen_rtx_MEM (blk ? BLKmode : tmode,
14739 gen_rtx_PLUS (Pmode, op1, op0));
14740 }
14741
14742 pat = GEN_FCN (icode) (target, addr);
14743 if (! pat)
14744 return 0;
14745 emit_insn (pat);
14746 }
14747
14748 return target;
14749 }
14750
14751 static rtx
14752 spe_expand_stv_builtin (enum insn_code icode, tree exp)
14753 {
14754 tree arg0 = CALL_EXPR_ARG (exp, 0);
14755 tree arg1 = CALL_EXPR_ARG (exp, 1);
14756 tree arg2 = CALL_EXPR_ARG (exp, 2);
14757 rtx op0 = expand_normal (arg0);
14758 rtx op1 = expand_normal (arg1);
14759 rtx op2 = expand_normal (arg2);
14760 rtx pat;
14761 machine_mode mode0 = insn_data[icode].operand[0].mode;
14762 machine_mode mode1 = insn_data[icode].operand[1].mode;
14763 machine_mode mode2 = insn_data[icode].operand[2].mode;
14764
14765 /* Invalid arguments; bail out before generating bad rtl. */
14766 if (arg0 == error_mark_node
14767 || arg1 == error_mark_node
14768 || arg2 == error_mark_node)
14769 return const0_rtx;
14770
14771 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
14772 op0 = copy_to_mode_reg (mode2, op0);
14773 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
14774 op1 = copy_to_mode_reg (mode0, op1);
14775 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
14776 op2 = copy_to_mode_reg (mode1, op2);
14777
14778 pat = GEN_FCN (icode) (op1, op2, op0);
14779 if (pat)
14780 emit_insn (pat);
14781 return NULL_RTX;
14782 }
14783
14784 static rtx
14785 paired_expand_stv_builtin (enum insn_code icode, tree exp)
14786 {
14787 tree arg0 = CALL_EXPR_ARG (exp, 0);
14788 tree arg1 = CALL_EXPR_ARG (exp, 1);
14789 tree arg2 = CALL_EXPR_ARG (exp, 2);
14790 rtx op0 = expand_normal (arg0);
14791 rtx op1 = expand_normal (arg1);
14792 rtx op2 = expand_normal (arg2);
14793 rtx pat, addr;
14794 machine_mode tmode = insn_data[icode].operand[0].mode;
14795 machine_mode mode1 = Pmode;
14796 machine_mode mode2 = Pmode;
14797
14798 /* Invalid arguments; bail out before generating bad rtl. */
14799 if (arg0 == error_mark_node
14800 || arg1 == error_mark_node
14801 || arg2 == error_mark_node)
14802 return const0_rtx;
14803
14804 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
14805 op0 = copy_to_mode_reg (tmode, op0);
14806
14807 op2 = copy_to_mode_reg (mode2, op2);
14808
14809 if (op1 == const0_rtx)
14810 {
14811 addr = gen_rtx_MEM (tmode, op2);
14812 }
14813 else
14814 {
14815 op1 = copy_to_mode_reg (mode1, op1);
14816 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
14817 }
14818
14819 pat = GEN_FCN (icode) (addr, op0);
14820 if (pat)
14821 emit_insn (pat);
14822 return NULL_RTX;
14823 }
14824
14825 static rtx
14826 altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
14827 {
14828 rtx pat;
14829 tree arg0 = CALL_EXPR_ARG (exp, 0);
14830 tree arg1 = CALL_EXPR_ARG (exp, 1);
14831 tree arg2 = CALL_EXPR_ARG (exp, 2);
14832 rtx op0 = expand_normal (arg0);
14833 rtx op1 = expand_normal (arg1);
14834 rtx op2 = expand_normal (arg2);
14835 machine_mode mode0 = insn_data[icode].operand[0].mode;
14836 machine_mode mode1 = insn_data[icode].operand[1].mode;
14837 machine_mode mode2 = insn_data[icode].operand[2].mode;
14838
14839 if (icode == CODE_FOR_nothing)
14840 /* Builtin not supported on this processor. */
14841 return NULL_RTX;
14842
14843 /* If we got invalid arguments bail out before generating bad rtl. */
14844 if (arg0 == error_mark_node
14845 || arg1 == error_mark_node
14846 || arg2 == error_mark_node)
14847 return NULL_RTX;
14848
14849 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
14850 op0 = copy_to_mode_reg (mode0, op0);
14851 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
14852 op1 = copy_to_mode_reg (mode1, op1);
14853 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
14854 op2 = copy_to_mode_reg (mode2, op2);
14855
14856 pat = GEN_FCN (icode) (op0, op1, op2);
14857 if (pat)
14858 emit_insn (pat);
14859
14860 return NULL_RTX;
14861 }
14862
14863 static rtx
14864 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
14865 {
14866 tree arg0 = CALL_EXPR_ARG (exp, 0);
14867 tree arg1 = CALL_EXPR_ARG (exp, 1);
14868 tree arg2 = CALL_EXPR_ARG (exp, 2);
14869 rtx op0 = expand_normal (arg0);
14870 rtx op1 = expand_normal (arg1);
14871 rtx op2 = expand_normal (arg2);
14872 rtx pat, addr, rawaddr;
14873 machine_mode tmode = insn_data[icode].operand[0].mode;
14874 machine_mode smode = insn_data[icode].operand[1].mode;
14875 machine_mode mode1 = Pmode;
14876 machine_mode mode2 = Pmode;
14877
14878 /* Invalid arguments; bail out before generating bad rtl. */
14879 if (arg0 == error_mark_node
14880 || arg1 == error_mark_node
14881 || arg2 == error_mark_node)
14882 return const0_rtx;
14883
14884 op2 = copy_to_mode_reg (mode2, op2);
14885
14886 /* For STVX, express the RTL accurately by ANDing the address with -16.
14887 STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
14888 so the raw address is fine. */
14889 if (icode == CODE_FOR_altivec_stvx_v2df_2op
14890 || icode == CODE_FOR_altivec_stvx_v2di_2op
14891 || icode == CODE_FOR_altivec_stvx_v4sf_2op
14892 || icode == CODE_FOR_altivec_stvx_v4si_2op
14893 || icode == CODE_FOR_altivec_stvx_v8hi_2op
14894 || icode == CODE_FOR_altivec_stvx_v16qi_2op)
14895 {
14896 if (op1 == const0_rtx)
14897 rawaddr = op2;
14898 else
14899 {
14900 op1 = copy_to_mode_reg (mode1, op1);
14901 rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
14902 }
14903
14904 addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
14905 addr = gen_rtx_MEM (tmode, addr);
14906
14907 op0 = copy_to_mode_reg (tmode, op0);
14908
14909 /* For -maltivec=be, emit a permute to swap the elements, followed
14910 by the store. */
14911 if (!BYTES_BIG_ENDIAN && VECTOR_ELT_ORDER_BIG)
14912 {
14913 rtx temp = gen_reg_rtx (tmode);
14914 rtx sel = swap_selector_for_mode (tmode);
14915 rtx vperm = gen_rtx_UNSPEC (tmode, gen_rtvec (3, op0, op0, sel),
14916 UNSPEC_VPERM);
14917 emit_insn (gen_rtx_SET (temp, vperm));
14918 emit_insn (gen_rtx_SET (addr, temp));
14919 }
14920 else
14921 emit_insn (gen_rtx_SET (addr, op0));
14922 }
14923 else
14924 {
14925 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
14926 op0 = copy_to_mode_reg (smode, op0);
14927
14928 if (op1 == const0_rtx)
14929 addr = gen_rtx_MEM (tmode, op2);
14930 else
14931 {
14932 op1 = copy_to_mode_reg (mode1, op1);
14933 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
14934 }
14935
14936 pat = GEN_FCN (icode) (addr, op0);
14937 if (pat)
14938 emit_insn (pat);
14939 }
14940
14941 return NULL_RTX;
14942 }
14943
14944 /* Return the appropriate SPR number associated with the given builtin. */
14945 static inline HOST_WIDE_INT
14946 htm_spr_num (enum rs6000_builtins code)
14947 {
14948 if (code == HTM_BUILTIN_GET_TFHAR
14949 || code == HTM_BUILTIN_SET_TFHAR)
14950 return TFHAR_SPR;
14951 else if (code == HTM_BUILTIN_GET_TFIAR
14952 || code == HTM_BUILTIN_SET_TFIAR)
14953 return TFIAR_SPR;
14954 else if (code == HTM_BUILTIN_GET_TEXASR
14955 || code == HTM_BUILTIN_SET_TEXASR)
14956 return TEXASR_SPR;
14957 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
14958 || code == HTM_BUILTIN_SET_TEXASRU);
14959 return TEXASRU_SPR;
14960 }
14961
14962 /* Return the appropriate SPR regno associated with the given builtin. */
14963 static inline HOST_WIDE_INT
14964 htm_spr_regno (enum rs6000_builtins code)
14965 {
14966 if (code == HTM_BUILTIN_GET_TFHAR
14967 || code == HTM_BUILTIN_SET_TFHAR)
14968 return TFHAR_REGNO;
14969 else if (code == HTM_BUILTIN_GET_TFIAR
14970 || code == HTM_BUILTIN_SET_TFIAR)
14971 return TFIAR_REGNO;
14972 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
14973 || code == HTM_BUILTIN_SET_TEXASR
14974 || code == HTM_BUILTIN_GET_TEXASRU
14975 || code == HTM_BUILTIN_SET_TEXASRU);
14976 return TEXASR_REGNO;
14977 }
14978
14979 /* Return the correct ICODE value depending on whether we are
14980 setting or reading the HTM SPRs. */
14981 static inline enum insn_code
14982 rs6000_htm_spr_icode (bool nonvoid)
14983 {
14984 if (nonvoid)
14985 return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
14986 else
14987 return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
14988 }
14989
14990 /* Expand the HTM builtin in EXP and store the result in TARGET.
14991 Store true in *EXPANDEDP if we found a builtin to expand. */
14992 static rtx
14993 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
14994 {
14995 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
14996 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
14997 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
14998 const struct builtin_description *d;
14999 size_t i;
15000
15001 *expandedp = true;
15002
15003 if (!TARGET_POWERPC64
15004 && (fcode == HTM_BUILTIN_TABORTDC
15005 || fcode == HTM_BUILTIN_TABORTDCI))
15006 {
15007 size_t uns_fcode = (size_t)fcode;
15008 const char *name = rs6000_builtin_info[uns_fcode].name;
15009 error ("builtin %s is only valid in 64-bit mode", name);
15010 return const0_rtx;
15011 }
15012
15013 /* Expand the HTM builtins. */
15014 d = bdesc_htm;
15015 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
15016 if (d->code == fcode)
15017 {
15018 rtx op[MAX_HTM_OPERANDS], pat;
15019 int nopnds = 0;
15020 tree arg;
15021 call_expr_arg_iterator iter;
15022 unsigned attr = rs6000_builtin_info[fcode].attr;
15023 enum insn_code icode = d->icode;
15024 const struct insn_operand_data *insn_op;
15025 bool uses_spr = (attr & RS6000_BTC_SPR);
15026 rtx cr = NULL_RTX;
15027
15028 if (uses_spr)
15029 icode = rs6000_htm_spr_icode (nonvoid);
15030 insn_op = &insn_data[icode].operand[0];
15031
15032 if (nonvoid)
15033 {
15034 machine_mode tmode = (uses_spr) ? insn_op->mode : SImode;
15035 if (!target
15036 || GET_MODE (target) != tmode
15037 || (uses_spr && !(*insn_op->predicate) (target, tmode)))
15038 target = gen_reg_rtx (tmode);
15039 if (uses_spr)
15040 op[nopnds++] = target;
15041 }
15042
15043 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
15044 {
15045 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
15046 return const0_rtx;
15047
15048 insn_op = &insn_data[icode].operand[nopnds];
15049
15050 op[nopnds] = expand_normal (arg);
15051
15052 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
15053 {
15054 if (!strcmp (insn_op->constraint, "n"))
15055 {
15056 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
15057 if (!CONST_INT_P (op[nopnds]))
15058 error ("argument %d must be an unsigned literal", arg_num);
15059 else
15060 error ("argument %d is an unsigned literal that is "
15061 "out of range", arg_num);
15062 return const0_rtx;
15063 }
15064 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
15065 }
15066
15067 nopnds++;
15068 }
15069
15070 /* Handle the builtins for extended mnemonics. These accept
15071 no arguments, but map to builtins that take arguments. */
15072 switch (fcode)
15073 {
15074 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
15075 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
15076 op[nopnds++] = GEN_INT (1);
15077 if (flag_checking)
15078 attr |= RS6000_BTC_UNARY;
15079 break;
15080 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
15081 op[nopnds++] = GEN_INT (0);
15082 if (flag_checking)
15083 attr |= RS6000_BTC_UNARY;
15084 break;
15085 default:
15086 break;
15087 }
15088
15089 /* If this builtin accesses SPRs, then pass in the appropriate
15090 SPR number and SPR regno as the last two operands. */
15091 if (uses_spr)
15092 {
15093 machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
15094 op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
15095 op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
15096 }
15097 /* If this builtin accesses a CR, then pass in a scratch
15098 CR as the last operand. */
15099 else if (attr & RS6000_BTC_CR)
15100 { cr = gen_reg_rtx (CCmode);
15101 op[nopnds++] = cr;
15102 }
15103
15104 if (flag_checking)
15105 {
15106 int expected_nopnds = 0;
15107 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
15108 expected_nopnds = 1;
15109 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
15110 expected_nopnds = 2;
15111 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
15112 expected_nopnds = 3;
15113 if (!(attr & RS6000_BTC_VOID))
15114 expected_nopnds += 1;
15115 if (uses_spr)
15116 expected_nopnds += 2;
15117
15118 gcc_assert (nopnds == expected_nopnds
15119 && nopnds <= MAX_HTM_OPERANDS);
15120 }
15121
15122 switch (nopnds)
15123 {
15124 case 1:
15125 pat = GEN_FCN (icode) (op[0]);
15126 break;
15127 case 2:
15128 pat = GEN_FCN (icode) (op[0], op[1]);
15129 break;
15130 case 3:
15131 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
15132 break;
15133 case 4:
15134 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
15135 break;
15136 default:
15137 gcc_unreachable ();
15138 }
15139 if (!pat)
15140 return NULL_RTX;
15141 emit_insn (pat);
15142
15143 if (attr & RS6000_BTC_CR)
15144 {
15145 if (fcode == HTM_BUILTIN_TBEGIN)
15146 {
15147 /* Emit code to set TARGET to true or false depending on
15148 whether the tbegin. instruction succeeded or failed
15149 to start a transaction.  We do this by placing the 1's
15150 complement of CR's EQ bit into TARGET. */
15151 rtx scratch = gen_reg_rtx (SImode);
15152 emit_insn (gen_rtx_SET (scratch,
15153 gen_rtx_EQ (SImode, cr,
15154 const0_rtx)));
15155 emit_insn (gen_rtx_SET (target,
15156 gen_rtx_XOR (SImode, scratch,
15157 GEN_INT (1))));
15158 }
15159 else
15160 {
15161 /* Emit code to copy the 4-bit condition register field
15162 CR into the least significant end of register TARGET. */
15163 rtx scratch1 = gen_reg_rtx (SImode);
15164 rtx scratch2 = gen_reg_rtx (SImode);
15165 rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
15166 emit_insn (gen_movcc (subreg, cr));
15167 emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
15168 emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
15169 }
15170 }
15171
15172 if (nonvoid)
15173 return target;
15174 return const0_rtx;
15175 }
15176
15177 *expandedp = false;
15178 return NULL_RTX;
15179 }
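
/* Editorial example (a sketch, not part of the original source): the
   CR handling above is what lets __builtin_tbegin be used directly as
   a truth value:

     if (__builtin_tbegin (0))
       {
         // transactional path
         __builtin_tend (0);
       }
     else
       {
         // fallback path: the transaction failed to start
       }
*/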
15180
15181 /* Expand the CPU builtin in FCODE and store the result in TARGET. */
15182
15183 static rtx
15184 cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
15185 rtx target)
15186 {
15187 /* __builtin_cpu_init () is a nop, so expand to nothing. */
15188 if (fcode == RS6000_BUILTIN_CPU_INIT)
15189 return const0_rtx;
15190
15191 if (target == 0 || GET_MODE (target) != SImode)
15192 target = gen_reg_rtx (SImode);
15193
15194 #ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
15195 tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
15196 if (TREE_CODE (arg) != STRING_CST)
15197 {
15198 error ("builtin %s only accepts a string argument",
15199 rs6000_builtin_info[(size_t) fcode].name);
15200 return const0_rtx;
15201 }
15202
15203 if (fcode == RS6000_BUILTIN_CPU_IS)
15204 {
15205 const char *cpu = TREE_STRING_POINTER (arg);
15206 rtx cpuid = NULL_RTX;
15207 for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
15208 if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
15209 {
15210 /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM. */
15211 cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
15212 break;
15213 }
15214 if (cpuid == NULL_RTX)
15215 {
15216 /* Invalid CPU argument. */
15217 error ("cpu %s is an invalid argument to builtin %s",
15218 cpu, rs6000_builtin_info[(size_t) fcode].name);
15219 return const0_rtx;
15220 }
15221
15222 rtx platform = gen_reg_rtx (SImode);
15223 rtx tcbmem = gen_const_mem (SImode,
15224 gen_rtx_PLUS (Pmode,
15225 gen_rtx_REG (Pmode, TLS_REGNUM),
15226 GEN_INT (TCB_PLATFORM_OFFSET)));
15227 emit_move_insn (platform, tcbmem);
15228 emit_insn (gen_eqsi3 (target, platform, cpuid));
15229 }
15230 else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
15231 {
15232 const char *hwcap = TREE_STRING_POINTER (arg);
15233 rtx mask = NULL_RTX;
15234 int hwcap_offset;
15235 for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
15236 if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
15237 {
15238 mask = GEN_INT (cpu_supports_info[i].mask);
15239 hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
15240 break;
15241 }
15242 if (mask == NULL_RTX)
15243 {
15244 /* Invalid HWCAP argument. */
15245 error ("hwcap %s is an invalid argument to builtin %s",
15246 hwcap, rs6000_builtin_info[(size_t) fcode].name);
15247 return const0_rtx;
15248 }
15249
15250 rtx tcb_hwcap = gen_reg_rtx (SImode);
15251 rtx tcbmem = gen_const_mem (SImode,
15252 gen_rtx_PLUS (Pmode,
15253 gen_rtx_REG (Pmode, TLS_REGNUM),
15254 GEN_INT (hwcap_offset)));
15255 emit_move_insn (tcb_hwcap, tcbmem);
15256 rtx scratch1 = gen_reg_rtx (SImode);
15257 emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
15258 rtx scratch2 = gen_reg_rtx (SImode);
15259 emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
15260 emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
15261 }
15262
15263 /* Record that we have expanded a CPU builtin, so that we can later
15264 emit a reference to the special symbol exported by LIBC to ensure we
15265 do not link against an old LIBC that doesn't support this feature. */
15266 cpu_builtin_p = true;
15267
15268 #else
15269 /* For old LIBCs, always return FALSE. */
15270 emit_move_insn (target, GEN_INT (0));
15271 #endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */
15272
15273 return target;
15274 }
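
/* Editorial example (a sketch, not part of the original source): with a
   glibc that stores HWCAP information in the TCB, the builtins handled
   above reduce to a thread-pointer-relative load plus a compare or mask:

     void
     pick_impl (void)
     {
       __builtin_cpu_init ();   // a nop on powerpc, see above
       if (__builtin_cpu_is ("power9"))
         use_power9_path ();
       else if (__builtin_cpu_supports ("vsx"))
         use_vsx_path ();
     }

   (use_power9_path and use_vsx_path are hypothetical callees.)  */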
15275
15276 static rtx
15277 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
15278 {
15279 rtx pat;
15280 tree arg0 = CALL_EXPR_ARG (exp, 0);
15281 tree arg1 = CALL_EXPR_ARG (exp, 1);
15282 tree arg2 = CALL_EXPR_ARG (exp, 2);
15283 rtx op0 = expand_normal (arg0);
15284 rtx op1 = expand_normal (arg1);
15285 rtx op2 = expand_normal (arg2);
15286 machine_mode tmode = insn_data[icode].operand[0].mode;
15287 machine_mode mode0 = insn_data[icode].operand[1].mode;
15288 machine_mode mode1 = insn_data[icode].operand[2].mode;
15289 machine_mode mode2 = insn_data[icode].operand[3].mode;
15290
15291 if (icode == CODE_FOR_nothing)
15292 /* Builtin not supported on this processor. */
15293 return 0;
15294
15295 /* If we got invalid arguments bail out before generating bad rtl. */
15296 if (arg0 == error_mark_node
15297 || arg1 == error_mark_node
15298 || arg2 == error_mark_node)
15299 return const0_rtx;
15300
15301 /* Check and prepare the argument depending on the instruction code.
15302
15303 Note that a switch statement instead of this sequence of tests
15304 would be invalid: several of the CODE_FOR values may be
15305 CODE_FOR_nothing, which would produce multiple case labels
15306 with identical values.  (We never reach here at runtime for
15307 such an icode anyway.) */
15308 if (icode == CODE_FOR_altivec_vsldoi_v4sf
15309 || icode == CODE_FOR_altivec_vsldoi_v2df
15310 || icode == CODE_FOR_altivec_vsldoi_v4si
15311 || icode == CODE_FOR_altivec_vsldoi_v8hi
15312 || icode == CODE_FOR_altivec_vsldoi_v16qi)
15313 {
15314 /* Only allow 4-bit unsigned literals. */
15315 STRIP_NOPS (arg2);
15316 if (TREE_CODE (arg2) != INTEGER_CST
15317 || TREE_INT_CST_LOW (arg2) & ~0xf)
15318 {
15319 error ("argument 3 must be a 4-bit unsigned literal");
15320 return const0_rtx;
15321 }
15322 }
15323 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
15324 || icode == CODE_FOR_vsx_xxpermdi_v2di
15325 || icode == CODE_FOR_vsx_xxsldwi_v16qi
15326 || icode == CODE_FOR_vsx_xxsldwi_v8hi
15327 || icode == CODE_FOR_vsx_xxsldwi_v4si
15328 || icode == CODE_FOR_vsx_xxsldwi_v4sf
15329 || icode == CODE_FOR_vsx_xxsldwi_v2di
15330 || icode == CODE_FOR_vsx_xxsldwi_v2df)
15331 {
15332 /* Only allow 2-bit unsigned literals. */
15333 STRIP_NOPS (arg2);
15334 if (TREE_CODE (arg2) != INTEGER_CST
15335 || TREE_INT_CST_LOW (arg2) & ~0x3)
15336 {
15337 error ("argument 3 must be a 2-bit unsigned literal");
15338 return const0_rtx;
15339 }
15340 }
15341 else if (icode == CODE_FOR_vsx_set_v2df
15342 || icode == CODE_FOR_vsx_set_v2di
15343 || icode == CODE_FOR_bcdadd
15344 || icode == CODE_FOR_bcdadd_lt
15345 || icode == CODE_FOR_bcdadd_eq
15346 || icode == CODE_FOR_bcdadd_gt
15347 || icode == CODE_FOR_bcdsub
15348 || icode == CODE_FOR_bcdsub_lt
15349 || icode == CODE_FOR_bcdsub_eq
15350 || icode == CODE_FOR_bcdsub_gt)
15351 {
15352 /* Only allow 1-bit unsigned literals. */
15353 STRIP_NOPS (arg2);
15354 if (TREE_CODE (arg2) != INTEGER_CST
15355 || TREE_INT_CST_LOW (arg2) & ~0x1)
15356 {
15357 error ("argument 3 must be a 1-bit unsigned literal");
15358 return const0_rtx;
15359 }
15360 }
15361 else if (icode == CODE_FOR_dfp_ddedpd_dd
15362 || icode == CODE_FOR_dfp_ddedpd_td)
15363 {
15364 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
15365 STRIP_NOPS (arg0);
15366 if (TREE_CODE (arg0) != INTEGER_CST
15367 || TREE_INT_CST_LOW (arg0) & ~0x3)
15368 {
15369 error ("argument 1 must be 0 or 2");
15370 return const0_rtx;
15371 }
15372 }
15373 else if (icode == CODE_FOR_dfp_denbcd_dd
15374 || icode == CODE_FOR_dfp_denbcd_td)
15375 {
15376 /* Only allow 1-bit unsigned literals. */
15377 STRIP_NOPS (arg0);
15378 if (TREE_CODE (arg0) != INTEGER_CST
15379 || TREE_INT_CST_LOW (arg0) & ~0x1)
15380 {
15381 error ("argument 1 must be a 1-bit unsigned literal");
15382 return const0_rtx;
15383 }
15384 }
15385 else if (icode == CODE_FOR_dfp_dscli_dd
15386 || icode == CODE_FOR_dfp_dscli_td
15387 || icode == CODE_FOR_dfp_dscri_dd
15388 || icode == CODE_FOR_dfp_dscri_td)
15389 {
15390 /* Only allow 6-bit unsigned literals. */
15391 STRIP_NOPS (arg1);
15392 if (TREE_CODE (arg1) != INTEGER_CST
15393 || TREE_INT_CST_LOW (arg1) & ~0x3f)
15394 {
15395 error ("argument 2 must be a 6-bit unsigned literal");
15396 return const0_rtx;
15397 }
15398 }
15399 else if (icode == CODE_FOR_crypto_vshasigmaw
15400 || icode == CODE_FOR_crypto_vshasigmad)
15401 {
15402 /* Check whether the 2nd and 3rd arguments are integer constants and in
15403 range and prepare arguments. */
15404 STRIP_NOPS (arg1);
15405 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
15406 {
15407 error ("argument 2 must be 0 or 1");
15408 return const0_rtx;
15409 }
15410
15411 STRIP_NOPS (arg2);
15412 if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
15413 {
15414 error ("argument 3 must be in the range 0..15");
15415 return const0_rtx;
15416 }
15417 }
15418
15419 if (target == 0
15420 || GET_MODE (target) != tmode
15421 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15422 target = gen_reg_rtx (tmode);
15423
15424 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15425 op0 = copy_to_mode_reg (mode0, op0);
15426 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
15427 op1 = copy_to_mode_reg (mode1, op1);
15428 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
15429 op2 = copy_to_mode_reg (mode2, op2);
15430
15431 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
15432 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
15433 else
15434 pat = GEN_FCN (icode) (target, op0, op1, op2);
15435 if (! pat)
15436 return 0;
15437 emit_insn (pat);
15438
15439 return target;
15440 }
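
/* Editorial example (a sketch, not part of the original source): the
   vsldoi cases above take a 4-bit literal byte-shift count as the
   third argument:

     vector int
     shift_in_8_bytes (vector int a, vector int b)
     {
       return vec_sld (a, b, 8);   // count must be a literal 0..15
     }
*/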
15441
15442 /* Expand the lvx builtins. */
15443 static rtx
15444 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
15445 {
15446 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15447 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15448 tree arg0;
15449 machine_mode tmode, mode0;
15450 rtx pat, op0;
15451 enum insn_code icode;
15452
15453 switch (fcode)
15454 {
15455 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
15456 icode = CODE_FOR_vector_altivec_load_v16qi;
15457 break;
15458 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
15459 icode = CODE_FOR_vector_altivec_load_v8hi;
15460 break;
15461 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
15462 icode = CODE_FOR_vector_altivec_load_v4si;
15463 break;
15464 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
15465 icode = CODE_FOR_vector_altivec_load_v4sf;
15466 break;
15467 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
15468 icode = CODE_FOR_vector_altivec_load_v2df;
15469 break;
15470 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
15471 icode = CODE_FOR_vector_altivec_load_v2di;
15472 break;
15473 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
15474 icode = CODE_FOR_vector_altivec_load_v1ti;
15475 break;
15476 default:
15477 *expandedp = false;
15478 return NULL_RTX;
15479 }
15480
15481 *expandedp = true;
15482
15483 arg0 = CALL_EXPR_ARG (exp, 0);
15484 op0 = expand_normal (arg0);
15485 tmode = insn_data[icode].operand[0].mode;
15486 mode0 = insn_data[icode].operand[1].mode;
15487
15488 if (target == 0
15489 || GET_MODE (target) != tmode
15490 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15491 target = gen_reg_rtx (tmode);
15492
15493 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
15494 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15495
15496 pat = GEN_FCN (icode) (target, op0);
15497 if (! pat)
15498 return 0;
15499 emit_insn (pat);
15500 return target;
15501 }
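/* Minimal usage sketch (assumption: the __builtin_altivec_ld_internal_4si
   spelling registered by altivec_init_builtins):

     int *p;
     __vector int v = __builtin_altivec_ld_internal_4si (p);

   If the raw address fails the operand predicate, it is copied into a
   Pmode register and wrapped in a MEM of the load's vector mode.  */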
15502
15503 /* Expand the stvx builtins. */
15504 static rtx
15505 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15506 bool *expandedp)
15507 {
15508 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15509 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
15510 tree arg0, arg1;
15511 machine_mode mode0, mode1;
15512 rtx pat, op0, op1;
15513 enum insn_code icode;
15514
15515 switch (fcode)
15516 {
15517 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
15518 icode = CODE_FOR_vector_altivec_store_v16qi;
15519 break;
15520 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
15521 icode = CODE_FOR_vector_altivec_store_v8hi;
15522 break;
15523 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
15524 icode = CODE_FOR_vector_altivec_store_v4si;
15525 break;
15526 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
15527 icode = CODE_FOR_vector_altivec_store_v4sf;
15528 break;
15529 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
15530 icode = CODE_FOR_vector_altivec_store_v2df;
15531 break;
15532 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
15533 icode = CODE_FOR_vector_altivec_store_v2di;
15534 break;
15535 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
15536 icode = CODE_FOR_vector_altivec_store_v1ti;
15537 break;
15538 default:
15539 *expandedp = false;
15540 return NULL_RTX;
15541 }
15542
15543 arg0 = CALL_EXPR_ARG (exp, 0);
15544 arg1 = CALL_EXPR_ARG (exp, 1);
15545 op0 = expand_normal (arg0);
15546 op1 = expand_normal (arg1);
15547 mode0 = insn_data[icode].operand[0].mode;
15548 mode1 = insn_data[icode].operand[1].mode;
15549
15550 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15551 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
15552 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
15553 op1 = copy_to_mode_reg (mode1, op1);
15554
15555 pat = GEN_FCN (icode) (op0, op1);
15556 if (pat)
15557 emit_insn (pat);
15558
15559 *expandedp = true;
15560 return NULL_RTX;
15561 }
15562
15563 /* Expand the dst builtins. */
15564 static rtx
15565 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
15566 bool *expandedp)
15567 {
15568 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15569 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15570 tree arg0, arg1, arg2;
15571 machine_mode mode0, mode1;
15572 rtx pat, op0, op1, op2;
15573 const struct builtin_description *d;
15574 size_t i;
15575
15576 *expandedp = false;
15577
15578 /* Handle DST variants. */
15579 d = bdesc_dst;
15580 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
15581 if (d->code == fcode)
15582 {
15583 arg0 = CALL_EXPR_ARG (exp, 0);
15584 arg1 = CALL_EXPR_ARG (exp, 1);
15585 arg2 = CALL_EXPR_ARG (exp, 2);
15586 op0 = expand_normal (arg0);
15587 op1 = expand_normal (arg1);
15588 op2 = expand_normal (arg2);
15589 mode0 = insn_data[d->icode].operand[0].mode;
15590 mode1 = insn_data[d->icode].operand[1].mode;
15591
15592 /* Invalid arguments, bail out before generating bad rtl. */
15593 if (arg0 == error_mark_node
15594 || arg1 == error_mark_node
15595 || arg2 == error_mark_node)
15596 return const0_rtx;
15597
15598 *expandedp = true;
15599 STRIP_NOPS (arg2);
15600 if (TREE_CODE (arg2) != INTEGER_CST
15601 || TREE_INT_CST_LOW (arg2) & ~0x3)
15602 {
15603 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
15604 return const0_rtx;
15605 }
15606
15607 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
15608 op0 = copy_to_mode_reg (Pmode, op0);
15609 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
15610 op1 = copy_to_mode_reg (mode1, op1);
15611
15612 pat = GEN_FCN (d->icode) (op0, op1, op2);
15613 if (pat != 0)
15614 emit_insn (pat);
15615
15616 return NULL_RTX;
15617 }
15618
15619 return NULL_RTX;
15620 }
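/* Illustrative example (not from the original sources): the stream tag,
   the last operand of the data-stream touch builtins, must be a 2-bit
   literal:

     vec_dst (p, control, 0);    accepted, tags 0..3 are valid
     vec_dst (p, control, 4);    diagnosed as above  */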
15621
15622 /* Expand vec_init builtin. */
15623 static rtx
15624 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
15625 {
15626 machine_mode tmode = TYPE_MODE (type);
15627 machine_mode inner_mode = GET_MODE_INNER (tmode);
15628 int i, n_elt = GET_MODE_NUNITS (tmode);
15629
15630 gcc_assert (VECTOR_MODE_P (tmode));
15631 gcc_assert (n_elt == call_expr_nargs (exp));
15632
15633 if (!target || !register_operand (target, tmode))
15634 target = gen_reg_rtx (tmode);
15635
15636 /* If we have a vector comprised of a single element, such as V1TImode, do
15637 the initialization directly. */
15638 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
15639 {
15640 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
15641 emit_move_insn (target, gen_lowpart (tmode, x));
15642 }
15643 else
15644 {
15645 rtvec v = rtvec_alloc (n_elt);
15646
15647 for (i = 0; i < n_elt; ++i)
15648 {
15649 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
15650 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
15651 }
15652
15653 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
15654 }
15655
15656 return target;
15657 }
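/* Worked sketch of the general path above (illustrative): for

     vector int v = (vector int) { a, b, c, d };

   the loop builds a four-entry rtvec of SImode lowparts and hands the
   PARALLEL to rs6000_expand_vector_init; the single-element V1TImode
   case instead moves the 128-bit value directly into the target.  */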
15658
15659 /* Return the integer constant in ARG. Constrain it to be in the range
15660 of the subparts of VEC_TYPE; issue an error if not. */
15661
15662 static int
15663 get_element_number (tree vec_type, tree arg)
15664 {
15665 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
15666
15667 if (!tree_fits_uhwi_p (arg)
15668 || (elt = tree_to_uhwi (arg), elt > max))
15669 {
15670 error ("selector must be an integer constant in the range 0..%wi", max);
15671 return 0;
15672 }
15673
15674 return elt;
15675 }
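/* Example of the range check (illustrative): a vector int has four
   subparts, so MAX is 3 and a constant selector of 5, as in
   vec_extract (v, 5), is diagnosed with the range 0..3.  */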
15676
15677 /* Expand vec_set builtin. */
15678 static rtx
15679 altivec_expand_vec_set_builtin (tree exp)
15680 {
15681 machine_mode tmode, mode1;
15682 tree arg0, arg1, arg2;
15683 int elt;
15684 rtx op0, op1;
15685
15686 arg0 = CALL_EXPR_ARG (exp, 0);
15687 arg1 = CALL_EXPR_ARG (exp, 1);
15688 arg2 = CALL_EXPR_ARG (exp, 2);
15689
15690 tmode = TYPE_MODE (TREE_TYPE (arg0));
15691 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15692 gcc_assert (VECTOR_MODE_P (tmode));
15693
15694 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
15695 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
15696 elt = get_element_number (TREE_TYPE (arg0), arg2);
15697
15698 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
15699 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
15700
15701 op0 = force_reg (tmode, op0);
15702 op1 = force_reg (mode1, op1);
15703
15704 rs6000_expand_vector_set (op0, op1, elt);
15705
15706 return op0;
15707 }
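/* Minimal sketch (assumptions: the __builtin_vec_set_v4si spelling and a
   literal selector):

     vector int v;
     v = __builtin_vec_set_v4si (v, 42, 2);

   get_element_number validates the selector, both operands are forced
   into registers, and rs6000_expand_vector_set rewrites element 2.  */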
15708
15709 /* Expand vec_ext builtin. */
15710 static rtx
15711 altivec_expand_vec_ext_builtin (tree exp, rtx target)
15712 {
15713 machine_mode tmode, mode0;
15714 tree arg0, arg1;
15715 rtx op0;
15716 rtx op1;
15717
15718 arg0 = CALL_EXPR_ARG (exp, 0);
15719 arg1 = CALL_EXPR_ARG (exp, 1);
15720
15721 op0 = expand_normal (arg0);
15722 op1 = expand_normal (arg1);
15723
15724 /* Call get_element_number to validate arg1 if it is a constant. */
15725 if (TREE_CODE (arg1) == INTEGER_CST)
15726 (void) get_element_number (TREE_TYPE (arg0), arg1);
15727
15728 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
15729 mode0 = TYPE_MODE (TREE_TYPE (arg0));
15730 gcc_assert (VECTOR_MODE_P (mode0));
15731
15732 op0 = force_reg (mode0, op0);
15733
15734 if (optimize || !target || !register_operand (target, tmode))
15735 target = gen_reg_rtx (tmode);
15736
15737 rs6000_expand_vector_extract (target, op0, op1);
15738
15739 return target;
15740 }
15741
15742 /* Expand the builtin in EXP and store the result in TARGET. Store
15743 true in *EXPANDEDP if we found a builtin to expand. */
15744 static rtx
15745 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
15746 {
15747 const struct builtin_description *d;
15748 size_t i;
15749 enum insn_code icode;
15750 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
15751 tree arg0, arg1, arg2;
15752 rtx op0, pat;
15753 machine_mode tmode, mode0;
15754 enum rs6000_builtins fcode
15755 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
15756
15757 if (rs6000_overloaded_builtin_p (fcode))
15758 {
15759 *expandedp = true;
15760 error ("unresolved overload for Altivec builtin %qF", fndecl);
15761
15762 /* Given it is invalid, just generate a normal call. */
15763 return expand_call (exp, target, false);
15764 }
15765
15766 target = altivec_expand_ld_builtin (exp, target, expandedp);
15767 if (*expandedp)
15768 return target;
15769
15770 target = altivec_expand_st_builtin (exp, target, expandedp);
15771 if (*expandedp)
15772 return target;
15773
15774 target = altivec_expand_dst_builtin (exp, target, expandedp);
15775 if (*expandedp)
15776 return target;
15777
15778 *expandedp = true;
15779
15780 switch (fcode)
15781 {
15782 case ALTIVEC_BUILTIN_STVX_V2DF:
15783 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df_2op, exp);
15784 case ALTIVEC_BUILTIN_STVX_V2DI:
15785 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di_2op, exp);
15786 case ALTIVEC_BUILTIN_STVX_V4SF:
15787 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf_2op, exp);
15788 case ALTIVEC_BUILTIN_STVX:
15789 case ALTIVEC_BUILTIN_STVX_V4SI:
15790 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si_2op, exp);
15791 case ALTIVEC_BUILTIN_STVX_V8HI:
15792 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi_2op, exp);
15793 case ALTIVEC_BUILTIN_STVX_V16QI:
15794 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi_2op, exp);
15795 case ALTIVEC_BUILTIN_STVEBX:
15796 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
15797 case ALTIVEC_BUILTIN_STVEHX:
15798 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
15799 case ALTIVEC_BUILTIN_STVEWX:
15800 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
15801 case ALTIVEC_BUILTIN_STVXL_V2DF:
15802 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
15803 case ALTIVEC_BUILTIN_STVXL_V2DI:
15804 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
15805 case ALTIVEC_BUILTIN_STVXL_V4SF:
15806 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
15807 case ALTIVEC_BUILTIN_STVXL:
15808 case ALTIVEC_BUILTIN_STVXL_V4SI:
15809 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
15810 case ALTIVEC_BUILTIN_STVXL_V8HI:
15811 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
15812 case ALTIVEC_BUILTIN_STVXL_V16QI:
15813 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
15814
15815 case ALTIVEC_BUILTIN_STVLX:
15816 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
15817 case ALTIVEC_BUILTIN_STVLXL:
15818 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
15819 case ALTIVEC_BUILTIN_STVRX:
15820 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
15821 case ALTIVEC_BUILTIN_STVRXL:
15822 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
15823
15824 case P9V_BUILTIN_STXVL:
15825 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl, exp);
15826
15827 case VSX_BUILTIN_STXVD2X_V1TI:
15828 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
15829 case VSX_BUILTIN_STXVD2X_V2DF:
15830 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
15831 case VSX_BUILTIN_STXVD2X_V2DI:
15832 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
15833 case VSX_BUILTIN_STXVW4X_V4SF:
15834 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
15835 case VSX_BUILTIN_STXVW4X_V4SI:
15836 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
15837 case VSX_BUILTIN_STXVW4X_V8HI:
15838 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
15839 case VSX_BUILTIN_STXVW4X_V16QI:
15840 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
15841
15842 /* For the following on big endian, it's ok to use any appropriate
15843 unaligned-supporting store, so use a generic expander. For
15844 little-endian, the exact element-reversing instruction must
15845 be used. */
15846 case VSX_BUILTIN_ST_ELEMREV_V2DF:
15847 {
15848 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2df
15849 : CODE_FOR_vsx_st_elemrev_v2df);
15850 return altivec_expand_stv_builtin (code, exp);
15851 }
15852 case VSX_BUILTIN_ST_ELEMREV_V2DI:
15853 {
15854 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v2di
15855 : CODE_FOR_vsx_st_elemrev_v2di);
15856 return altivec_expand_stv_builtin (code, exp);
15857 }
15858 case VSX_BUILTIN_ST_ELEMREV_V4SF:
15859 {
15860 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4sf
15861 : CODE_FOR_vsx_st_elemrev_v4sf);
15862 return altivec_expand_stv_builtin (code, exp);
15863 }
15864 case VSX_BUILTIN_ST_ELEMREV_V4SI:
15865 {
15866 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v4si
15867 : CODE_FOR_vsx_st_elemrev_v4si);
15868 return altivec_expand_stv_builtin (code, exp);
15869 }
15870 case VSX_BUILTIN_ST_ELEMREV_V8HI:
15871 {
15872 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v8hi
15873 : CODE_FOR_vsx_st_elemrev_v8hi);
15874 return altivec_expand_stv_builtin (code, exp);
15875 }
15876 case VSX_BUILTIN_ST_ELEMREV_V16QI:
15877 {
15878 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_store_v16qi
15879 : CODE_FOR_vsx_st_elemrev_v16qi);
15880 return altivec_expand_stv_builtin (code, exp);
15881 }
15882
15883 case ALTIVEC_BUILTIN_MFVSCR:
15884 icode = CODE_FOR_altivec_mfvscr;
15885 tmode = insn_data[icode].operand[0].mode;
15886
15887 if (target == 0
15888 || GET_MODE (target) != tmode
15889 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
15890 target = gen_reg_rtx (tmode);
15891
15892 pat = GEN_FCN (icode) (target);
15893 if (! pat)
15894 return 0;
15895 emit_insn (pat);
15896 return target;
15897
15898 case ALTIVEC_BUILTIN_MTVSCR:
15899 icode = CODE_FOR_altivec_mtvscr;
15900 arg0 = CALL_EXPR_ARG (exp, 0);
15901 op0 = expand_normal (arg0);
15902 mode0 = insn_data[icode].operand[0].mode;
15903
15904 /* If we got invalid arguments, bail out before generating bad rtl. */
15905 if (arg0 == error_mark_node)
15906 return const0_rtx;
15907
15908 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15909 op0 = copy_to_mode_reg (mode0, op0);
15910
15911 pat = GEN_FCN (icode) (op0);
15912 if (pat)
15913 emit_insn (pat);
15914 return NULL_RTX;
15915
15916 case ALTIVEC_BUILTIN_DSSALL:
15917 emit_insn (gen_altivec_dssall ());
15918 return NULL_RTX;
15919
15920 case ALTIVEC_BUILTIN_DSS:
15921 icode = CODE_FOR_altivec_dss;
15922 arg0 = CALL_EXPR_ARG (exp, 0);
15923 STRIP_NOPS (arg0);
15924 op0 = expand_normal (arg0);
15925 mode0 = insn_data[icode].operand[0].mode;
15926
15927 /* If we got invalid arguments, bail out before generating bad rtl. */
15928 if (arg0 == error_mark_node)
15929 return const0_rtx;
15930
15931 if (TREE_CODE (arg0) != INTEGER_CST
15932 || TREE_INT_CST_LOW (arg0) & ~0x3)
15933 {
15934 error ("argument to dss must be a 2-bit unsigned literal");
15935 return const0_rtx;
15936 }
15937
15938 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
15939 op0 = copy_to_mode_reg (mode0, op0);
15940
15941 emit_insn (gen_altivec_dss (op0));
15942 return NULL_RTX;
15943
15944 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
15945 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
15946 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
15947 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
15948 case VSX_BUILTIN_VEC_INIT_V2DF:
15949 case VSX_BUILTIN_VEC_INIT_V2DI:
15950 case VSX_BUILTIN_VEC_INIT_V1TI:
15951 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
15952
15953 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
15954 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
15955 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
15956 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
15957 case VSX_BUILTIN_VEC_SET_V2DF:
15958 case VSX_BUILTIN_VEC_SET_V2DI:
15959 case VSX_BUILTIN_VEC_SET_V1TI:
15960 return altivec_expand_vec_set_builtin (exp);
15961
15962 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
15963 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
15964 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
15965 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
15966 case VSX_BUILTIN_VEC_EXT_V2DF:
15967 case VSX_BUILTIN_VEC_EXT_V2DI:
15968 case VSX_BUILTIN_VEC_EXT_V1TI:
15969 return altivec_expand_vec_ext_builtin (exp, target);
15970
15971 case P9V_BUILTIN_VEXTRACT4B:
15972 case P9V_BUILTIN_VEC_VEXTRACT4B:
15973 arg1 = CALL_EXPR_ARG (exp, 1);
15974 STRIP_NOPS (arg1);
15975
15976 /* If the argument is invalid, generate a normal call. */
15977 if (arg1 == error_mark_node)
15978 return expand_call (exp, target, false);
15979
15980 if (TREE_CODE (arg1) != INTEGER_CST || TREE_INT_CST_LOW (arg1) > 12)
15981 {
15982 error ("second argument to vec_vextract4b must be 0..12");
15983 return expand_call (exp, target, false);
15984 }
15985 break;
15986
15987 case P9V_BUILTIN_VINSERT4B:
15988 case P9V_BUILTIN_VINSERT4B_DI:
15989 case P9V_BUILTIN_VEC_VINSERT4B:
15990 arg2 = CALL_EXPR_ARG (exp, 2);
15991 STRIP_NOPS (arg2);
15992
15993 /* If the argument is invalid, generate a normal call. */
15994 if (arg2 == error_mark_node)
15995 return expand_call (exp, target, false);
15996
15997 if (TREE_CODE (arg2) != INTEGER_CST || TREE_INT_CST_LOW (arg2) > 12)
15998 {
15999 error ("third argument to vec_vinsert4b must be 0..12");
16000 return expand_call (exp, target, false);
16001 }
16002 break;
16003
16004 default:
16005 break;
16007 }
16008
16009 /* Expand abs* operations. */
16010 d = bdesc_abs;
16011 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
16012 if (d->code == fcode)
16013 return altivec_expand_abs_builtin (d->icode, exp, target);
16014
16015 /* Expand the AltiVec predicates. */
16016 d = bdesc_altivec_preds;
16017 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
16018 if (d->code == fcode)
16019 return altivec_expand_predicate_builtin (d->icode, exp, target);
16020
16021 /* LV* are funky; they are initialized differently from the other builtins. */
16022 switch (fcode)
16023 {
16024 case ALTIVEC_BUILTIN_LVSL:
16025 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
16026 exp, target, false);
16027 case ALTIVEC_BUILTIN_LVSR:
16028 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
16029 exp, target, false);
16030 case ALTIVEC_BUILTIN_LVEBX:
16031 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
16032 exp, target, false);
16033 case ALTIVEC_BUILTIN_LVEHX:
16034 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
16035 exp, target, false);
16036 case ALTIVEC_BUILTIN_LVEWX:
16037 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
16038 exp, target, false);
16039 case ALTIVEC_BUILTIN_LVXL_V2DF:
16040 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
16041 exp, target, false);
16042 case ALTIVEC_BUILTIN_LVXL_V2DI:
16043 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
16044 exp, target, false);
16045 case ALTIVEC_BUILTIN_LVXL_V4SF:
16046 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
16047 exp, target, false);
16048 case ALTIVEC_BUILTIN_LVXL:
16049 case ALTIVEC_BUILTIN_LVXL_V4SI:
16050 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
16051 exp, target, false);
16052 case ALTIVEC_BUILTIN_LVXL_V8HI:
16053 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
16054 exp, target, false);
16055 case ALTIVEC_BUILTIN_LVXL_V16QI:
16056 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
16057 exp, target, false);
16058 case ALTIVEC_BUILTIN_LVX_V2DF:
16059 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df_2op,
16060 exp, target, false);
16061 case ALTIVEC_BUILTIN_LVX_V2DI:
16062 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di_2op,
16063 exp, target, false);
16064 case ALTIVEC_BUILTIN_LVX_V4SF:
16065 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf_2op,
16066 exp, target, false);
16067 case ALTIVEC_BUILTIN_LVX:
16068 case ALTIVEC_BUILTIN_LVX_V4SI:
16069 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si_2op,
16070 exp, target, false);
16071 case ALTIVEC_BUILTIN_LVX_V8HI:
16072 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi_2op,
16073 exp, target, false);
16074 case ALTIVEC_BUILTIN_LVX_V16QI:
16075 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi_2op,
16076 exp, target, false);
16077 case ALTIVEC_BUILTIN_LVLX:
16078 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
16079 exp, target, true);
16080 case ALTIVEC_BUILTIN_LVLXL:
16081 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
16082 exp, target, true);
16083 case ALTIVEC_BUILTIN_LVRX:
16084 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
16085 exp, target, true);
16086 case ALTIVEC_BUILTIN_LVRXL:
16087 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
16088 exp, target, true);
16089 case VSX_BUILTIN_LXVD2X_V1TI:
16090 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
16091 exp, target, false);
16092 case VSX_BUILTIN_LXVD2X_V2DF:
16093 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
16094 exp, target, false);
16095 case VSX_BUILTIN_LXVD2X_V2DI:
16096 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
16097 exp, target, false);
16098 case VSX_BUILTIN_LXVW4X_V4SF:
16099 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
16100 exp, target, false);
16101 case VSX_BUILTIN_LXVW4X_V4SI:
16102 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
16103 exp, target, false);
16104 case VSX_BUILTIN_LXVW4X_V8HI:
16105 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
16106 exp, target, false);
16107 case VSX_BUILTIN_LXVW4X_V16QI:
16108 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
16109 exp, target, false);
16110 /* For the following on big endian, it's ok to use any appropriate
16111 unaligned-supporting load, so use a generic expander. For
16112 little-endian, the exact element-reversing instruction must
16113 be used. */
16114 case VSX_BUILTIN_LD_ELEMREV_V2DF:
16115 {
16116 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
16117 : CODE_FOR_vsx_ld_elemrev_v2df);
16118 return altivec_expand_lv_builtin (code, exp, target, false);
16119 }
16120 case VSX_BUILTIN_LD_ELEMREV_V2DI:
16121 {
16122 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
16123 : CODE_FOR_vsx_ld_elemrev_v2di);
16124 return altivec_expand_lv_builtin (code, exp, target, false);
16125 }
16126 case VSX_BUILTIN_LD_ELEMREV_V4SF:
16127 {
16128 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
16129 : CODE_FOR_vsx_ld_elemrev_v4sf);
16130 return altivec_expand_lv_builtin (code, exp, target, false);
16131 }
16132 case VSX_BUILTIN_LD_ELEMREV_V4SI:
16133 {
16134 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
16135 : CODE_FOR_vsx_ld_elemrev_v4si);
16136 return altivec_expand_lv_builtin (code, exp, target, false);
16137 }
16138 case VSX_BUILTIN_LD_ELEMREV_V8HI:
16139 {
16140 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
16141 : CODE_FOR_vsx_ld_elemrev_v8hi);
16142 return altivec_expand_lv_builtin (code, exp, target, false);
16143 }
16144 case VSX_BUILTIN_LD_ELEMREV_V16QI:
16145 {
16146 enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
16147 : CODE_FOR_vsx_ld_elemrev_v16qi);
16148 return altivec_expand_lv_builtin (code, exp, target, false);
16149 }
16150 break;
16151 default:
16152 break;
16154 }
16155
16156 *expandedp = false;
16157 return NULL_RTX;
16158 }
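/* Dispatch summary (illustrative, not part of the original sources): an
   intrinsic such as vec_mfvscr () is handled by the ALTIVEC_BUILTIN_MFVSCR
   case above, vec_lvsl (0, p) reaches the LV* switch at the end, and
   anything still unmatched leaves with *expandedp = false so that
   rs6000_expand_builtin can try the SPE, paired and HTM expanders.  */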
16159
16160 /* Expand the builtin in EXP and store the result in TARGET. Store
16161 true in *EXPANDEDP if we found a builtin to expand. */
16162 static rtx
16163 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
16164 {
16165 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16166 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16167 const struct builtin_description *d;
16168 size_t i;
16169
16170 *expandedp = true;
16171
16172 switch (fcode)
16173 {
16174 case PAIRED_BUILTIN_STX:
16175 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
16176 case PAIRED_BUILTIN_LX:
16177 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
16178 default:
16179 break;
16181 }
16182
16183 /* Expand the paired predicates. */
16184 d = bdesc_paired_preds;
16185 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
16186 if (d->code == fcode)
16187 return paired_expand_predicate_builtin (d->icode, exp, target);
16188
16189 *expandedp = false;
16190 return NULL_RTX;
16191 }
16192
16193 /* Binops that need to be initialized manually, but can be expanded
16194 automagically by rs6000_expand_binop_builtin. */
16195 static const struct builtin_description bdesc_2arg_spe[] =
16196 {
16197 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
16198 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
16199 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
16200 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
16201 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
16202 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
16203 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
16204 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
16205 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
16206 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
16207 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
16208 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
16209 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
16210 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
16211 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
16212 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
16213 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
16214 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
16215 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
16216 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
16217 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
16218 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
16219 };
16220
16221 /* Expand the builtin in EXP and store the result in TARGET. Store
16222 true in *EXPANDEDP if we found a builtin to expand.
16223
16224 This expands the SPE builtins that are not simple unary and binary
16225 operations. */
16226 static rtx
16227 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
16228 {
16229 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16230 tree arg1, arg0;
16231 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16232 enum insn_code icode;
16233 machine_mode tmode, mode0;
16234 rtx pat, op0;
16235 const struct builtin_description *d;
16236 size_t i;
16237
16238 *expandedp = true;
16239
16240 /* Syntax check for a 5-bit unsigned immediate. */
16241 switch (fcode)
16242 {
16243 case SPE_BUILTIN_EVSTDD:
16244 case SPE_BUILTIN_EVSTDH:
16245 case SPE_BUILTIN_EVSTDW:
16246 case SPE_BUILTIN_EVSTWHE:
16247 case SPE_BUILTIN_EVSTWHO:
16248 case SPE_BUILTIN_EVSTWWE:
16249 case SPE_BUILTIN_EVSTWWO:
16250 arg1 = CALL_EXPR_ARG (exp, 2);
16251 if (TREE_CODE (arg1) != INTEGER_CST
16252 || TREE_INT_CST_LOW (arg1) & ~0x1f)
16253 {
16254 error ("argument 2 must be a 5-bit unsigned literal");
16255 return const0_rtx;
16256 }
16257 break;
16258 default:
16259 break;
16260 }
16261
16262 /* The evsplat*i instructions are not quite generic. */
16263 switch (fcode)
16264 {
16265 case SPE_BUILTIN_EVSPLATFI:
16266 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
16267 exp, target);
16268 case SPE_BUILTIN_EVSPLATI:
16269 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
16270 exp, target);
16271 default:
16272 break;
16273 }
16274
16275 d = bdesc_2arg_spe;
16276 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
16277 if (d->code == fcode)
16278 return rs6000_expand_binop_builtin (d->icode, exp, target);
16279
16280 d = bdesc_spe_predicates;
16281 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
16282 if (d->code == fcode)
16283 return spe_expand_predicate_builtin (d->icode, exp, target);
16284
16285 d = bdesc_spe_evsel;
16286 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
16287 if (d->code == fcode)
16288 return spe_expand_evsel_builtin (d->icode, exp, target);
16289
16290 switch (fcode)
16291 {
16292 case SPE_BUILTIN_EVSTDDX:
16293 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
16294 case SPE_BUILTIN_EVSTDHX:
16295 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
16296 case SPE_BUILTIN_EVSTDWX:
16297 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
16298 case SPE_BUILTIN_EVSTWHEX:
16299 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
16300 case SPE_BUILTIN_EVSTWHOX:
16301 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
16302 case SPE_BUILTIN_EVSTWWEX:
16303 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
16304 case SPE_BUILTIN_EVSTWWOX:
16305 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
16306 case SPE_BUILTIN_EVSTDD:
16307 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
16308 case SPE_BUILTIN_EVSTDH:
16309 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
16310 case SPE_BUILTIN_EVSTDW:
16311 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
16312 case SPE_BUILTIN_EVSTWHE:
16313 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
16314 case SPE_BUILTIN_EVSTWHO:
16315 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
16316 case SPE_BUILTIN_EVSTWWE:
16317 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
16318 case SPE_BUILTIN_EVSTWWO:
16319 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
16320 case SPE_BUILTIN_MFSPEFSCR:
16321 icode = CODE_FOR_spe_mfspefscr;
16322 tmode = insn_data[icode].operand[0].mode;
16323
16324 if (target == 0
16325 || GET_MODE (target) != tmode
16326 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16327 target = gen_reg_rtx (tmode);
16328
16329 pat = GEN_FCN (icode) (target);
16330 if (! pat)
16331 return 0;
16332 emit_insn (pat);
16333 return target;
16334 case SPE_BUILTIN_MTSPEFSCR:
16335 icode = CODE_FOR_spe_mtspefscr;
16336 arg0 = CALL_EXPR_ARG (exp, 0);
16337 op0 = expand_normal (arg0);
16338 mode0 = insn_data[icode].operand[0].mode;
16339
16340 if (arg0 == error_mark_node)
16341 return const0_rtx;
16342
16343 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
16344 op0 = copy_to_mode_reg (mode0, op0);
16345
16346 pat = GEN_FCN (icode) (op0);
16347 if (pat)
16348 emit_insn (pat);
16349 return NULL_RTX;
16350 default:
16351 break;
16352 }
16353
16354 *expandedp = false;
16355 return NULL_RTX;
16356 }
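/* Illustrative note (not in the original sources): for the evstdd, evstdh,
   evstdw, evstwhe, evstwho, evstwwe and evstwwo forms checked above, the
   displacement operand must be a compile-time literal that fits in 5 bits,
   so values 0..31 pass the check and anything else is diagnosed.  */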
16357
16358 static rtx
16359 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
16360 {
16361 rtx pat, scratch, tmp;
16362 tree form = CALL_EXPR_ARG (exp, 0);
16363 tree arg0 = CALL_EXPR_ARG (exp, 1);
16364 tree arg1 = CALL_EXPR_ARG (exp, 2);
16365 rtx op0 = expand_normal (arg0);
16366 rtx op1 = expand_normal (arg1);
16367 machine_mode mode0 = insn_data[icode].operand[1].mode;
16368 machine_mode mode1 = insn_data[icode].operand[2].mode;
16369 int form_int;
16370 enum rtx_code code;
16371
16372 if (TREE_CODE (form) != INTEGER_CST)
16373 {
16374 error ("argument 1 of __builtin_paired_predicate must be a constant");
16375 return const0_rtx;
16376 }
16377 else
16378 form_int = TREE_INT_CST_LOW (form);
16379
16380 gcc_assert (mode0 == mode1);
16381
16382 if (arg0 == error_mark_node || arg1 == error_mark_node)
16383 return const0_rtx;
16384
16385 if (target == 0
16386 || GET_MODE (target) != SImode
16387 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
16388 target = gen_reg_rtx (SImode);
16389 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
16390 op0 = copy_to_mode_reg (mode0, op0);
16391 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
16392 op1 = copy_to_mode_reg (mode1, op1);
16393
16394 scratch = gen_reg_rtx (CCFPmode);
16395
16396 pat = GEN_FCN (icode) (scratch, op0, op1);
16397 if (!pat)
16398 return const0_rtx;
16399
16400 emit_insn (pat);
16401
16402 switch (form_int)
16403 {
16404 /* LT bit. */
16405 case 0:
16406 code = LT;
16407 break;
16408 /* GT bit. */
16409 case 1:
16410 code = GT;
16411 break;
16412 /* EQ bit. */
16413 case 2:
16414 code = EQ;
16415 break;
16416 /* UN bit. */
16417 case 3:
16418 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
16419 return target;
16420 default:
16421 error ("argument 1 of __builtin_paired_predicate is out of range");
16422 return const0_rtx;
16423 }
16424
16425 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
16426 emit_move_insn (target, tmp);
16427 return target;
16428 }
16429
16430 static rtx
16431 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
16432 {
16433 rtx pat, scratch, tmp;
16434 tree form = CALL_EXPR_ARG (exp, 0);
16435 tree arg0 = CALL_EXPR_ARG (exp, 1);
16436 tree arg1 = CALL_EXPR_ARG (exp, 2);
16437 rtx op0 = expand_normal (arg0);
16438 rtx op1 = expand_normal (arg1);
16439 machine_mode mode0 = insn_data[icode].operand[1].mode;
16440 machine_mode mode1 = insn_data[icode].operand[2].mode;
16441 int form_int;
16442 enum rtx_code code;
16443
16444 if (TREE_CODE (form) != INTEGER_CST)
16445 {
16446 error ("argument 1 of __builtin_spe_predicate must be a constant");
16447 return const0_rtx;
16448 }
16449 else
16450 form_int = TREE_INT_CST_LOW (form);
16451
16452 gcc_assert (mode0 == mode1);
16453
16454 if (arg0 == error_mark_node || arg1 == error_mark_node)
16455 return const0_rtx;
16456
16457 if (target == 0
16458 || GET_MODE (target) != SImode
16459 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
16460 target = gen_reg_rtx (SImode);
16461
16462 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
16463 op0 = copy_to_mode_reg (mode0, op0);
16464 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
16465 op1 = copy_to_mode_reg (mode1, op1);
16466
16467 scratch = gen_reg_rtx (CCmode);
16468
16469 pat = GEN_FCN (icode) (scratch, op0, op1);
16470 if (! pat)
16471 return const0_rtx;
16472 emit_insn (pat);
16473
16474 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
16475 _lower_. We use one compare, but look in different bits of the
16476 CR for each variant.
16477
16478 There are 2 elements in each SPE simd type (upper/lower). The CR
16479 bits are set as follows:
16480
16481 BIT 0 | BIT 1 | BIT 2 | BIT 3
16482 U | L | (U | L) | (U & L)
16483
16484 So, for an "all" relationship, BIT 3 would be set.
16485 For an "any" relationship, BIT 2 would be set. Etc.
16486
16487 Following traditional nomenclature, these bits map to:
16488
16489 BIT 0 | BIT 1 | BIT 2 | BIT 3
16490 LT | GT | EQ | OV
16491
16492 Later, we generate rtl to look in the OV, EQ, LT and GT bits for the
16493 "all", "any", "upper" and "lower" variants, respectively. */
16494
16495 switch (form_int)
16496 {
16497 /* All variant. OV bit. */
16498 case 0:
16499 /* We need to get to the OV bit, which is the ORDERED bit. We
16500 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
16501 that's ugly and will make validate_condition_mode die.
16502 So let's just use another pattern. */
16503 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
16504 return target;
16505 /* Any variant. EQ bit. */
16506 case 1:
16507 code = EQ;
16508 break;
16509 /* Upper variant. LT bit. */
16510 case 2:
16511 code = LT;
16512 break;
16513 /* Lower variant. GT bit. */
16514 case 3:
16515 code = GT;
16516 break;
16517 default:
16518 error ("argument 1 of __builtin_spe_predicate is out of range");
16519 return const0_rtx;
16520 }
16521
16522 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
16523 emit_move_insn (target, tmp);
16524
16525 return target;
16526 }
16527
16528 /* The evsel builtins look like this:
16529
16530 e = __builtin_spe_evsel_OP (a, b, c, d);
16531
16532 and work like this:
16533
16534 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
16535 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
16536 */
16537
16538 static rtx
16539 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
16540 {
16541 rtx pat, scratch;
16542 tree arg0 = CALL_EXPR_ARG (exp, 0);
16543 tree arg1 = CALL_EXPR_ARG (exp, 1);
16544 tree arg2 = CALL_EXPR_ARG (exp, 2);
16545 tree arg3 = CALL_EXPR_ARG (exp, 3);
16546 rtx op0 = expand_normal (arg0);
16547 rtx op1 = expand_normal (arg1);
16548 rtx op2 = expand_normal (arg2);
16549 rtx op3 = expand_normal (arg3);
16550 machine_mode mode0 = insn_data[icode].operand[1].mode;
16551 machine_mode mode1 = insn_data[icode].operand[2].mode;
16552
16553 gcc_assert (mode0 == mode1);
16554
16555 if (arg0 == error_mark_node || arg1 == error_mark_node
16556 || arg2 == error_mark_node || arg3 == error_mark_node)
16557 return const0_rtx;
16558
16559 if (target == 0
16560 || GET_MODE (target) != mode0
16561 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
16562 target = gen_reg_rtx (mode0);
16563
16564 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
16565 op0 = copy_to_mode_reg (mode0, op0);
16566 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
16567 op1 = copy_to_mode_reg (mode0, op1);
16568 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
16569 op2 = copy_to_mode_reg (mode0, op2);
16570 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
16571 op3 = copy_to_mode_reg (mode0, op3);
16572
16573 /* Generate the compare. */
16574 scratch = gen_reg_rtx (CCmode);
16575 pat = GEN_FCN (icode) (scratch, op0, op1);
16576 if (! pat)
16577 return const0_rtx;
16578 emit_insn (pat);
16579
16580 if (mode0 == V2SImode)
16581 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
16582 else
16583 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
16584
16585 return target;
16586 }
16587
16588 /* Raise an error message for a builtin function that is called without the
16589 appropriate target options being set. */
16590
16591 static void
16592 rs6000_invalid_builtin (enum rs6000_builtins fncode)
16593 {
16594 size_t uns_fncode = (size_t)fncode;
16595 const char *name = rs6000_builtin_info[uns_fncode].name;
16596 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
16597
16598 gcc_assert (name != NULL);
16599 if ((fnmask & RS6000_BTM_CELL) != 0)
16600 error ("builtin function %s is only valid for the cell processor", name);
16601 else if ((fnmask & RS6000_BTM_VSX) != 0)
16602 error ("builtin function %s requires the -mvsx option", name);
16603 else if ((fnmask & RS6000_BTM_HTM) != 0)
16604 error ("builtin function %s requires the -mhtm option", name);
16605 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
16606 error ("builtin function %s requires the -maltivec option", name);
16607 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
16608 error ("builtin function %s requires the -mpaired option", name);
16609 else if ((fnmask & RS6000_BTM_SPE) != 0)
16610 error ("builtin function %s requires the -mspe option", name);
16611 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
16612 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
16613 error ("builtin function %s requires the -mhard-dfp and"
16614 " -mpower8-vector options", name);
16615 else if ((fnmask & RS6000_BTM_DFP) != 0)
16616 error ("builtin function %s requires the -mhard-dfp option", name);
16617 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
16618 error ("builtin function %s requires the -mpower8-vector option", name);
16619 else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
16620 == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
16621 error ("builtin function %s requires the -mcpu=power9 and"
16622 " -m64 options", name);
16623 else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
16624 error ("builtin function %s requires the -mcpu=power9 option", name);
16625 else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
16626 == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
16627 error ("builtin function %s requires the -mcpu=power9 and"
16628 " -m64 options", name);
16629 else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
16630 error ("builtin function %s requires the -mcpu=power9 option", name);
16631 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
16632 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
16633 error ("builtin function %s requires the -mhard-float and"
16634 " -mlong-double-128 options", name);
16635 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
16636 error ("builtin function %s requires the -mhard-float option", name);
16637 else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
16638 error ("builtin function %s requires the -mfloat128 option", name);
16639 else
16640 error ("builtin function %s is not supported with the current options",
16641 name);
16642 }
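/* Example diagnostic (illustrative): calling __builtin_altivec_vaddubm
   when -maltivec is not enabled reports "builtin function
   __builtin_altivec_vaddubm requires the -maltivec option"; the caller
   then expands the call as an ordinary external call.  */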
16643
16644 /* Target hook for early folding of built-ins, shamelessly stolen
16645 from ia64.c. */
16646
16647 static tree
16648 rs6000_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED,
16649 tree *args, bool ignore ATTRIBUTE_UNUSED)
16650 {
16651 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
16652 {
16653 enum rs6000_builtins fn_code
16654 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16655 switch (fn_code)
16656 {
16657 case RS6000_BUILTIN_NANQ:
16658 case RS6000_BUILTIN_NANSQ:
16659 {
16660 tree type = TREE_TYPE (TREE_TYPE (fndecl));
16661 const char *str = c_getstr (*args);
16662 int quiet = fn_code == RS6000_BUILTIN_NANQ;
16663 REAL_VALUE_TYPE real;
16664
16665 if (str && real_nan (&real, str, quiet, TYPE_MODE (type)))
16666 return build_real (type, real);
16667 return NULL_TREE;
16668 }
16669 case RS6000_BUILTIN_INFQ:
16670 case RS6000_BUILTIN_HUGE_VALQ:
16671 {
16672 tree type = TREE_TYPE (TREE_TYPE (fndecl));
16673 REAL_VALUE_TYPE inf;
16674 real_inf (&inf);
16675 return build_real (type, inf);
16676 }
16677 default:
16678 break;
16679 }
16680 }
16681 #ifdef SUBTARGET_FOLD_BUILTIN
16682 return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
16683 #else
16684 return NULL_TREE;
16685 #endif
16686 }
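/* Illustrative compile-time folds performed above (assuming the
   __float128 builtins are enabled):

     __float128 a = __builtin_nanq ("");     becomes a quiet NaN REAL_CST
     __float128 b = __builtin_nansq ("");    becomes a signaling NaN
     __float128 c = __builtin_infq ();       becomes +Inf

   Each call folds to a constant, so nothing is evaluated at run time.  */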
16687
16688 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
16689 a constant, use rs6000_fold_builtin.) */
16690
16691 bool
16692 rs6000_gimple_fold_builtin (gimple_stmt_iterator *gsi)
16693 {
16694 gimple *stmt = gsi_stmt (*gsi);
16695 tree fndecl = gimple_call_fndecl (stmt);
16696 gcc_checking_assert (fndecl && DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD);
16697 enum rs6000_builtins fn_code
16698 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
16699 tree arg0, arg1, lhs;
16700
16701 switch (fn_code)
16702 {
16703 /* Flavors of vec_add. We deliberately don't expand
16704 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
16705 TImode, resulting in much poorer code generation. */
16706 case ALTIVEC_BUILTIN_VADDUBM:
16707 case ALTIVEC_BUILTIN_VADDUHM:
16708 case ALTIVEC_BUILTIN_VADDUWM:
16709 case P8V_BUILTIN_VADDUDM:
16710 case ALTIVEC_BUILTIN_VADDFP:
16711 case VSX_BUILTIN_XVADDDP:
16712 {
16713 arg0 = gimple_call_arg (stmt, 0);
16714 arg1 = gimple_call_arg (stmt, 1);
16715 lhs = gimple_call_lhs (stmt);
16716 gimple *g = gimple_build_assign (lhs, PLUS_EXPR, arg0, arg1);
16717 gimple_set_location (g, gimple_location (stmt));
16718 gsi_replace (gsi, g, true);
16719 return true;
16720 }
16721 /* Flavors of vec_sub. We deliberately don't expand
16722 P8V_BUILTIN_VSUBUQM. */
16723 case ALTIVEC_BUILTIN_VSUBUBM:
16724 case ALTIVEC_BUILTIN_VSUBUHM:
16725 case ALTIVEC_BUILTIN_VSUBUWM:
16726 case P8V_BUILTIN_VSUBUDM:
16727 case ALTIVEC_BUILTIN_VSUBFP:
16728 case VSX_BUILTIN_XVSUBDP:
16729 {
16730 arg0 = gimple_call_arg (stmt, 0);
16731 arg1 = gimple_call_arg (stmt, 1);
16732 lhs = gimple_call_lhs (stmt);
16733 gimple *g = gimple_build_assign (lhs, MINUS_EXPR, arg0, arg1);
16734 gimple_set_location (g, gimple_location (stmt));
16735 gsi_replace (gsi, g, true);
16736 return true;
16737 }
16738 /* Even element flavors of vec_mul (signed). */
16739 case ALTIVEC_BUILTIN_VMULESB:
16740 case ALTIVEC_BUILTIN_VMULESH:
16741 /* Even element flavors of vec_mul (unsigned). */
16742 case ALTIVEC_BUILTIN_VMULEUB:
16743 case ALTIVEC_BUILTIN_VMULEUH:
16744 {
16745 arg0 = gimple_call_arg (stmt, 0);
16746 arg1 = gimple_call_arg (stmt, 1);
16747 lhs = gimple_call_lhs (stmt);
16748 gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_EVEN_EXPR, arg0, arg1);
16749 gimple_set_location (g, gimple_location (stmt));
16750 gsi_replace (gsi, g, true);
16751 return true;
16752 }
16753 /* Odd element flavors of vec_mul (signed). */
16754 case ALTIVEC_BUILTIN_VMULOSB:
16755 case ALTIVEC_BUILTIN_VMULOSH:
16756 /* Odd element flavors of vec_mul (unsigned). */
16757 case ALTIVEC_BUILTIN_VMULOUB:
16758 case ALTIVEC_BUILTIN_VMULOUH:
16759 {
16760 arg0 = gimple_call_arg (stmt, 0);
16761 arg1 = gimple_call_arg (stmt, 1);
16762 lhs = gimple_call_lhs (stmt);
16763 gimple *g = gimple_build_assign (lhs, VEC_WIDEN_MULT_ODD_EXPR, arg0, arg1);
16764 gimple_set_location (g, gimple_location (stmt));
16765 gsi_replace (gsi, g, true);
16766 return true;
16767 }
16768
16769 default:
16770 break;
16771 }
16772
16773 return false;
16774 }
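/* Illustrative effect of the GIMPLE folding above: with vector signed int
   operands,

     vd = vec_add (va, vb);

   is replaced in place by the assignment vd = va + vb (a PLUS_EXPR), so
   the rest of the middle end can optimize it as ordinary vector
   arithmetic rather than an opaque call.  */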
16775
16776 /* Expand an expression EXP that calls a built-in function,
16777 with result going to TARGET if that's convenient
16778 (and in mode MODE if that's convenient).
16779 SUBTARGET may be used as the target for computing one of EXP's operands.
16780 IGNORE is nonzero if the value is to be ignored. */
16781
16782 static rtx
16783 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
16784 machine_mode mode ATTRIBUTE_UNUSED,
16785 int ignore ATTRIBUTE_UNUSED)
16786 {
16787 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
16788 enum rs6000_builtins fcode
16789 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
16790 size_t uns_fcode = (size_t)fcode;
16791 const struct builtin_description *d;
16792 size_t i;
16793 rtx ret;
16794 bool success;
16795 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
16796 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
16797
16798 if (TARGET_DEBUG_BUILTIN)
16799 {
16800 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
16801 const char *name1 = rs6000_builtin_info[uns_fcode].name;
16802 const char *name2 = ((icode != CODE_FOR_nothing)
16803 ? get_insn_name ((int)icode)
16804 : "nothing");
16805 const char *name3;
16806
16807 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
16808 {
16809 default: name3 = "unknown"; break;
16810 case RS6000_BTC_SPECIAL: name3 = "special"; break;
16811 case RS6000_BTC_UNARY: name3 = "unary"; break;
16812 case RS6000_BTC_BINARY: name3 = "binary"; break;
16813 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
16814 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
16815 case RS6000_BTC_ABS: name3 = "abs"; break;
16816 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
16817 case RS6000_BTC_DST: name3 = "dst"; break;
16818 }
16819
16820
16821 fprintf (stderr,
16822 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
16823 (name1) ? name1 : "---", fcode,
16824 (name2) ? name2 : "---", (int)icode,
16825 name3,
16826 func_valid_p ? "" : ", not valid");
16827 }
16828
16829 if (!func_valid_p)
16830 {
16831 rs6000_invalid_builtin (fcode);
16832
16833 /* Given it is invalid, just generate a normal call. */
16834 return expand_call (exp, target, ignore);
16835 }
16836
16837 switch (fcode)
16838 {
16839 case RS6000_BUILTIN_RECIP:
16840 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
16841
16842 case RS6000_BUILTIN_RECIPF:
16843 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
16844
16845 case RS6000_BUILTIN_RSQRTF:
16846 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
16847
16848 case RS6000_BUILTIN_RSQRT:
16849 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
16850
16851 case POWER7_BUILTIN_BPERMD:
16852 return rs6000_expand_binop_builtin (((TARGET_64BIT)
16853 ? CODE_FOR_bpermd_di
16854 : CODE_FOR_bpermd_si), exp, target);
16855
16856 case RS6000_BUILTIN_GET_TB:
16857 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
16858 target);
16859
16860 case RS6000_BUILTIN_MFTB:
16861 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
16862 ? CODE_FOR_rs6000_mftb_di
16863 : CODE_FOR_rs6000_mftb_si),
16864 target);
16865
16866 case RS6000_BUILTIN_MFFS:
16867 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
16868
16869 case RS6000_BUILTIN_MTFSF:
16870 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
16871
16872 case RS6000_BUILTIN_CPU_INIT:
16873 case RS6000_BUILTIN_CPU_IS:
16874 case RS6000_BUILTIN_CPU_SUPPORTS:
16875 return cpu_expand_builtin (fcode, exp, target);
16876
16877 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
16878 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
16879 {
16880 int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
16881 : (int) CODE_FOR_altivec_lvsl_direct);
16882 machine_mode tmode = insn_data[icode].operand[0].mode;
16883 machine_mode mode = insn_data[icode].operand[1].mode;
16884 tree arg;
16885 rtx op, addr, pat;
16886
16887 gcc_assert (TARGET_ALTIVEC);
16888
16889 arg = CALL_EXPR_ARG (exp, 0);
16890 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
16891 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
16892 addr = memory_address (mode, op);
16893 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
16894 op = addr;
16895 else
16896 {
16897 /* For the load case we need to negate the address. */
16898 op = gen_reg_rtx (GET_MODE (addr));
16899 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
16900 }
16901 op = gen_rtx_MEM (mode, op);
16902
16903 if (target == 0
16904 || GET_MODE (target) != tmode
16905 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
16906 target = gen_reg_rtx (tmode);
16907
16908 pat = GEN_FCN (icode) (target, op);
16909 if (!pat)
16910 return 0;
16911 emit_insn (pat);
16912
16913 return target;
16914 }
16915
16916 case ALTIVEC_BUILTIN_VCFUX:
16917 case ALTIVEC_BUILTIN_VCFSX:
16918 case ALTIVEC_BUILTIN_VCTUXS:
16919 case ALTIVEC_BUILTIN_VCTSXS:
16920 /* FIXME: There's got to be a nicer way to handle this case than
16921 constructing a new CALL_EXPR. */
16922 if (call_expr_nargs (exp) == 1)
16923 {
16924 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
16925 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
16926 }
16927 break;
16928
16929 default:
16930 break;
16931 }
16932
16933 if (TARGET_ALTIVEC)
16934 {
16935 ret = altivec_expand_builtin (exp, target, &success);
16936
16937 if (success)
16938 return ret;
16939 }
16940 if (TARGET_SPE)
16941 {
16942 ret = spe_expand_builtin (exp, target, &success);
16943
16944 if (success)
16945 return ret;
16946 }
16947 if (TARGET_PAIRED_FLOAT)
16948 {
16949 ret = paired_expand_builtin (exp, target, &success);
16950
16951 if (success)
16952 return ret;
16953 }
16954 if (TARGET_HTM)
16955 {
16956 ret = htm_expand_builtin (exp, target, &success);
16957
16958 if (success)
16959 return ret;
16960 }
16961
16962 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
16963 /* RS6000_BTC_SPECIAL represents no-operand operators. */
16964 gcc_assert (attr == RS6000_BTC_UNARY
16965 || attr == RS6000_BTC_BINARY
16966 || attr == RS6000_BTC_TERNARY
16967 || attr == RS6000_BTC_SPECIAL);
16968
16969 /* Handle simple unary operations. */
16970 d = bdesc_1arg;
16971 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
16972 if (d->code == fcode)
16973 return rs6000_expand_unop_builtin (d->icode, exp, target);
16974
16975 /* Handle simple binary operations. */
16976 d = bdesc_2arg;
16977 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
16978 if (d->code == fcode)
16979 return rs6000_expand_binop_builtin (d->icode, exp, target);
16980
16981 /* Handle simple ternary operations. */
16982 d = bdesc_3arg;
16983 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
16984 if (d->code == fcode)
16985 return rs6000_expand_ternop_builtin (d->icode, exp, target);
16986
16987 /* Handle simple no-argument operations. */
16988 d = bdesc_0arg;
16989 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
16990 if (d->code == fcode)
16991 return rs6000_expand_zeroop_builtin (d->icode, target);
16992
16993 gcc_unreachable ();
16994 }
16995
16996 static void
16997 rs6000_init_builtins (void)
16998 {
16999 tree tdecl;
17000 tree ftype;
17001 machine_mode mode;
17002
17003 if (TARGET_DEBUG_BUILTIN)
17004 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
17005 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
17006 (TARGET_SPE) ? ", spe" : "",
17007 (TARGET_ALTIVEC) ? ", altivec" : "",
17008 (TARGET_VSX) ? ", vsx" : "");
17009
17010 V2SI_type_node = build_vector_type (intSI_type_node, 2);
17011 V2SF_type_node = build_vector_type (float_type_node, 2);
17012 V2DI_type_node = build_vector_type (intDI_type_node, 2);
17013 V2DF_type_node = build_vector_type (double_type_node, 2);
17014 V4HI_type_node = build_vector_type (intHI_type_node, 4);
17015 V4SI_type_node = build_vector_type (intSI_type_node, 4);
17016 V4SF_type_node = build_vector_type (float_type_node, 4);
17017 V8HI_type_node = build_vector_type (intHI_type_node, 8);
17018 V16QI_type_node = build_vector_type (intQI_type_node, 16);
17019
17020 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
17021 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
17022 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
17023 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
17024
17025 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
17026 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
17027 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
17028 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
17029
17030 const_str_type_node
17031 = build_pointer_type (build_qualified_type (char_type_node,
17032 TYPE_QUAL_CONST));
17033
17034 /* We use V1TI mode as a special container to hold __int128_t items that
17035 must live in VSX registers. */
17036 if (intTI_type_node)
17037 {
17038 V1TI_type_node = build_vector_type (intTI_type_node, 1);
17039 unsigned_V1TI_type_node = build_vector_type (unsigned_intTI_type_node, 1);
17040 }
17041
17042 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
17043 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
17044 'vector unsigned short'. */
17045
17046 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
17047 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
17048 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
17049 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
17050 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
17051
17052 long_integer_type_internal_node = long_integer_type_node;
17053 long_unsigned_type_internal_node = long_unsigned_type_node;
17054 long_long_integer_type_internal_node = long_long_integer_type_node;
17055 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
17056 intQI_type_internal_node = intQI_type_node;
17057 uintQI_type_internal_node = unsigned_intQI_type_node;
17058 intHI_type_internal_node = intHI_type_node;
17059 uintHI_type_internal_node = unsigned_intHI_type_node;
17060 intSI_type_internal_node = intSI_type_node;
17061 uintSI_type_internal_node = unsigned_intSI_type_node;
17062 intDI_type_internal_node = intDI_type_node;
17063 uintDI_type_internal_node = unsigned_intDI_type_node;
17064 intTI_type_internal_node = intTI_type_node;
17065 uintTI_type_internal_node = unsigned_intTI_type_node;
17066 float_type_internal_node = float_type_node;
17067 double_type_internal_node = double_type_node;
17068 long_double_type_internal_node = long_double_type_node;
17069 dfloat64_type_internal_node = dfloat64_type_node;
17070 dfloat128_type_internal_node = dfloat128_type_node;
17071 void_type_internal_node = void_type_node;
17072
17073 /* 128-bit floating point support. KFmode is IEEE 128-bit floating point.
17074 IFmode is the IBM extended 128-bit format that is a pair of doubles.
17075 TFmode will be either IEEE 128-bit floating point or the IBM double-double
17076 format that uses a pair of doubles, depending on the switches and
17077 defaults.
17078
17079 We do not enable the actual __float128 keyword unless the user explicitly
17080 asks for it, because the library support is not yet complete.
17081
17082 If we don't support either 128-bit IBM double-double or IEEE 128-bit
17083 floating point, we need to make sure the type is non-zero or else the
17084 self-test fails during bootstrap.
17085
17086 We don't register a built-in type for __ibm128 if the type is the same as
17087 long double. Instead, rs6000_cpu_cpp_builtins adds a #define mapping
17088 __ibm128 to long double. */
17089 if (TARGET_LONG_DOUBLE_128 && FLOAT128_IEEE_P (TFmode))
17090 {
17091 ibm128_float_type_node = make_node (REAL_TYPE);
17092 TYPE_PRECISION (ibm128_float_type_node) = 128;
17093 SET_TYPE_MODE (ibm128_float_type_node, IFmode);
17094 layout_type (ibm128_float_type_node);
17095
17096 lang_hooks.types.register_builtin_type (ibm128_float_type_node,
17097 "__ibm128");
17098 }
17099 else
17100 ibm128_float_type_node = long_double_type_node;
17101
17102 if (TARGET_FLOAT128_KEYWORD)
17103 {
17104 ieee128_float_type_node = float128_type_node;
17105 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
17106 "__float128");
17107 }
17108
17109 else if (TARGET_FLOAT128_TYPE)
17110 {
17111 ieee128_float_type_node = make_node (REAL_TYPE);
17112 TYPE_PRECISION (ieee128_float_type_node) = 128;
17113 SET_TYPE_MODE (ieee128_float_type_node, KFmode);
17114 layout_type (ieee128_float_type_node);
17115
17116 /* If we are not exporting the __float128/_Float128 keywords, we need a
17117 keyword to get the types created. Use __ieee128 as the dummy
17118 keyword. */
17119 lang_hooks.types.register_builtin_type (ieee128_float_type_node,
17120 "__ieee128");
17121 }
17122
17123 else
17124 ieee128_float_type_node = long_double_type_node;
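  /* Illustrative use, assuming -mfloat128:

	__float128 q = 1.0q;

     Both the type and the 'q' constant suffix use the IEEE 128-bit
     binary format described above.  */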
17125
17126 /* Initialize the modes for builtin_function_type, mapping a machine mode to
17127 tree type node. */
17128 builtin_mode_to_type[QImode][0] = integer_type_node;
17129 builtin_mode_to_type[HImode][0] = integer_type_node;
17130 builtin_mode_to_type[SImode][0] = intSI_type_node;
17131 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
17132 builtin_mode_to_type[DImode][0] = intDI_type_node;
17133 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
17134 builtin_mode_to_type[TImode][0] = intTI_type_node;
17135 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
17136 builtin_mode_to_type[SFmode][0] = float_type_node;
17137 builtin_mode_to_type[DFmode][0] = double_type_node;
17138 builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
17139 builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
17140 builtin_mode_to_type[TFmode][0] = long_double_type_node;
17141 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
17142 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
17143 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
17144 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
17145 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
17146 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
17147 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
17148 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
17149 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
17150 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
17151 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
17152 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
17153 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
17154 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
17155 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
17156 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
17157 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
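  /* For example, builtin_mode_to_type[V4SImode][1] is now
     unsigned_V4SI_type_node; builtin_function_type below uses this table
     to turn (mode, unsignedness) pairs back into type trees.  */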
17158
17159 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
17160 TYPE_NAME (bool_char_type_node) = tdecl;
17161
17162 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
17163 TYPE_NAME (bool_short_type_node) = tdecl;
17164
17165 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
17166 TYPE_NAME (bool_int_type_node) = tdecl;
17167
17168 tdecl = add_builtin_type ("__pixel", pixel_type_node);
17169 TYPE_NAME (pixel_type_node) = tdecl;
17170
17171 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
17172 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
17173 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
17174 bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
17175 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
17176
17177 tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
17178 TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
17179
17180 tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
17181 TYPE_NAME (V16QI_type_node) = tdecl;
17182
17183 tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
17184 TYPE_NAME (bool_V16QI_type_node) = tdecl;
17185
17186 tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
17187 TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
17188
17189 tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
17190 TYPE_NAME (V8HI_type_node) = tdecl;
17191
17192 tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
17193 TYPE_NAME (bool_V8HI_type_node) = tdecl;
17194
17195 tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
17196 TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
17197
17198 tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
17199 TYPE_NAME (V4SI_type_node) = tdecl;
17200
17201 tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
17202 TYPE_NAME (bool_V4SI_type_node) = tdecl;
17203
17204 tdecl = add_builtin_type ("__vector float", V4SF_type_node);
17205 TYPE_NAME (V4SF_type_node) = tdecl;
17206
17207 tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
17208 TYPE_NAME (pixel_V8HI_type_node) = tdecl;
17209
17210 tdecl = add_builtin_type ("__vector double", V2DF_type_node);
17211 TYPE_NAME (V2DF_type_node) = tdecl;
17212
17213 if (TARGET_POWERPC64)
17214 {
17215 tdecl = add_builtin_type ("__vector long", V2DI_type_node);
17216 TYPE_NAME (V2DI_type_node) = tdecl;
17217
17218 tdecl = add_builtin_type ("__vector unsigned long",
17219 unsigned_V2DI_type_node);
17220 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
17221
17222 tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
17223 TYPE_NAME (bool_V2DI_type_node) = tdecl;
17224 }
17225 else
17226 {
17227 tdecl = add_builtin_type ("__vector long long", V2DI_type_node);
17228 TYPE_NAME (V2DI_type_node) = tdecl;
17229
17230 tdecl = add_builtin_type ("__vector unsigned long long",
17231 unsigned_V2DI_type_node);
17232 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
17233
17234 tdecl = add_builtin_type ("__vector __bool long long",
17235 bool_V2DI_type_node);
17236 TYPE_NAME (bool_V2DI_type_node) = tdecl;
17237 }
17238
17239 if (V1TI_type_node)
17240 {
17241 tdecl = add_builtin_type ("__vector __int128", V1TI_type_node);
17242 TYPE_NAME (V1TI_type_node) = tdecl;
17243
17244 tdecl = add_builtin_type ("__vector unsigned __int128",
17245 unsigned_V1TI_type_node);
17246 TYPE_NAME (unsigned_V1TI_type_node) = tdecl;
17247 }
17248
17249 /* Paired and SPE builtins are only available if the compiler was built
17250 with the appropriate options, so only create those builtins when the
17251 corresponding option is enabled. Create AltiVec and VSX builtins on
17252 machines with at least the general purpose extensions (970 and newer) to
17253 allow the use of the target attribute. */
17254 if (TARGET_PAIRED_FLOAT)
17255 paired_init_builtins ();
17256 if (TARGET_SPE)
17257 spe_init_builtins ();
17258 if (TARGET_EXTRA_BUILTINS)
17259 altivec_init_builtins ();
17260 if (TARGET_HTM)
17261 htm_init_builtins ();
17262
17263 if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
17264 rs6000_common_init_builtins ();
17265
17266 ftype = build_function_type_list (ieee128_float_type_node,
17267 const_str_type_node, NULL_TREE);
17268 def_builtin ("__builtin_nanq", ftype, RS6000_BUILTIN_NANQ);
17269 def_builtin ("__builtin_nansq", ftype, RS6000_BUILTIN_NANSQ);
17270
17271 ftype = build_function_type_list (ieee128_float_type_node, NULL_TREE);
17272 def_builtin ("__builtin_infq", ftype, RS6000_BUILTIN_INFQ);
17273 def_builtin ("__builtin_huge_valq", ftype, RS6000_BUILTIN_HUGE_VALQ);
17274
17275 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
17276 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
17277 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
17278
17279 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
17280 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
17281 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
17282
17283 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
17284 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
17285 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
17286
17287 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
17288 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
17289 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
17290
17291 mode = (TARGET_64BIT) ? DImode : SImode;
17292 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
17293 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
17294 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
17295
17296 ftype = build_function_type_list (unsigned_intDI_type_node,
17297 NULL_TREE);
17298 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
17299
17300 if (TARGET_64BIT)
17301 ftype = build_function_type_list (unsigned_intDI_type_node,
17302 NULL_TREE);
17303 else
17304 ftype = build_function_type_list (unsigned_intSI_type_node,
17305 NULL_TREE);
17306 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
17307
17308 ftype = build_function_type_list (double_type_node, NULL_TREE);
17309 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
17310
17311 ftype = build_function_type_list (void_type_node,
17312 intSI_type_node, double_type_node,
17313 NULL_TREE);
17314 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
17315
17316 ftype = build_function_type_list (void_type_node, NULL_TREE);
17317 def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
17318
17319 ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
17320 NULL_TREE);
17321 def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
17322 def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
17323
17324 /* AIX libm provides clog as __clog. */
17325 if (TARGET_XCOFF
17326 && (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
17327 set_user_assembler_name (tdecl, "__clog");
17328
17329 #ifdef SUBTARGET_INIT_BUILTINS
17330 SUBTARGET_INIT_BUILTINS;
17331 #endif
17332 }
17333
17334 /* Returns the rs6000 builtin decl for CODE. */
17335
17336 static tree
17337 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
17338 {
17339 HOST_WIDE_INT fnmask;
17340
17341 if (code >= RS6000_BUILTIN_COUNT)
17342 return error_mark_node;
17343
17344 fnmask = rs6000_builtin_info[code].mask;
17345 if ((fnmask & rs6000_builtin_mask) != fnmask)
17346 {
17347 rs6000_invalid_builtin ((enum rs6000_builtins)code);
17348 return error_mark_node;
17349 }
17350
17351 return rs6000_builtin_decls[code];
17352 }
17353
17354 static void
17355 spe_init_builtins (void)
17356 {
17357 tree puint_type_node = build_pointer_type (unsigned_type_node);
17358 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
17359 const struct builtin_description *d;
17360 size_t i;
17361 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17362
17363 tree v2si_ftype_4_v2si
17364 = build_function_type_list (opaque_V2SI_type_node,
17365 opaque_V2SI_type_node,
17366 opaque_V2SI_type_node,
17367 opaque_V2SI_type_node,
17368 opaque_V2SI_type_node,
17369 NULL_TREE);
17370
17371 tree v2sf_ftype_4_v2sf
17372 = build_function_type_list (opaque_V2SF_type_node,
17373 opaque_V2SF_type_node,
17374 opaque_V2SF_type_node,
17375 opaque_V2SF_type_node,
17376 opaque_V2SF_type_node,
17377 NULL_TREE);
17378
17379 tree int_ftype_int_v2si_v2si
17380 = build_function_type_list (integer_type_node,
17381 integer_type_node,
17382 opaque_V2SI_type_node,
17383 opaque_V2SI_type_node,
17384 NULL_TREE);
17385
17386 tree int_ftype_int_v2sf_v2sf
17387 = build_function_type_list (integer_type_node,
17388 integer_type_node,
17389 opaque_V2SF_type_node,
17390 opaque_V2SF_type_node,
17391 NULL_TREE);
17392
17393 tree void_ftype_v2si_puint_int
17394 = build_function_type_list (void_type_node,
17395 opaque_V2SI_type_node,
17396 puint_type_node,
17397 integer_type_node,
17398 NULL_TREE);
17399
17400 tree void_ftype_v2si_puint_char
17401 = build_function_type_list (void_type_node,
17402 opaque_V2SI_type_node,
17403 puint_type_node,
17404 char_type_node,
17405 NULL_TREE);
17406
17407 tree void_ftype_v2si_pv2si_int
17408 = build_function_type_list (void_type_node,
17409 opaque_V2SI_type_node,
17410 opaque_p_V2SI_type_node,
17411 integer_type_node,
17412 NULL_TREE);
17413
17414 tree void_ftype_v2si_pv2si_char
17415 = build_function_type_list (void_type_node,
17416 opaque_V2SI_type_node,
17417 opaque_p_V2SI_type_node,
17418 char_type_node,
17419 NULL_TREE);
17420
17421 tree void_ftype_int
17422 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
17423
17424 tree int_ftype_void
17425 = build_function_type_list (integer_type_node, NULL_TREE);
17426
17427 tree v2si_ftype_pv2si_int
17428 = build_function_type_list (opaque_V2SI_type_node,
17429 opaque_p_V2SI_type_node,
17430 integer_type_node,
17431 NULL_TREE);
17432
17433 tree v2si_ftype_puint_int
17434 = build_function_type_list (opaque_V2SI_type_node,
17435 puint_type_node,
17436 integer_type_node,
17437 NULL_TREE);
17438
17439 tree v2si_ftype_pushort_int
17440 = build_function_type_list (opaque_V2SI_type_node,
17441 pushort_type_node,
17442 integer_type_node,
17443 NULL_TREE);
17444
17445 tree v2si_ftype_signed_char
17446 = build_function_type_list (opaque_V2SI_type_node,
17447 signed_char_type_node,
17448 NULL_TREE);
17449
17450 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);
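  /* SPE user code declares, e.g., "__ev64_opaque__ v;" for a 64-bit value
     that may be viewed as two ints or two floats; the opaque vector type
     suppresses type checking across those views.  */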
17451
17452 /* Initialize irregular SPE builtins. */
17453
17454 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
17455 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
17456 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
17457 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
17458 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
17459 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
17460 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
17461 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
17462 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
17463 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
17464 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
17465 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
17466 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
17467 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
17468 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
17469 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
17470 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
17471 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
17472
17473 /* Loads. */
17474 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
17475 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
17476 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
17477 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
17478 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
17479 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
17480 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
17481 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
17482 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
17483 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
17484 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
17485 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
17486 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
17487 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
17488 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
17489 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
17490 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
17491 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
17492 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
17493 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
17494 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
17495 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
17496
17497 /* Predicates. */
17498 d = bdesc_spe_predicates;
17499 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
17500 {
17501 tree type;
17502 HOST_WIDE_INT mask = d->mask;
17503
17504 if ((mask & builtin_mask) != mask)
17505 {
17506 if (TARGET_DEBUG_BUILTIN)
17507 fprintf (stderr, "spe_init_builtins, skip predicate %s\n",
17508 d->name);
17509 continue;
17510 }
17511
17512 /* Cannot define builtin if the instruction is disabled. */
17513 gcc_assert (d->icode != CODE_FOR_nothing);
17514 switch (insn_data[d->icode].operand[1].mode)
17515 {
17516 case V2SImode:
17517 type = int_ftype_int_v2si_v2si;
17518 break;
17519 case V2SFmode:
17520 type = int_ftype_int_v2sf_v2sf;
17521 break;
17522 default:
17523 gcc_unreachable ();
17524 }
17525
17526 def_builtin (d->name, type, d->code);
17527 }
17528
17529 /* Evsel predicates. */
17530 d = bdesc_spe_evsel;
17531 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
17532 {
17533 tree type;
17534 HOST_WIDE_INT mask = d->mask;
17535
17536 if ((mask & builtin_mask) != mask)
17537 {
17538 if (TARGET_DEBUG_BUILTIN)
17539 fprintf (stderr, "spe_init_builtins, skip evsel %s\n",
17540 d->name);
17541 continue;
17542 }
17543
17544 /* Cannot define builtin if the instruction is disabled. */
17545 gcc_assert (d->icode != CODE_FOR_nothing);
17546 switch (insn_data[d->icode].operand[1].mode)
17547 {
17548 case V2SImode:
17549 type = v2si_ftype_4_v2si;
17550 break;
17551 case V2SFmode:
17552 type = v2sf_ftype_4_v2sf;
17553 break;
17554 default:
17555 gcc_unreachable ();
17556 }
17557
17558 def_builtin (d->name, type, d->code);
17559 }
17560 }
17561
17562 static void
17563 paired_init_builtins (void)
17564 {
17565 const struct builtin_description *d;
17566 size_t i;
17567 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17568
17569 tree int_ftype_int_v2sf_v2sf
17570 = build_function_type_list (integer_type_node,
17571 integer_type_node,
17572 V2SF_type_node,
17573 V2SF_type_node,
17574 NULL_TREE);
17575 tree pcfloat_type_node
17576 = build_pointer_type (build_qualified_type (float_type_node,
17577 TYPE_QUAL_CONST));
17578
17579 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
17580 long_integer_type_node,
17581 pcfloat_type_node,
17582 NULL_TREE);
17583 tree void_ftype_v2sf_long_pcfloat
17584 = build_function_type_list (void_type_node,
17585 V2SF_type_node,
17586 long_integer_type_node,
17587 pcfloat_type_node,
17588 NULL_TREE);
17590 
17591 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
17592 PAIRED_BUILTIN_LX);
17594 
17595 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
17596 PAIRED_BUILTIN_STX);
17597
17598 /* Predicates. */
17599 d = bdesc_paired_preds;
17600 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
17601 {
17602 tree type;
17603 HOST_WIDE_INT mask = d->mask;
17604
17605 if ((mask & builtin_mask) != mask)
17606 {
17607 if (TARGET_DEBUG_BUILTIN)
17608 fprintf (stderr, "paired_init_builtins, skip predicate %s\n",
17609 d->name);
17610 continue;
17611 }
17612
17613 /* Cannot define builtin if the instruction is disabled. */
17614 gcc_assert (d->icode != CODE_FOR_nothing);
17615
17616 if (TARGET_DEBUG_BUILTIN)
17617 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
17618 (int)i, get_insn_name (d->icode), (int)d->icode,
17619 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
17620
17621 switch (insn_data[d->icode].operand[1].mode)
17622 {
17623 case V2SFmode:
17624 type = int_ftype_int_v2sf_v2sf;
17625 break;
17626 default:
17627 gcc_unreachable ();
17628 }
17629
17630 def_builtin (d->name, type, d->code);
17631 }
17632 }
17633
17634 static void
17635 altivec_init_builtins (void)
17636 {
17637 const struct builtin_description *d;
17638 size_t i;
17639 tree ftype;
17640 tree decl;
17641 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
17642
17643 tree pvoid_type_node = build_pointer_type (void_type_node);
17644
17645 tree pcvoid_type_node
17646 = build_pointer_type (build_qualified_type (void_type_node,
17647 TYPE_QUAL_CONST));
17648
17649 tree int_ftype_opaque
17650 = build_function_type_list (integer_type_node,
17651 opaque_V4SI_type_node, NULL_TREE);
17652 tree opaque_ftype_opaque
17653 = build_function_type_list (integer_type_node, NULL_TREE);
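  /* The integer return type above appears to be a placeholder:
     vec_splats and vec_promote are overloaded builtins, so their real
     signatures are resolved in the front end before this type is used.  */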
17654 tree opaque_ftype_opaque_int
17655 = build_function_type_list (opaque_V4SI_type_node,
17656 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
17657 tree opaque_ftype_opaque_opaque_int
17658 = build_function_type_list (opaque_V4SI_type_node,
17659 opaque_V4SI_type_node, opaque_V4SI_type_node,
17660 integer_type_node, NULL_TREE);
17661 tree opaque_ftype_opaque_opaque_opaque
17662 = build_function_type_list (opaque_V4SI_type_node,
17663 opaque_V4SI_type_node, opaque_V4SI_type_node,
17664 opaque_V4SI_type_node, NULL_TREE);
17665 tree opaque_ftype_opaque_opaque
17666 = build_function_type_list (opaque_V4SI_type_node,
17667 opaque_V4SI_type_node, opaque_V4SI_type_node,
17668 NULL_TREE);
17669 tree int_ftype_int_opaque_opaque
17670 = build_function_type_list (integer_type_node,
17671 integer_type_node, opaque_V4SI_type_node,
17672 opaque_V4SI_type_node, NULL_TREE);
17673 tree int_ftype_int_v4si_v4si
17674 = build_function_type_list (integer_type_node,
17675 integer_type_node, V4SI_type_node,
17676 V4SI_type_node, NULL_TREE);
17677 tree int_ftype_int_v2di_v2di
17678 = build_function_type_list (integer_type_node,
17679 integer_type_node, V2DI_type_node,
17680 V2DI_type_node, NULL_TREE);
17681 tree void_ftype_v4si
17682 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
17683 tree v8hi_ftype_void
17684 = build_function_type_list (V8HI_type_node, NULL_TREE);
17685 tree void_ftype_void
17686 = build_function_type_list (void_type_node, NULL_TREE);
17687 tree void_ftype_int
17688 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
17689
17690 tree opaque_ftype_long_pcvoid
17691 = build_function_type_list (opaque_V4SI_type_node,
17692 long_integer_type_node, pcvoid_type_node,
17693 NULL_TREE);
17694 tree v16qi_ftype_long_pcvoid
17695 = build_function_type_list (V16QI_type_node,
17696 long_integer_type_node, pcvoid_type_node,
17697 NULL_TREE);
17698 tree v8hi_ftype_long_pcvoid
17699 = build_function_type_list (V8HI_type_node,
17700 long_integer_type_node, pcvoid_type_node,
17701 NULL_TREE);
17702 tree v4si_ftype_long_pcvoid
17703 = build_function_type_list (V4SI_type_node,
17704 long_integer_type_node, pcvoid_type_node,
17705 NULL_TREE);
17706 tree v4sf_ftype_long_pcvoid
17707 = build_function_type_list (V4SF_type_node,
17708 long_integer_type_node, pcvoid_type_node,
17709 NULL_TREE);
17710 tree v2df_ftype_long_pcvoid
17711 = build_function_type_list (V2DF_type_node,
17712 long_integer_type_node, pcvoid_type_node,
17713 NULL_TREE);
17714 tree v2di_ftype_long_pcvoid
17715 = build_function_type_list (V2DI_type_node,
17716 long_integer_type_node, pcvoid_type_node,
17717 NULL_TREE);
17718
17719 tree void_ftype_opaque_long_pvoid
17720 = build_function_type_list (void_type_node,
17721 opaque_V4SI_type_node, long_integer_type_node,
17722 pvoid_type_node, NULL_TREE);
17723 tree void_ftype_v4si_long_pvoid
17724 = build_function_type_list (void_type_node,
17725 V4SI_type_node, long_integer_type_node,
17726 pvoid_type_node, NULL_TREE);
17727 tree void_ftype_v16qi_long_pvoid
17728 = build_function_type_list (void_type_node,
17729 V16QI_type_node, long_integer_type_node,
17730 pvoid_type_node, NULL_TREE);
17731
17732 tree void_ftype_v16qi_pvoid_long
17733 = build_function_type_list (void_type_node,
17734 V16QI_type_node, pvoid_type_node,
17735 long_integer_type_node, NULL_TREE);
17736
17737 tree void_ftype_v8hi_long_pvoid
17738 = build_function_type_list (void_type_node,
17739 V8HI_type_node, long_integer_type_node,
17740 pvoid_type_node, NULL_TREE);
17741 tree void_ftype_v4sf_long_pvoid
17742 = build_function_type_list (void_type_node,
17743 V4SF_type_node, long_integer_type_node,
17744 pvoid_type_node, NULL_TREE);
17745 tree void_ftype_v2df_long_pvoid
17746 = build_function_type_list (void_type_node,
17747 V2DF_type_node, long_integer_type_node,
17748 pvoid_type_node, NULL_TREE);
17749 tree void_ftype_v2di_long_pvoid
17750 = build_function_type_list (void_type_node,
17751 V2DI_type_node, long_integer_type_node,
17752 pvoid_type_node, NULL_TREE);
17753 tree int_ftype_int_v8hi_v8hi
17754 = build_function_type_list (integer_type_node,
17755 integer_type_node, V8HI_type_node,
17756 V8HI_type_node, NULL_TREE);
17757 tree int_ftype_int_v16qi_v16qi
17758 = build_function_type_list (integer_type_node,
17759 integer_type_node, V16QI_type_node,
17760 V16QI_type_node, NULL_TREE);
17761 tree int_ftype_int_v4sf_v4sf
17762 = build_function_type_list (integer_type_node,
17763 integer_type_node, V4SF_type_node,
17764 V4SF_type_node, NULL_TREE);
17765 tree int_ftype_int_v2df_v2df
17766 = build_function_type_list (integer_type_node,
17767 integer_type_node, V2DF_type_node,
17768 V2DF_type_node, NULL_TREE);
17769 tree v2di_ftype_v2di
17770 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
17771 tree v4si_ftype_v4si
17772 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
17773 tree v8hi_ftype_v8hi
17774 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
17775 tree v16qi_ftype_v16qi
17776 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
17777 tree v4sf_ftype_v4sf
17778 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
17779 tree v2df_ftype_v2df
17780 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
17781 tree void_ftype_pcvoid_int_int
17782 = build_function_type_list (void_type_node,
17783 pcvoid_type_node, integer_type_node,
17784 integer_type_node, NULL_TREE);
17785
17786 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
17787 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
17788 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
17789 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
17790 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
17791 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
17792 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
17793 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
17794 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
17795 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
17796 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
17797 ALTIVEC_BUILTIN_LVXL_V2DF);
17798 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
17799 ALTIVEC_BUILTIN_LVXL_V2DI);
17800 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
17801 ALTIVEC_BUILTIN_LVXL_V4SF);
17802 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
17803 ALTIVEC_BUILTIN_LVXL_V4SI);
17804 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
17805 ALTIVEC_BUILTIN_LVXL_V8HI);
17806 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
17807 ALTIVEC_BUILTIN_LVXL_V16QI);
17808 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
17809 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
17810 ALTIVEC_BUILTIN_LVX_V2DF);
17811 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
17812 ALTIVEC_BUILTIN_LVX_V2DI);
17813 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
17814 ALTIVEC_BUILTIN_LVX_V4SF);
17815 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
17816 ALTIVEC_BUILTIN_LVX_V4SI);
17817 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
17818 ALTIVEC_BUILTIN_LVX_V8HI);
17819 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
17820 ALTIVEC_BUILTIN_LVX_V16QI);
17821 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
17822 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
17823 ALTIVEC_BUILTIN_STVX_V2DF);
17824 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
17825 ALTIVEC_BUILTIN_STVX_V2DI);
17826 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
17827 ALTIVEC_BUILTIN_STVX_V4SF);
17828 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
17829 ALTIVEC_BUILTIN_STVX_V4SI);
17830 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
17831 ALTIVEC_BUILTIN_STVX_V8HI);
17832 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
17833 ALTIVEC_BUILTIN_STVX_V16QI);
17834 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
17835 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
17836 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
17837 ALTIVEC_BUILTIN_STVXL_V2DF);
17838 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
17839 ALTIVEC_BUILTIN_STVXL_V2DI);
17840 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
17841 ALTIVEC_BUILTIN_STVXL_V4SF);
17842 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
17843 ALTIVEC_BUILTIN_STVXL_V4SI);
17844 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
17845 ALTIVEC_BUILTIN_STVXL_V8HI);
17846 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
17847 ALTIVEC_BUILTIN_STVXL_V16QI);
17848 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
17849 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
17850 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
17851 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
17852 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
17853 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
17854 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
17855 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
17856 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
17857 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
17858 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
17859 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
17860 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
17861 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
17862 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
17863 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
17864
17865 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
17866 VSX_BUILTIN_LXVD2X_V2DF);
17867 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
17868 VSX_BUILTIN_LXVD2X_V2DI);
17869 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
17870 VSX_BUILTIN_LXVW4X_V4SF);
17871 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
17872 VSX_BUILTIN_LXVW4X_V4SI);
17873 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
17874 VSX_BUILTIN_LXVW4X_V8HI);
17875 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
17876 VSX_BUILTIN_LXVW4X_V16QI);
17877 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
17878 VSX_BUILTIN_STXVD2X_V2DF);
17879 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
17880 VSX_BUILTIN_STXVD2X_V2DI);
17881 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
17882 VSX_BUILTIN_STXVW4X_V4SF);
17883 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
17884 VSX_BUILTIN_STXVW4X_V4SI);
17885 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
17886 VSX_BUILTIN_STXVW4X_V8HI);
17887 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
17888 VSX_BUILTIN_STXVW4X_V16QI);
17889
17890 def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
17891 VSX_BUILTIN_LD_ELEMREV_V2DF);
17892 def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
17893 VSX_BUILTIN_LD_ELEMREV_V2DI);
17894 def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
17895 VSX_BUILTIN_LD_ELEMREV_V4SF);
17896 def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
17897 VSX_BUILTIN_LD_ELEMREV_V4SI);
17898 def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
17899 VSX_BUILTIN_ST_ELEMREV_V2DF);
17900 def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
17901 VSX_BUILTIN_ST_ELEMREV_V2DI);
17902 def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
17903 VSX_BUILTIN_ST_ELEMREV_V4SF);
17904 def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
17905 VSX_BUILTIN_ST_ELEMREV_V4SI);
17906
17907 if (TARGET_P9_VECTOR)
17908 {
17909 def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
17910 VSX_BUILTIN_LD_ELEMREV_V8HI);
17911 def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
17912 VSX_BUILTIN_LD_ELEMREV_V16QI);
17913 def_builtin ("__builtin_vsx_st_elemrev_v8hi",
17914 void_ftype_v8hi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V8HI);
17915 def_builtin ("__builtin_vsx_st_elemrev_v16qi",
17916 void_ftype_v16qi_long_pvoid, VSX_BUILTIN_ST_ELEMREV_V16QI);
17917 }
17918
17919 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
17920 VSX_BUILTIN_VEC_LD);
17921 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
17922 VSX_BUILTIN_VEC_ST);
17923 def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
17924 VSX_BUILTIN_VEC_XL);
17925 def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
17926 VSX_BUILTIN_VEC_XST);
17927
17928 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
17929 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
17930 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
17931
17932 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
17933 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
17934 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
17935 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
17936 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
17937 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
17938 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
17939 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
17940 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
17941 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
17942 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
17943 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
17944
17945 def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
17946 ALTIVEC_BUILTIN_VEC_ADDE);
17947 def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
17948 ALTIVEC_BUILTIN_VEC_ADDEC);
17949 def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
17950 ALTIVEC_BUILTIN_VEC_CMPNE);
17951 def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
17952 ALTIVEC_BUILTIN_VEC_MUL);
17953
17954 /* Cell builtins. */
17955 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
17956 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
17957 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
17958 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
17959
17960 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
17961 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
17962 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
17963 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
17964
17965 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
17966 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
17967 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
17968 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
17969
17970 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
17971 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
17972 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
17973 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
17974
17975 if (TARGET_P9_VECTOR)
17976 def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
17977 P9V_BUILTIN_STXVL);
17978
17979 /* Add the DST variants. */
17980 d = bdesc_dst;
17981 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
17982 {
17983 HOST_WIDE_INT mask = d->mask;
17984
17985 /* It is expected that these dst built-in functions may have
17986 d->icode equal to CODE_FOR_nothing. */
17987 if ((mask & builtin_mask) != mask)
17988 {
17989 if (TARGET_DEBUG_BUILTIN)
17990 fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
17991 d->name);
17992 continue;
17993 }
17994 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
17995 }
17996
17997 /* Initialize the predicates. */
17998 d = bdesc_altivec_preds;
17999 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
18000 {
18001 machine_mode mode1;
18002 tree type;
18003 HOST_WIDE_INT mask = d->mask;
18004
18005 if ((mask & builtin_mask) != mask)
18006 {
18007 if (TARGET_DEBUG_BUILTIN)
18008 fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
18009 d->name);
18010 continue;
18011 }
18012
18013 if (rs6000_overloaded_builtin_p (d->code))
18014 mode1 = VOIDmode;
18015 else
18016 {
18017 /* Cannot define builtin if the instruction is disabled. */
18018 gcc_assert (d->icode != CODE_FOR_nothing);
18019 mode1 = insn_data[d->icode].operand[1].mode;
18020 }
18021
18022 switch (mode1)
18023 {
18024 case VOIDmode:
18025 type = int_ftype_int_opaque_opaque;
18026 break;
18027 case V2DImode:
18028 type = int_ftype_int_v2di_v2di;
18029 break;
18030 case V4SImode:
18031 type = int_ftype_int_v4si_v4si;
18032 break;
18033 case V8HImode:
18034 type = int_ftype_int_v8hi_v8hi;
18035 break;
18036 case V16QImode:
18037 type = int_ftype_int_v16qi_v16qi;
18038 break;
18039 case V4SFmode:
18040 type = int_ftype_int_v4sf_v4sf;
18041 break;
18042 case V2DFmode:
18043 type = int_ftype_int_v2df_v2df;
18044 break;
18045 default:
18046 gcc_unreachable ();
18047 }
18048
18049 def_builtin (d->name, type, d->code);
18050 }
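  /* The leading integer argument of these predicates selects which CR6
     condition to test; altivec.h passes one of the __CR6_* constants, so
     e.g. vec_all_eq becomes a vcmpequ*_p call with __CR6_LT.  */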
18051
18052 /* Initialize the abs* operators. */
18053 d = bdesc_abs;
18054 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
18055 {
18056 machine_mode mode0;
18057 tree type;
18058 HOST_WIDE_INT mask = d->mask;
18059
18060 if ((mask & builtin_mask) != mask)
18061 {
18062 if (TARGET_DEBUG_BUILTIN)
18063 fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
18064 d->name);
18065 continue;
18066 }
18067
18068 /* Cannot define builtin if the instruction is disabled. */
18069 gcc_assert (d->icode != CODE_FOR_nothing);
18070 mode0 = insn_data[d->icode].operand[0].mode;
18071
18072 switch (mode0)
18073 {
18074 case V2DImode:
18075 type = v2di_ftype_v2di;
18076 break;
18077 case V4SImode:
18078 type = v4si_ftype_v4si;
18079 break;
18080 case V8HImode:
18081 type = v8hi_ftype_v8hi;
18082 break;
18083 case V16QImode:
18084 type = v16qi_ftype_v16qi;
18085 break;
18086 case V4SFmode:
18087 type = v4sf_ftype_v4sf;
18088 break;
18089 case V2DFmode:
18090 type = v2df_ftype_v2df;
18091 break;
18092 default:
18093 gcc_unreachable ();
18094 }
18095
18096 def_builtin (d->name, type, d->code);
18097 }
18098
18099 /* Initialize target builtin that implements
18100 targetm.vectorize.builtin_mask_for_load. */
18101
18102 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
18103 v16qi_ftype_long_pcvoid,
18104 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
18105 BUILT_IN_MD, NULL, NULL_TREE);
18106 TREE_READONLY (decl) = 1;
18107 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
18108 altivec_builtin_mask_for_load = decl;
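  /* The vectorizer uses this hook when realigning misaligned loads: the
     mask (in effect an lvsl result) feeds a vperm that merges two aligned
     loads into the desired unaligned vector value.  */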
18109
18110 /* Access to the vec_init patterns. */
18111 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
18112 integer_type_node, integer_type_node,
18113 integer_type_node, NULL_TREE);
18114 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
18115
18116 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
18117 short_integer_type_node,
18118 short_integer_type_node,
18119 short_integer_type_node,
18120 short_integer_type_node,
18121 short_integer_type_node,
18122 short_integer_type_node,
18123 short_integer_type_node, NULL_TREE);
18124 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
18125
18126 ftype = build_function_type_list (V16QI_type_node, char_type_node,
18127 char_type_node, char_type_node,
18128 char_type_node, char_type_node,
18129 char_type_node, char_type_node,
18130 char_type_node, char_type_node,
18131 char_type_node, char_type_node,
18132 char_type_node, char_type_node,
18133 char_type_node, char_type_node,
18134 char_type_node, NULL_TREE);
18135 def_builtin ("__builtin_vec_init_v16qi", ftype,
18136 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
18137
18138 ftype = build_function_type_list (V4SF_type_node, float_type_node,
18139 float_type_node, float_type_node,
18140 float_type_node, NULL_TREE);
18141 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
18142
18143 /* VSX builtins. */
18144 ftype = build_function_type_list (V2DF_type_node, double_type_node,
18145 double_type_node, NULL_TREE);
18146 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
18147
18148 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
18149 intDI_type_node, NULL_TREE);
18150 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
18151
18152 /* Access to the vec_set patterns. */
18153 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
18154 intSI_type_node,
18155 integer_type_node, NULL_TREE);
18156 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
18157
18158 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
18159 intHI_type_node,
18160 integer_type_node, NULL_TREE);
18161 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
18162
18163 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
18164 intQI_type_node,
18165 integer_type_node, NULL_TREE);
18166 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
18167
18168 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
18169 float_type_node,
18170 integer_type_node, NULL_TREE);
18171 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
18172
18173 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
18174 double_type_node,
18175 integer_type_node, NULL_TREE);
18176 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
18177
18178 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
18179 intDI_type_node,
18180 integer_type_node, NULL_TREE);
18181 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
18182
18183 /* Access to the vec_extract patterns. */
18184 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
18185 integer_type_node, NULL_TREE);
18186 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
18187
18188 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
18189 integer_type_node, NULL_TREE);
18190 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
18191
18192 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
18193 integer_type_node, NULL_TREE);
18194 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
18195
18196 ftype = build_function_type_list (float_type_node, V4SF_type_node,
18197 integer_type_node, NULL_TREE);
18198 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
18199
18200 ftype = build_function_type_list (double_type_node, V2DF_type_node,
18201 integer_type_node, NULL_TREE);
18202 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
18203
18204 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
18205 integer_type_node, NULL_TREE);
18206 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
18208 
18209 if (V1TI_type_node)
18210 {
18211 tree v1ti_ftype_long_pcvoid
18212 = build_function_type_list (V1TI_type_node,
18213 long_integer_type_node, pcvoid_type_node,
18214 NULL_TREE);
18215 tree void_ftype_v1ti_long_pvoid
18216 = build_function_type_list (void_type_node,
18217 V1TI_type_node, long_integer_type_node,
18218 pvoid_type_node, NULL_TREE);
18219 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
18220 VSX_BUILTIN_LXVD2X_V1TI);
18221 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
18222 VSX_BUILTIN_STXVD2X_V1TI);
18223 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
18224 NULL_TREE);
18225 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
18226 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
18227 intTI_type_node,
18228 integer_type_node, NULL_TREE);
18229 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
18230 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
18231 integer_type_node, NULL_TREE);
18232 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
18233 }
18234
18235 }
18236
18237 static void
18238 htm_init_builtins (void)
18239 {
18240 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
18241 const struct builtin_description *d;
18242 size_t i;
18243
18244 d = bdesc_htm;
18245 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
18246 {
18247 tree op[MAX_HTM_OPERANDS], type;
18248 HOST_WIDE_INT mask = d->mask;
18249 unsigned attr = rs6000_builtin_info[d->code].attr;
18250 bool void_func = (attr & RS6000_BTC_VOID);
18251 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
18252 int nopnds = 0;
18253 tree gpr_type_node;
18254 tree rettype;
18255 tree argtype;
18256
18257 /* It is expected that these htm built-in functions may have
18258 d->icode equal to CODE_FOR_nothing. */
18259
18260 if (TARGET_32BIT && TARGET_POWERPC64)
18261 gpr_type_node = long_long_unsigned_type_node;
18262 else
18263 gpr_type_node = long_unsigned_type_node;
18264
18265 if (attr & RS6000_BTC_SPR)
18266 {
18267 rettype = gpr_type_node;
18268 argtype = gpr_type_node;
18269 }
18270 else if (d->code == HTM_BUILTIN_TABORTDC
18271 || d->code == HTM_BUILTIN_TABORTDCI)
18272 {
18273 rettype = unsigned_type_node;
18274 argtype = gpr_type_node;
18275 }
18276 else
18277 {
18278 rettype = unsigned_type_node;
18279 argtype = unsigned_type_node;
18280 }
18281
18282 if ((mask & builtin_mask) != mask)
18283 {
18284 if (TARGET_DEBUG_BUILTIN)
18285 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
18286 continue;
18287 }
18288
18289 if (d->name == 0)
18290 {
18291 if (TARGET_DEBUG_BUILTIN)
18292 fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
18293 (long unsigned) i);
18294 continue;
18295 }
18296
18297 op[nopnds++] = (void_func) ? void_type_node : rettype;
18298
18299 if (attr_args == RS6000_BTC_UNARY)
18300 op[nopnds++] = argtype;
18301 else if (attr_args == RS6000_BTC_BINARY)
18302 {
18303 op[nopnds++] = argtype;
18304 op[nopnds++] = argtype;
18305 }
18306 else if (attr_args == RS6000_BTC_TERNARY)
18307 {
18308 op[nopnds++] = argtype;
18309 op[nopnds++] = argtype;
18310 op[nopnds++] = argtype;
18311 }
18312
18313 switch (nopnds)
18314 {
18315 case 1:
18316 type = build_function_type_list (op[0], NULL_TREE);
18317 break;
18318 case 2:
18319 type = build_function_type_list (op[0], op[1], NULL_TREE);
18320 break;
18321 case 3:
18322 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
18323 break;
18324 case 4:
18325 type = build_function_type_list (op[0], op[1], op[2], op[3],
18326 NULL_TREE);
18327 break;
18328 default:
18329 gcc_unreachable ();
18330 }
18331
18332 def_builtin (d->name, type, d->code);
18333 }
18334 }
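/* A user-level sketch of the HTM builtins created above (with -mhtm):

     if (__builtin_tbegin (0))
       {
	 ... transactional code ...
	 __builtin_tend (0);
       }
     else
       ... fallback code ...  */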
18335
18336 /* Hash function for builtin functions with up to 3 arguments and a return
18337 type. */
18338 hashval_t
18339 builtin_hasher::hash (builtin_hash_struct *bh)
18340 {
18341 unsigned ret = 0;
18342 int i;
18343
18344 for (i = 0; i < 4; i++)
18345 {
18346 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
18347 ret = (ret * 2) + bh->uns_p[i];
18348 }
18349
18350 return ret;
18351 }
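/* The loop above folds the four (mode, unsigned-p) pairs into one value in
   mixed radix (base MAX_MACHINE_MODE, then base 2); builtin_hasher::equal
   below still performs the exact comparison, so any hash collisions are
   resolved correctly.  */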
18352
18353 /* Compare builtin hash entries H1 and H2 for equivalence. */
18354 bool
18355 builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
18356 {
18357 return ((p1->mode[0] == p2->mode[0])
18358 && (p1->mode[1] == p2->mode[1])
18359 && (p1->mode[2] == p2->mode[2])
18360 && (p1->mode[3] == p2->mode[3])
18361 && (p1->uns_p[0] == p2->uns_p[0])
18362 && (p1->uns_p[1] == p2->uns_p[1])
18363 && (p1->uns_p[2] == p2->uns_p[2])
18364 && (p1->uns_p[3] == p2->uns_p[3]));
18365 }
18366
18367 /* Map types for builtin functions with an explicit return type and up to 3
18368 arguments. Functions with fewer than 3 arguments use VOIDmode for the
18369 missing argument modes. */
18370 static tree
18371 builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
18372 machine_mode mode_arg1, machine_mode mode_arg2,
18373 enum rs6000_builtins builtin, const char *name)
18374 {
18375 struct builtin_hash_struct h;
18376 struct builtin_hash_struct *h2;
18377 int num_args = 3;
18378 int i;
18379 tree ret_type = NULL_TREE;
18380 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
18381
18382 /* Create builtin_hash_table. */
18383 if (builtin_hash_table == NULL)
18384 builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);
18385
18386 h.type = NULL_TREE;
18387 h.mode[0] = mode_ret;
18388 h.mode[1] = mode_arg0;
18389 h.mode[2] = mode_arg1;
18390 h.mode[3] = mode_arg2;
18391 h.uns_p[0] = 0;
18392 h.uns_p[1] = 0;
18393 h.uns_p[2] = 0;
18394 h.uns_p[3] = 0;
18395
18396 /* If the builtin function produces unsigned results or takes unsigned
18397 arguments, and it is returned as a decl for the vectorizer (such as
18398 widening multiplies, permute), make sure the arguments and return value
18399 have the correct types. */
18400 switch (builtin)
18401 {
18402 /* unsigned 1 argument functions. */
18403 case CRYPTO_BUILTIN_VSBOX:
18404 case P8V_BUILTIN_VGBBD:
18405 case MISC_BUILTIN_CDTBCD:
18406 case MISC_BUILTIN_CBCDTD:
18407 h.uns_p[0] = 1;
18408 h.uns_p[1] = 1;
18409 break;
18410
18411 /* unsigned 2 argument functions. */
18412 case ALTIVEC_BUILTIN_VMULEUB_UNS:
18413 case ALTIVEC_BUILTIN_VMULEUH_UNS:
18414 case ALTIVEC_BUILTIN_VMULOUB_UNS:
18415 case ALTIVEC_BUILTIN_VMULOUH_UNS:
18416 case CRYPTO_BUILTIN_VCIPHER:
18417 case CRYPTO_BUILTIN_VCIPHERLAST:
18418 case CRYPTO_BUILTIN_VNCIPHER:
18419 case CRYPTO_BUILTIN_VNCIPHERLAST:
18420 case CRYPTO_BUILTIN_VPMSUMB:
18421 case CRYPTO_BUILTIN_VPMSUMH:
18422 case CRYPTO_BUILTIN_VPMSUMW:
18423 case CRYPTO_BUILTIN_VPMSUMD:
18424 case CRYPTO_BUILTIN_VPMSUM:
18425 case MISC_BUILTIN_ADDG6S:
18426 case MISC_BUILTIN_DIVWEU:
18427 case MISC_BUILTIN_DIVWEUO:
18428 case MISC_BUILTIN_DIVDEU:
18429 case MISC_BUILTIN_DIVDEUO:
18430 h.uns_p[0] = 1;
18431 h.uns_p[1] = 1;
18432 h.uns_p[2] = 1;
18433 break;
18434
18435 /* unsigned 3 argument functions. */
18436 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
18437 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
18438 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
18439 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
18440 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
18441 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
18442 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
18443 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
18444 case VSX_BUILTIN_VPERM_16QI_UNS:
18445 case VSX_BUILTIN_VPERM_8HI_UNS:
18446 case VSX_BUILTIN_VPERM_4SI_UNS:
18447 case VSX_BUILTIN_VPERM_2DI_UNS:
18448 case VSX_BUILTIN_XXSEL_16QI_UNS:
18449 case VSX_BUILTIN_XXSEL_8HI_UNS:
18450 case VSX_BUILTIN_XXSEL_4SI_UNS:
18451 case VSX_BUILTIN_XXSEL_2DI_UNS:
18452 case CRYPTO_BUILTIN_VPERMXOR:
18453 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
18454 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
18455 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
18456 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
18457 case CRYPTO_BUILTIN_VSHASIGMAW:
18458 case CRYPTO_BUILTIN_VSHASIGMAD:
18459 case CRYPTO_BUILTIN_VSHASIGMA:
18460 h.uns_p[0] = 1;
18461 h.uns_p[1] = 1;
18462 h.uns_p[2] = 1;
18463 h.uns_p[3] = 1;
18464 break;
18465
18466 /* signed permute functions with unsigned char mask. */
18467 case ALTIVEC_BUILTIN_VPERM_16QI:
18468 case ALTIVEC_BUILTIN_VPERM_8HI:
18469 case ALTIVEC_BUILTIN_VPERM_4SI:
18470 case ALTIVEC_BUILTIN_VPERM_4SF:
18471 case ALTIVEC_BUILTIN_VPERM_2DI:
18472 case ALTIVEC_BUILTIN_VPERM_2DF:
18473 case VSX_BUILTIN_VPERM_16QI:
18474 case VSX_BUILTIN_VPERM_8HI:
18475 case VSX_BUILTIN_VPERM_4SI:
18476 case VSX_BUILTIN_VPERM_4SF:
18477 case VSX_BUILTIN_VPERM_2DI:
18478 case VSX_BUILTIN_VPERM_2DF:
18479 h.uns_p[3] = 1;
18480 break;
18481
18482 /* unsigned args, signed return. */
18483 case VSX_BUILTIN_XVCVUXDDP_UNS:
18484 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
18485 h.uns_p[1] = 1;
18486 break;
18487
18488 /* signed args, unsigned return. */
18489 case VSX_BUILTIN_XVCVDPUXDS_UNS:
18490 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
18491 case MISC_BUILTIN_UNPACK_TD:
18492 case MISC_BUILTIN_UNPACK_V1TI:
18493 h.uns_p[0] = 1;
18494 break;
18495
18496 /* unsigned arguments for 128-bit pack instructions. */
18497 case MISC_BUILTIN_PACK_TD:
18498 case MISC_BUILTIN_PACK_V1TI:
18499 h.uns_p[1] = 1;
18500 h.uns_p[2] = 1;
18501 break;
18502
18503 default:
18504 break;
18505 }
18506
18507 /* Figure out how many args are present. */
18508 while (num_args > 0 && h.mode[num_args] == VOIDmode)
18509 num_args--;
18510
18511 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
18512 if (!ret_type && h.uns_p[0])
18513 ret_type = builtin_mode_to_type[h.mode[0]][0];
18514
18515 if (!ret_type)
18516 fatal_error (input_location,
18517 "internal error: builtin function %s had an unexpected "
18518 "return type %s", name, GET_MODE_NAME (h.mode[0]));
18519
18520 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
18521 arg_type[i] = NULL_TREE;
18522
18523 for (i = 0; i < num_args; i++)
18524 {
18525 int m = (int) h.mode[i+1];
18526 int uns_p = h.uns_p[i+1];
18527
18528 arg_type[i] = builtin_mode_to_type[m][uns_p];
18529 if (!arg_type[i] && uns_p)
18530 arg_type[i] = builtin_mode_to_type[m][0];
18531
18532 if (!arg_type[i])
18533 fatal_error (input_location,
18534 "internal error: builtin function %s, argument %d "
18535 "had unexpected argument type %s", name, i,
18536 GET_MODE_NAME (m));
18537 }
18538
18539 builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
18540 if (*found == NULL)
18541 {
18542 h2 = ggc_alloc<builtin_hash_struct> ();
18543 *h2 = h;
18544 *found = h2;
18545
18546 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
18547 arg_type[2], NULL_TREE);
18548 }
18549
18550 return (*found)->type;
18551 }
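
/* Editor's illustration (not in the original source): for a builtin the
   switch above flags as fully unsigned, e.g. CRYPTO_BUILTIN_VCIPHER,
   assuming its insn operands are all V2DImode so that
   mode_ret = mode_arg0 = mode_arg1 = V2DImode, the lookups become

     ret_type    = builtin_mode_to_type[V2DImode][1];   (unsigned V2DI)
     arg_type[0] = builtin_mode_to_type[V2DImode][1];
     arg_type[1] = builtin_mode_to_type[V2DImode][1];

   and the resulting "unsigned V2DI (unsigned V2DI, unsigned V2DI)"
   function type is cached in builtin_hash_table, so every later builtin
   with the same mode/signedness signature reuses the same type node.  */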
18552
18553 static void
18554 rs6000_common_init_builtins (void)
18555 {
18556 const struct builtin_description *d;
18557 size_t i;
18558
18559 tree opaque_ftype_opaque = NULL_TREE;
18560 tree opaque_ftype_opaque_opaque = NULL_TREE;
18561 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
18562 tree v2si_ftype = NULL_TREE;
18563 tree v2si_ftype_qi = NULL_TREE;
18564 tree v2si_ftype_v2si_qi = NULL_TREE;
18565 tree v2si_ftype_int_qi = NULL_TREE;
18566 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
18567
18568 if (!TARGET_PAIRED_FLOAT)
18569 {
18570 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
18571 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
18572 }
18573
18574 /* Paired and SPE builtins are only available if you build a compiler with
18575 the appropriate options, so only create those builtins with the
18576 appropriate compiler option. Create Altivec and VSX builtins on machines
18577 with at least the general purpose extensions (970 and newer) to allow the
18578 use of the target attribute. */
18579
18580 if (TARGET_EXTRA_BUILTINS)
18581 builtin_mask |= RS6000_BTM_COMMON;
18582
18583 /* Add the ternary operators. */
18584 d = bdesc_3arg;
18585 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
18586 {
18587 tree type;
18588 HOST_WIDE_INT mask = d->mask;
18589
18590 if ((mask & builtin_mask) != mask)
18591 {
18592 if (TARGET_DEBUG_BUILTIN)
18593 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
18594 continue;
18595 }
18596
18597 if (rs6000_overloaded_builtin_p (d->code))
18598 {
18599 if (! (type = opaque_ftype_opaque_opaque_opaque))
18600 type = opaque_ftype_opaque_opaque_opaque
18601 = build_function_type_list (opaque_V4SI_type_node,
18602 opaque_V4SI_type_node,
18603 opaque_V4SI_type_node,
18604 opaque_V4SI_type_node,
18605 NULL_TREE);
18606 }
18607 else
18608 {
18609 enum insn_code icode = d->icode;
18610 if (d->name == 0)
18611 {
18612 if (TARGET_DEBUG_BUILTIN)
18613 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
18614 (long unsigned)i);
18615
18616 continue;
18617 }
18618
18619 if (icode == CODE_FOR_nothing)
18620 {
18621 if (TARGET_DEBUG_BUILTIN)
18622 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
18623 d->name);
18624
18625 continue;
18626 }
18627
18628 type = builtin_function_type (insn_data[icode].operand[0].mode,
18629 insn_data[icode].operand[1].mode,
18630 insn_data[icode].operand[2].mode,
18631 insn_data[icode].operand[3].mode,
18632 d->code, d->name);
18633 }
18634
18635 def_builtin (d->name, type, d->code);
18636 }
18637
18638 /* Add the binary operators. */
18639 d = bdesc_2arg;
18640 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
18641 {
18642 machine_mode mode0, mode1, mode2;
18643 tree type;
18644 HOST_WIDE_INT mask = d->mask;
18645
18646 if ((mask & builtin_mask) != mask)
18647 {
18648 if (TARGET_DEBUG_BUILTIN)
18649 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
18650 continue;
18651 }
18652
18653 if (rs6000_overloaded_builtin_p (d->code))
18654 {
18655 if (! (type = opaque_ftype_opaque_opaque))
18656 type = opaque_ftype_opaque_opaque
18657 = build_function_type_list (opaque_V4SI_type_node,
18658 opaque_V4SI_type_node,
18659 opaque_V4SI_type_node,
18660 NULL_TREE);
18661 }
18662 else
18663 {
18664 enum insn_code icode = d->icode;
18665 if (d->name == 0)
18666 {
18667 if (TARGET_DEBUG_BUILTIN)
18668 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
18669 (long unsigned)i);
18670
18671 continue;
18672 }
18673
18674 if (icode == CODE_FOR_nothing)
18675 {
18676 if (TARGET_DEBUG_BUILTIN)
18677 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
18678 d->name);
18679
18680 continue;
18681 }
18682
18683 mode0 = insn_data[icode].operand[0].mode;
18684 mode1 = insn_data[icode].operand[1].mode;
18685 mode2 = insn_data[icode].operand[2].mode;
18686
18687 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
18688 {
18689 if (! (type = v2si_ftype_v2si_qi))
18690 type = v2si_ftype_v2si_qi
18691 = build_function_type_list (opaque_V2SI_type_node,
18692 opaque_V2SI_type_node,
18693 char_type_node,
18694 NULL_TREE);
18695 }
18696
18697 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
18698 && mode2 == QImode)
18699 {
18700 if (! (type = v2si_ftype_int_qi))
18701 type = v2si_ftype_int_qi
18702 = build_function_type_list (opaque_V2SI_type_node,
18703 integer_type_node,
18704 char_type_node,
18705 NULL_TREE);
18706 }
18707
18708 else
18709 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
18710 d->code, d->name);
18711 }
18712
18713 def_builtin (d->name, type, d->code);
18714 }
18715
18716 /* Add the simple unary operators. */
18717 d = bdesc_1arg;
18718 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
18719 {
18720 machine_mode mode0, mode1;
18721 tree type;
18722 HOST_WIDE_INT mask = d->mask;
18723
18724 if ((mask & builtin_mask) != mask)
18725 {
18726 if (TARGET_DEBUG_BUILTIN)
18727 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
18728 continue;
18729 }
18730
18731 if (rs6000_overloaded_builtin_p (d->code))
18732 {
18733 if (! (type = opaque_ftype_opaque))
18734 type = opaque_ftype_opaque
18735 = build_function_type_list (opaque_V4SI_type_node,
18736 opaque_V4SI_type_node,
18737 NULL_TREE);
18738 }
18739 else
18740 {
18741 enum insn_code icode = d->icode;
18742 if (d->name == 0)
18743 {
18744 if (TARGET_DEBUG_BUILTIN)
18745 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
18746 (long unsigned)i);
18747
18748 continue;
18749 }
18750
18751 if (icode == CODE_FOR_nothing)
18752 {
18753 if (TARGET_DEBUG_BUILTIN)
18754 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
18755 d->name);
18756
18757 continue;
18758 }
18759
18760 mode0 = insn_data[icode].operand[0].mode;
18761 mode1 = insn_data[icode].operand[1].mode;
18762
18763 if (mode0 == V2SImode && mode1 == QImode)
18764 {
18765 if (! (type = v2si_ftype_qi))
18766 type = v2si_ftype_qi
18767 = build_function_type_list (opaque_V2SI_type_node,
18768 char_type_node,
18769 NULL_TREE);
18770 }
18771
18772 else
18773 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
18774 d->code, d->name);
18775 }
18776
18777 def_builtin (d->name, type, d->code);
18778 }
18779
18780 /* Add the simple no-argument operators. */
18781 d = bdesc_0arg;
18782 for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
18783 {
18784 machine_mode mode0;
18785 tree type;
18786 HOST_WIDE_INT mask = d->mask;
18787
18788 if ((mask & builtin_mask) != mask)
18789 {
18790 if (TARGET_DEBUG_BUILTIN)
18791 fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
18792 continue;
18793 }
18794 if (rs6000_overloaded_builtin_p (d->code))
18795 {
18796 if (!opaque_ftype_opaque)
18797 opaque_ftype_opaque
18798 = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
18799 type = opaque_ftype_opaque;
18800 }
18801 else
18802 {
18803 enum insn_code icode = d->icode;
18804 if (d->name == 0)
18805 {
18806 if (TARGET_DEBUG_BUILTIN)
18807 fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
18808 (long unsigned) i);
18809 continue;
18810 }
18811 if (icode == CODE_FOR_nothing)
18812 {
18813 if (TARGET_DEBUG_BUILTIN)
18814 fprintf (stderr,
18815 "rs6000_builtin, skip no-argument %s (no code)\n",
18816 d->name);
18817 continue;
18818 }
18819 mode0 = insn_data[icode].operand[0].mode;
18820 if (mode0 == V2SImode)
18821 {
18822 /* Code for SPE. */
18823 if (! (type = v2si_ftype))
18824 {
18825 v2si_ftype
18826 = build_function_type_list (opaque_V2SI_type_node,
18827 NULL_TREE);
18828 type = v2si_ftype;
18829 }
18830 }
18831 else
18832 type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
18833 d->code, d->name);
18834 }
18835 def_builtin (d->name, type, d->code);
18836 }
18837 }
18838
18839 /* Set up AIX/Darwin/64-bit Linux quad floating point routines. */
18840 static void
18841 init_float128_ibm (machine_mode mode)
18842 {
18843 if (!TARGET_XL_COMPAT)
18844 {
18845 set_optab_libfunc (add_optab, mode, "__gcc_qadd");
18846 set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
18847 set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
18848 set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");
18849
18850 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
18851 {
18852 set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
18853 set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
18854 set_optab_libfunc (ne_optab, mode, "__gcc_qne");
18855 set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
18856 set_optab_libfunc (ge_optab, mode, "__gcc_qge");
18857 set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
18858 set_optab_libfunc (le_optab, mode, "__gcc_qle");
18859
18860 set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
18861 set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
18862 set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
18863 set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
18864 set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
18865 set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
18866 set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
18867 set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
18868 }
18869
18870 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
18871 set_optab_libfunc (unord_optab, mode, "__gcc_qunord");
18872 }
18873 else
18874 {
18875 set_optab_libfunc (add_optab, mode, "_xlqadd");
18876 set_optab_libfunc (sub_optab, mode, "_xlqsub");
18877 set_optab_libfunc (smul_optab, mode, "_xlqmul");
18878 set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
18879 }
18880
18881 /* Add various conversions for IFmode to use the traditional TFmode
18882 names. */
18883 if (mode == IFmode)
18884 {
18885 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
18886 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
18887 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
18888 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
18889 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
18890 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");
18891
18892 if (TARGET_POWERPC64)
18893 {
18894 set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
18895 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
18896 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
18897 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
18898 }
18899 }
18900 }
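
/* Editor's note (illustrative, not from the original source): once these
   optab entries are registered, arithmetic on the IBM extended-double
   format is emitted as libcalls rather than inline code.  For instance,
   on a target where long double is the 128-bit IBM format,

     long double
     f (long double a, long double b)
     {
       return a + b;     becomes a call to __gcc_qadd,
     }                   or _xlqadd under -mxl-compat.  */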
18901
18902 /* Set up IEEE 128-bit floating point routines. Use different names if the
18903 arguments can be passed in a vector register. The historical PowerPC
18904 implementation of IEEE 128-bit floating point used _q_<op> for the names, so
18905 continue to use that if we aren't using vector registers to pass IEEE
18906 128-bit floating point. */
18907
18908 static void
18909 init_float128_ieee (machine_mode mode)
18910 {
18911 if (FLOAT128_VECTOR_P (mode))
18912 {
18913 set_optab_libfunc (add_optab, mode, "__addkf3");
18914 set_optab_libfunc (sub_optab, mode, "__subkf3");
18915 set_optab_libfunc (neg_optab, mode, "__negkf2");
18916 set_optab_libfunc (smul_optab, mode, "__mulkf3");
18917 set_optab_libfunc (sdiv_optab, mode, "__divkf3");
18918 set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
18919 set_optab_libfunc (abs_optab, mode, "__abskf2");
18920
18921 set_optab_libfunc (eq_optab, mode, "__eqkf2");
18922 set_optab_libfunc (ne_optab, mode, "__nekf2");
18923 set_optab_libfunc (gt_optab, mode, "__gtkf2");
18924 set_optab_libfunc (ge_optab, mode, "__gekf2");
18925 set_optab_libfunc (lt_optab, mode, "__ltkf2");
18926 set_optab_libfunc (le_optab, mode, "__lekf2");
18927 set_optab_libfunc (unord_optab, mode, "__unordkf2");
18928
18929 set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
18930 set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
18931 set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
18932 set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");
18933
18934 set_conv_libfunc (sext_optab, mode, IFmode, "__extendtfkf2");
18935 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18936 set_conv_libfunc (sext_optab, mode, TFmode, "__extendtfkf2");
18937
18938 set_conv_libfunc (trunc_optab, IFmode, mode, "__trunckftf2");
18939 if (mode != TFmode && FLOAT128_IBM_P (TFmode))
18940 set_conv_libfunc (trunc_optab, TFmode, mode, "__trunckftf2");
18941
18942 set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
18943 set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
18944 set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
18945 set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
18946 set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
18947 set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");
18948
18949 set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
18950 set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
18951 set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
18952 set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");
18953
18954 set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
18955 set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
18956 set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
18957 set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");
18958
18959 if (TARGET_POWERPC64)
18960 {
18961 set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
18962 set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
18963 set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
18964 set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
18965 }
18966 }
18967
18968 else
18969 {
18970 set_optab_libfunc (add_optab, mode, "_q_add");
18971 set_optab_libfunc (sub_optab, mode, "_q_sub");
18972 set_optab_libfunc (neg_optab, mode, "_q_neg");
18973 set_optab_libfunc (smul_optab, mode, "_q_mul");
18974 set_optab_libfunc (sdiv_optab, mode, "_q_div");
18975 if (TARGET_PPC_GPOPT)
18976 set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");
18977
18978 set_optab_libfunc (eq_optab, mode, "_q_feq");
18979 set_optab_libfunc (ne_optab, mode, "_q_fne");
18980 set_optab_libfunc (gt_optab, mode, "_q_fgt");
18981 set_optab_libfunc (ge_optab, mode, "_q_fge");
18982 set_optab_libfunc (lt_optab, mode, "_q_flt");
18983 set_optab_libfunc (le_optab, mode, "_q_fle");
18984
18985 set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
18986 set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
18987 set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
18988 set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
18989 set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
18990 set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
18991 set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
18992 set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
18993 }
18994 }
18995
18996 static void
18997 rs6000_init_libfuncs (void)
18998 {
18999 /* __float128 support. */
19000 if (TARGET_FLOAT128_TYPE)
19001 {
19002 init_float128_ibm (IFmode);
19003 init_float128_ieee (KFmode);
19004 }
19005
19006 /* AIX/Darwin/64-bit Linux quad floating point routines. */
19007 if (TARGET_LONG_DOUBLE_128)
19008 {
19009 if (!TARGET_IEEEQUAD)
19010 init_float128_ibm (TFmode);
19011
19012 /* IEEE 128-bit including 32-bit SVR4 quad floating point routines. */
19013 else
19014 init_float128_ieee (TFmode);
19015 }
19016 }
19017
19018 \f
19019 /* Expand a block clear operation, and return 1 if successful. Return 0
19020 if we should let the compiler generate normal code.
19021
19022 operands[0] is the destination
19023 operands[1] is the length
19024 operands[3] is the alignment */
19025
19026 int
19027 expand_block_clear (rtx operands[])
19028 {
19029 rtx orig_dest = operands[0];
19030 rtx bytes_rtx = operands[1];
19031 rtx align_rtx = operands[3];
19032 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
19033 HOST_WIDE_INT align;
19034 HOST_WIDE_INT bytes;
19035 int offset;
19036 int clear_bytes;
19037 int clear_step;
19038
19039 /* If this is not a fixed size clear, just call memset. */
19040 if (! constp)
19041 return 0;
19042
19043 /* This must be a fixed size alignment */
19044 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
19045 align = INTVAL (align_rtx) * BITS_PER_UNIT;
19046
19047 /* Anything to clear? */
19048 bytes = INTVAL (bytes_rtx);
19049 if (bytes <= 0)
19050 return 1;
19051
19052 /* Use the builtin memset after a point, to avoid huge code bloat.
19053 When optimize_size, avoid any significant code bloat; calling
19054 memset is about 4 instructions, so allow for one instruction to
19055 load zero and three to do clearing. */
19056 if (TARGET_ALTIVEC && align >= 128)
19057 clear_step = 16;
19058 else if (TARGET_POWERPC64 && (align >= 64 || !STRICT_ALIGNMENT))
19059 clear_step = 8;
19060 else if (TARGET_SPE && align >= 64)
19061 clear_step = 8;
19062 else
19063 clear_step = 4;
19064
19065 if (optimize_size && bytes > 3 * clear_step)
19066 return 0;
19067 if (! optimize_size && bytes > 8 * clear_step)
19068 return 0;
19069
19070 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
19071 {
19072 machine_mode mode = BLKmode;
19073 rtx dest;
19074
19075 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
19076 {
19077 clear_bytes = 16;
19078 mode = V4SImode;
19079 }
19080 else if (bytes >= 8 && TARGET_SPE && align >= 64)
19081 {
19082 clear_bytes = 8;
19083 mode = V2SImode;
19084 }
19085 else if (bytes >= 8 && TARGET_POWERPC64
19086 && (align >= 64 || !STRICT_ALIGNMENT))
19087 {
19088 clear_bytes = 8;
19089 mode = DImode;
19090 if (offset == 0 && align < 64)
19091 {
19092 rtx addr;
19093
19094 /* If the address form is reg+offset with offset not a
19095 multiple of four, reload into reg indirect form here
19096 rather than waiting for reload. This way we get one
19097 reload, not one per store. */
19098 addr = XEXP (orig_dest, 0);
19099 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
19100 && GET_CODE (XEXP (addr, 1)) == CONST_INT
19101 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
19102 {
19103 addr = copy_addr_to_reg (addr);
19104 orig_dest = replace_equiv_address (orig_dest, addr);
19105 }
19106 }
19107 }
19108 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
19109 { /* move 4 bytes */
19110 clear_bytes = 4;
19111 mode = SImode;
19112 }
19113 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
19114 { /* move 2 bytes */
19115 clear_bytes = 2;
19116 mode = HImode;
19117 }
19118 else /* move 1 byte at a time */
19119 {
19120 clear_bytes = 1;
19121 mode = QImode;
19122 }
19123
19124 dest = adjust_address (orig_dest, mode, offset);
19125
19126 emit_move_insn (dest, CONST0_RTX (mode));
19127 }
19128
19129 return 1;
19130 }
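
/* Editor's worked example (illustrative): clearing 11 bytes with 4-byte
   alignment on a 32-bit target without AltiVec gives clear_step = 4, so
   the size limits pass (11 <= 8 * 4) and the loop above emits

     offset  0: SImode store of 0   (11 bytes left -> 7)
     offset  4: SImode store of 0   ( 7 bytes left -> 3)
     offset  8: HImode store of 0   ( 3 bytes left -> 1)
     offset 10: QImode store of 0   ( 1 byte  left -> 0)

   i.e. four stores instead of a call to memset.  */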
19131
19132 /* Emit a potentially record-form instruction, setting DST from SRC.
19133 If DOT is 0, that is all; otherwise, set CCREG to the result of the
19134 signed comparison of DST with zero. If DOT is 1, the generated RTL
19135 doesn't care about the DST result; if DOT is 2, it does. If CCREG
19136 is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
19137 a separate COMPARE. */
19138
19139 static void
19140 rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
19141 {
19142 if (dot == 0)
19143 {
19144 emit_move_insn (dst, src);
19145 return;
19146 }
19147
19148 if (cc_reg_not_cr0_operand (ccreg, CCmode))
19149 {
19150 emit_move_insn (dst, src);
19151 emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
19152 return;
19153 }
19154
19155 rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
19156 if (dot == 1)
19157 {
19158 rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
19159 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
19160 }
19161 else
19162 {
19163 rtx set = gen_rtx_SET (dst, src);
19164 emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
19165 }
19166 }
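
/* Editor's sketch of the RTL built above (illustrative): for dot == 1
   with CCREG being CR0, the PARALLEL has the shape

     (parallel [(set (reg:CC cr0) (compare:CC src (const_int 0)))
                (clobber dst)])

   which matches the record-form ("dot") insn patterns in rs6000.md;
   dot == 2 replaces the clobber with (set dst src) so the DST value is
   kept as well.  */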
19167
19168 /* Figure out the correct instructions to generate to load data for
19169 block compare. MODE is used for the read from memory, and
19170 data is zero extended if REG is wider than MODE. If LE code
19171 is being generated, bswap loads are used.
19172
19173 REG is the destination register to move the data into.
19174 MEM is the memory block being read.
19175 MODE is the mode of memory to use for the read. */
19176 static void
19177 do_load_for_compare (rtx reg, rtx mem, machine_mode mode)
19178 {
19179 switch (GET_MODE (reg))
19180 {
19181 case DImode:
19182 switch (mode)
19183 {
19184 case QImode:
19185 emit_insn (gen_zero_extendqidi2 (reg, mem));
19186 break;
19187 case HImode:
19188 {
19189 rtx src = mem;
19190 if (!BYTES_BIG_ENDIAN)
19191 {
19192 src = gen_reg_rtx (HImode);
19193 emit_insn (gen_bswaphi2 (src, mem));
19194 }
19195 emit_insn (gen_zero_extendhidi2 (reg, src));
19196 break;
19197 }
19198 case SImode:
19199 {
19200 rtx src = mem;
19201 if (!BYTES_BIG_ENDIAN)
19202 {
19203 src = gen_reg_rtx (SImode);
19204 emit_insn (gen_bswapsi2 (src, mem));
19205 }
19206 emit_insn (gen_zero_extendsidi2 (reg, src));
19207 }
19208 break;
19209 case DImode:
19210 if (!BYTES_BIG_ENDIAN)
19211 emit_insn (gen_bswapdi2 (reg, mem));
19212 else
19213 emit_insn (gen_movdi (reg, mem));
19214 break;
19215 default:
19216 gcc_unreachable ();
19217 }
19218 break;
19219
19220 case SImode:
19221 switch (mode)
19222 {
19223 case QImode:
19224 emit_insn (gen_zero_extendqisi2 (reg, mem));
19225 break;
19226 case HImode:
19227 {
19228 rtx src = mem;
19229 if (!BYTES_BIG_ENDIAN)
19230 {
19231 src = gen_reg_rtx (HImode);
19232 emit_insn (gen_bswaphi2 (src, mem));
19233 }
19234 emit_insn (gen_zero_extendhisi2 (reg, src));
19235 break;
19236 }
19237 case SImode:
19238 if (!BYTES_BIG_ENDIAN)
19239 emit_insn (gen_bswapsi2 (reg, mem));
19240 else
19241 emit_insn (gen_movsi (reg, mem));
19242 break;
19243 case DImode:
19244 /* DImode is larger than the destination reg so is not expected. */
19245 gcc_unreachable ();
19246 break;
19247 default:
19248 gcc_unreachable ();
19249 }
19250 break;
19251 default:
19252 gcc_unreachable ();
19253 break;
19254 }
19255 }
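
/* Editor's example (illustrative, instruction selection assumed): on a
   little-endian target with a DImode destination register, an SImode
   chunk is typically loaded as

     lwbrx  tmp,0,addr      byte-reversed load (gen_bswapsi2)
     rldicl reg,tmp,0,32    zero extend (gen_zero_extendsidi2)

   so the register holds the bytes in memory order and a plain subtract
   of the two sides orders them the way memcmp requires.  */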
19256
19257 /* Select the mode to be used for reading the next chunk of bytes
19258 in the compare.
19259
19260 OFFSET is the current read offset from the beginning of the block.
19261 BYTES is the number of bytes remaining to be read.
19262 ALIGN is the minimum alignment of the memory blocks being compared in bytes.
19263 WORD_MODE_OK indicates using WORD_MODE is allowed, else SImode is
19264 the largest allowable mode. */
19265 static machine_mode
19266 select_block_compare_mode (HOST_WIDE_INT offset, HOST_WIDE_INT bytes,
19267 HOST_WIDE_INT align, bool word_mode_ok)
19268 {
19269 /* First see if we can do a whole load unit
19270 as that will be more efficient than a larger load + shift. */
19271
19272 /* If big, use biggest chunk.
19273 If exactly chunk size, use that size.
19274 If remainder can be done in one piece with shifting, do that.
19275 Do largest chunk possible without violating alignment rules. */
19276
19277 /* The most we can read without potential page crossing. */
19278 HOST_WIDE_INT maxread = ROUND_UP (bytes, align);
19279
19280 if (word_mode_ok && bytes >= UNITS_PER_WORD)
19281 return word_mode;
19282 else if (bytes == GET_MODE_SIZE (SImode))
19283 return SImode;
19284 else if (bytes == GET_MODE_SIZE (HImode))
19285 return HImode;
19286 else if (bytes == GET_MODE_SIZE (QImode))
19287 return QImode;
19288 else if (bytes < GET_MODE_SIZE (SImode)
19289 && offset >= GET_MODE_SIZE (SImode) - bytes)
19290 /* This matches the case where we have SImode and 3 bytes
19291 and offset >= 1 and permits us to move back one and overlap
19292 with the previous read, thus avoiding having to shift
19293 unwanted bytes off of the input. */
19294 return SImode;
19295 else if (word_mode_ok && bytes < UNITS_PER_WORD
19296 && offset >= UNITS_PER_WORD-bytes)
19297 /* Similarly, if we can use DImode it will get matched here and
19298 can do an overlapping read that ends at the end of the block. */
19299 return word_mode;
19300 else if (word_mode_ok && maxread >= UNITS_PER_WORD)
19301 /* It is safe to do all remaining in one load of largest size,
19302 possibly with a shift to get rid of unwanted bytes. */
19303 return word_mode;
19304 else if (maxread >= GET_MODE_SIZE (SImode))
19305 /* It is safe to do all remaining in one SImode load,
19306 possibly with a shift to get rid of unwanted bytes. */
19307 return SImode;
19308 else if (bytes > GET_MODE_SIZE (SImode))
19309 return SImode;
19310 else if (bytes > GET_MODE_SIZE (HImode))
19311 return HImode;
19312
19313 /* Final fallback is to do one byte at a time. */
19314 return QImode;
19315 }
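
/* Editor's worked example (illustrative): for the 11-byte compare shown
   further below, on a 64-bit target where word_mode is usable:

     offset 0, bytes 11 -> word_mode (DImode), since bytes >= UNITS_PER_WORD
     offset 8, bytes  3 -> SImode, since 3 < 4 and offset 8 >= 4 - 3, so a
                           4-byte load moved back one byte overlaps the
                           previous read and ends exactly at byte 11,
                           avoiding any shifting

   which matches the ldbrx/lwbrx pair in the generated code below.  */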
19316
19317 /* Compute the alignment of pointer+OFFSET where the original alignment
19318 of pointer was BASE_ALIGN. */
19319 static HOST_WIDE_INT
19320 compute_current_alignment (HOST_WIDE_INT base_align, HOST_WIDE_INT offset)
19321 {
19322 if (offset == 0)
19323 return base_align;
19324 return min (base_align, offset & -offset);
19325 }
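
/* Editor's sketch (illustrative, excluded from the build): OFFSET & -OFFSET
   isolates the lowest set bit of OFFSET, which is the largest power of two
   dividing it, and therefore the best alignment still guaranteed after
   advancing a BASE_ALIGN-aligned pointer by OFFSET bytes.  */
#if 0
#include <stdio.h>

int
main (void)
{
  /* A 16-byte aligned base moved forward by 12 bytes is only 4-byte
     aligned: 12 & -12 == 4, and min (16, 4) == 4.  */
  long base_align = 16, offset = 12;
  long align = offset == 0 ? base_align
    : (base_align < (offset & -offset) ? base_align : (offset & -offset));
  printf ("%ld\n", align);	/* prints 4 */
  return 0;
}
#endif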
19326
19327 /* Expand a block compare operation, and return true if successful.
19328 Return false if we should let the compiler generate normal code,
19329 probably a memcmp call.
19330
19331 OPERANDS[0] is the target (result).
19332 OPERANDS[1] is the first source.
19333 OPERANDS[2] is the second source.
19334 OPERANDS[3] is the length.
19335 OPERANDS[4] is the alignment. */
19336 bool
19337 expand_block_compare (rtx operands[])
19338 {
19339 rtx target = operands[0];
19340 rtx orig_src1 = operands[1];
19341 rtx orig_src2 = operands[2];
19342 rtx bytes_rtx = operands[3];
19343 rtx align_rtx = operands[4];
19344 HOST_WIDE_INT cmp_bytes = 0;
19345 rtx src1 = orig_src1;
19346 rtx src2 = orig_src2;
19347
19348 /* If this is not a fixed size compare, just call memcmp */
19349 if (!CONST_INT_P (bytes_rtx))
19350 return false;
19351
19352 /* This must be a fixed size alignment */
19353 if (!CONST_INT_P (align_rtx))
19354 return false;
19355
19356 int base_align = INTVAL (align_rtx) / BITS_PER_UNIT;
19357
19358 /* SLOW_UNALIGNED_ACCESS -- don't do unaligned stuff */
19359 if (SLOW_UNALIGNED_ACCESS (word_mode, MEM_ALIGN (orig_src1))
19360 || SLOW_UNALIGNED_ACCESS (word_mode, MEM_ALIGN (orig_src2)))
19361 return false;
19362
19363 gcc_assert (GET_MODE (target) == SImode);
19364
19365 /* Anything to compare? */
19366 HOST_WIDE_INT bytes = INTVAL (bytes_rtx);
19367 if (bytes <= 0)
19368 return true;
19369
19370 /* The code generated for p7 and older is not faster than glibc
19371 memcmp if alignment is small and length is not short, so bail
19372 out to avoid those conditions. */
19373 if (!TARGET_EFFICIENT_OVERLAPPING_UNALIGNED
19374 && ((base_align == 1 && bytes > 16)
19375 || (base_align == 2 && bytes > 32)))
19376 return false;
19377
19378 rtx tmp_reg_src1 = gen_reg_rtx (word_mode);
19379 rtx tmp_reg_src2 = gen_reg_rtx (word_mode);
19380
19381 /* If we have an LE target without ldbrx and word_mode is DImode,
19382 then we must avoid using word_mode. */
19383 int word_mode_ok = !(!BYTES_BIG_ENDIAN && !TARGET_LDBRX
19384 && word_mode == DImode);
19385
19386 /* Strategy phase. How many ops will this take and should we expand it? */
19387
19388 int offset = 0;
19389 machine_mode load_mode =
19390 select_block_compare_mode (offset, bytes, base_align, word_mode_ok);
19391 int load_mode_size = GET_MODE_SIZE (load_mode);
19392
19393 /* We don't want to generate too much code. */
19394 if (ROUND_UP (bytes, load_mode_size) / load_mode_size
19395 > rs6000_block_compare_inline_limit)
19396 return false;
19397
19398 bool generate_6432_conversion = false;
19399 rtx convert_label = NULL;
19400 rtx final_label = NULL;
19401
19402 /* Example of code generated for an 11-byte compare with 1-byte alignment:
19403 .L10:
19404 ldbrx 10,6,9
19405 ldbrx 9,7,9
19406 subf. 9,9,10
19407 bne 0,.L8
19408 addi 9,4,7
19409 lwbrx 10,0,9
19410 addi 9,5,7
19411 lwbrx 9,0,9
19412 subf 9,9,10
19413 b .L9
19414 .L8: # convert_label
19415 cntlzd 9,9
19416 addi 9,9,-1
19417 xori 9,9,0x3f
19418 .L9: # final_label
19419
19420 If we start off with DImode and compare/branch to something
19421 with a smaller mode, then we will need a block with the DI->SI
19422 conversion that may or may not be executed. */
19423
19424 while (bytes > 0)
19425 {
19426 int align = compute_current_alignment (base_align, offset);
19427 if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
19428 load_mode = select_block_compare_mode (offset, bytes, align,
19429 word_mode_ok);
19430 else
19431 load_mode = select_block_compare_mode (0, bytes, align, word_mode_ok);
19432 load_mode_size = GET_MODE_SIZE (load_mode);
19433 if (bytes >= load_mode_size)
19434 cmp_bytes = load_mode_size;
19435 else if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
19436 {
19437 /* Move this load back so it doesn't go past the end.
19438 P8/P9 can do this efficiently. */
19439 int extra_bytes = load_mode_size - bytes;
19440 cmp_bytes = bytes;
19441 if (extra_bytes < offset)
19442 {
19443 offset -= extra_bytes;
19444 cmp_bytes = load_mode_size;
19445 bytes = cmp_bytes;
19446 }
19447 }
19448 else
19449 /* P7 and earlier can't do the overlapping load trick fast,
19450 so this forces a non-overlapping load and a shift to get
19451 rid of the extra bytes. */
19452 cmp_bytes = bytes;
19453
19454 src1 = adjust_address (orig_src1, load_mode, offset);
19455 src2 = adjust_address (orig_src2, load_mode, offset);
19456
19457 if (!REG_P (XEXP (src1, 0)))
19458 {
19459 rtx src1_reg = copy_addr_to_reg (XEXP (src1, 0));
19460 src1 = replace_equiv_address (src1, src1_reg);
19461 }
19462 set_mem_size (src1, cmp_bytes);
19463
19464 if (!REG_P (XEXP (src2, 0)))
19465 {
19466 rtx src2_reg = copy_addr_to_reg (XEXP (src2, 0));
19467 src2 = replace_equiv_address (src2, src2_reg);
19468 }
19469 set_mem_size (src2, cmp_bytes);
19470
19471 do_load_for_compare (tmp_reg_src1, src1, load_mode);
19472 do_load_for_compare (tmp_reg_src2, src2, load_mode);
19473
19474 if (cmp_bytes < load_mode_size)
19475 {
19476 /* Shift unneeded bytes off. */
19477 rtx sh = GEN_INT (BITS_PER_UNIT * (load_mode_size - cmp_bytes));
19478 if (word_mode == DImode)
19479 {
19480 emit_insn (gen_lshrdi3 (tmp_reg_src1, tmp_reg_src1, sh));
19481 emit_insn (gen_lshrdi3 (tmp_reg_src2, tmp_reg_src2, sh));
19482 }
19483 else
19484 {
19485 emit_insn (gen_lshrsi3 (tmp_reg_src1, tmp_reg_src1, sh));
19486 emit_insn (gen_lshrsi3 (tmp_reg_src2, tmp_reg_src2, sh));
19487 }
19488 }
19489
19490 /* We previously did a block that needed 64->32 conversion but
19491 the current block does not, so a label is needed to jump
19492 to the end. */
19493 if (generate_6432_conversion && !final_label
19494 && GET_MODE_SIZE (GET_MODE (target)) >= load_mode_size)
19495 final_label = gen_label_rtx ();
19496
19497 /* Do we need a 64->32 conversion block? */
19498 int remain = bytes - cmp_bytes;
19499 if (GET_MODE_SIZE (GET_MODE (target)) < GET_MODE_SIZE (load_mode))
19500 {
19501 generate_6432_conversion = true;
19502 if (remain > 0 && !convert_label)
19503 convert_label = gen_label_rtx ();
19504 }
19505
19506 if (GET_MODE_SIZE (GET_MODE (target)) >= GET_MODE_SIZE (load_mode))
19507 {
19508 /* Target is larger than load size so we don't need to
19509 reduce result size. */
19510 if (remain > 0)
19511 {
19512 /* This is not the last block; branch to the end if the result
19513 of this subtract is not zero. */
19514 if (!final_label)
19515 final_label = gen_label_rtx ();
19516 rtx fin_ref = gen_rtx_LABEL_REF (VOIDmode, final_label);
19517 rtx cond = gen_reg_rtx (CCmode);
19518 rtx tmp = gen_rtx_MINUS (word_mode, tmp_reg_src1, tmp_reg_src2);
19519 rs6000_emit_dot_insn (tmp_reg_src2, tmp, 2, cond);
19520 emit_insn (gen_movsi (target, gen_lowpart (SImode, tmp_reg_src2)));
19521 rtx ne_rtx = gen_rtx_NE (VOIDmode, cond, const0_rtx);
19522 rtx ifelse = gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
19523 fin_ref, pc_rtx);
19524 rtx j = emit_jump_insn (gen_rtx_SET (pc_rtx, ifelse));
19525 JUMP_LABEL (j) = final_label;
19526 LABEL_NUSES (final_label) += 1;
19527 }
19528 else
19529 {
19530 if (word_mode == DImode)
19531 {
19532 emit_insn (gen_subdi3 (tmp_reg_src2, tmp_reg_src1,
19533 tmp_reg_src2));
19534 emit_insn (gen_movsi (target,
19535 gen_lowpart (SImode, tmp_reg_src2)));
19536 }
19537 else
19538 emit_insn (gen_subsi3 (target, tmp_reg_src1, tmp_reg_src2));
19539
19540 if (final_label)
19541 {
19542 rtx fin_ref = gen_rtx_LABEL_REF (VOIDmode, final_label);
19543 rtx j = emit_jump_insn (gen_rtx_SET (pc_rtx, fin_ref));
19544 JUMP_LABEL (j) = final_label;
19545 LABEL_NUSES (final_label) += 1;
19546 emit_barrier ();
19547 }
19548 }
19549 }
19550 else
19551 {
19552 generate_6432_conversion = true;
19553 if (remain > 0)
19554 {
19555 if (!convert_label)
19556 convert_label = gen_label_rtx ();
19557
19558 /* Compare to zero and branch to convert_label if not zero. */
19559 rtx cvt_ref = gen_rtx_LABEL_REF (VOIDmode, convert_label);
19560 rtx cond = gen_reg_rtx (CCmode);
19561 rtx tmp = gen_rtx_MINUS (DImode, tmp_reg_src1, tmp_reg_src2);
19562 rs6000_emit_dot_insn (tmp_reg_src2, tmp, 2, cond);
19563 rtx ne_rtx = gen_rtx_NE (VOIDmode, cond, const0_rtx);
19564 rtx ifelse = gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
19565 cvt_ref, pc_rtx);
19566 rtx j = emit_jump_insn (gen_rtx_SET (pc_rtx, ifelse));
19567 JUMP_LABEL (j) = convert_label;
19568 LABEL_NUSES (convert_label) += 1;
19569 }
19570 else
19571 {
19572 /* Just do the subtract. Since this is the last block the
19573 convert code will be generated immediately following. */
19574 emit_insn (gen_subdi3 (tmp_reg_src2, tmp_reg_src1,
19575 tmp_reg_src2));
19576 }
19577 }
19578
19579 offset += cmp_bytes;
19580 bytes -= cmp_bytes;
19581 }
19582
19583 if (generate_6432_conversion)
19584 {
19585 if (convert_label)
19586 emit_label (convert_label);
19587
19588 /* We need to produce a DI result from the subtract, then convert it
19589 to the SI target while maintaining <0 / ==0 / >0 properties.
19590 Segher's sequence: cntlzd 3,3 ; addi 3,3,-1 ; xori 3,3,63 */
19591 emit_insn (gen_clzdi2 (tmp_reg_src2, tmp_reg_src2));
19592 emit_insn (gen_adddi3 (tmp_reg_src2, tmp_reg_src2, GEN_INT (-1)));
19593 emit_insn (gen_xordi3 (tmp_reg_src2, tmp_reg_src2, GEN_INT (63)));
19594 emit_insn (gen_movsi (target, gen_lowpart (SImode, tmp_reg_src2)));
19595 }
19596
19597 if (final_label)
19598 emit_label (final_label);
19599
19600 gcc_assert (bytes == 0);
19601 return true;
19602 }
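
/* Editor's host-side model (illustrative, excluded from the build) of the
   cntlzd/addi/xori conversion above: interpreting the DImode subtract
   result as signed, it produces a value whose <0 / ==0 / >0 property
   survives truncation to 32 bits.  */
#if 0
#include <stdio.h>
#include <stdint.h>

static int32_t
squash_di_to_si (uint64_t x)
{
  /* cntlzd counts leading zeros and defines clz (0) as 64.  */
  int64_t n = (x == 0) ? 64 : __builtin_clzll (x);
  n = n - 1;	/* addi -1: 64 -> 63; msb-set input -> -1 (all ones).  */
  n = n ^ 63;	/* xori 63: 63 -> 0; all ones -> -64; otherwise > 0.  */
  return (int32_t) n;	/* -64, 0, and 1..63 all fit in SImode.  */
}

int
main (void)
{
  printf ("%d %d %d\n",
	  squash_di_to_si (0),			/* 0  */
	  squash_di_to_si (5),			/* positive (3)  */
	  squash_di_to_si (~(uint64_t) 0));	/* negative (-64)  */
  return 0;
}
#endif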
19603
19604 /* Generate alignment check and branch code to set up for
19605 strncmp when we don't have DI alignment.
19606 STRNCMP_LABEL is the label to branch if there is a page crossing.
19607 SRC is the string pointer to be examined.
19608 BYTES is the max number of bytes to compare. */
19609 static void
19610 expand_strncmp_align_check (rtx strncmp_label, rtx src, HOST_WIDE_INT bytes)
19611 {
19612 rtx lab_ref = gen_rtx_LABEL_REF (VOIDmode, strncmp_label);
19613 rtx src_check = copy_addr_to_reg (XEXP (src, 0));
19614 if (GET_MODE (src_check) == SImode)
19615 emit_insn (gen_andsi3 (src_check, src_check, GEN_INT (0xfff)));
19616 else
19617 emit_insn (gen_anddi3 (src_check, src_check, GEN_INT (0xfff)));
19618 rtx cond = gen_reg_rtx (CCmode);
19619 emit_move_insn (cond, gen_rtx_COMPARE (CCmode, src_check,
19620 GEN_INT (4096 - bytes)));
19621
19622 rtx cmp_rtx = gen_rtx_LT (VOIDmode, cond, const0_rtx);
19623
19624 rtx ifelse = gen_rtx_IF_THEN_ELSE (VOIDmode, cmp_rtx,
19625 pc_rtx, lab_ref);
19626 rtx j = emit_jump_insn (gen_rtx_SET (pc_rtx, ifelse));
19627 JUMP_LABEL (j) = strncmp_label;
19628 LABEL_NUSES (strncmp_label) += 1;
19629 }
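
/* Editor's host-side model of the check above (illustrative, excluded
   from the build): the branch to the strncmp fallback is taken when the
   offset of SRC within its 4 KiB page leaves at most BYTES bytes before
   the page boundary (conservative by one byte at the limit).  */
#if 0
#include <stdbool.h>
#include <stdint.h>

static bool
takes_strncmp_fallback (uintptr_t addr, long bytes)
{
  /* Mirrors the LT compare: fall through to the inline compare only
     when (addr & 0xfff) < 4096 - bytes.  */
  return (addr & 0xfff) >= (uintptr_t) (4096 - bytes);
}

int
main (void)
{
  /* Page offset 0xff0 with a 32-byte read would cross into the next
     page, so the fallback is taken.  */
  return takes_strncmp_fallback (0xff0, 32) ? 0 : 1;
}
#endif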
19630
19631 /* Expand a string compare operation with length, and return
19632 true if successful. Return false if we should let the
19633 compiler generate normal code, probably a strncmp call.
19634
19635 OPERANDS[0] is the target (result).
19636 OPERANDS[1] is the first source.
19637 OPERANDS[2] is the second source.
19638 OPERANDS[3] is the length.
19639 OPERANDS[4] is the alignment in bytes. */
19640 bool
19641 expand_strn_compare (rtx operands[])
19642 {
19643 rtx target = operands[0];
19644 rtx orig_src1 = operands[1];
19645 rtx orig_src2 = operands[2];
19646 rtx bytes_rtx = operands[3];
19647 rtx align_rtx = operands[4];
19648 HOST_WIDE_INT cmp_bytes = 0;
19649 rtx src1 = orig_src1;
19650 rtx src2 = orig_src2;
19651
19652 /* If this is not a fixed size compare, just call strncmp. */
19653 if (!CONST_INT_P (bytes_rtx))
19654 return false;
19655
19656 /* This must be a fixed size alignment. */
19657 if (!CONST_INT_P (align_rtx))
19658 return false;
19659
19660 int base_align = INTVAL (align_rtx);
19661 int align1 = MEM_ALIGN (orig_src1) / BITS_PER_UNIT;
19662 int align2 = MEM_ALIGN (orig_src2) / BITS_PER_UNIT;
19663
19664 /* SLOW_UNALIGNED_ACCESS -- don't do unaligned stuff. */
19665 if (SLOW_UNALIGNED_ACCESS (word_mode, align1)
19666 || SLOW_UNALIGNED_ACCESS (word_mode, align2))
19667 return false;
19668
19669 gcc_assert (GET_MODE (target) == SImode);
19670
19671 HOST_WIDE_INT bytes = INTVAL (bytes_rtx);
19672
19673 /* If we have an LE target without ldbrx and word_mode is DImode,
19674 then we must avoid using word_mode. */
19675 int word_mode_ok = !(!BYTES_BIG_ENDIAN && !TARGET_LDBRX
19676 && word_mode == DImode);
19677
19678 int word_mode_size = GET_MODE_SIZE (word_mode);
19679
19680 int offset = 0;
19681 machine_mode load_mode =
19682 select_block_compare_mode (offset, bytes, base_align, word_mode_ok);
19683 int load_mode_size = GET_MODE_SIZE (load_mode);
19684
19685 /* We don't want to generate too much code. Also if bytes is
19686 4096 or larger we always want the library strncmp anyway. */
19687 int groups = ROUND_UP (bytes, load_mode_size) / load_mode_size;
19688 if (bytes >= 4096 || groups > rs6000_string_compare_inline_limit)
19689 return false;
19690
19691 rtx result_reg = gen_reg_rtx (word_mode);
19692 rtx final_move_label = gen_label_rtx ();
19693 rtx final_label = gen_label_rtx ();
19694 rtx begin_compare_label = NULL;
19695
19696 if (base_align < 8)
19697 {
19698 /* Generate code that checks distance to 4k boundary for this case. */
19699 begin_compare_label = gen_label_rtx ();
19700 rtx strncmp_label = gen_label_rtx ();
19701 rtx jmp;
19702
19703 /* Strncmp for power8 in glibc does this:
19704 rldicl r8,r3,0,52
19705 cmpldi cr7,r8,4096-16
19706 bgt cr7,L(pagecross) */
19707
19708 if (align1 < 8)
19709 expand_strncmp_align_check (strncmp_label, src1, bytes);
19710 if (align2 < 8)
19711 expand_strncmp_align_check (strncmp_label, src2, bytes);
19712
19713 /* Now generate the following sequence:
19714 - branch to begin_compare
19715 - strncmp_label
19716 - call to strncmp
19717 - branch to final_label
19718 - begin_compare_label */
19719
19720 rtx cmp_ref = gen_rtx_LABEL_REF (VOIDmode, begin_compare_label);
19721 jmp = emit_jump_insn (gen_rtx_SET (pc_rtx, cmp_ref));
19722 JUMP_LABEL (jmp) = begin_compare_label;
19723 LABEL_NUSES (begin_compare_label) += 1;
19724 emit_barrier ();
19725
19726 emit_label (strncmp_label);
19727
19728 if (!REG_P (XEXP (src1, 0)))
19729 {
19730 rtx src1_reg = copy_addr_to_reg (XEXP (src1, 0));
19731 src1 = replace_equiv_address (src1, src1_reg);
19732 }
19733
19734 if (!REG_P (XEXP (src2, 0)))
19735 {
19736 rtx src2_reg = copy_addr_to_reg (XEXP (src2, 0));
19737 src2 = replace_equiv_address (src2, src2_reg);
19738 }
19739
19740 /* -m32 -mpowerpc64 results in word_mode being DImode even
19741 though otherwise it is 32-bit. The length arg to strncmp
19742 is a size_t which will be the same size as pointers. */
19743 rtx len_rtx;
19744 if (TARGET_64BIT)
19745 len_rtx = gen_reg_rtx (DImode);
19746 else
19747 len_rtx = gen_reg_rtx (SImode);
19748
19749 emit_move_insn (len_rtx, bytes_rtx);
19750
19751 emit_library_call_value (gen_rtx_SYMBOL_REF (Pmode, "strncmp"),
19752 target, LCT_NORMAL, GET_MODE (target), 3,
19753 force_reg (Pmode, XEXP (src1, 0)), Pmode,
19754 force_reg (Pmode, XEXP (src2, 0)), Pmode,
19755 len_rtx, GET_MODE (len_rtx));
19756
19757 rtx fin_ref = gen_rtx_LABEL_REF (VOIDmode, final_label);
19758 jmp = emit_jump_insn (gen_rtx_SET (pc_rtx, fin_ref));
19759 JUMP_LABEL (jmp) = final_label;
19760 LABEL_NUSES (final_label) += 1;
19761 emit_barrier ();
19762 emit_label (begin_compare_label);
19763 }
19764
19765 rtx cleanup_label = NULL;
19766 rtx tmp_reg_src1 = gen_reg_rtx (word_mode);
19767 rtx tmp_reg_src2 = gen_reg_rtx (word_mode);
19768
19769 /* Generate sequence of ld/ldbrx, cmpb to compare out
19770 to the length specified. */
19771 while (bytes > 0)
19772 {
19773 /* Compare sequence:
19774 check each 8B with: ld/ld cmpd bne
19775 cleanup code at end:
19776 cmpb get byte that differs
19777 cmpb look for zero byte
19778 orc combine
19779 cntlzd get bit of first zero/diff byte
19780 subfic convert for rldcl use
19781 rldcl rldcl extract diff/zero byte
19782 subf subtract for final result
19783
19784 The last compare can branch around the cleanup code if the
19785 result is zero because the strings are exactly equal. */
19786 int align = compute_current_alignment (base_align, offset);
19787 if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
19788 load_mode = select_block_compare_mode (offset, bytes, align,
19789 word_mode_ok);
19790 else
19791 load_mode = select_block_compare_mode (0, bytes, align, word_mode_ok);
19792 load_mode_size = GET_MODE_SIZE (load_mode);
19793 if (bytes >= load_mode_size)
19794 cmp_bytes = load_mode_size;
19795 else if (TARGET_EFFICIENT_OVERLAPPING_UNALIGNED)
19796 {
19797 /* Move this load back so it doesn't go past the end.
19798 P8/P9 can do this efficiently. */
19799 int extra_bytes = load_mode_size - bytes;
19800 cmp_bytes = bytes;
19801 if (extra_bytes < offset)
19802 {
19803 offset -= extra_bytes;
19804 cmp_bytes = load_mode_size;
19805 bytes = cmp_bytes;
19806 }
19807 }
19808 else
19809 /* P7 and earlier can't do the overlapping load trick fast,
19810 so this forces a non-overlapping load and a shift to get
19811 rid of the extra bytes. */
19812 cmp_bytes = bytes;
19813
19814 src1 = adjust_address (orig_src1, load_mode, offset);
19815 src2 = adjust_address (orig_src2, load_mode, offset);
19816
19817 if (!REG_P (XEXP (src1, 0)))
19818 {
19819 rtx src1_reg = copy_addr_to_reg (XEXP (src1, 0));
19820 src1 = replace_equiv_address (src1, src1_reg);
19821 }
19822 set_mem_size (src1, cmp_bytes);
19823
19824 if (!REG_P (XEXP (src2, 0)))
19825 {
19826 rtx src2_reg = copy_addr_to_reg (XEXP (src2, 0));
19827 src2 = replace_equiv_address (src2, src2_reg);
19828 }
19829 set_mem_size (src2, cmp_bytes);
19830
19831 do_load_for_compare (tmp_reg_src1, src1, load_mode);
19832 do_load_for_compare (tmp_reg_src2, src2, load_mode);
19833
19834 /* We must always left-align the data we read, and
19835 clear any bytes to the right that are beyond the string.
19836 Otherwise the cmpb sequence won't produce the correct
19837 results. The beginning of the compare will be done
19838 with word_mode, so it will not need any extra shifts or
19839 clear-right operations. */
19840
19841 if (load_mode_size < word_mode_size)
19842 {
19843 /* Rotate left first. */
19844 rtx sh = GEN_INT (BITS_PER_UNIT * (word_mode_size - load_mode_size));
19845 if (word_mode == DImode)
19846 {
19847 emit_insn (gen_rotldi3 (tmp_reg_src1, tmp_reg_src1, sh));
19848 emit_insn (gen_rotldi3 (tmp_reg_src2, tmp_reg_src2, sh));
19849 }
19850 else
19851 {
19852 emit_insn (gen_rotlsi3 (tmp_reg_src1, tmp_reg_src1, sh));
19853 emit_insn (gen_rotlsi3 (tmp_reg_src2, tmp_reg_src2, sh));
19854 }
19855 }
19856
19857 if (cmp_bytes < word_mode_size)
19858 {
19859 /* Now clear right. This plus the rotate can be
19860 turned into a rldicr instruction. */
19861 HOST_WIDE_INT mb = BITS_PER_UNIT * (word_mode_size - cmp_bytes);
19862 rtx mask = GEN_INT (HOST_WIDE_INT_M1U << mb);
19863 if (word_mode == DImode)
19864 {
19865 emit_insn (gen_anddi3_mask (tmp_reg_src1, tmp_reg_src1, mask));
19866 emit_insn (gen_anddi3_mask (tmp_reg_src2, tmp_reg_src2, mask));
19867 }
19868 else
19869 {
19870 emit_insn (gen_andsi3_mask (tmp_reg_src1, tmp_reg_src1, mask));
19871 emit_insn (gen_andsi3_mask (tmp_reg_src2, tmp_reg_src2, mask));
19872 }
19873 }
19874
19875 int remain = bytes - cmp_bytes;
19876
19877 rtx dst_label;
19878 if (remain > 0)
19879 {
19880 if (!cleanup_label)
19881 cleanup_label = gen_label_rtx ();
19882 dst_label = cleanup_label;
19883 }
19884 else
19885 dst_label = final_move_label;
19886
19887 rtx lab_ref = gen_rtx_LABEL_REF (VOIDmode, dst_label);
19888 rtx cond = gen_reg_rtx (CCmode);
19889
19890 if (remain == 0)
19891 {
19892 /* For the last chunk, subf. also
19893 generates the zero result we need. */
19894 rtx tmp = gen_rtx_MINUS (word_mode, tmp_reg_src1, tmp_reg_src2);
19895 rs6000_emit_dot_insn (result_reg, tmp, 1, cond);
19896 }
19897 else
19898 emit_move_insn (cond, gen_rtx_COMPARE (CCmode,
19899 tmp_reg_src1, tmp_reg_src2));
19900
19901 rtx cmp_rtx;
19902 if (remain > 0)
19903 cmp_rtx = gen_rtx_NE (VOIDmode, cond, const0_rtx);
19904 else
19905 cmp_rtx = gen_rtx_EQ (VOIDmode, cond, const0_rtx);
19906
19907 rtx ifelse = gen_rtx_IF_THEN_ELSE (VOIDmode, cmp_rtx,
19908 lab_ref, pc_rtx);
19909 rtx j = emit_jump_insn (gen_rtx_SET (pc_rtx, ifelse));
19910 JUMP_LABEL (j) = dst_label;
19911 LABEL_NUSES (dst_label) += 1;
19912
19913 offset += cmp_bytes;
19914 bytes -= cmp_bytes;
19915 }
19916
19917 if (cleanup_label)
19918 emit_label (cleanup_label);
19919
19920 /* Generate the final sequence that identifies the differing
19921 byte and generates the final result, taking into account
19922 zero bytes:
19923
19924 cmpb cmpb_result1, src1, src2
19925 cmpb cmpb_result2, src1, zero
19926 orc cmpb_result1, cmpb_result1, cmpb_result2
19927 cntlzd get bit of first zero/diff byte
19928 addi convert for rldcl use
19929 rldcl rldcl extract diff/zero byte
19930 subf subtract for final result
19931 */
19932
19933 rtx cmpb_diff = gen_reg_rtx (word_mode);
19934 rtx cmpb_zero = gen_reg_rtx (word_mode);
19935 rtx rot_amt = gen_reg_rtx (word_mode);
19936 rtx zero_reg = gen_reg_rtx (word_mode);
19937
19938 rtx rot1_1 = gen_reg_rtx (word_mode);
19939 rtx rot1_2 = gen_reg_rtx (word_mode);
19940 rtx rot2_1 = gen_reg_rtx (word_mode);
19941 rtx rot2_2 = gen_reg_rtx (word_mode);
19942
19943 if (word_mode == SImode)
19944 {
19945 emit_insn (gen_cmpbsi3 (cmpb_diff, tmp_reg_src1, tmp_reg_src2));
19946 emit_insn (gen_movsi (zero_reg, GEN_INT (0)));
19947 emit_insn (gen_cmpbsi3 (cmpb_zero, tmp_reg_src1, zero_reg));
19948 emit_insn (gen_one_cmplsi2 (cmpb_diff, cmpb_diff));
19949 emit_insn (gen_iorsi3 (cmpb_diff, cmpb_diff, cmpb_zero));
19950 emit_insn (gen_clzsi2 (rot_amt, cmpb_diff));
19951 emit_insn (gen_addsi3 (rot_amt, rot_amt, GEN_INT (8)));
19952 emit_insn (gen_rotlsi3 (rot1_1, tmp_reg_src1,
19953 gen_lowpart (SImode, rot_amt)));
19954 emit_insn (gen_andsi3_mask (rot1_2, rot1_1, GEN_INT (0xff)));
19955 emit_insn (gen_rotlsi3 (rot2_1, tmp_reg_src2,
19956 gen_lowpart (SImode, rot_amt)));
19957 emit_insn (gen_andsi3_mask (rot2_2, rot2_1, GEN_INT (0xff)));
19958 emit_insn (gen_subsi3 (result_reg, rot1_2, rot2_2));
19959 }
19960 else
19961 {
19962 emit_insn (gen_cmpbdi3 (cmpb_diff, tmp_reg_src1, tmp_reg_src2));
19963 emit_insn (gen_movdi (zero_reg, GEN_INT (0)));
19964 emit_insn (gen_cmpbdi3 (cmpb_zero, tmp_reg_src1, zero_reg));
19965 emit_insn (gen_one_cmpldi2 (cmpb_diff, cmpb_diff));
19966 emit_insn (gen_iordi3 (cmpb_diff, cmpb_diff, cmpb_zero));
19967 emit_insn (gen_clzdi2 (rot_amt, cmpb_diff));
19968 emit_insn (gen_adddi3 (rot_amt, rot_amt, GEN_INT (8)));
19969 emit_insn (gen_rotldi3 (rot1_1, tmp_reg_src1,
19970 gen_lowpart (SImode, rot_amt)));
19971 emit_insn (gen_anddi3_mask (rot1_2, rot1_1, GEN_INT (0xff)));
19972 emit_insn (gen_rotldi3 (rot2_1, tmp_reg_src2,
19973 gen_lowpart (SImode, rot_amt)));
19974 emit_insn (gen_anddi3_mask (rot2_2, rot2_1, GEN_INT (0xff)));
19975 emit_insn (gen_subdi3 (result_reg, rot1_2, rot2_2));
19976 }
19977
19978 emit_label (final_move_label);
19979 emit_insn (gen_movsi (target,
19980 gen_lowpart (SImode, result_reg)));
19981 emit_label (final_label);
19982 return true;
19983 }
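
/* Editor's host-side model (illustrative, excluded from the build) of the
   cmpb cleanup sequence above: cmpb sets a byte lane to 0xff where its
   operands match, so combining one cmpb against the other string with one
   against zero locates the first differing or NUL byte.  */
#if 0
#include <stdio.h>
#include <stdint.h>

static uint64_t
cmpb (uint64_t a, uint64_t b)
{
  uint64_t r = 0;
  for (int i = 0; i < 8; i++)
    {
      uint64_t lane = (uint64_t) 0xff << (8 * i);
      if ((a & lane) == (b & lane))
	r |= lane;
    }
  return r;
}

static uint64_t
rotl64 (uint64_t x, unsigned r)
{
  r &= 63;			/* PowerPC rotate amounts are mod 64.  */
  return r ? (x << r) | (x >> (64 - r)) : x;
}

/* SRC1/SRC2 hold up to 8 string bytes each, most significant first,
   as produced by do_load_for_compare.  */
static int
strncmp_cleanup (uint64_t src1, uint64_t src2)
{
  uint64_t diff = ~cmpb (src1, src2) | cmpb (src1, 0);
  if (diff == 0)
    return 0;			/* Equal and no NUL: handled earlier.  */
  unsigned rot = __builtin_clzll (diff) + 8;	/* cntlzd + addi 8.  */
  int b1 = rotl64 (src1, rot) & 0xff;		/* rldcl + mask.  */
  int b2 = rotl64 (src2, rot) & 0xff;
  return b1 - b2;				/* subf.  */
}

int
main (void)
{
  /* "abcdefgh" vs "abcdEfgh": first difference at byte 4, 'e' - 'E'.  */
  printf ("%d\n", strncmp_cleanup (0x6162636465666768ull,
				   0x6162636445666768ull));	/* 32 */
  return 0;
}
#endif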
19984
19985 /* Expand a block move operation, and return 1 if successful. Return 0
19986 if we should let the compiler generate normal code.
19987
19988 operands[0] is the destination
19989 operands[1] is the source
19990 operands[2] is the length
19991 operands[3] is the alignment */
19992
19993 #define MAX_MOVE_REG 4
19994
19995 int
19996 expand_block_move (rtx operands[])
19997 {
19998 rtx orig_dest = operands[0];
19999 rtx orig_src = operands[1];
20000 rtx bytes_rtx = operands[2];
20001 rtx align_rtx = operands[3];
20002 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
20003 int align;
20004 int bytes;
20005 int offset;
20006 int move_bytes;
20007 rtx stores[MAX_MOVE_REG];
20008 int num_reg = 0;
20009
20010 /* If this is not a fixed size move, just call memcpy */
20011 if (! constp)
20012 return 0;
20013
20014 /* This must be a fixed size alignment */
20015 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
20016 align = INTVAL (align_rtx) * BITS_PER_UNIT;
20017
20018 /* Anything to move? */
20019 bytes = INTVAL (bytes_rtx);
20020 if (bytes <= 0)
20021 return 1;
20022
20023 if (bytes > rs6000_block_move_inline_limit)
20024 return 0;
20025
20026 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
20027 {
20028 union {
20029 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
20030 rtx (*mov) (rtx, rtx);
20031 } gen_func;
20032 machine_mode mode = BLKmode;
20033 rtx src, dest;
20034
20035 /* Altivec first, since it will be faster than a string move
20036 when it applies, and usually not significantly larger. */
20037 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
20038 {
20039 move_bytes = 16;
20040 mode = V4SImode;
20041 gen_func.mov = gen_movv4si;
20042 }
20043 else if (TARGET_SPE && bytes >= 8 && align >= 64)
20044 {
20045 move_bytes = 8;
20046 mode = V2SImode;
20047 gen_func.mov = gen_movv2si;
20048 }
20049 else if (TARGET_STRING
20050 && bytes > 24 /* move up to 32 bytes at a time */
20051 && ! fixed_regs[5]
20052 && ! fixed_regs[6]
20053 && ! fixed_regs[7]
20054 && ! fixed_regs[8]
20055 && ! fixed_regs[9]
20056 && ! fixed_regs[10]
20057 && ! fixed_regs[11]
20058 && ! fixed_regs[12])
20059 {
20060 move_bytes = (bytes > 32) ? 32 : bytes;
20061 gen_func.movmemsi = gen_movmemsi_8reg;
20062 }
20063 else if (TARGET_STRING
20064 && bytes > 16 /* move up to 24 bytes at a time */
20065 && ! fixed_regs[5]
20066 && ! fixed_regs[6]
20067 && ! fixed_regs[7]
20068 && ! fixed_regs[8]
20069 && ! fixed_regs[9]
20070 && ! fixed_regs[10])
20071 {
20072 move_bytes = (bytes > 24) ? 24 : bytes;
20073 gen_func.movmemsi = gen_movmemsi_6reg;
20074 }
20075 else if (TARGET_STRING
20076 && bytes > 8 /* move up to 16 bytes at a time */
20077 && ! fixed_regs[5]
20078 && ! fixed_regs[6]
20079 && ! fixed_regs[7]
20080 && ! fixed_regs[8])
20081 {
20082 move_bytes = (bytes > 16) ? 16 : bytes;
20083 gen_func.movmemsi = gen_movmemsi_4reg;
20084 }
20085 else if (bytes >= 8 && TARGET_POWERPC64
20086 && (align >= 64 || !STRICT_ALIGNMENT))
20087 {
20088 move_bytes = 8;
20089 mode = DImode;
20090 gen_func.mov = gen_movdi;
20091 if (offset == 0 && align < 64)
20092 {
20093 rtx addr;
20094
20095 /* If the address form is reg+offset with offset not a
20096 multiple of four, reload into reg indirect form here
20097 rather than waiting for reload. This way we get one
20098 reload, not one per load and/or store. */
20099 addr = XEXP (orig_dest, 0);
20100 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
20101 && GET_CODE (XEXP (addr, 1)) == CONST_INT
20102 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
20103 {
20104 addr = copy_addr_to_reg (addr);
20105 orig_dest = replace_equiv_address (orig_dest, addr);
20106 }
20107 addr = XEXP (orig_src, 0);
20108 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
20109 && GET_CODE (XEXP (addr, 1)) == CONST_INT
20110 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
20111 {
20112 addr = copy_addr_to_reg (addr);
20113 orig_src = replace_equiv_address (orig_src, addr);
20114 }
20115 }
20116 }
20117 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
20118 { /* move up to 8 bytes at a time */
20119 move_bytes = (bytes > 8) ? 8 : bytes;
20120 gen_func.movmemsi = gen_movmemsi_2reg;
20121 }
20122 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
20123 { /* move 4 bytes */
20124 move_bytes = 4;
20125 mode = SImode;
20126 gen_func.mov = gen_movsi;
20127 }
20128 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
20129 { /* move 2 bytes */
20130 move_bytes = 2;
20131 mode = HImode;
20132 gen_func.mov = gen_movhi;
20133 }
20134 else if (TARGET_STRING && bytes > 1)
20135 { /* move up to 4 bytes at a time */
20136 move_bytes = (bytes > 4) ? 4 : bytes;
20137 gen_func.movmemsi = gen_movmemsi_1reg;
20138 }
20139 else /* move 1 byte at a time */
20140 {
20141 move_bytes = 1;
20142 mode = QImode;
20143 gen_func.mov = gen_movqi;
20144 }
20145
20146 src = adjust_address (orig_src, mode, offset);
20147 dest = adjust_address (orig_dest, mode, offset);
20148
20149 if (mode != BLKmode)
20150 {
20151 rtx tmp_reg = gen_reg_rtx (mode);
20152
20153 emit_insn ((*gen_func.mov) (tmp_reg, src));
20154 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
20155 }
20156
20157 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
20158 {
20159 int i;
20160 for (i = 0; i < num_reg; i++)
20161 emit_insn (stores[i]);
20162 num_reg = 0;
20163 }
20164
20165 if (mode == BLKmode)
20166 {
20167 /* Move the address into scratch registers. The movmemsi
20168 patterns require zero offset. */
20169 if (!REG_P (XEXP (src, 0)))
20170 {
20171 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
20172 src = replace_equiv_address (src, src_reg);
20173 }
20174 set_mem_size (src, move_bytes);
20175
20176 if (!REG_P (XEXP (dest, 0)))
20177 {
20178 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
20179 dest = replace_equiv_address (dest, dest_reg);
20180 }
20181 set_mem_size (dest, move_bytes);
20182
20183 emit_insn ((*gen_func.movmemsi) (dest, src,
20184 GEN_INT (move_bytes & 31),
20185 align_rtx));
20186 }
20187 }
20188
20189 return 1;
20190 }
20191
20192 \f
20193 /* Return a string to perform a load_multiple operation.
20194 operands[0] is the vector.
20195 operands[1] is the source address.
20196 operands[2] is the first destination register. */
20197
20198 const char *
20199 rs6000_output_load_multiple (rtx operands[3])
20200 {
20201 /* We have to handle the case where the pseudo holding the address
20202 is assigned to one of the output registers. */
20203 int i, j;
20204 int words = XVECLEN (operands[0], 0);
20205 rtx xop[10];
20206
20207 if (XVECLEN (operands[0], 0) == 1)
20208 return "lwz %2,0(%1)";
20209
20210 for (i = 0; i < words; i++)
20211 if (refers_to_regno_p (REGNO (operands[2]) + i, operands[1]))
20212 {
20213 if (i == words-1)
20214 {
20215 xop[0] = GEN_INT (4 * (words-1));
20216 xop[1] = operands[1];
20217 xop[2] = operands[2];
20218 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
20219 return "";
20220 }
20221 else if (i == 0)
20222 {
20223 xop[0] = GEN_INT (4 * (words-1));
20224 xop[1] = operands[1];
20225 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
20226 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
20227 return "";
20228 }
20229 else
20230 {
20231 for (j = 0; j < words; j++)
20232 if (j != i)
20233 {
20234 xop[0] = GEN_INT (j * 4);
20235 xop[1] = operands[1];
20236 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
20237 output_asm_insn ("lwz %2,%0(%1)", xop);
20238 }
20239 xop[0] = GEN_INT (i * 4);
20240 xop[1] = operands[1];
20241 output_asm_insn ("lwz %1,%0(%1)", xop);
20242 return "";
20243 }
20244 }
20245
20246 return "lswi %2,%1,%N0";
20247 }
20248
20249 \f
20250 /* A validation routine: say whether CODE, a condition code, and MODE
20251 match. The other alternatives either don't make sense or should
20252 never be generated. */
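/* For instance, (GTU, CCUNSmode) passes: unsigned comparisons must use
   CCUNSmode. (GTU, CCmode) or (GT, CCUNSmode) trip the first two asserts
   below, and (UNORDERED, CCmode) trips the CCFPmode assert, since only
   floating point comparisons carry ordering information. */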
20253
20254 void
20255 validate_condition_mode (enum rtx_code code, machine_mode mode)
20256 {
20257 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
20258 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
20259 && GET_MODE_CLASS (mode) == MODE_CC);
20260
20261 /* These don't make sense. */
20262 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
20263 || mode != CCUNSmode);
20264
20265 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
20266 || mode == CCUNSmode);
20267
20268 gcc_assert (mode == CCFPmode
20269 || (code != ORDERED && code != UNORDERED
20270 && code != UNEQ && code != LTGT
20271 && code != UNGT && code != UNLT
20272 && code != UNGE && code != UNLE));
20273
20274 /* These should never be generated except for
20275 flag_finite_math_only. */
20276 gcc_assert (mode != CCFPmode
20277 || flag_finite_math_only
20278 || (code != LE && code != GE
20279 && code != UNEQ && code != LTGT
20280 && code != UNGT && code != UNLT));
20281
20282 /* These are invalid; the information is not there. */
20283 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
20284 }
20285
20286 \f
20287 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
20288 rldicl, rldicr, or rldic instruction in mode MODE. If so, and if E is
20289 non-null, store there the bit offset (counted from the right) where
20290 the single stretch of 1 bits begins; and similarly for B, the bit
20291 offset where it ends. */
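/* For example, MASK = 0x00000ff0 in SImode is a single stretch of ones
   running from bit 4 up to bit 11 (counted from the right), so the
   function returns true with *E = 4 and *B = 11. A mask with more than
   one stretch, such as 0x00000f0f, makes exact_log2 fail below and the
   function returns false. */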
20292
20293 bool
20294 rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
20295 {
20296 unsigned HOST_WIDE_INT val = INTVAL (mask);
20297 unsigned HOST_WIDE_INT bit;
20298 int nb, ne;
20299 int n = GET_MODE_PRECISION (mode);
20300
20301 if (mode != DImode && mode != SImode)
20302 return false;
20303
20304 if (INTVAL (mask) >= 0)
20305 {
20306 bit = val & -val;
20307 ne = exact_log2 (bit);
20308 nb = exact_log2 (val + bit);
20309 }
20310 else if (val + 1 == 0)
20311 {
20312 nb = n;
20313 ne = 0;
20314 }
20315 else if (val & 1)
20316 {
20317 val = ~val;
20318 bit = val & -val;
20319 nb = exact_log2 (bit);
20320 ne = exact_log2 (val + bit);
20321 }
20322 else
20323 {
20324 bit = val & -val;
20325 ne = exact_log2 (bit);
20326 if (val + bit == 0)
20327 nb = n;
20328 else
20329 nb = 0;
20330 }
20331
20332 nb--;
20333
20334 if (nb < 0 || ne < 0 || nb >= n || ne >= n)
20335 return false;
20336
20337 if (b)
20338 *b = nb;
20339 if (e)
20340 *e = ne;
20341
20342 return true;
20343 }
20344
20345 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
20346 or rldicr instruction, to implement an AND with it in mode MODE. */
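/* For example, in DImode 0x00000000000000ff (rldicl) and
   0xff00000000000000 (rldicr) are valid AND masks, while
   0x0000ffff00000000 is not: its stretch of ones touches neither end of
   the register and begins above bit 31, so no single rl* insn can clear
   both sides. */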
20347
20348 bool
20349 rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
20350 {
20351 int nb, ne;
20352
20353 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
20354 return false;
20355
20356 /* For DImode, we need a rldicl, rldicr, or a rlwinm with a mask that
20357 does not wrap. */
20358 if (mode == DImode)
20359 return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));
20360
20361 /* For SImode, rlwinm can do everything. */
20362 if (mode == SImode)
20363 return (nb < 32 && ne < 32);
20364
20365 return false;
20366 }
20367
20368 /* Return the instruction template for an AND with mask in mode MODE, with
20369 operands OPERANDS. If DOT is true, make it a record-form instruction. */
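/* A sketch of the output (register numbers are illustrative): for
   operands[2] = 0xff00000000000000 in DImode this returns
   "rldicr %0,%1,0,%3" with operands[3] = 7, i.e. "rldicr 3,4,0,7",
   which keeps the top eight bits and clears the rest. */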
20370
20371 const char *
20372 rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
20373 {
20374 int nb, ne;
20375
20376 if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
20377 gcc_unreachable ();
20378
20379 if (mode == DImode && ne == 0)
20380 {
20381 operands[3] = GEN_INT (63 - nb);
20382 if (dot)
20383 return "rldicl. %0,%1,0,%3";
20384 return "rldicl %0,%1,0,%3";
20385 }
20386
20387 if (mode == DImode && nb == 63)
20388 {
20389 operands[3] = GEN_INT (63 - ne);
20390 if (dot)
20391 return "rldicr. %0,%1,0,%3";
20392 return "rldicr %0,%1,0,%3";
20393 }
20394
20395 if (nb < 32 && ne < 32)
20396 {
20397 operands[3] = GEN_INT (31 - nb);
20398 operands[4] = GEN_INT (31 - ne);
20399 if (dot)
20400 return "rlwinm. %0,%1,0,%3,%4";
20401 return "rlwinm %0,%1,0,%3,%4";
20402 }
20403
20404 gcc_unreachable ();
20405 }
20406
20407 /* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
20408 rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
20409 shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE. */
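/* For example, (ashift x 8) with mask 0xffffff00 in SImode is valid: the
   rotate-left-by-8 wraps the top 8 bits into the low byte, and the mask
   clears exactly those wrapped positions. (ashift x 8) with mask
   0x000000f0 is rejected below (ne < sh): a rotate puts wrapped bits,
   not zeros, into the positions below the shift count. */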
20410
20411 bool
20412 rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
20413 {
20414 int nb, ne;
20415
20416 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
20417 return false;
20418
20419 int n = GET_MODE_PRECISION (mode);
20420 int sh = -1;
20421
20422 if (CONST_INT_P (XEXP (shift, 1)))
20423 {
20424 sh = INTVAL (XEXP (shift, 1));
20425 if (sh < 0 || sh >= n)
20426 return false;
20427 }
20428
20429 rtx_code code = GET_CODE (shift);
20430
20431 /* Convert any shift by 0 to a rotate, to simplify the code below. */
20432 if (sh == 0)
20433 code = ROTATE;
20434
20435 /* Convert rotate to simple shift if we can, to make analysis simpler. */
20436 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
20437 code = ASHIFT;
20438 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
20439 {
20440 code = LSHIFTRT;
20441 sh = n - sh;
20442 }
20443
20444 /* DImode rotates need rld*. */
20445 if (mode == DImode && code == ROTATE)
20446 return (nb == 63 || ne == 0 || ne == sh);
20447
20448 /* SImode rotates need rlw*. */
20449 if (mode == SImode && code == ROTATE)
20450 return (nb < 32 && ne < 32 && sh < 32);
20451
20452 /* Wrap-around masks are only okay for rotates. */
20453 if (ne > nb)
20454 return false;
20455
20456 /* Variable shifts are only okay for rotates. */
20457 if (sh < 0)
20458 return false;
20459
20460 /* Don't allow ASHIFT if the mask is wrong for that. */
20461 if (code == ASHIFT && ne < sh)
20462 return false;
20463
20464 /* If we can do it with an rlw*, we can do it. Don't allow LSHIFTRT
20465 if the mask is wrong for that. */
20466 if (nb < 32 && ne < 32 && sh < 32
20467 && !(code == LSHIFTRT && nb >= 32 - sh))
20468 return true;
20469
20470 /* If we can do it with an rld*, we can do it. Don't allow LSHIFTRT
20471 if the mask is wrong for that. */
20472 if (code == LSHIFTRT)
20473 sh = 64 - sh;
20474 if (nb == 63 || ne == 0 || ne == sh)
20475 return !(code == LSHIFTRT && nb >= sh);
20476
20477 return false;
20478 }
20479
20480 /* Return the instruction template for a shift with mask in mode MODE, with
20481 operands OPERANDS. If DOT is true, make it a record-form instruction. */
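/* A sketch of the output (register numbers are illustrative): for an
   SImode (ashift x 8) with mask 0xffffff00, nb = 31 and ne = 8, so this
   returns "rlwinm %0,%1,%h2,%3,%4" as e.g. "rlwinm 3,4,8,0,23". The %I2
   in the templates selects the immediate form (rlwinm/rldicl) for a
   constant shift count and the register form (rlwnm/rldcl) otherwise. */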
20482
20483 const char *
20484 rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
20485 {
20486 int nb, ne;
20487
20488 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
20489 gcc_unreachable ();
20490
20491 if (mode == DImode && ne == 0)
20492 {
20493 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
20494 operands[2] = GEN_INT (64 - INTVAL (operands[2]));
20495 operands[3] = GEN_INT (63 - nb);
20496 if (dot)
20497 return "rld%I2cl. %0,%1,%2,%3";
20498 return "rld%I2cl %0,%1,%2,%3";
20499 }
20500
20501 if (mode == DImode && nb == 63)
20502 {
20503 operands[3] = GEN_INT (63 - ne);
20504 if (dot)
20505 return "rld%I2cr. %0,%1,%2,%3";
20506 return "rld%I2cr %0,%1,%2,%3";
20507 }
20508
20509 if (mode == DImode
20510 && GET_CODE (operands[4]) != LSHIFTRT
20511 && CONST_INT_P (operands[2])
20512 && ne == INTVAL (operands[2]))
20513 {
20514 operands[3] = GEN_INT (63 - nb);
20515 if (dot)
20516 return "rld%I2c. %0,%1,%2,%3";
20517 return "rld%I2c %0,%1,%2,%3";
20518 }
20519
20520 if (nb < 32 && ne < 32)
20521 {
20522 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
20523 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
20524 operands[3] = GEN_INT (31 - nb);
20525 operands[4] = GEN_INT (31 - ne);
20526 /* This insn can also be a 64-bit rotate with mask that really makes
20527 it just a shift right (with mask); the %h below are to adjust for
20528 that situation (shift count is >= 32 in that case). */
20529 if (dot)
20530 return "rlw%I2nm. %0,%1,%h2,%3,%4";
20531 return "rlw%I2nm %0,%1,%h2,%3,%4";
20532 }
20533
20534 gcc_unreachable ();
20535 }
20536
20537 /* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
20538 rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
20539 ASHIFT, or LSHIFTRT) in mode MODE. */
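/* For example, (ashift x 8) with mask 0x00ffff00 in SImode is a valid
   insert: ne == sh == 8, so a 16-bit field from the source lands exactly
   under the mask. If the mask reached below the shift count (ne < sh),
   the rotate would wrap source bits into those positions, so the insert
   is rejected. */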
20540
20541 bool
20542 rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
20543 {
20544 int nb, ne;
20545
20546 if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
20547 return false;
20548
20549 int n = GET_MODE_PRECISION (mode);
20550
20551 int sh = INTVAL (XEXP (shift, 1));
20552 if (sh < 0 || sh >= n)
20553 return false;
20554
20555 rtx_code code = GET_CODE (shift);
20556
20557 /* Convert any shift by 0 to a rotate, to simplify the code below. */
20558 if (sh == 0)
20559 code = ROTATE;
20560
20561 /* Convert rotate to simple shift if we can, to make analysis simpler. */
20562 if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
20563 code = ASHIFT;
20564 if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
20565 {
20566 code = LSHIFTRT;
20567 sh = n - sh;
20568 }
20569
20570 /* DImode rotates need rldimi. */
20571 if (mode == DImode && code == ROTATE)
20572 return (ne == sh);
20573
20574 /* SImode rotates need rlwimi. */
20575 if (mode == SImode && code == ROTATE)
20576 return (nb < 32 && ne < 32 && sh < 32);
20577
20578 /* Wrap-around masks are only okay for rotates. */
20579 if (ne > nb)
20580 return false;
20581
20582 /* Don't allow ASHIFT if the mask is wrong for that. */
20583 if (code == ASHIFT && ne < sh)
20584 return false;
20585
20586 /* If we can do it with an rlwimi, we can do it. Don't allow LSHIFTRT
20587 if the mask is wrong for that. */
20588 if (nb < 32 && ne < 32 && sh < 32
20589 && !(code == LSHIFTRT && nb >= 32 - sh))
20590 return true;
20591
20592 /* If we can do it with an rldimi, we can do it. Don't allow LSHIFTRT
20593 if the mask is wrong for that. */
20594 if (code == LSHIFTRT)
20595 sh = 64 - sh;
20596 if (ne == sh)
20597 return !(code == LSHIFTRT && nb >= sh);
20598
20599 return false;
20600 }
20601
20602 /* Return the instruction template for an insert with mask in mode MODE, with
20603 operands OPERANDS. If DOT is true, make it a record-form instruction. */
20604
20605 const char *
20606 rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
20607 {
20608 int nb, ne;
20609
20610 if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
20611 gcc_unreachable ();
20612
20613 /* Prefer rldimi because rlwimi is cracked. */
20614 if (TARGET_POWERPC64
20615 && (!dot || mode == DImode)
20616 && GET_CODE (operands[4]) != LSHIFTRT
20617 && ne == INTVAL (operands[2]))
20618 {
20619 operands[3] = GEN_INT (63 - nb);
20620 if (dot)
20621 return "rldimi. %0,%1,%2,%3";
20622 return "rldimi %0,%1,%2,%3";
20623 }
20624
20625 if (nb < 32 && ne < 32)
20626 {
20627 if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
20628 operands[2] = GEN_INT (32 - INTVAL (operands[2]));
20629 operands[3] = GEN_INT (31 - nb);
20630 operands[4] = GEN_INT (31 - ne);
20631 if (dot)
20632 return "rlwimi. %0,%1,%2,%3,%4";
20633 return "rlwimi %0,%1,%2,%3,%4";
20634 }
20635
20636 gcc_unreachable ();
20637 }
20638
20639 /* Return whether an AND with C (a CONST_INT) in mode MODE can be done
20640 using two machine instructions. */
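/* A worked example of the hole-filling test below: for C = 0x0ff000ff in
   SImode, bit1 = 1, bit2 = 0x100, val1 = 0x0ff00000 and bit3 = 0x100000,
   so the filled constant val + bit3 - bit2 is 0x0fffffff, a single
   stretch of ones; the AND is doable in two insns. */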
20641
20642 bool
20643 rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
20644 {
20645 /* There are two kinds of AND we can handle with two insns:
20646 1) those we can do with two rl* insns;
20647 2) ori[s];xori[s].
20648
20649 We do not handle that last case yet. */
20650
20651 /* If there is just one stretch of ones, we can do it. */
20652 if (rs6000_is_valid_mask (c, NULL, NULL, mode))
20653 return true;
20654
20655 /* Otherwise, fill in the lowest "hole"; if we can do the result with
20656 one insn, we can do the whole thing with two. */
20657 unsigned HOST_WIDE_INT val = INTVAL (c);
20658 unsigned HOST_WIDE_INT bit1 = val & -val;
20659 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
20660 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
20661 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
20662 return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
20663 }
20664
20665 /* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
20666 If EXPAND is true, split rotate-and-mask instructions we generate to
20667 their constituent parts as well (this is used during expand); if DOT
20668 is 1, make the last insn a record-form instruction clobbering the
20669 destination GPR and setting the CC reg (from operands[3]); if 2, set
20670 that GPR as well as the CC reg. */
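/* A sketch of the first case below: for C = 0x0000ffff00000000 in DImode
   the stretch of ones touches neither end of the register, so no single
   rldicl/rldicr mask works. Shifting left by 16 moves the stretch up
   against bit 63, where 0xffff000000000000 is a valid rldicr mask; the
   value is then shifted back right by 16. */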
20671
20672 void
20673 rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
20674 {
20675 gcc_assert (!(expand && dot));
20676
20677 unsigned HOST_WIDE_INT val = INTVAL (operands[2]);
20678
20679 /* If it is one non-wrapping stretch of ones, the mode must be DImode; shift
20680 left, mask, then shift right. This generates better code than doing the
20681 masks without shifts, or shifting first right and then left. */
20682 int nb, ne;
20683 if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
20684 {
20685 gcc_assert (mode == DImode);
20686
20687 int shift = 63 - nb;
20688 if (expand)
20689 {
20690 rtx tmp1 = gen_reg_rtx (DImode);
20691 rtx tmp2 = gen_reg_rtx (DImode);
20692 emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
20693 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
20694 emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
20695 }
20696 else
20697 {
20698 rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
20699 tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
20700 emit_move_insn (operands[0], tmp);
20701 tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
20702 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
20703 }
20704 return;
20705 }
20706
20707 /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
20708 that does the rest. */
20709 unsigned HOST_WIDE_INT bit1 = val & -val;
20710 unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
20711 unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
20712 unsigned HOST_WIDE_INT bit3 = val1 & -val1;
20713
20714 unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
20715 unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;
20716
20717 gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));
20718
20719 /* Two "no-rotate"-and-mask instructions, for SImode. */
20720 if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
20721 {
20722 gcc_assert (mode == SImode);
20723
20724 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
20725 rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
20726 emit_move_insn (reg, tmp);
20727 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
20728 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
20729 return;
20730 }
20731
20732 gcc_assert (mode == DImode);
20733
20734 /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
20735 insns; we have to do the first in SImode, because it wraps. */
20736 if (mask2 <= 0xffffffff
20737 && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
20738 {
20739 rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
20740 rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
20741 GEN_INT (mask1));
20742 rtx reg_low = gen_lowpart (SImode, reg);
20743 emit_move_insn (reg_low, tmp);
20744 tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
20745 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
20746 return;
20747 }
20748
20749 /* Two rld* insns: rotate, clear the hole in the middle (which now is
20750 at the top end), rotate back and clear the other hole. */
20751 int right = exact_log2 (bit3);
20752 int left = 64 - right;
20753
20754 /* Rotate the mask too. */
20755 mask1 = (mask1 >> right) | ((bit2 - 1) << left);
20756
20757 if (expand)
20758 {
20759 rtx tmp1 = gen_reg_rtx (DImode);
20760 rtx tmp2 = gen_reg_rtx (DImode);
20761 rtx tmp3 = gen_reg_rtx (DImode);
20762 emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
20763 emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
20764 emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
20765 emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
20766 }
20767 else
20768 {
20769 rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
20770 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
20771 emit_move_insn (operands[0], tmp);
20772 tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
20773 tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
20774 rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
20775 }
20776 }
20777 \f
20778 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
20779 for lfq and stfq insns iff the registers are hard registers. */
20780
20781 int
20782 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
20783 {
20784 /* We might have been passed a SUBREG. */
20785 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
20786 return 0;
20787
20788 /* We might have been passed non-floating-point registers. */
20789 if (!FP_REGNO_P (REGNO (reg1))
20790 || !FP_REGNO_P (REGNO (reg2)))
20791 return 0;
20792
20793 return (REGNO (reg1) == REGNO (reg2) - 1);
20794 }
20795
20796 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
20797 addr1 and addr2 must be in consecutive memory locations
20798 (addr2 == addr1 + 8). */
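/* For example (illustrative registers), the pair 16(r9) and 24(r9)
   qualifies: same base register, offsets 8 apart. 16(r9) with 24(r10),
   or 16(r9) with 20(r9), does not. */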
20799
20800 int
20801 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
20802 {
20803 rtx addr1, addr2;
20804 unsigned int reg1, reg2;
20805 int offset1, offset2;
20806
20807 /* The mems cannot be volatile. */
20808 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
20809 return 0;
20810
20811 addr1 = XEXP (mem1, 0);
20812 addr2 = XEXP (mem2, 0);
20813
20814 /* Extract an offset (if used) from the first addr. */
20815 if (GET_CODE (addr1) == PLUS)
20816 {
20817 /* If not a REG, return zero. */
20818 if (GET_CODE (XEXP (addr1, 0)) != REG)
20819 return 0;
20820 else
20821 {
20822 reg1 = REGNO (XEXP (addr1, 0));
20823 /* The offset must be constant! */
20824 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
20825 return 0;
20826 offset1 = INTVAL (XEXP (addr1, 1));
20827 }
20828 }
20829 else if (GET_CODE (addr1) != REG)
20830 return 0;
20831 else
20832 {
20833 reg1 = REGNO (addr1);
20834 /* This was a simple (mem (reg)) expression. Offset is 0. */
20835 offset1 = 0;
20836 }
20837
20838 /* And now for the second addr. */
20839 if (GET_CODE (addr2) == PLUS)
20840 {
20841 /* If not a REG, return zero. */
20842 if (GET_CODE (XEXP (addr2, 0)) != REG)
20843 return 0;
20844 else
20845 {
20846 reg2 = REGNO (XEXP (addr2, 0));
20847 /* The offset must be constant. */
20848 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
20849 return 0;
20850 offset2 = INTVAL (XEXP (addr2, 1));
20851 }
20852 }
20853 else if (GET_CODE (addr2) != REG)
20854 return 0;
20855 else
20856 {
20857 reg2 = REGNO (addr2);
20858 /* This was a simple (mem (reg)) expression. Offset is 0. */
20859 offset2 = 0;
20860 }
20861
20862 /* Both of these must have the same base register. */
20863 if (reg1 != reg2)
20864 return 0;
20865
20866 /* The offset for the second addr must be 8 more than the first addr. */
20867 if (offset2 != offset1 + 8)
20868 return 0;
20869
20870 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
20871 instructions. */
20872 return 1;
20873 }
20874 \f
20875
20876 rtx
20877 rs6000_secondary_memory_needed_rtx (machine_mode mode)
20878 {
20879 static bool eliminated = false;
20880 rtx ret;
20881
20882 if (mode != SDmode || TARGET_NO_SDMODE_STACK)
20883 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
20884 else
20885 {
20886 rtx mem = cfun->machine->sdmode_stack_slot;
20887 gcc_assert (mem != NULL_RTX);
20888
20889 if (!eliminated)
20890 {
20891 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
20892 cfun->machine->sdmode_stack_slot = mem;
20893 eliminated = true;
20894 }
20895 ret = mem;
20896 }
20897
20898 if (TARGET_DEBUG_ADDR)
20899 {
20900 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
20901 GET_MODE_NAME (mode));
20902 if (!ret)
20903 fprintf (stderr, "\tNULL_RTX\n");
20904 else
20905 debug_rtx (ret);
20906 }
20907
20908 return ret;
20909 }
20910
20911 /* Return the mode to be used for memory when a secondary memory
20912 location is needed. For SDmode values we need to use DDmode; in
20913 all other cases we can use the same mode. */
20914 machine_mode
20915 rs6000_secondary_memory_needed_mode (machine_mode mode)
20916 {
20917 if (lra_in_progress && mode == SDmode)
20918 return DDmode;
20919 return mode;
20920 }
20921
20922 static tree
20923 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
20924 {
20925 /* Don't walk into types. */
20926 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
20927 {
20928 *walk_subtrees = 0;
20929 return NULL_TREE;
20930 }
20931
20932 switch (TREE_CODE (*tp))
20933 {
20934 case VAR_DECL:
20935 case PARM_DECL:
20936 case FIELD_DECL:
20937 case RESULT_DECL:
20938 case SSA_NAME:
20939 case REAL_CST:
20940 case MEM_REF:
20941 case VIEW_CONVERT_EXPR:
20942 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
20943 return *tp;
20944 break;
20945 default:
20946 break;
20947 }
20948
20949 return NULL_TREE;
20950 }
20951
20952 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
20953 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
20954 only work on the traditional altivec registers, note if an altivec register
20955 was chosen. */
20956
20957 static enum rs6000_reg_type
20958 register_to_reg_type (rtx reg, bool *is_altivec)
20959 {
20960 HOST_WIDE_INT regno;
20961 enum reg_class rclass;
20962
20963 if (GET_CODE (reg) == SUBREG)
20964 reg = SUBREG_REG (reg);
20965
20966 if (!REG_P (reg))
20967 return NO_REG_TYPE;
20968
20969 regno = REGNO (reg);
20970 if (regno >= FIRST_PSEUDO_REGISTER)
20971 {
20972 if (!lra_in_progress && !reload_in_progress && !reload_completed)
20973 return PSEUDO_REG_TYPE;
20974
20975 regno = true_regnum (reg);
20976 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
20977 return PSEUDO_REG_TYPE;
20978 }
20979
20980 gcc_assert (regno >= 0);
20981
20982 if (is_altivec && ALTIVEC_REGNO_P (regno))
20983 *is_altivec = true;
20984
20985 rclass = rs6000_regno_regclass[regno];
20986 return reg_class_to_reg_type[(int)rclass];
20987 }
20988
20989 /* Helper function to return the cost of adding a TOC entry address. */
20990
20991 static inline int
20992 rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
20993 {
20994 int ret;
20995
20996 if (TARGET_CMODEL != CMODEL_SMALL)
20997 ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
20998
20999 else
21000 ret = (TARGET_MINIMAL_TOC) ? 6 : 3;
21001
21002 return ret;
21003 }
21004
21005 /* Helper function for rs6000_secondary_reload to determine whether the memory
21006 address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
21007 needs reloading. Return negative if the memory is not handled by the memory
21008 helper functions and a different reload method should be tried, 0 if no
21009 additional instructions are needed, and positive to give the extra cost for
21010 memory. */
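/* For instance, a PRE_INC address for a mode/class combination without
   auto-update support costs 1 (one explicit add is emitted later by the
   reload helper), while an address form the helpers cannot fix up at all
   yields -1 and the caller falls back to the default reload handling. */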
21011
21012 static int
21013 rs6000_secondary_reload_memory (rtx addr,
21014 enum reg_class rclass,
21015 machine_mode mode)
21016 {
21017 int extra_cost = 0;
21018 rtx reg, and_arg, plus_arg0, plus_arg1;
21019 addr_mask_type addr_mask;
21020 const char *type = NULL;
21021 const char *fail_msg = NULL;
21022
21023 if (GPR_REG_CLASS_P (rclass))
21024 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
21025
21026 else if (rclass == FLOAT_REGS)
21027 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
21028
21029 else if (rclass == ALTIVEC_REGS)
21030 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
21031
21032 /* For the combined VSX_REGS, turn off Altivec AND -16. */
21033 else if (rclass == VSX_REGS)
21034 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
21035 & ~RELOAD_REG_AND_M16);
21036
21037 /* If the register allocator hasn't made up its mind yet on the register
21038 class to use, settle on sensible defaults. */
21039 else if (rclass == NO_REGS)
21040 {
21041 addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
21042 & ~RELOAD_REG_AND_M16);
21043
21044 if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
21045 addr_mask &= ~(RELOAD_REG_INDEXED
21046 | RELOAD_REG_PRE_INCDEC
21047 | RELOAD_REG_PRE_MODIFY);
21048 }
21049
21050 else
21051 addr_mask = 0;
21052
21053 /* If the register isn't valid in this register class, just return now. */
21054 if ((addr_mask & RELOAD_REG_VALID) == 0)
21055 {
21056 if (TARGET_DEBUG_ADDR)
21057 {
21058 fprintf (stderr,
21059 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
21060 "not valid in class\n",
21061 GET_MODE_NAME (mode), reg_class_names[rclass]);
21062 debug_rtx (addr);
21063 }
21064
21065 return -1;
21066 }
21067
21068 switch (GET_CODE (addr))
21069 {
21070 /* Does the register class support auto update forms for this mode? We
21071 don't need a scratch register, since the powerpc only supports
21072 PRE_INC, PRE_DEC, and PRE_MODIFY. */
21073 case PRE_INC:
21074 case PRE_DEC:
21075 reg = XEXP (addr, 0);
21076 if (!base_reg_operand (reg, GET_MODE (reg)))
21077 {
21078 fail_msg = "no base register #1";
21079 extra_cost = -1;
21080 }
21081
21082 else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
21083 {
21084 extra_cost = 1;
21085 type = "update";
21086 }
21087 break;
21088
21089 case PRE_MODIFY:
21090 reg = XEXP (addr, 0);
21091 plus_arg1 = XEXP (addr, 1);
21092 if (!base_reg_operand (reg, GET_MODE (reg))
21093 || GET_CODE (plus_arg1) != PLUS
21094 || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
21095 {
21096 fail_msg = "bad PRE_MODIFY";
21097 extra_cost = -1;
21098 }
21099
21100 else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
21101 {
21102 extra_cost = 1;
21103 type = "update";
21104 }
21105 break;
21106
21107 /* Do we need to simulate AND -16 to clear the bottom address bits used
21108 in VMX load/stores? Only allow the AND for vector sizes. */
21109 case AND:
21110 and_arg = XEXP (addr, 0);
21111 if (GET_MODE_SIZE (mode) != 16
21112 || GET_CODE (XEXP (addr, 1)) != CONST_INT
21113 || INTVAL (XEXP (addr, 1)) != -16)
21114 {
21115 fail_msg = "bad Altivec AND #1";
21116 extra_cost = -1;
21117 }
21118
21119 else if (rclass != ALTIVEC_REGS)
21120 {
21121 if (legitimate_indirect_address_p (and_arg, false))
21122 extra_cost = 1;
21123
21124 else if (legitimate_indexed_address_p (and_arg, false))
21125 extra_cost = 2;
21126
21127 else
21128 {
21129 fail_msg = "bad Altivec AND #2";
21130 extra_cost = -1;
21131 }
21132
21133 type = "and";
21134 }
21135 break;
21136
21137 /* If this is an indirect address, make sure it is a base register. */
21138 case REG:
21139 case SUBREG:
21140 if (!legitimate_indirect_address_p (addr, false))
21141 {
21142 extra_cost = 1;
21143 type = "move";
21144 }
21145 break;
21146
21147 /* If this is an indexed address, make sure the register class can handle
21148 indexed addresses for this mode. */
21149 case PLUS:
21150 plus_arg0 = XEXP (addr, 0);
21151 plus_arg1 = XEXP (addr, 1);
21152
21153 /* (plus (plus (reg) (constant)) (constant)) is generated during
21154 push_reload processing, so handle it now. */
21155 if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
21156 {
21157 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
21158 {
21159 extra_cost = 1;
21160 type = "offset";
21161 }
21162 }
21163
21164 /* (plus (plus (reg) (constant)) (reg)) is also generated during
21165 push_reload processing, so handle it now. */
21166 else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
21167 {
21168 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
21169 {
21170 extra_cost = 1;
21171 type = "indexed #2";
21172 }
21173 }
21174
21175 else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
21176 {
21177 fail_msg = "no base register #2";
21178 extra_cost = -1;
21179 }
21180
21181 else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
21182 {
21183 if ((addr_mask & RELOAD_REG_INDEXED) == 0
21184 || !legitimate_indexed_address_p (addr, false))
21185 {
21186 extra_cost = 1;
21187 type = "indexed";
21188 }
21189 }
21190
21191 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
21192 && CONST_INT_P (plus_arg1))
21193 {
21194 if (!quad_address_offset_p (INTVAL (plus_arg1)))
21195 {
21196 extra_cost = 1;
21197 type = "vector d-form offset";
21198 }
21199 }
21200
21201 /* Make sure the register class can handle offset addresses. */
21202 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
21203 {
21204 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
21205 {
21206 extra_cost = 1;
21207 type = "offset #2";
21208 }
21209 }
21210
21211 else
21212 {
21213 fail_msg = "bad PLUS";
21214 extra_cost = -1;
21215 }
21216
21217 break;
21218
21219 case LO_SUM:
21220 /* Quad offsets are restricted and can't handle normal addresses. */
21221 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
21222 {
21223 extra_cost = -1;
21224 type = "vector d-form lo_sum";
21225 }
21226
21227 else if (!legitimate_lo_sum_address_p (mode, addr, false))
21228 {
21229 fail_msg = "bad LO_SUM";
21230 extra_cost = -1;
21231 }
21232
21233 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
21234 {
21235 extra_cost = 1;
21236 type = "lo_sum";
21237 }
21238 break;
21239
21240 /* Static addresses need to create a TOC entry. */
21241 case CONST:
21242 case SYMBOL_REF:
21243 case LABEL_REF:
21244 if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
21245 {
21246 extra_cost = -1;
21247 type = "vector d-form lo_sum #2";
21248 }
21249
21250 else
21251 {
21252 type = "address";
21253 extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
21254 }
21255 break;
21256
21257 /* TOC references look like offsetable memory. */
21258 case UNSPEC:
21259 if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
21260 {
21261 fail_msg = "bad UNSPEC";
21262 extra_cost = -1;
21263 }
21264
21265 else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
21266 {
21267 extra_cost = -1;
21268 type = "vector d-form lo_sum #3";
21269 }
21270
21271 else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
21272 {
21273 extra_cost = 1;
21274 type = "toc reference";
21275 }
21276 break;
21277
21278 default:
21279 {
21280 fail_msg = "bad address";
21281 extra_cost = -1;
21282 }
21283 }
21284
21285 if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
21286 {
21287 if (extra_cost < 0)
21288 fprintf (stderr,
21289 "rs6000_secondary_reload_memory error: mode = %s, "
21290 "class = %s, addr_mask = '%s', %s\n",
21291 GET_MODE_NAME (mode),
21292 reg_class_names[rclass],
21293 rs6000_debug_addr_mask (addr_mask, false),
21294 (fail_msg != NULL) ? fail_msg : "<bad address>");
21295
21296 else
21297 fprintf (stderr,
21298 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
21299 "addr_mask = '%s', extra cost = %d, %s\n",
21300 GET_MODE_NAME (mode),
21301 reg_class_names[rclass],
21302 rs6000_debug_addr_mask (addr_mask, false),
21303 extra_cost,
21304 (type) ? type : "<none>");
21305
21306 debug_rtx (addr);
21307 }
21308
21309 return extra_cost;
21310 }
21311
21312 /* Helper function for rs6000_secondary_reload to return true if a move to a
21313 different register class is really a simple move. */
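/* For example, with ISA 2.07 direct moves enabled (e.g. -mcpu=power8) in
   64-bit mode, a DImode move between a GPR and a VSX register is
   "simple": it is a single mtvsrd or mfvsrd and needs no extra register,
   so this returns true. */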
21314
21315 static bool
21316 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
21317 enum rs6000_reg_type from_type,
21318 machine_mode mode)
21319 {
21320 int size = GET_MODE_SIZE (mode);
21321
21322 /* Add support for various direct moves available. In this function, we only
21323 look at cases where we don't need any extra registers, and one or more
21324 simple move insns are issued. Historically, small integers were not allowed
21325 in FPR/VSX registers. Single precision binary floating point is not a simple
21326 move because we need to convert to the single precision memory layout.
21327 The 4-byte SDmode can be moved. TDmode values are disallowed since they
21328 need special direct move handling, which we do not support yet. */
21329 if (TARGET_DIRECT_MOVE
21330 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
21331 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
21332 {
21333 if (TARGET_POWERPC64)
21334 {
21335 /* ISA 2.07: MTVSRD or MFVSRD. */
21336 if (size == 8)
21337 return true;
21338
21339 /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD. */
21340 if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
21341 return true;
21342 }
21343
21344 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
21345 if (TARGET_VSX_SMALL_INTEGER)
21346 {
21347 if (mode == SImode)
21348 return true;
21349
21350 if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
21351 return true;
21352 }
21353
21354 /* ISA 2.07: MTVSRWZ or MFVSRWZ. */
21355 if (mode == SDmode)
21356 return true;
21357 }
21358
21359 /* Power6+: MFTGPR or MFFGPR. */
21360 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
21361 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
21362 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
21363 return true;
21364
21365 /* Move to/from SPR. */
21366 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
21367 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
21368 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
21369 return true;
21370
21371 return false;
21372 }
21373
21374 /* Direct move helper function for rs6000_secondary_reload. Handle all of the
21375 special direct moves that involve allocating an extra register. Return true
21376 if there is such a helper, recording its insn code and extra cost in SRI;
21377 return false if not. */
21378
21379 static bool
21380 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
21381 enum rs6000_reg_type from_type,
21382 machine_mode mode,
21383 secondary_reload_info *sri,
21384 bool altivec_p)
21385 {
21386 bool ret = false;
21387 enum insn_code icode = CODE_FOR_nothing;
21388 int cost = 0;
21389 int size = GET_MODE_SIZE (mode);
21390
21391 if (TARGET_POWERPC64 && size == 16)
21392 {
21393 /* Handle moving 128-bit values from GPRs to VSX registers on
21394 ISA 2.07 (power8, power9) when running in 64-bit mode using
21395 XXPERMDI to glue the two 64-bit values back together. */
21396 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
21397 {
21398 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
21399 icode = reg_addr[mode].reload_vsx_gpr;
21400 }
21401
21402 /* Handle moving 128-bit values from VSX registers to GPRs on
21403 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to the
21404 bottom 64-bit value. */
21405 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
21406 {
21407 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
21408 icode = reg_addr[mode].reload_gpr_vsx;
21409 }
21410 }
21411
21412 else if (TARGET_POWERPC64 && mode == SFmode)
21413 {
21414 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
21415 {
21416 cost = 3; /* xscvdpspn, mfvsrd, and. */
21417 icode = reg_addr[mode].reload_gpr_vsx;
21418 }
21419
21420 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
21421 {
21422 cost = 2; /* mtvsrwz, xscvspdpn. */
21423 icode = reg_addr[mode].reload_vsx_gpr;
21424 }
21425 }
21426
21427 else if (!TARGET_POWERPC64 && size == 8)
21428 {
21429 /* Handle moving 64-bit values from GPRs to floating point registers on
21430 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
21431 32-bit values back together. Altivec register classes must be handled
21432 specially since a different instruction is used, and the secondary
21433 reload support requires a single instruction class in the scratch
21434 register constraint. However, right now TFmode is not allowed in
21435 Altivec registers, so the pattern will never match. */
21436 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
21437 {
21438 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
21439 icode = reg_addr[mode].reload_fpr_gpr;
21440 }
21441 }
21442
21443 if (icode != CODE_FOR_nothing)
21444 {
21445 ret = true;
21446 if (sri)
21447 {
21448 sri->icode = icode;
21449 sri->extra_cost = cost;
21450 }
21451 }
21452
21453 return ret;
21454 }
21455
21456 /* Return whether a move between two register classes can be done either
21457 directly (simple move) or via a pattern that uses a single extra temporary
21458 (using ISA 2.07's direct move in this case). */
21459
21460 static bool
21461 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
21462 enum rs6000_reg_type from_type,
21463 machine_mode mode,
21464 secondary_reload_info *sri,
21465 bool altivec_p)
21466 {
21467 /* Fall back to load/store reloads if either type is not a register. */
21468 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
21469 return false;
21470
21471 /* If we haven't allocated registers yet, assume the move can be done for the
21472 standard register types. */
21473 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
21474 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
21475 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
21476 return true;
21477
21478 /* A move within the same set of registers is a simple move for
21479 non-specialized registers. */
21480 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
21481 return true;
21482
21483 /* Check whether a simple move can be done directly. */
21484 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
21485 {
21486 if (sri)
21487 {
21488 sri->icode = CODE_FOR_nothing;
21489 sri->extra_cost = 0;
21490 }
21491 return true;
21492 }
21493
21494 /* Now check if we can do it in a few steps. */
21495 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
21496 altivec_p);
21497 }
21498
21499 /* Inform reload about cases where moving X with a mode MODE to a register in
21500 RCLASS requires an extra scratch or immediate register. Return the class
21501 needed for the immediate register.
21502
21503 For VSX and Altivec, we may need a register to convert sp+offset into
21504 reg+sp.
21505
21506 For misaligned 64-bit gpr loads and stores we need a register to
21507 convert an offset address to indirect. */
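/* For example (illustrative operands), an 8-byte GPR load on a 64-bit
   target from "6(r3)" has an offset that is not a multiple of four, which
   the DS-form ld instruction cannot encode; the code below requests
   CODE_FOR_reload_di_load with a scratch register so the address can be
   made indirect. */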
21508
21509 static reg_class_t
21510 rs6000_secondary_reload (bool in_p,
21511 rtx x,
21512 reg_class_t rclass_i,
21513 machine_mode mode,
21514 secondary_reload_info *sri)
21515 {
21516 enum reg_class rclass = (enum reg_class) rclass_i;
21517 reg_class_t ret = ALL_REGS;
21518 enum insn_code icode;
21519 bool default_p = false;
21520 bool done_p = false;
21521
21522 /* Allow subreg of memory before/during reload. */
21523 bool memory_p = (MEM_P (x)
21524 || (!reload_completed && GET_CODE (x) == SUBREG
21525 && MEM_P (SUBREG_REG (x))));
21526
21527 sri->icode = CODE_FOR_nothing;
21528 sri->t_icode = CODE_FOR_nothing;
21529 sri->extra_cost = 0;
21530 icode = ((in_p)
21531 ? reg_addr[mode].reload_load
21532 : reg_addr[mode].reload_store);
21533
21534 if (REG_P (x) || register_operand (x, mode))
21535 {
21536 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
21537 bool altivec_p = (rclass == ALTIVEC_REGS);
21538 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
21539
21540 if (!in_p)
21541 std::swap (to_type, from_type);
21542
21543 /* Can we do a direct move of some sort? */
21544 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
21545 altivec_p))
21546 {
21547 icode = (enum insn_code)sri->icode;
21548 default_p = false;
21549 done_p = true;
21550 ret = NO_REGS;
21551 }
21552 }
21553
21554 /* Make sure 0.0 is not reloaded or forced into memory. */
21555 if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
21556 {
21557 ret = NO_REGS;
21558 default_p = false;
21559 done_p = true;
21560 }
21561
21562 /* If this is a scalar floating point value and we want to load it into the
21563 traditional Altivec registers, move it via a traditional floating
21564 point register, unless we have D-form addressing. Also make sure that
21565 non-zero constants use a FPR. */
21566 if (!done_p && reg_addr[mode].scalar_in_vmx_p
21567 && !mode_supports_vmx_dform (mode)
21568 && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
21569 && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
21570 {
21571 ret = FLOAT_REGS;
21572 default_p = false;
21573 done_p = true;
21574 }
21575
21576 /* Handle reload of load/stores if we have reload helper functions. */
21577 if (!done_p && icode != CODE_FOR_nothing && memory_p)
21578 {
21579 int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
21580 mode);
21581
21582 if (extra_cost >= 0)
21583 {
21584 done_p = true;
21585 ret = NO_REGS;
21586 if (extra_cost > 0)
21587 {
21588 sri->extra_cost = extra_cost;
21589 sri->icode = icode;
21590 }
21591 }
21592 }
21593
21594 /* Handle unaligned loads and stores of integer registers. */
21595 if (!done_p && TARGET_POWERPC64
21596 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
21597 && memory_p
21598 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
21599 {
21600 rtx addr = XEXP (x, 0);
21601 rtx off = address_offset (addr);
21602
21603 if (off != NULL_RTX)
21604 {
21605 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
21606 unsigned HOST_WIDE_INT offset = INTVAL (off);
21607
21608 /* We need a secondary reload when our legitimate_address_p
21609 says the address is good (as otherwise the entire address
21610 will be reloaded), and the offset is not a multiple of
21611 four or we have an address wrap. Address wrap will only
21612 occur for LO_SUMs since legitimate_offset_address_p
21613 rejects addresses for 16-byte mems that will wrap. */
21614 if (GET_CODE (addr) == LO_SUM
21615 ? (1 /* legitimate_address_p allows any offset for lo_sum */
21616 && ((offset & 3) != 0
21617 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
21618 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
21619 && (offset & 3) != 0))
21620 {
21621 /* -m32 -mpowerpc64 needs to use a 32-bit scratch register. */
21622 if (in_p)
21623 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
21624 : CODE_FOR_reload_di_load);
21625 else
21626 sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
21627 : CODE_FOR_reload_di_store);
21628 sri->extra_cost = 2;
21629 ret = NO_REGS;
21630 done_p = true;
21631 }
21632 else
21633 default_p = true;
21634 }
21635 else
21636 default_p = true;
21637 }
21638
21639 if (!done_p && !TARGET_POWERPC64
21640 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
21641 && memory_p
21642 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
21643 {
21644 rtx addr = XEXP (x, 0);
21645 rtx off = address_offset (addr);
21646
21647 if (off != NULL_RTX)
21648 {
21649 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
21650 unsigned HOST_WIDE_INT offset = INTVAL (off);
21651
21652 /* We need a secondary reload when our legitimate_address_p
21653 says the address is good (as otherwise the entire address
21654 will be reloaded), and we have a wrap.
21655
21656 legitimate_lo_sum_address_p allows LO_SUM addresses to
21657 have any offset so test for wrap in the low 16 bits.
21658
21659 legitimate_offset_address_p checks for the range
21660 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
21661 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
21662 [0x7ff4,0x7fff] respectively, so test for the
21663 intersection of these ranges, [0x7ffc,0x7fff] and
21664 [0x7ff4,0x7ff7] respectively.
21665
21666 Note that the address we see here may have been
21667 manipulated by legitimize_reload_address. */
21668 if (GET_CODE (addr) == LO_SUM
21669 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
21670 : offset - (0x8000 - extra) < UNITS_PER_WORD)
21671 {
21672 if (in_p)
21673 sri->icode = CODE_FOR_reload_si_load;
21674 else
21675 sri->icode = CODE_FOR_reload_si_store;
21676 sri->extra_cost = 2;
21677 ret = NO_REGS;
21678 done_p = true;
21679 }
21680 else
21681 default_p = true;
21682 }
21683 else
21684 default_p = true;
21685 }
21686
21687 if (!done_p)
21688 default_p = true;
21689
21690 if (default_p)
21691 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
21692
21693 gcc_assert (ret != ALL_REGS);
21694
21695 if (TARGET_DEBUG_ADDR)
21696 {
21697 fprintf (stderr,
21698 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
21699 "mode = %s",
21700 reg_class_names[ret],
21701 in_p ? "true" : "false",
21702 reg_class_names[rclass],
21703 GET_MODE_NAME (mode));
21704
21705 if (reload_completed)
21706 fputs (", after reload", stderr);
21707
21708 if (!done_p)
21709 fputs (", done_p not set", stderr);
21710
21711 if (default_p)
21712 fputs (", default secondary reload", stderr);
21713
21714 if (sri->icode != CODE_FOR_nothing)
21715 fprintf (stderr, ", reload func = %s, extra cost = %d",
21716 insn_data[sri->icode].name, sri->extra_cost);
21717
21718 else if (sri->extra_cost > 0)
21719 fprintf (stderr, ", extra cost = %d", sri->extra_cost);
21720
21721 fputs ("\n", stderr);
21722 debug_rtx (x);
21723 }
21724
21725 return ret;
21726 }
21727
21728 /* Better tracing for rs6000_secondary_reload_inner. */
21729
21730 static void
21731 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
21732 bool store_p)
21733 {
21734 rtx set, clobber;
21735
21736 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
21737
21738 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
21739 store_p ? "store" : "load");
21740
21741 if (store_p)
21742 set = gen_rtx_SET (mem, reg);
21743 else
21744 set = gen_rtx_SET (reg, mem);
21745
21746 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
21747 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
21748 }
21749
21750 static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
21751 ATTRIBUTE_NORETURN;
21752
21753 static void
21754 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
21755 bool store_p)
21756 {
21757 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
21758 gcc_unreachable ();
21759 }
21760
21761 /* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
21762 reload helper functions. These were identified in
21763 rs6000_secondary_reload_memory, and if reload decided to use the secondary
21764 reload, it calls the insns:
21765 reload_<RELOAD:mode>_<P:mptrsize>_store
21766 reload_<RELOAD:mode>_<P:mptrsize>_load
21767
21768 which in turn calls this function, to do whatever is necessary to create
21769 valid addresses. */
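/* For example, if the reloaded access still has a bare SYMBOL_REF
   address, the SYMBOL_REF case below moves the address into SCRATCH and
   the load or store is then done indirect through SCRATCH. */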
21770
21771 void
21772 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
21773 {
21774 int regno = true_regnum (reg);
21775 machine_mode mode = GET_MODE (reg);
21776 addr_mask_type addr_mask;
21777 rtx addr;
21778 rtx new_addr;
21779 rtx op_reg, op0, op1;
21780 rtx and_op;
21781 rtx cc_clobber;
21782 rtvec rv;
21783
21784 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
21785 || !base_reg_operand (scratch, GET_MODE (scratch)))
21786 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
21787
21788 if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
21789 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];
21790
21791 else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
21792 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];
21793
21794 else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
21795 addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];
21796
21797 else
21798 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
21799
21800 /* Make sure the mode is valid in this register class. */
21801 if ((addr_mask & RELOAD_REG_VALID) == 0)
21802 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
21803
21804 if (TARGET_DEBUG_ADDR)
21805 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
21806
21807 new_addr = addr = XEXP (mem, 0);
21808 switch (GET_CODE (addr))
21809 {
21810 /* Does the register class support auto update forms for this mode? If
21811 not, do the update now. We don't need a scratch register, since the
21812 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY. */
21813 case PRE_INC:
21814 case PRE_DEC:
21815 op_reg = XEXP (addr, 0);
21816 if (!base_reg_operand (op_reg, Pmode))
21817 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
21818
21819 if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
21820 {
21821 emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
21822 new_addr = op_reg;
21823 }
21824 break;
21825
21826 case PRE_MODIFY:
21827 op0 = XEXP (addr, 0);
21828 op1 = XEXP (addr, 1);
21829 if (!base_reg_operand (op0, Pmode)
21830 || GET_CODE (op1) != PLUS
21831 || !rtx_equal_p (op0, XEXP (op1, 0)))
21832 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
21833
21834 if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
21835 {
21836 emit_insn (gen_rtx_SET (op0, op1));
21837 new_addr = reg;
21838 }
21839 break;
21840
21841 /* Do we need to simulate AND -16 to clear the bottom address bits used
21842 in VMX load/stores? */
21843 case AND:
21844 op0 = XEXP (addr, 0);
21845 op1 = XEXP (addr, 1);
21846 if ((addr_mask & RELOAD_REG_AND_M16) == 0)
21847 {
21848 if (REG_P (op0) || GET_CODE (op0) == SUBREG)
21849 op_reg = op0;
21850
21851 else if (GET_CODE (op1) == PLUS)
21852 {
21853 emit_insn (gen_rtx_SET (scratch, op1));
21854 op_reg = scratch;
21855 }
21856
21857 else
21858 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
21859
21860 and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
21861 cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
21862 rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
21863 emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
21864 new_addr = scratch;
21865 }
21866 break;
21867
21868 /* If this is an indirect address, make sure it is a base register. */
21869 case REG:
21870 case SUBREG:
21871 if (!base_reg_operand (addr, GET_MODE (addr)))
21872 {
21873 emit_insn (gen_rtx_SET (scratch, addr));
21874 new_addr = scratch;
21875 }
21876 break;
21877
21878 /* If this is an indexed address, make sure the register class can handle
21879 indexed addresses for this mode. */
21880 case PLUS:
21881 op0 = XEXP (addr, 0);
21882 op1 = XEXP (addr, 1);
21883 if (!base_reg_operand (op0, Pmode))
21884 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
21885
21886 else if (int_reg_operand (op1, Pmode))
21887 {
21888 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
21889 {
21890 emit_insn (gen_rtx_SET (scratch, addr));
21891 new_addr = scratch;
21892 }
21893 }
21894
21895 else if (mode_supports_vsx_dform_quad (mode) && CONST_INT_P (op1))
21896 {
21897 if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
21898 || !quad_address_p (addr, mode, false))
21899 {
21900 emit_insn (gen_rtx_SET (scratch, addr));
21901 new_addr = scratch;
21902 }
21903 }
21904
21905 /* Make sure the register class can handle offset addresses. */
21906 else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
21907 {
21908 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
21909 {
21910 emit_insn (gen_rtx_SET (scratch, addr));
21911 new_addr = scratch;
21912 }
21913 }
21914
21915 else
21916 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
21917
21918 break;
21919
21920 case LO_SUM:
21921 op0 = XEXP (addr, 0);
21922 op1 = XEXP (addr, 1);
21923 if (!base_reg_operand (op0, Pmode))
21924 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
21925
21926 else if (int_reg_operand (op1, Pmode))
21927 {
21928 if ((addr_mask & RELOAD_REG_INDEXED) == 0)
21929 {
21930 emit_insn (gen_rtx_SET (scratch, addr));
21931 new_addr = scratch;
21932 }
21933 }
21934
21935 /* Quad offsets are restricted and can't handle normal addresses. */
21936 else if (mode_supports_vsx_dform_quad (mode))
21937 {
21938 emit_insn (gen_rtx_SET (scratch, addr));
21939 new_addr = scratch;
21940 }
21941
21942 /* Make sure the register class can handle offset addresses. */
21943 else if (legitimate_lo_sum_address_p (mode, addr, false))
21944 {
21945 if ((addr_mask & RELOAD_REG_OFFSET) == 0)
21946 {
21947 emit_insn (gen_rtx_SET (scratch, addr));
21948 new_addr = scratch;
21949 }
21950 }
21951
21952 else
21953 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
21954
21955 break;
21956
21957 case SYMBOL_REF:
21958 case CONST:
21959 case LABEL_REF:
21960 rs6000_emit_move (scratch, addr, Pmode);
21961 new_addr = scratch;
21962 break;
21963
21964 default:
21965 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
21966 }
21967
21968 /* Adjust the address if it changed. */
21969 if (addr != new_addr)
21970 {
21971 mem = replace_equiv_address_nv (mem, new_addr);
21972 if (TARGET_DEBUG_ADDR)
21973 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
21974 }
21975
21976 /* Now create the move. */
21977 if (store_p)
21978 emit_insn (gen_rtx_SET (mem, reg));
21979 else
21980 emit_insn (gen_rtx_SET (reg, mem));
21981
21982 return;
21983 }
21984
21985 /* Convert reloads involving 64-bit gprs and misaligned offset
21986 addressing, or multiple 32-bit gprs and offsets that are too large,
21987 to use indirect addressing. */
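/* For example, on a 32-bit target an 8-byte GPR-pair access whose second
   word's offset would wrap past the 16-bit displacement range gets its
   address moved into the scratch register and becomes an indirect
   access. */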
21988
21989 void
21990 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
21991 {
21992 int regno = true_regnum (reg);
21993 enum reg_class rclass;
21994 rtx addr;
21995 rtx scratch_or_premodify = scratch;
21996
21997 if (TARGET_DEBUG_ADDR)
21998 {
21999 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
22000 store_p ? "store" : "load");
22001 fprintf (stderr, "reg:\n");
22002 debug_rtx (reg);
22003 fprintf (stderr, "mem:\n");
22004 debug_rtx (mem);
22005 fprintf (stderr, "scratch:\n");
22006 debug_rtx (scratch);
22007 }
22008
22009 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
22010 gcc_assert (GET_CODE (mem) == MEM);
22011 rclass = REGNO_REG_CLASS (regno);
22012 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
22013 addr = XEXP (mem, 0);
22014
22015 if (GET_CODE (addr) == PRE_MODIFY)
22016 {
22017 gcc_assert (REG_P (XEXP (addr, 0))
22018 && GET_CODE (XEXP (addr, 1)) == PLUS
22019 && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
22020 scratch_or_premodify = XEXP (addr, 0);
22021 if (!HARD_REGISTER_P (scratch_or_premodify))
22022 /* If we have a pseudo here then reload will have arranged
22023 to have it replaced, but only in the original insn.
22024 Use the replacement here too. */
22025 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
22026
22027 /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
22028 expressions from the original insn, without unsharing them.
22029 Any RTL that points into the original insn will of course
22030 have register replacements applied. That is why we don't
22031 need to look for replacements under the PLUS. */
22032 addr = XEXP (addr, 1);
22033 }
22034 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
22035
22036 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
22037
22038 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
22039
22040 /* Now create the move. */
22041 if (store_p)
22042 emit_insn (gen_rtx_SET (mem, reg));
22043 else
22044 emit_insn (gen_rtx_SET (reg, mem));
22045
22046 return;
22047 }
22048
22049 /* Allocate a 64-bit stack slot to be used for copying SDmode values through if
22050 this function has any SDmode references. If we are on a power7 or later, we
22051 don't need the 64-bit stack slot since the LFIWZX and STFIWX instructions
22052 can load/store the value. */
22053
22054 static void
22055 rs6000_alloc_sdmode_stack_slot (void)
22056 {
22057 tree t;
22058 basic_block bb;
22059 gimple_stmt_iterator gsi;
22060
22061 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
22062 /* We use a different approach for dealing with the secondary
22063 memory in LRA. */
22064 if (ira_use_lra_p)
22065 return;
22066
22067 if (TARGET_NO_SDMODE_STACK)
22068 return;
22069
22070 FOR_EACH_BB_FN (bb, cfun)
22071 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
22072 {
22073 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
22074 if (ret)
22075 {
22076 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
22077 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
22078 SDmode, 0);
22079 return;
22080 }
22081 }
22082
22083 /* Check for any SDmode parameters of the function. */
22084 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
22085 {
22086 if (TREE_TYPE (t) == error_mark_node)
22087 continue;
22088
22089 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
22090 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
22091 {
22092 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
22093 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
22094 SDmode, 0);
22095 return;
22096 }
22097 }
22098 }
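
/* Rough illustration: on a pre-power7 target (and without LRA), an
   SDmode value moving between register files is bounced through the
   slot above -- stored into the 8-byte DDmode slot from one register
   and reloaded through its 4-byte SDmode view on the other side.  The
   actual reload sequences are generated elsewhere.  */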
22099
22100 static void
22101 rs6000_instantiate_decls (void)
22102 {
22103 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
22104 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
22105 }
22106
22107 /* Given an rtx X being reloaded into a reg required to be
22108 in class CLASS, return the class of reg to actually use.
22109 In general this is just CLASS; but on some machines
22110 in some cases it is preferable to use a more restrictive class.
22111
22112 On the RS/6000, we have to return NO_REGS when we want to reload a
22113 floating-point CONST_DOUBLE to force it to be copied to memory.
22114
22115 We also don't want to reload integer values into floating-point
22116 registers if we can at all help it. In fact, this can
22117 cause reload to die if it tries to generate a reload of CTR
22118 into a FP register and discovers it doesn't have the memory location
22119 required.
22120
22121 ??? Would it be a good idea to have reload do the converse, that is
22122 try to reload floating modes into FP registers if possible?
22123 */
22124
22125 static enum reg_class
22126 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
22127 {
22128 machine_mode mode = GET_MODE (x);
22129 bool is_constant = CONSTANT_P (x);
22130
22131 /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
22132 reload class for it. */
22133 if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
22134 && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
22135 return NO_REGS;
22136
22137 if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
22138 && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
22139 return NO_REGS;
22140
22141 /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS. Do not allow
22142 the reloading of address expressions using PLUS into floating point
22143 registers. */
22144 if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
22145 {
22146 if (is_constant)
22147 {
22148 /* Zero is always allowed in all VSX registers. */
22149 if (x == CONST0_RTX (mode))
22150 return rclass;
22151
22152 /* If this is a vector constant that can be formed with a few Altivec
22153 instructions, we want altivec registers. */
22154 if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
22155 return ALTIVEC_REGS;
22156
22157 /* If this is an integer constant that can easily be loaded into
22158 vector registers, allow it. */
22159 if (CONST_INT_P (x))
22160 {
22161 HOST_WIDE_INT value = INTVAL (x);
22162
22163 /* ISA 2.07 can generate -1 in all registers with XXLORC. ISA
22164 2.06 can generate it in the Altivec registers with
22165 VSPLTI<x>. */
22166 if (value == -1)
22167 {
22168 if (TARGET_P8_VECTOR)
22169 return rclass;
22170 else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
22171 return ALTIVEC_REGS;
22172 else
22173 return NO_REGS;
22174 }
22175
22176 /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
22177 a sign extend in the Altivec registers. */
22178 if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
22179 && TARGET_VSX_SMALL_INTEGER
22180 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
22181 return ALTIVEC_REGS;
22182 }
22183
22184 /* Force constant to memory. */
22185 return NO_REGS;
22186 }
22187
22188 /* D-form addressing can easily reload the value. */
22189 if (mode_supports_vmx_dform (mode)
22190 || mode_supports_vsx_dform_quad (mode))
22191 return rclass;
22192
22193 /* If this is a scalar floating point value and we don't have D-form
22194 addressing, prefer the traditional floating point registers so that we
22195 can use D-form (register+offset) addressing. */
22196 if (rclass == VSX_REGS
22197 && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
22198 return FLOAT_REGS;
22199
22200 /* Prefer the Altivec registers if Altivec is handling the vector
22201 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
22202 loads. */
22203 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
22204 || mode == V1TImode)
22205 return ALTIVEC_REGS;
22206
22207 return rclass;
22208 }
22209
22210 if (is_constant || GET_CODE (x) == PLUS)
22211 {
22212 if (reg_class_subset_p (GENERAL_REGS, rclass))
22213 return GENERAL_REGS;
22214 if (reg_class_subset_p (BASE_REGS, rclass))
22215 return BASE_REGS;
22216 return NO_REGS;
22217 }
22218
22219 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
22220 return GENERAL_REGS;
22221
22222 return rclass;
22223 }
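
/* A few illustrative results (hypothetical inputs): reloading the
   integer constant -1 into VSX_REGS returns VSX_REGS when ISA 2.07
   XXLORC is available, but ALTIVEC_REGS on ISA 2.06; an 8-byte DFmode
   value bound for VSX_REGS without D-form addressing is steered to
   FLOAT_REGS so register+offset addressing stays available; a PLUS
   address expression is steered to GENERAL_REGS or BASE_REGS when the
   class allows it, and NO_REGS otherwise.  */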
22224
22225 /* Debug version of rs6000_preferred_reload_class. */
22226 static enum reg_class
22227 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
22228 {
22229 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
22230
22231 fprintf (stderr,
22232 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
22233 "mode = %s, x:\n",
22234 reg_class_names[ret], reg_class_names[rclass],
22235 GET_MODE_NAME (GET_MODE (x)));
22236 debug_rtx (x);
22237
22238 return ret;
22239 }
22240
22241 /* If we are copying between FP or AltiVec registers and anything else, we need
22242 a memory location. The exception is when we are targeting ppc64 and the
22243 move to/from fpr to gpr instructions are available. Also, under VSX, you
22244 can copy vector registers from the FP register set to the Altivec register
22245 set and vice versa. */
22246
22247 static bool
22248 rs6000_secondary_memory_needed (enum reg_class from_class,
22249 enum reg_class to_class,
22250 machine_mode mode)
22251 {
22252 enum rs6000_reg_type from_type, to_type;
22253 bool altivec_p = ((from_class == ALTIVEC_REGS)
22254 || (to_class == ALTIVEC_REGS));
22255
22256 /* If a simple/direct move is available, we don't need secondary memory.  */
22257 from_type = reg_class_to_reg_type[(int)from_class];
22258 to_type = reg_class_to_reg_type[(int)to_class];
22259
22260 if (rs6000_secondary_reload_move (to_type, from_type, mode,
22261 (secondary_reload_info *)0, altivec_p))
22262 return false;
22263
22264 /* If we have a floating point or vector register class, we need to use
22265 memory to transfer the data. */
22266 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
22267 return true;
22268
22269 return false;
22270 }
22271
22272 /* Debug version of rs6000_secondary_memory_needed. */
22273 static bool
22274 rs6000_debug_secondary_memory_needed (enum reg_class from_class,
22275 enum reg_class to_class,
22276 machine_mode mode)
22277 {
22278 bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);
22279
22280 fprintf (stderr,
22281 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
22282 "to_class = %s, mode = %s\n",
22283 ret ? "true" : "false",
22284 reg_class_names[from_class],
22285 reg_class_names[to_class],
22286 GET_MODE_NAME (mode));
22287
22288 return ret;
22289 }
22290
22291 /* Return the register class of a scratch register needed to copy IN into
22292 or out of a register in RCLASS in MODE. If it can be done directly,
22293 NO_REGS is returned. */
22294
22295 static enum reg_class
22296 rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
22297 rtx in)
22298 {
22299 int regno;
22300
22301 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
22302 #if TARGET_MACHO
22303 && MACHOPIC_INDIRECT
22304 #endif
22305 ))
22306 {
22307 /* We cannot copy a symbolic operand directly into anything
22308 other than BASE_REGS for TARGET_ELF. So indicate that a
22309 register from BASE_REGS is needed as an intermediate
22310 register.
22311
22312 On Darwin, pic addresses require a load from memory, which
22313 needs a base register. */
22314 if (rclass != BASE_REGS
22315 && (GET_CODE (in) == SYMBOL_REF
22316 || GET_CODE (in) == HIGH
22317 || GET_CODE (in) == LABEL_REF
22318 || GET_CODE (in) == CONST))
22319 return BASE_REGS;
22320 }
22321
22322 if (GET_CODE (in) == REG)
22323 {
22324 regno = REGNO (in);
22325 if (regno >= FIRST_PSEUDO_REGISTER)
22326 {
22327 regno = true_regnum (in);
22328 if (regno >= FIRST_PSEUDO_REGISTER)
22329 regno = -1;
22330 }
22331 }
22332 else if (GET_CODE (in) == SUBREG)
22333 {
22334 regno = true_regnum (in);
22335 if (regno >= FIRST_PSEUDO_REGISTER)
22336 regno = -1;
22337 }
22338 else
22339 regno = -1;
22340
22341 /* If we have VSX register moves, prefer moving scalar values between
22342 Altivec registers and GPR by going via an FPR (and then via memory)
22343 instead of reloading the secondary memory address for Altivec moves. */
22344 if (TARGET_VSX
22345 && GET_MODE_SIZE (mode) < 16
22346 && !mode_supports_vmx_dform (mode)
22347 && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
22348 && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
22349 || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
22350 && (regno >= 0 && INT_REGNO_P (regno)))))
22351 return FLOAT_REGS;
22352
22353 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
22354 into anything. */
22355 if (rclass == GENERAL_REGS || rclass == BASE_REGS
22356 || (regno >= 0 && INT_REGNO_P (regno)))
22357 return NO_REGS;
22358
22359 /* Constants, memory, and VSX registers can go into VSX registers (both the
22360 traditional floating point and the altivec registers). */
22361 if (rclass == VSX_REGS
22362 && (regno == -1 || VSX_REGNO_P (regno)))
22363 return NO_REGS;
22364
22365 /* Constants, memory, and FP registers can go into FP registers. */
22366 if ((regno == -1 || FP_REGNO_P (regno))
22367 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
22368 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
22369
22370 /* Memory and AltiVec registers can go into AltiVec registers.  */
22371 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
22372 && rclass == ALTIVEC_REGS)
22373 return NO_REGS;
22374
22375 /* We can copy among the CR registers. */
22376 if ((rclass == CR_REGS || rclass == CR0_REGS)
22377 && regno >= 0 && CR_REGNO_P (regno))
22378 return NO_REGS;
22379
22380 /* Otherwise, we need GENERAL_REGS. */
22381 return GENERAL_REGS;
22382 }
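
/* For example (hypothetical operands): copying a SYMBOL_REF into
   FLOAT_REGS on ELF reports BASE_REGS as the needed intermediate;
   moving a DFmode value from an Altivec register to a GPR under VSX
   without D-form addressing reports FLOAT_REGS, routing the value
   through an FPR instead of reloading an Altivec memory address.  */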
22383
22384 /* Debug version of rs6000_secondary_reload_class. */
22385 static enum reg_class
22386 rs6000_debug_secondary_reload_class (enum reg_class rclass,
22387 machine_mode mode, rtx in)
22388 {
22389 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
22390 fprintf (stderr,
22391 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
22392 "mode = %s, input rtx:\n",
22393 reg_class_names[ret], reg_class_names[rclass],
22394 GET_MODE_NAME (mode));
22395 debug_rtx (in);
22396
22397 return ret;
22398 }
22399
22400 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
22401
22402 static bool
22403 rs6000_cannot_change_mode_class (machine_mode from,
22404 machine_mode to,
22405 enum reg_class rclass)
22406 {
22407 unsigned from_size = GET_MODE_SIZE (from);
22408 unsigned to_size = GET_MODE_SIZE (to);
22409
22410 if (from_size != to_size)
22411 {
22412 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
22413
22414 if (reg_classes_intersect_p (xclass, rclass))
22415 {
22416 unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
22417 unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
22418 bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
22419 bool from_float128_vector_p = FLOAT128_VECTOR_P (from);
22420
22421 /* Don't allow 64-bit types to overlap with 128-bit types that take a
22422 single register under VSX because the scalar part of the register
22423 is in the upper 64 bits, not the lower 64 bits.  Types like
22424 TFmode/TDmode that take 2 scalar registers can overlap.  128-bit
22425 IEEE floating point can't overlap, and neither can small
22426 values. */
22427
22428 if (to_float128_vector_p && from_float128_vector_p)
22429 return false;
22430
22431 else if (to_float128_vector_p || from_float128_vector_p)
22432 return true;
22433
22434 /* TDmode in floating-point registers must always go into a register
22435 pair with the most significant word in the even-numbered register
22436 to match ISA requirements. In little-endian mode, this does not
22437 match subreg numbering, so we cannot allow subregs. */
22438 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
22439 return true;
22440
22441 if (from_size < 8 || to_size < 8)
22442 return true;
22443
22444 if (from_size == 8 && (8 * to_nregs) != to_size)
22445 return true;
22446
22447 if (to_size == 8 && (8 * from_nregs) != from_size)
22448 return true;
22449
22450 return false;
22451 }
22452 else
22453 return false;
22454 }
22455
22456 if (TARGET_E500_DOUBLE
22457 && ((((to) == DFmode) + ((from) == DFmode)) == 1
22458 || (((to) == TFmode) + ((from) == TFmode)) == 1
22459 || (((to) == IFmode) + ((from) == IFmode)) == 1
22460 || (((to) == KFmode) + ((from) == KFmode)) == 1
22461 || (((to) == DDmode) + ((from) == DDmode)) == 1
22462 || (((to) == TDmode) + ((from) == TDmode)) == 1
22463 || (((to) == DImode) + ((from) == DImode)) == 1))
22464 return true;
22465
22466 /* Since the VSX register set includes traditional floating point registers
22467 and altivec registers, just check for the size being different instead of
22468 trying to check whether the modes are vector modes. Otherwise it won't
22469 allow, say, DF and DI to change classes.  For types like TFmode and TDmode
22470 that take 2 64-bit registers, rather than a single 128-bit register, don't
22471 allow subregs of those types to other 128-bit types.  */
22472 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
22473 {
22474 unsigned num_regs = (from_size + 15) / 16;
22475 if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
22476 || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
22477 return true;
22478
22479 return (from_size != 8 && from_size != 16);
22480 }
22481
22482 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
22483 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
22484 return true;
22485
22486 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
22487 && reg_classes_intersect_p (GENERAL_REGS, rclass))
22488 return true;
22489
22490 return false;
22491 }
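
/* Example (hypothetical modes): under VSX, (subreg:DI (reg:V2DF ...))
   is rejected because the 64-bit scalar would live in the upper half
   of the 128-bit register; on little-endian, any FPR subreg involving
   TDmode is rejected because the even/odd register-pair rule does not
   match subreg numbering.  */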
22492
22493 /* Debug version of rs6000_cannot_change_mode_class. */
22494 static bool
22495 rs6000_debug_cannot_change_mode_class (machine_mode from,
22496 machine_mode to,
22497 enum reg_class rclass)
22498 {
22499 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
22500
22501 fprintf (stderr,
22502 "rs6000_cannot_change_mode_class, return %s, from = %s, "
22503 "to = %s, rclass = %s\n",
22504 ret ? "true" : "false",
22505 GET_MODE_NAME (from), GET_MODE_NAME (to),
22506 reg_class_names[rclass]);
22507
22508 return ret;
22509 }
22510 \f
22511 /* Return a string to do a move operation of 128 bits of data. */
22512
22513 const char *
22514 rs6000_output_move_128bit (rtx operands[])
22515 {
22516 rtx dest = operands[0];
22517 rtx src = operands[1];
22518 machine_mode mode = GET_MODE (dest);
22519 int dest_regno;
22520 int src_regno;
22521 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
22522 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
22523
22524 if (REG_P (dest))
22525 {
22526 dest_regno = REGNO (dest);
22527 dest_gpr_p = INT_REGNO_P (dest_regno);
22528 dest_fp_p = FP_REGNO_P (dest_regno);
22529 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
22530 dest_vsx_p = dest_fp_p | dest_vmx_p;
22531 }
22532 else
22533 {
22534 dest_regno = -1;
22535 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
22536 }
22537
22538 if (REG_P (src))
22539 {
22540 src_regno = REGNO (src);
22541 src_gpr_p = INT_REGNO_P (src_regno);
22542 src_fp_p = FP_REGNO_P (src_regno);
22543 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
22544 src_vsx_p = src_fp_p | src_vmx_p;
22545 }
22546 else
22547 {
22548 src_regno = -1;
22549 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
22550 }
22551
22552 /* Register moves. */
22553 if (dest_regno >= 0 && src_regno >= 0)
22554 {
22555 if (dest_gpr_p)
22556 {
22557 if (src_gpr_p)
22558 return "#";
22559
22560 if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
22561 return (WORDS_BIG_ENDIAN
22562 ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
22563 : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");
22564
22565 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
22566 return "#";
22567 }
22568
22569 else if (TARGET_VSX && dest_vsx_p)
22570 {
22571 if (src_vsx_p)
22572 return "xxlor %x0,%x1,%x1";
22573
22574 else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
22575 return (WORDS_BIG_ENDIAN
22576 ? "mtvsrdd %x0,%1,%L1"
22577 : "mtvsrdd %x0,%L1,%1");
22578
22579 else if (TARGET_DIRECT_MOVE && src_gpr_p)
22580 return "#";
22581 }
22582
22583 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
22584 return "vor %0,%1,%1";
22585
22586 else if (dest_fp_p && src_fp_p)
22587 return "#";
22588 }
22589
22590 /* Loads. */
22591 else if (dest_regno >= 0 && MEM_P (src))
22592 {
22593 if (dest_gpr_p)
22594 {
22595 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
22596 return "lq %0,%1";
22597 else
22598 return "#";
22599 }
22600
22601 else if (TARGET_ALTIVEC && dest_vmx_p
22602 && altivec_indexed_or_indirect_operand (src, mode))
22603 return "lvx %0,%y1";
22604
22605 else if (TARGET_VSX && dest_vsx_p)
22606 {
22607 if (mode_supports_vsx_dform_quad (mode)
22608 && quad_address_p (XEXP (src, 0), mode, true))
22609 return "lxv %x0,%1";
22610
22611 else if (TARGET_P9_VECTOR)
22612 return "lxvx %x0,%y1";
22613
22614 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
22615 return "lxvw4x %x0,%y1";
22616
22617 else
22618 return "lxvd2x %x0,%y1";
22619 }
22620
22621 else if (TARGET_ALTIVEC && dest_vmx_p)
22622 return "lvx %0,%y1";
22623
22624 else if (dest_fp_p)
22625 return "#";
22626 }
22627
22628 /* Stores. */
22629 else if (src_regno >= 0 && MEM_P (dest))
22630 {
22631 if (src_gpr_p)
22632 {
22633 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
22634 return "stq %1,%0";
22635 else
22636 return "#";
22637 }
22638
22639 else if (TARGET_ALTIVEC && src_vmx_p
22640 && altivec_indexed_or_indirect_operand (dest, mode))
22641 return "stvx %1,%y0";
22642
22643 else if (TARGET_VSX && src_vsx_p)
22644 {
22645 if (mode_supports_vsx_dform_quad (mode)
22646 && quad_address_p (XEXP (dest, 0), mode, true))
22647 return "stxv %x1,%0";
22648
22649 else if (TARGET_P9_VECTOR)
22650 return "stxvx %x1,%y0";
22651
22652 else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
22653 return "stxvw4x %x1,%y0";
22654
22655 else
22656 return "stxvd2x %x1,%y0";
22657 }
22658
22659 else if (TARGET_ALTIVEC && src_vmx_p)
22660 return "stvx %1,%y0";
22661
22662 else if (src_fp_p)
22663 return "#";
22664 }
22665
22666 /* Constants. */
22667 else if (dest_regno >= 0
22668 && (GET_CODE (src) == CONST_INT
22669 || GET_CODE (src) == CONST_WIDE_INT
22670 || GET_CODE (src) == CONST_DOUBLE
22671 || GET_CODE (src) == CONST_VECTOR))
22672 {
22673 if (dest_gpr_p)
22674 return "#";
22675
22676 else if ((dest_vmx_p && TARGET_ALTIVEC)
22677 || (dest_vsx_p && TARGET_VSX))
22678 return output_vec_const_move (operands);
22679 }
22680
22681 fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
22682 }
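
/* Illustrative outputs (hypothetical operands): a VSX-to-VSX register
   copy yields "xxlor %x0,%x1,%x1"; an Altivec-to-Altivec copy without
   VSX yields "vor %0,%1,%1"; a GPR load satisfying quad_load_store_p
   yields "lq %0,%1"; anything that must be split after reload yields
   "#" so the splitters take over.  */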
22683
22684 /* Validate a 128-bit move. */
22685 bool
22686 rs6000_move_128bit_ok_p (rtx operands[])
22687 {
22688 machine_mode mode = GET_MODE (operands[0]);
22689 return (gpc_reg_operand (operands[0], mode)
22690 || gpc_reg_operand (operands[1], mode));
22691 }
22692
22693 /* Return true if a 128-bit move needs to be split. */
22694 bool
22695 rs6000_split_128bit_ok_p (rtx operands[])
22696 {
22697 if (!reload_completed)
22698 return false;
22699
22700 if (!gpr_or_gpr_p (operands[0], operands[1]))
22701 return false;
22702
22703 if (quad_load_store_p (operands[0], operands[1]))
22704 return false;
22705
22706 return true;
22707 }
22708
22709 \f
22710 /* Given a comparison operation, return the bit number in CCR to test. We
22711 know this is a valid comparison.
22712
22713 SCC_P is 1 if this is for an scc. That means that %D will have been
22714 used instead of %C, so the bits will be in different places.
22715
22716 Return -1 if OP isn't a valid comparison for some reason. */
22717
22718 int
22719 ccr_bit (rtx op, int scc_p)
22720 {
22721 enum rtx_code code = GET_CODE (op);
22722 machine_mode cc_mode;
22723 int cc_regnum;
22724 int base_bit;
22725 rtx reg;
22726
22727 if (!COMPARISON_P (op))
22728 return -1;
22729
22730 reg = XEXP (op, 0);
22731
22732 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
22733
22734 cc_mode = GET_MODE (reg);
22735 cc_regnum = REGNO (reg);
22736 base_bit = 4 * (cc_regnum - CR0_REGNO);
22737
22738 validate_condition_mode (code, cc_mode);
22739
22740 /* When generating a sCOND operation, only positive conditions are
22741 allowed. */
22742 gcc_assert (!scc_p
22743 || code == EQ || code == GT || code == LT || code == UNORDERED
22744 || code == GTU || code == LTU);
22745
22746 switch (code)
22747 {
22748 case NE:
22749 return scc_p ? base_bit + 3 : base_bit + 2;
22750 case EQ:
22751 return base_bit + 2;
22752 case GT: case GTU: case UNLE:
22753 return base_bit + 1;
22754 case LT: case LTU: case UNGE:
22755 return base_bit;
22756 case ORDERED: case UNORDERED:
22757 return base_bit + 3;
22758
22759 case GE: case GEU:
22760 /* If scc, we will have done a cror to put the bit in the
22761 unordered position. So test that bit. For integer, this is ! LT
22762 unless this is an scc insn. */
22763 return scc_p ? base_bit + 3 : base_bit;
22764
22765 case LE: case LEU:
22766 return scc_p ? base_bit + 3 : base_bit + 1;
22767
22768 default:
22769 gcc_unreachable ();
22770 }
22771 }
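
/* Worked example (hypothetical operands): a GT comparison whose result
   lives in CR2 has base_bit = 4 * 2 = 8, so ccr_bit returns 8 + 1 = 9
   whether or not this is for an scc; GE in CR2 returns bit 8 for a
   branch but the unordered bit 11 for an scc, because of the cror
   described above.  */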
22772 \f
22773 /* Return the GOT register. */
22774
22775 rtx
22776 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
22777 {
22778 /* The second flow pass currently (June 1999) can't update
22779 regs_ever_live without disturbing other parts of the compiler, so
22780 update it here to make the prolog/epilogue code happy. */
22781 if (!can_create_pseudo_p ()
22782 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
22783 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
22784
22785 crtl->uses_pic_offset_table = 1;
22786
22787 return pic_offset_table_rtx;
22788 }
22789 \f
22790 static rs6000_stack_t stack_info;
22791
22792 /* Function to init struct machine_function.
22793 This will be called, via a pointer variable,
22794 from push_function_context. */
22795
22796 static struct machine_function *
22797 rs6000_init_machine_status (void)
22798 {
22799 stack_info.reload_completed = 0;
22800 return ggc_cleared_alloc<machine_function> ();
22801 }
22802 \f
22803 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
22804
22805 /* Write out a function code label. */
22806
22807 void
22808 rs6000_output_function_entry (FILE *file, const char *fname)
22809 {
22810 if (fname[0] != '.')
22811 {
22812 switch (DEFAULT_ABI)
22813 {
22814 default:
22815 gcc_unreachable ();
22816
22817 case ABI_AIX:
22818 if (DOT_SYMBOLS)
22819 putc ('.', file);
22820 else
22821 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
22822 break;
22823
22824 case ABI_ELFv2:
22825 case ABI_V4:
22826 case ABI_DARWIN:
22827 break;
22828 }
22829 }
22830
22831 RS6000_OUTPUT_BASENAME (file, fname);
22832 }
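
/* For example, under the AIX ABI the entry point of "foo" is written
   as ".foo" when dot symbols are in use, and roughly as "L.foo"
   otherwise; the ELFv2, V4 and Darwin ABIs emit plain "foo".  */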
22833
22834 /* Print an operand. Recognize special options, documented below. */
22835
22836 #if TARGET_ELF
22837 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
22838 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
22839 #else
22840 #define SMALL_DATA_RELOC "sda21"
22841 #define SMALL_DATA_REG 0
22842 #endif
22843
22844 void
22845 print_operand (FILE *file, rtx x, int code)
22846 {
22847 int i;
22848 unsigned HOST_WIDE_INT uval;
22849
22850 switch (code)
22851 {
22852 /* %a is output_address. */
22853
22854 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
22855 output_operand. */
22856
22857 case 'D':
22858 /* Like 'J' but get to the GT bit only. */
22859 gcc_assert (REG_P (x));
22860
22861 /* Bit 1 is GT bit. */
22862 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
22863
22864 /* Add one for shift count in rlinm for scc. */
22865 fprintf (file, "%d", i + 1);
22866 return;
22867
22868 case 'e':
22869 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
22870 if (! INT_P (x))
22871 {
22872 output_operand_lossage ("invalid %%e value");
22873 return;
22874 }
22875
22876 uval = INTVAL (x);
22877 if ((uval & 0xffff) == 0 && uval != 0)
22878 putc ('s', file);
22879 return;
22880
22881 case 'E':
22882 /* X is a CR register.  Print the number of the EQ bit of the CR.  */
22883 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
22884 output_operand_lossage ("invalid %%E value");
22885 else
22886 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
22887 return;
22888
22889 case 'f':
22890 /* X is a CR register. Print the shift count needed to move it
22891 to the high-order four bits. */
22892 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
22893 output_operand_lossage ("invalid %%f value");
22894 else
22895 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
22896 return;
22897
22898 case 'F':
22899 /* Similar, but print the count for the rotate in the opposite
22900 direction. */
22901 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
22902 output_operand_lossage ("invalid %%F value");
22903 else
22904 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
22905 return;
22906
22907 case 'G':
22908 /* X is a constant integer. If it is negative, print "m",
22909 otherwise print "z". This is to make an aze or ame insn. */
22910 if (GET_CODE (x) != CONST_INT)
22911 output_operand_lossage ("invalid %%G value");
22912 else if (INTVAL (x) >= 0)
22913 putc ('z', file);
22914 else
22915 putc ('m', file);
22916 return;
22917
22918 case 'h':
22919 /* If constant, output low-order five bits. Otherwise, write
22920 normally. */
22921 if (INT_P (x))
22922 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
22923 else
22924 print_operand (file, x, 0);
22925 return;
22926
22927 case 'H':
22928 /* If constant, output low-order six bits. Otherwise, write
22929 normally. */
22930 if (INT_P (x))
22931 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
22932 else
22933 print_operand (file, x, 0);
22934 return;
22935
22936 case 'I':
22937 /* Print `i' if this is a constant, else nothing. */
22938 if (INT_P (x))
22939 putc ('i', file);
22940 return;
22941
22942 case 'j':
22943 /* Write the bit number in CCR for jump. */
22944 i = ccr_bit (x, 0);
22945 if (i == -1)
22946 output_operand_lossage ("invalid %%j code");
22947 else
22948 fprintf (file, "%d", i);
22949 return;
22950
22951 case 'J':
22952 /* Similar, but add one for shift count in rlinm for scc and pass
22953 scc flag to `ccr_bit'. */
22954 i = ccr_bit (x, 1);
22955 if (i == -1)
22956 output_operand_lossage ("invalid %%J code");
22957 else
22958 /* If we want bit 31, write a shift count of zero, not 32. */
22959 fprintf (file, "%d", i == 31 ? 0 : i + 1);
22960 return;
22961
22962 case 'k':
22963 /* X must be a constant. Write the 1's complement of the
22964 constant. */
22965 if (! INT_P (x))
22966 output_operand_lossage ("invalid %%k value");
22967 else
22968 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
22969 return;
22970
22971 case 'K':
22972 /* X must be a symbolic constant on ELF. Write an
22973 expression suitable for an 'addi' that adds in the low 16
22974 bits of the MEM. */
22975 if (GET_CODE (x) == CONST)
22976 {
22977 if (GET_CODE (XEXP (x, 0)) != PLUS
22978 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
22979 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
22980 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
22981 output_operand_lossage ("invalid %%K value");
22982 }
22983 print_operand_address (file, x);
22984 fputs ("@l", file);
22985 return;
22986
22987 /* %l is output_asm_label. */
22988
22989 case 'L':
22990 /* Write second word of DImode or DFmode reference. Works on register
22991 or non-indexed memory only. */
22992 if (REG_P (x))
22993 fputs (reg_names[REGNO (x) + 1], file);
22994 else if (MEM_P (x))
22995 {
22996 machine_mode mode = GET_MODE (x);
22997 /* Handle possible auto-increment. Since it is pre-increment and
22998 we have already done it, we can just use an offset of one word.  */
22999 if (GET_CODE (XEXP (x, 0)) == PRE_INC
23000 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
23001 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
23002 UNITS_PER_WORD));
23003 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
23004 output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
23005 UNITS_PER_WORD));
23006 else
23007 output_address (mode, XEXP (adjust_address_nv (x, SImode,
23008 UNITS_PER_WORD),
23009 0));
23010
23011 if (small_data_operand (x, GET_MODE (x)))
23012 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
23013 reg_names[SMALL_DATA_REG]);
23014 }
23015 return;
23016
23017 case 'N':
23018 /* Write the number of elements in the vector times 4. */
23019 if (GET_CODE (x) != PARALLEL)
23020 output_operand_lossage ("invalid %%N value");
23021 else
23022 fprintf (file, "%d", XVECLEN (x, 0) * 4);
23023 return;
23024
23025 case 'O':
23026 /* Similar, but subtract 1 first. */
23027 if (GET_CODE (x) != PARALLEL)
23028 output_operand_lossage ("invalid %%O value");
23029 else
23030 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
23031 return;
23032
23033 case 'p':
23034 /* X is a CONST_INT that is a power of two. Output the logarithm. */
23035 if (! INT_P (x)
23036 || INTVAL (x) < 0
23037 || (i = exact_log2 (INTVAL (x))) < 0)
23038 output_operand_lossage ("invalid %%p value");
23039 else
23040 fprintf (file, "%d", i);
23041 return;
23042
23043 case 'P':
23044 /* The operand must be an indirect memory reference. The result
23045 is the register name. */
23046 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
23047 || REGNO (XEXP (x, 0)) >= 32)
23048 output_operand_lossage ("invalid %%P value");
23049 else
23050 fputs (reg_names[REGNO (XEXP (x, 0))], file);
23051 return;
23052
23053 case 'q':
23054 /* This outputs the logical code corresponding to a boolean
23055 expression. The expression may have one or both operands
23056 negated (if one, only the first one). For condition register
23057 logical operations, it will also treat the negated
23058 CR codes as NOTs, but not handle NOTs of them. */
23059 {
23060 const char *const *t = 0;
23061 const char *s;
23062 enum rtx_code code = GET_CODE (x);
23063 static const char * const tbl[3][3] = {
23064 { "and", "andc", "nor" },
23065 { "or", "orc", "nand" },
23066 { "xor", "eqv", "xor" } };
23067
23068 if (code == AND)
23069 t = tbl[0];
23070 else if (code == IOR)
23071 t = tbl[1];
23072 else if (code == XOR)
23073 t = tbl[2];
23074 else
23075 output_operand_lossage ("invalid %%q value");
23076
23077 if (GET_CODE (XEXP (x, 0)) != NOT)
23078 s = t[0];
23079 else
23080 {
23081 if (GET_CODE (XEXP (x, 1)) == NOT)
23082 s = t[2];
23083 else
23084 s = t[1];
23085 }
23086
23087 fputs (s, file);
23088 }
23089 return;
23090
23091 case 'Q':
23092 if (! TARGET_MFCRF)
23093 return;
23094 fputc (',', file);
23095 /* FALLTHRU */
23096
23097 case 'R':
23098 /* X is a CR register. Print the mask for `mtcrf'. */
23099 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
23100 output_operand_lossage ("invalid %%R value");
23101 else
23102 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
23103 return;
23104
23105 case 's':
23106 /* Low 5 bits of 32 - value.  */
23107 if (! INT_P (x))
23108 output_operand_lossage ("invalid %%s value");
23109 else
23110 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
23111 return;
23112
23113 case 't':
23114 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
23115 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
23116
23117 /* Bit 3 is OV bit. */
23118 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
23119
23120 /* If we want bit 31, write a shift count of zero, not 32. */
23121 fprintf (file, "%d", i == 31 ? 0 : i + 1);
23122 return;
23123
23124 case 'T':
23125 /* Print the symbolic name of a branch target register. */
23126 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
23127 && REGNO (x) != CTR_REGNO))
23128 output_operand_lossage ("invalid %%T value");
23129 else if (REGNO (x) == LR_REGNO)
23130 fputs ("lr", file);
23131 else
23132 fputs ("ctr", file);
23133 return;
23134
23135 case 'u':
23136 /* High-order or low-order 16 bits of constant, whichever is non-zero,
23137 for use in unsigned operand. */
23138 if (! INT_P (x))
23139 {
23140 output_operand_lossage ("invalid %%u value");
23141 return;
23142 }
23143
23144 uval = INTVAL (x);
23145 if ((uval & 0xffff) == 0)
23146 uval >>= 16;
23147
23148 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
23149 return;
23150
23151 case 'v':
23152 /* High-order 16 bits of constant for use in signed operand. */
23153 if (! INT_P (x))
23154 output_operand_lossage ("invalid %%v value");
23155 else
23156 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
23157 (INTVAL (x) >> 16) & 0xffff);
23158 return;
23159
23160 case 'U':
23161 /* Print `u' if this has an auto-increment or auto-decrement. */
23162 if (MEM_P (x)
23163 && (GET_CODE (XEXP (x, 0)) == PRE_INC
23164 || GET_CODE (XEXP (x, 0)) == PRE_DEC
23165 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
23166 putc ('u', file);
23167 return;
23168
23169 case 'V':
23170 /* Print the trap code for this operand. */
23171 switch (GET_CODE (x))
23172 {
23173 case EQ:
23174 fputs ("eq", file); /* 4 */
23175 break;
23176 case NE:
23177 fputs ("ne", file); /* 24 */
23178 break;
23179 case LT:
23180 fputs ("lt", file); /* 16 */
23181 break;
23182 case LE:
23183 fputs ("le", file); /* 20 */
23184 break;
23185 case GT:
23186 fputs ("gt", file); /* 8 */
23187 break;
23188 case GE:
23189 fputs ("ge", file); /* 12 */
23190 break;
23191 case LTU:
23192 fputs ("llt", file); /* 2 */
23193 break;
23194 case LEU:
23195 fputs ("lle", file); /* 6 */
23196 break;
23197 case GTU:
23198 fputs ("lgt", file); /* 1 */
23199 break;
23200 case GEU:
23201 fputs ("lge", file); /* 5 */
23202 break;
23203 default:
23204 gcc_unreachable ();
23205 }
23206 break;
23207
23208 case 'w':
23209 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
23210 normally. */
23211 if (INT_P (x))
23212 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
23213 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
23214 else
23215 print_operand (file, x, 0);
23216 return;
23217
23218 case 'x':
23219 /* X is a FPR or Altivec register used in a VSX context. */
23220 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
23221 output_operand_lossage ("invalid %%x value");
23222 else
23223 {
23224 int reg = REGNO (x);
23225 int vsx_reg = (FP_REGNO_P (reg)
23226 ? reg - 32
23227 : reg - FIRST_ALTIVEC_REGNO + 32);
23228
23229 #ifdef TARGET_REGNAMES
23230 if (TARGET_REGNAMES)
23231 fprintf (file, "%%vs%d", vsx_reg);
23232 else
23233 #endif
23234 fprintf (file, "%d", vsx_reg);
23235 }
23236 return;
23237
23238 case 'X':
23239 if (MEM_P (x)
23240 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
23241 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
23242 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
23243 putc ('x', file);
23244 return;
23245
23246 case 'Y':
23247 /* Like 'L', for third word of TImode/PTImode */
23248 if (REG_P (x))
23249 fputs (reg_names[REGNO (x) + 2], file);
23250 else if (MEM_P (x))
23251 {
23252 machine_mode mode = GET_MODE (x);
23253 if (GET_CODE (XEXP (x, 0)) == PRE_INC
23254 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
23255 output_address (mode, plus_constant (Pmode,
23256 XEXP (XEXP (x, 0), 0), 8));
23257 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
23258 output_address (mode, plus_constant (Pmode,
23259 XEXP (XEXP (x, 0), 0), 8));
23260 else
23261 output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
23262 if (small_data_operand (x, GET_MODE (x)))
23263 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
23264 reg_names[SMALL_DATA_REG]);
23265 }
23266 return;
23267
23268 case 'z':
23269 /* X is a SYMBOL_REF. Write out the name preceded by a
23270 period and without any trailing data in brackets. Used for function
23271 names. If we are configured for System V (or the embedded ABI) on
23272 the PowerPC, do not emit the period, since those systems do not use
23273 TOCs and the like. */
23274 gcc_assert (GET_CODE (x) == SYMBOL_REF);
23275
23276 /* For macho, check to see if we need a stub. */
23277 if (TARGET_MACHO)
23278 {
23279 const char *name = XSTR (x, 0);
23280 #if TARGET_MACHO
23281 if (darwin_emit_branch_islands
23282 && MACHOPIC_INDIRECT
23283 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
23284 name = machopic_indirection_name (x, /*stub_p=*/true);
23285 #endif
23286 assemble_name (file, name);
23287 }
23288 else if (!DOT_SYMBOLS)
23289 assemble_name (file, XSTR (x, 0));
23290 else
23291 rs6000_output_function_entry (file, XSTR (x, 0));
23292 return;
23293
23294 case 'Z':
23295 /* Like 'L', for last word of TImode/PTImode. */
23296 if (REG_P (x))
23297 fputs (reg_names[REGNO (x) + 3], file);
23298 else if (MEM_P (x))
23299 {
23300 machine_mode mode = GET_MODE (x);
23301 if (GET_CODE (XEXP (x, 0)) == PRE_INC
23302 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
23303 output_address (mode, plus_constant (Pmode,
23304 XEXP (XEXP (x, 0), 0), 12));
23305 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
23306 output_address (mode, plus_constant (Pmode,
23307 XEXP (XEXP (x, 0), 0), 12));
23308 else
23309 output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
23310 if (small_data_operand (x, GET_MODE (x)))
23311 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
23312 reg_names[SMALL_DATA_REG]);
23313 }
23314 return;
23315
23316 /* Print AltiVec or SPE memory operand. */
23317 case 'y':
23318 {
23319 rtx tmp;
23320
23321 gcc_assert (MEM_P (x));
23322
23323 tmp = XEXP (x, 0);
23324
23325 /* Ugly hack because %y is overloaded. */
23326 if ((TARGET_SPE || TARGET_E500_DOUBLE)
23327 && (GET_MODE_SIZE (GET_MODE (x)) == 8
23328 || FLOAT128_2REG_P (GET_MODE (x))
23329 || GET_MODE (x) == TImode
23330 || GET_MODE (x) == PTImode))
23331 {
23332 /* Handle [reg]. */
23333 if (REG_P (tmp))
23334 {
23335 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
23336 break;
23337 }
23338 /* Handle [reg+UIMM]. */
23339 else if (GET_CODE (tmp) == PLUS
23340 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
23341 {
23342 int offset;
23343 
23344 gcc_assert (REG_P (XEXP (tmp, 0)));
23345 
23346 offset = INTVAL (XEXP (tmp, 1));
23347 fprintf (file, "%d(%s)", offset, reg_names[REGNO (XEXP (tmp, 0))]);
23348 break;
23349 }
23350
23351 /* Fall through. Must be [reg+reg]. */
23352 }
23353 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
23354 && GET_CODE (tmp) == AND
23355 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
23356 && INTVAL (XEXP (tmp, 1)) == -16)
23357 tmp = XEXP (tmp, 0);
23358 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
23359 && GET_CODE (tmp) == PRE_MODIFY)
23360 tmp = XEXP (tmp, 1);
23361 if (REG_P (tmp))
23362 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
23363 else
23364 {
23365 if (GET_CODE (tmp) != PLUS
23366 || !REG_P (XEXP (tmp, 0))
23367 || !REG_P (XEXP (tmp, 1)))
23368 {
23369 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
23370 break;
23371 }
23372
23373 if (REGNO (XEXP (tmp, 0)) == 0)
23374 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
23375 reg_names[ REGNO (XEXP (tmp, 0)) ]);
23376 else
23377 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
23378 reg_names[ REGNO (XEXP (tmp, 1)) ]);
23379 }
23380 break;
23381 }
23382
23383 case 0:
23384 if (REG_P (x))
23385 fprintf (file, "%s", reg_names[REGNO (x)]);
23386 else if (MEM_P (x))
23387 {
23388 /* We need to handle PRE_INC and PRE_DEC here, since we need to
23389 know the width from the mode. */
23390 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
23391 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
23392 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
23393 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
23394 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
23395 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
23396 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
23397 output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
23398 else
23399 output_address (GET_MODE (x), XEXP (x, 0));
23400 }
23401 else
23402 {
23403 if (toc_relative_expr_p (x, false))
23404 /* This hack along with a corresponding hack in
23405 rs6000_output_addr_const_extra arranges to output addends
23406 where the assembler expects to find them.  E.g.
23407 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
23408 without this hack would be output as "x@toc+4". We
23409 want "x+4@toc". */
23410 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
23411 else
23412 output_addr_const (file, x);
23413 }
23414 return;
23415
23416 case '&':
23417 if (const char *name = get_some_local_dynamic_name ())
23418 assemble_name (file, name);
23419 else
23420 output_operand_lossage ("'%%&' used without any "
23421 "local dynamic TLS references");
23422 return;
23423
23424 default:
23425 output_operand_lossage ("invalid %%xn code");
23426 }
23427 }
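
/* A few worked examples of the codes above (hypothetical operands):
   %u on (const_int 0x12340000) prints "0x1234" (the non-zero
   high-order half); %w on (const_int 0x8000) prints "-32768"; %x on
   FPR 1 prints VSX register number "1", while the first Altivec
   register maps to VSX register "32".  */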
23428 \f
23429 /* Print the address of an operand. */
23430
23431 void
23432 print_operand_address (FILE *file, rtx x)
23433 {
23434 if (REG_P (x))
23435 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
23436 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
23437 || GET_CODE (x) == LABEL_REF)
23438 {
23439 output_addr_const (file, x);
23440 if (small_data_operand (x, GET_MODE (x)))
23441 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
23442 reg_names[SMALL_DATA_REG]);
23443 else
23444 gcc_assert (!TARGET_TOC);
23445 }
23446 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
23447 && REG_P (XEXP (x, 1)))
23448 {
23449 if (REGNO (XEXP (x, 0)) == 0)
23450 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
23451 reg_names[ REGNO (XEXP (x, 0)) ]);
23452 else
23453 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
23454 reg_names[ REGNO (XEXP (x, 1)) ]);
23455 }
23456 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
23457 && GET_CODE (XEXP (x, 1)) == CONST_INT)
23458 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
23459 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
23460 #if TARGET_MACHO
23461 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
23462 && CONSTANT_P (XEXP (x, 1)))
23463 {
23464 fprintf (file, "lo16(");
23465 output_addr_const (file, XEXP (x, 1));
23466 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
23467 }
23468 #endif
23469 #if TARGET_ELF
23470 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
23471 && CONSTANT_P (XEXP (x, 1)))
23472 {
23473 output_addr_const (file, XEXP (x, 1));
23474 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
23475 }
23476 #endif
23477 else if (toc_relative_expr_p (x, false))
23478 {
23479 /* This hack along with a corresponding hack in
23480 rs6000_output_addr_const_extra arranges to output addends
23481 where the assembler expects to find them.  E.g.
23482 (lo_sum (reg 9)
23483 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
23484 without this hack would be output as "x@toc+8@l(9)". We
23485 want "x+8@toc@l(9)". */
23486 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
23487 if (GET_CODE (x) == LO_SUM)
23488 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
23489 else
23490 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
23491 }
23492 else
23493 gcc_unreachable ();
23494 }
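
/* Example renderings with bare register names (hypothetical operands):
   (reg 9) prints "0(9)"; (plus (reg 9) (const_int 16)) prints "16(9)";
   the indexed address (plus (reg 0) (reg 10)) prints "10,0", with r0
   placed second because r0 reads as zero in the base position.  */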
23495 \f
23496 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
23497
23498 static bool
23499 rs6000_output_addr_const_extra (FILE *file, rtx x)
23500 {
23501 if (GET_CODE (x) == UNSPEC)
23502 switch (XINT (x, 1))
23503 {
23504 case UNSPEC_TOCREL:
23505 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
23506 && REG_P (XVECEXP (x, 0, 1))
23507 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
23508 output_addr_const (file, XVECEXP (x, 0, 0));
23509 if (x == tocrel_base && tocrel_offset != const0_rtx)
23510 {
23511 if (INTVAL (tocrel_offset) >= 0)
23512 fprintf (file, "+");
23513 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
23514 }
23515 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
23516 {
23517 putc ('-', file);
23518 assemble_name (file, toc_label_name);
23519 need_toc_init = 1;
23520 }
23521 else if (TARGET_ELF)
23522 fputs ("@toc", file);
23523 return true;
23524
23525 #if TARGET_MACHO
23526 case UNSPEC_MACHOPIC_OFFSET:
23527 output_addr_const (file, XVECEXP (x, 0, 0));
23528 putc ('-', file);
23529 machopic_output_function_base_name (file);
23530 return true;
23531 #endif
23532 }
23533 return false;
23534 }
23535 \f
23536 /* Target hook for assembling integer objects. The PowerPC version has
23537 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
23538 is defined. It also needs to handle DI-mode objects on 64-bit
23539 targets. */
23540
23541 static bool
23542 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
23543 {
23544 #ifdef RELOCATABLE_NEEDS_FIXUP
23545 /* Special handling for SI values. */
23546 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
23547 {
23548 static int recurse = 0;
23549
23550 /* For -mrelocatable, we mark all addresses that need to be fixed up in
23551 the .fixup section. Since the TOC section is already relocated, we
23552 don't need to mark it here. We used to skip the text section, but it
23553 should never be valid for relocated addresses to be placed in the text
23554 section. */
23555 if (DEFAULT_ABI == ABI_V4
23556 && (TARGET_RELOCATABLE || flag_pic > 1)
23557 && in_section != toc_section
23558 && !recurse
23559 && !CONST_SCALAR_INT_P (x)
23560 && CONSTANT_P (x))
23561 {
23562 char buf[256];
23563
23564 recurse = 1;
23565 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
23566 fixuplabelno++;
23567 ASM_OUTPUT_LABEL (asm_out_file, buf);
23568 fprintf (asm_out_file, "\t.long\t(");
23569 output_addr_const (asm_out_file, x);
23570 fprintf (asm_out_file, ")@fixup\n");
23571 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
23572 ASM_OUTPUT_ALIGN (asm_out_file, 2);
23573 fprintf (asm_out_file, "\t.long\t");
23574 assemble_name (asm_out_file, buf);
23575 fprintf (asm_out_file, "\n\t.previous\n");
23576 recurse = 0;
23577 return true;
23578 }
23579 /* Remove initial .'s to turn a -mcall-aixdesc function
23580 address into the address of the descriptor, not the function
23581 itself. */
23582 else if (GET_CODE (x) == SYMBOL_REF
23583 && XSTR (x, 0)[0] == '.'
23584 && DEFAULT_ABI == ABI_AIX)
23585 {
23586 const char *name = XSTR (x, 0);
23587 while (*name == '.')
23588 name++;
23589
23590 fprintf (asm_out_file, "\t.long\t%s\n", name);
23591 return true;
23592 }
23593 }
23594 #endif /* RELOCATABLE_NEEDS_FIXUP */
23595 return default_assemble_integer (x, size, aligned_p);
23596 }
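
/* With -mrelocatable the fixup branch above emits, for a constant
   address "sym" (illustrative label number):

	.LCP0:
		.long (sym)@fixup
		.section ".fixup","aw"
		.align 2
		.long .LCP0
		.previous

   recording the address of the word so start-up relocation code can
   patch it.  */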
23597
23598 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
23599 /* Emit an assembler directive to set symbol visibility for DECL to
23600 VISIBILITY_TYPE. */
23601
23602 static void
23603 rs6000_assemble_visibility (tree decl, int vis)
23604 {
23605 if (TARGET_XCOFF)
23606 return;
23607
23608 /* Functions need to have their entry point symbol visibility set as
23609 well as their descriptor symbol visibility. */
23610 if (DEFAULT_ABI == ABI_AIX
23611 && DOT_SYMBOLS
23612 && TREE_CODE (decl) == FUNCTION_DECL)
23613 {
23614 static const char * const visibility_types[] = {
23615 NULL, "protected", "hidden", "internal"
23616 };
23617
23618 const char *name, *type;
23619
23620 name = ((* targetm.strip_name_encoding)
23621 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
23622 type = visibility_types[vis];
23623
23624 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
23625 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
23626 }
23627 else
23628 default_assemble_visibility (decl, vis);
23629 }
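
/* For instance, hidden visibility on function "foo" under the AIX ABI
   with dot symbols emits both directives:
	.hidden foo
	.hidden .foo
   covering the descriptor symbol and the entry-point symbol.  */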
23630 #endif
23631 \f
23632 enum rtx_code
23633 rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
23634 {
23635 /* Reversal of FP compares needs care -- an ordered compare
23636 becomes an unordered compare and vice versa. */
23637 if (mode == CCFPmode
23638 && (!flag_finite_math_only
23639 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
23640 || code == UNEQ || code == LTGT))
23641 return reverse_condition_maybe_unordered (code);
23642 else
23643 return reverse_condition (code);
23644 }
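
/* For example, reversing GE in CCFPmode (without flag_finite_math_only)
   yields UNLT, so NaN operands still take the intended arm of the
   branch, whereas in CCmode it simply yields LT.  */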
23645
23646 /* Generate a compare for CODE. Return a brand-new rtx that
23647 represents the result of the compare. */
23648
23649 static rtx
23650 rs6000_generate_compare (rtx cmp, machine_mode mode)
23651 {
23652 machine_mode comp_mode;
23653 rtx compare_result;
23654 enum rtx_code code = GET_CODE (cmp);
23655 rtx op0 = XEXP (cmp, 0);
23656 rtx op1 = XEXP (cmp, 1);
23657
23658 if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
23659 comp_mode = CCmode;
23660 else if (FLOAT_MODE_P (mode))
23661 comp_mode = CCFPmode;
23662 else if (code == GTU || code == LTU
23663 || code == GEU || code == LEU)
23664 comp_mode = CCUNSmode;
23665 else if ((code == EQ || code == NE)
23666 && unsigned_reg_p (op0)
23667 && (unsigned_reg_p (op1)
23668 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
23669 /* These are unsigned values, perhaps there will be a later
23670 ordering compare that can be shared with this one. */
23671 comp_mode = CCUNSmode;
23672 else
23673 comp_mode = CCmode;
23674
23675 /* If we have an unsigned compare, make sure we don't have a signed value as
23676 an immediate. */
23677 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
23678 && INTVAL (op1) < 0)
23679 {
23680 op0 = copy_rtx_if_shared (op0);
23681 op1 = force_reg (GET_MODE (op0), op1);
23682 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
23683 }
23684
23685 /* First, the compare. */
23686 compare_result = gen_reg_rtx (comp_mode);
23687
23688 /* E500 FP compare instructions on the GPRs. Yuck! */
23689 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
23690 && FLOAT_MODE_P (mode))
23691 {
23692 rtx cmp, or_result, compare_result2;
23693 machine_mode op_mode = GET_MODE (op0);
23694 bool reverse_p;
23695
23696 if (op_mode == VOIDmode)
23697 op_mode = GET_MODE (op1);
23698
23699 /* First reverse the condition codes that aren't directly supported. */
23700 switch (code)
23701 {
23702 case NE:
23703 case UNLT:
23704 case UNLE:
23705 case UNGT:
23706 case UNGE:
23707 code = reverse_condition_maybe_unordered (code);
23708 reverse_p = true;
23709 break;
23710
23711 case EQ:
23712 case LT:
23713 case LE:
23714 case GT:
23715 case GE:
23716 reverse_p = false;
23717 break;
23718
23719 default:
23720 gcc_unreachable ();
23721 }
23722
23723 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
23724 This explains the following mess. */
23725
23726 switch (code)
23727 {
23728 case EQ:
23729 switch (op_mode)
23730 {
23731 case SFmode:
23732 cmp = (flag_finite_math_only && !flag_trapping_math)
23733 ? gen_tstsfeq_gpr (compare_result, op0, op1)
23734 : gen_cmpsfeq_gpr (compare_result, op0, op1);
23735 break;
23736
23737 case DFmode:
23738 cmp = (flag_finite_math_only && !flag_trapping_math)
23739 ? gen_tstdfeq_gpr (compare_result, op0, op1)
23740 : gen_cmpdfeq_gpr (compare_result, op0, op1);
23741 break;
23742
23743 case TFmode:
23744 case IFmode:
23745 case KFmode:
23746 cmp = (flag_finite_math_only && !flag_trapping_math)
23747 ? gen_tsttfeq_gpr (compare_result, op0, op1)
23748 : gen_cmptfeq_gpr (compare_result, op0, op1);
23749 break;
23750
23751 default:
23752 gcc_unreachable ();
23753 }
23754 break;
23755
23756 case GT:
23757 case GE:
23758 switch (op_mode)
23759 {
23760 case SFmode:
23761 cmp = (flag_finite_math_only && !flag_trapping_math)
23762 ? gen_tstsfgt_gpr (compare_result, op0, op1)
23763 : gen_cmpsfgt_gpr (compare_result, op0, op1);
23764 break;
23765
23766 case DFmode:
23767 cmp = (flag_finite_math_only && !flag_trapping_math)
23768 ? gen_tstdfgt_gpr (compare_result, op0, op1)
23769 : gen_cmpdfgt_gpr (compare_result, op0, op1);
23770 break;
23771
23772 case TFmode:
23773 case IFmode:
23774 case KFmode:
23775 cmp = (flag_finite_math_only && !flag_trapping_math)
23776 ? gen_tsttfgt_gpr (compare_result, op0, op1)
23777 : gen_cmptfgt_gpr (compare_result, op0, op1);
23778 break;
23779
23780 default:
23781 gcc_unreachable ();
23782 }
23783 break;
23784
23785 case LT:
23786 case LE:
23787 switch (op_mode)
23788 {
23789 case SFmode:
23790 cmp = (flag_finite_math_only && !flag_trapping_math)
23791 ? gen_tstsflt_gpr (compare_result, op0, op1)
23792 : gen_cmpsflt_gpr (compare_result, op0, op1);
23793 break;
23794
23795 case DFmode:
23796 cmp = (flag_finite_math_only && !flag_trapping_math)
23797 ? gen_tstdflt_gpr (compare_result, op0, op1)
23798 : gen_cmpdflt_gpr (compare_result, op0, op1);
23799 break;
23800
23801 case TFmode:
23802 case IFmode:
23803 case KFmode:
23804 cmp = (flag_finite_math_only && !flag_trapping_math)
23805 ? gen_tsttflt_gpr (compare_result, op0, op1)
23806 : gen_cmptflt_gpr (compare_result, op0, op1);
23807 break;
23808
23809 default:
23810 gcc_unreachable ();
23811 }
23812 break;
23813
23814 default:
23815 gcc_unreachable ();
23816 }
23817
23818 /* Synthesize LE and GE from LT/GT || EQ. */
23819 if (code == LE || code == GE)
23820 {
23821 emit_insn (cmp);
23822
23823 compare_result2 = gen_reg_rtx (CCFPmode);
23824
23825 /* Do the EQ. */
23826 switch (op_mode)
23827 {
23828 case SFmode:
23829 cmp = (flag_finite_math_only && !flag_trapping_math)
23830 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
23831 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
23832 break;
23833
23834 case DFmode:
23835 cmp = (flag_finite_math_only && !flag_trapping_math)
23836 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
23837 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
23838 break;
23839
23840 case TFmode:
23841 case IFmode:
23842 case KFmode:
23843 cmp = (flag_finite_math_only && !flag_trapping_math)
23844 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
23845 : gen_cmptfeq_gpr (compare_result2, op0, op1);
23846 break;
23847
23848 default:
23849 gcc_unreachable ();
23850 }
23851
23852 emit_insn (cmp);
23853
23854 /* OR them together. */
23855 or_result = gen_reg_rtx (CCFPmode);
23856 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
23857 compare_result2);
23858 compare_result = or_result;
23859 }
23860
23861 code = reverse_p ? NE : EQ;
23862
23863 emit_insn (cmp);
23864 }
23865
23866 /* IEEE 128-bit support in VSX registers when we do not have hardware
23867 support. */
23868 else if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
23869 {
23870 rtx libfunc = NULL_RTX;
23871 bool check_nan = false;
23872 rtx dest;
23873
23874 switch (code)
23875 {
23876 case EQ:
23877 case NE:
23878 libfunc = optab_libfunc (eq_optab, mode);
23879 break;
23880
23881 case GT:
23882 case GE:
23883 libfunc = optab_libfunc (ge_optab, mode);
23884 break;
23885
23886 case LT:
23887 case LE:
23888 libfunc = optab_libfunc (le_optab, mode);
23889 break;
23890
23891 case UNORDERED:
23892 case ORDERED:
23893 libfunc = optab_libfunc (unord_optab, mode);
23894 code = (code == UNORDERED) ? NE : EQ;
23895 break;
23896
23897 case UNGE:
23898 case UNGT:
23899 check_nan = true;
23900 libfunc = optab_libfunc (ge_optab, mode);
23901 code = (code == UNGE) ? GE : GT;
23902 break;
23903
23904 case UNLE:
23905 case UNLT:
23906 check_nan = true;
23907 libfunc = optab_libfunc (le_optab, mode);
23908 code = (code == UNLE) ? LE : LT;
23909 break;
23910
23911 case UNEQ:
23912 case LTGT:
23913 check_nan = true;
23914 libfunc = optab_libfunc (eq_optab, mode);
23915 code = (code == UNEQ) ? EQ : NE;
23916 break;
23917
23918 default:
23919 gcc_unreachable ();
23920 }
23921
23922 gcc_assert (libfunc);
23923
23924 if (!check_nan)
23925 dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
23926 SImode, 2, op0, mode, op1, mode);
23927
23928 /* The library signals an exception for signalling NaNs, so we need to
23929 handle isgreater, etc. by first checking isordered. */
23930 else
23931 {
23932 rtx ne_rtx, normal_dest, unord_dest;
23933 rtx unord_func = optab_libfunc (unord_optab, mode);
23934 rtx join_label = gen_label_rtx ();
23935 rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
23936 rtx unord_cmp = gen_reg_rtx (comp_mode);
23937
23938
23939 /* Test for either value being a NaN. */
23940 gcc_assert (unord_func);
23941 unord_dest = emit_library_call_value (unord_func, NULL_RTX, LCT_CONST,
23942 SImode, 2, op0, mode, op1,
23943 mode);
23944
23945 /* Set the value to 1 if either input is a NaN, and jump to the join
23946 label.  */
23947 dest = gen_reg_rtx (SImode);
23948 emit_move_insn (dest, const1_rtx);
23949 emit_insn (gen_rtx_SET (unord_cmp,
23950 gen_rtx_COMPARE (comp_mode, unord_dest,
23951 const0_rtx)));
23952
23953 ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
23954 emit_jump_insn (gen_rtx_SET (pc_rtx,
23955 gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
23956 join_ref,
23957 pc_rtx)));
23958
23959 /* Do the normal comparison, knowing that the values are not
23960 NaNs. */
23961 normal_dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
23962 SImode, 2, op0, mode, op1,
23963 mode);
23964
23965 emit_insn (gen_cstoresi4 (dest,
23966 gen_rtx_fmt_ee (code, SImode, normal_dest,
23967 const0_rtx),
23968 normal_dest, const0_rtx));
23969
23970 /* Join NaN and non-NaN paths. Compare dest against 0. */
23971 emit_label (join_label);
23972 code = NE;
23973 }
23974
23975 emit_insn (gen_rtx_SET (compare_result,
23976 gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
23977 }
23978
23979 else
23980 {
23981 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
23982 CLOBBERs to match cmptf_internal2 pattern. */
23983 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
23984 && FLOAT128_IBM_P (GET_MODE (op0))
23985 && TARGET_HARD_FLOAT && TARGET_FPRS)
23986 emit_insn (gen_rtx_PARALLEL (VOIDmode,
23987 gen_rtvec (10,
23988 gen_rtx_SET (compare_result,
23989 gen_rtx_COMPARE (comp_mode, op0, op1)),
23990 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
23991 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
23992 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
23993 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
23994 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
23995 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
23996 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
23997 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
23998 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
23999 else if (GET_CODE (op1) == UNSPEC
24000 && XINT (op1, 1) == UNSPEC_SP_TEST)
24001 {
24002 rtx op1b = XVECEXP (op1, 0, 0);
24003 comp_mode = CCEQmode;
24004 compare_result = gen_reg_rtx (CCEQmode);
24005 if (TARGET_64BIT)
24006 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
24007 else
24008 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
24009 }
24010 else
24011 emit_insn (gen_rtx_SET (compare_result,
24012 gen_rtx_COMPARE (comp_mode, op0, op1)));
24013 }
24014
24015 /* Some kinds of FP comparisons need an OR operation;
24016 under flag_finite_math_only we don't bother. */
24017 if (FLOAT_MODE_P (mode)
24018 && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
24019 && !flag_finite_math_only
24020 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
24021 && (code == LE || code == GE
24022 || code == UNEQ || code == LTGT
24023 || code == UNGT || code == UNLT))
24024 {
24025 enum rtx_code or1, or2;
24026 rtx or1_rtx, or2_rtx, compare2_rtx;
24027 rtx or_result = gen_reg_rtx (CCEQmode);
24028
24029 switch (code)
24030 {
24031 case LE: or1 = LT; or2 = EQ; break;
24032 case GE: or1 = GT; or2 = EQ; break;
24033 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
24034 case LTGT: or1 = LT; or2 = GT; break;
24035 case UNGT: or1 = UNORDERED; or2 = GT; break;
24036 case UNLT: or1 = UNORDERED; or2 = LT; break;
24037 default: gcc_unreachable ();
24038 }
24039 validate_condition_mode (or1, comp_mode);
24040 validate_condition_mode (or2, comp_mode);
24041 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
24042 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
24043 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
24044 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
24045 const_true_rtx);
24046 emit_insn (gen_rtx_SET (or_result, compare2_rtx));
24047
24048 compare_result = or_result;
24049 code = EQ;
24050 }
24051
24052 validate_condition_mode (code, GET_MODE (compare_result));
24053
24054 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
24055 }
24056
24057 \f
24058 /* Return the diagnostic message string if the binary operation OP is
24059 not permitted on TYPE1 and TYPE2, NULL otherwise. */
24060
24061 static const char*
24062 rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
24063 const_tree type1,
24064 const_tree type2)
24065 {
24066 enum machine_mode mode1 = TYPE_MODE (type1);
24067 enum machine_mode mode2 = TYPE_MODE (type2);
24068
24069 /* For complex modes, use the inner type. */
24070 if (COMPLEX_MODE_P (mode1))
24071 mode1 = GET_MODE_INNER (mode1);
24072
24073 if (COMPLEX_MODE_P (mode2))
24074 mode2 = GET_MODE_INNER (mode2);
24075
24076 /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
24077 double to intermix unless -mfloat128-convert. */
24078 if (mode1 == mode2)
24079 return NULL;
24080
24081 if (!TARGET_FLOAT128_CVT)
24082 {
24083 if ((mode1 == KFmode && mode2 == IFmode)
24084 || (mode1 == IFmode && mode2 == KFmode))
24085 return N_("__float128 and __ibm128 cannot be used in the same "
24086 "expression");
24087
24088 if (TARGET_IEEEQUAD
24089 && ((mode1 == IFmode && mode2 == TFmode)
24090 || (mode1 == TFmode && mode2 == IFmode)))
24091 return N_("__ibm128 and long double cannot be used in the same "
24092 "expression");
24093
24094 if (!TARGET_IEEEQUAD
24095 && ((mode1 == KFmode && mode2 == TFmode)
24096 || (mode1 == TFmode && mode2 == KFmode)))
24097 return N_("__float128 and long double cannot be used in the same "
24098 "expression");
24099 }
24100
24101 return NULL;
24102 }
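
/* An illustrative example of the check above (hypothetical user code):

     __float128 q;
     __ibm128 d;
     ... q + d ...   => "__float128 and __ibm128 cannot be used in the
                         same expression"

   unless -mfloat128-convert is given, in which case TARGET_FLOAT128_CVT
   is set and the mix is allowed.  */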
24103
24104 \f
24105 /* Expand floating point conversion to/from __float128 and __ibm128. */
24106
24107 void
24108 rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
24109 {
24110 machine_mode dest_mode = GET_MODE (dest);
24111 machine_mode src_mode = GET_MODE (src);
24112 convert_optab cvt = unknown_optab;
24113 bool do_move = false;
24114 rtx libfunc = NULL_RTX;
24115 rtx dest2;
24116 typedef rtx (*rtx_2func_t) (rtx, rtx);
24117 rtx_2func_t hw_convert = (rtx_2func_t)0;
24118 size_t kf_or_tf;
24119
24120 struct hw_conv_t {
24121 rtx_2func_t from_df;
24122 rtx_2func_t from_sf;
24123 rtx_2func_t from_si_sign;
24124 rtx_2func_t from_si_uns;
24125 rtx_2func_t from_di_sign;
24126 rtx_2func_t from_di_uns;
24127 rtx_2func_t to_df;
24128 rtx_2func_t to_sf;
24129 rtx_2func_t to_si_sign;
24130 rtx_2func_t to_si_uns;
24131 rtx_2func_t to_di_sign;
24132 rtx_2func_t to_di_uns;
24133 } hw_conversions[2] = {
24134 /* conversions to/from KFmode */
24135 {
24136 gen_extenddfkf2_hw, /* KFmode <- DFmode. */
24137 gen_extendsfkf2_hw, /* KFmode <- SFmode. */
24138 gen_float_kfsi2_hw, /* KFmode <- SImode (signed). */
24139 gen_floatuns_kfsi2_hw, /* KFmode <- SImode (unsigned). */
24140 gen_float_kfdi2_hw, /* KFmode <- DImode (signed). */
24141 gen_floatuns_kfdi2_hw, /* KFmode <- DImode (unsigned). */
24142 gen_trunckfdf2_hw, /* DFmode <- KFmode. */
24143 gen_trunckfsf2_hw, /* SFmode <- KFmode. */
24144 gen_fix_kfsi2_hw, /* SImode <- KFmode (signed). */
24145 gen_fixuns_kfsi2_hw, /* SImode <- KFmode (unsigned). */
24146 gen_fix_kfdi2_hw, /* DImode <- KFmode (signed). */
24147 gen_fixuns_kfdi2_hw, /* DImode <- KFmode (unsigned). */
24148 },
24149
24150 /* conversions to/from TFmode */
24151 {
24152 gen_extenddftf2_hw, /* TFmode <- DFmode. */
24153 gen_extendsftf2_hw, /* TFmode <- SFmode. */
24154 gen_float_tfsi2_hw, /* TFmode <- SImode (signed). */
24155 gen_floatuns_tfsi2_hw, /* TFmode <- SImode (unsigned). */
24156 gen_float_tfdi2_hw, /* TFmode <- DImode (signed). */
24157 gen_floatuns_tfdi2_hw, /* TFmode <- DImode (unsigned). */
24158 gen_trunctfdf2_hw, /* DFmode <- TFmode. */
24159 gen_trunctfsf2_hw, /* SFmode <- TFmode. */
24160 gen_fix_tfsi2_hw, /* SImode <- TFmode (signed). */
24161 gen_fixuns_tfsi2_hw, /* SImode <- TFmode (unsigned). */
24162 gen_fix_tfdi2_hw, /* DImode <- TFmode (signed). */
24163 gen_fixuns_tfdi2_hw, /* DImode <- TFmode (unsigned). */
24164 },
24165 };
24166
24167 if (dest_mode == src_mode)
24168 gcc_unreachable ();
24169
24170 /* Eliminate memory operations. */
24171 if (MEM_P (src))
24172 src = force_reg (src_mode, src);
24173
24174 if (MEM_P (dest))
24175 {
24176 rtx tmp = gen_reg_rtx (dest_mode);
24177 rs6000_expand_float128_convert (tmp, src, unsigned_p);
24178 rs6000_emit_move (dest, tmp, dest_mode);
24179 return;
24180 }
24181
24182 /* Convert to IEEE 128-bit floating point. */
24183 if (FLOAT128_IEEE_P (dest_mode))
24184 {
24185 if (dest_mode == KFmode)
24186 kf_or_tf = 0;
24187 else if (dest_mode == TFmode)
24188 kf_or_tf = 1;
24189 else
24190 gcc_unreachable ();
24191
24192 switch (src_mode)
24193 {
24194 case DFmode:
24195 cvt = sext_optab;
24196 hw_convert = hw_conversions[kf_or_tf].from_df;
24197 break;
24198
24199 case SFmode:
24200 cvt = sext_optab;
24201 hw_convert = hw_conversions[kf_or_tf].from_sf;
24202 break;
24203
24204 case KFmode:
24205 case IFmode:
24206 case TFmode:
24207 if (FLOAT128_IBM_P (src_mode))
24208 cvt = sext_optab;
24209 else
24210 do_move = true;
24211 break;
24212
24213 case SImode:
24214 if (unsigned_p)
24215 {
24216 cvt = ufloat_optab;
24217 hw_convert = hw_conversions[kf_or_tf].from_si_uns;
24218 }
24219 else
24220 {
24221 cvt = sfloat_optab;
24222 hw_convert = hw_conversions[kf_or_tf].from_si_sign;
24223 }
24224 break;
24225
24226 case DImode:
24227 if (unsigned_p)
24228 {
24229 cvt = ufloat_optab;
24230 hw_convert = hw_conversions[kf_or_tf].from_di_uns;
24231 }
24232 else
24233 {
24234 cvt = sfloat_optab;
24235 hw_convert = hw_conversions[kf_or_tf].from_di_sign;
24236 }
24237 break;
24238
24239 default:
24240 gcc_unreachable ();
24241 }
24242 }
24243
24244 /* Convert from IEEE 128-bit floating point. */
24245 else if (FLOAT128_IEEE_P (src_mode))
24246 {
24247 if (src_mode == KFmode)
24248 kf_or_tf = 0;
24249 else if (src_mode == TFmode)
24250 kf_or_tf = 1;
24251 else
24252 gcc_unreachable ();
24253
24254 switch (dest_mode)
24255 {
24256 case DFmode:
24257 cvt = trunc_optab;
24258 hw_convert = hw_conversions[kf_or_tf].to_df;
24259 break;
24260
24261 case SFmode:
24262 cvt = trunc_optab;
24263 hw_convert = hw_conversions[kf_or_tf].to_sf;
24264 break;
24265
24266 case KFmode:
24267 case IFmode:
24268 case TFmode:
24269 if (FLOAT128_IBM_P (dest_mode))
24270 cvt = trunc_optab;
24271 else
24272 do_move = true;
24273 break;
24274
24275 case SImode:
24276 if (unsigned_p)
24277 {
24278 cvt = ufix_optab;
24279 hw_convert = hw_conversions[kf_or_tf].to_si_uns;
24280 }
24281 else
24282 {
24283 cvt = sfix_optab;
24284 hw_convert = hw_conversions[kf_or_tf].to_si_sign;
24285 }
24286 break;
24287
24288 case DImode:
24289 if (unsigned_p)
24290 {
24291 cvt = ufix_optab;
24292 hw_convert = hw_conversions[kf_or_tf].to_di_uns;
24293 }
24294 else
24295 {
24296 cvt = sfix_optab;
24297 hw_convert = hw_conversions[kf_or_tf].to_di_sign;
24298 }
24299 break;
24300
24301 default:
24302 gcc_unreachable ();
24303 }
24304 }
24305
24306 /* Both IBM format. */
24307 else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
24308 do_move = true;
24309
24310 else
24311 gcc_unreachable ();
24312
24313 /* Handle conversion between TFmode/KFmode. */
24314 if (do_move)
24315 emit_move_insn (dest, gen_lowpart (dest_mode, src));
24316
24317 /* Handle conversion if we have hardware support. */
24318 else if (TARGET_FLOAT128_HW && hw_convert)
24319 emit_insn ((hw_convert) (dest, src));
24320
24321 /* Call an external function to do the conversion. */
24322 else if (cvt != unknown_optab)
24323 {
24324 libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
24325 gcc_assert (libfunc != NULL_RTX);
24326
24327 dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode, 1, src,
24328 src_mode);
24329
24330 gcc_assert (dest2 != NULL_RTX);
24331 if (!rtx_equal_p (dest, dest2))
24332 emit_move_insn (dest, dest2);
24333 }
24334
24335 else
24336 gcc_unreachable ();
24337
24338 return;
24339 }
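
/* A sketch of how the cases above combine: converting DImode to KFmode
   with -mfloat128-hardware emits gen_float_kfdi2_hw directly; without
   hardware support the sfloat_optab libfunc is called instead (in libgcc
   this is typically a routine such as __floatdikf; the name is shown for
   illustration only).  */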
24340
24341 /* Split a conversion from __float128 to an integer type into separate insns.
24342 OPERANDS points to the destination, source, and V2DI temporary
24343 register. CODE is either FIX or UNSIGNED_FIX. */
24344
24345 void
24346 convert_float128_to_int (rtx *operands, enum rtx_code code)
24347 {
24348 rtx dest = operands[0];
24349 rtx src = operands[1];
24350 rtx tmp = operands[2];
24351 rtx cvt;
24352 rtvec cvt_vec;
24353 rtx cvt_unspec;
24354 rtvec move_vec;
24355 rtx move_unspec;
24356
24357 if (GET_CODE (tmp) == SCRATCH)
24358 tmp = gen_reg_rtx (V2DImode);
24359
24360 if (MEM_P (dest))
24361 dest = rs6000_address_for_fpconvert (dest);
24362
24363 /* Generate the actual convert insn of the form:
24364 (set (tmp) (unspec:V2DI [(fix:SI (reg:KF))] UNSPEC_IEEE128_CONVERT)). */
24365 cvt = gen_rtx_fmt_e (code, GET_MODE (dest), src);
24366 cvt_vec = gen_rtvec (1, cvt);
24367 cvt_unspec = gen_rtx_UNSPEC (V2DImode, cvt_vec, UNSPEC_IEEE128_CONVERT);
24368 emit_insn (gen_rtx_SET (tmp, cvt_unspec));
24369
24370 /* Generate the move insn of the form:
24371 (set (dest:SI) (unspec:SI [(tmp:V2DI)] UNSPEC_IEEE128_MOVE)). */
24372 move_vec = gen_rtvec (1, tmp);
24373 move_unspec = gen_rtx_UNSPEC (GET_MODE (dest), move_vec, UNSPEC_IEEE128_MOVE);
24374 emit_insn (gen_rtx_SET (dest, move_unspec));
24375 }
24376
24377 /* Split a conversion from an integer type to __float128 into separate insns.
24378 OPERANDS points to the destination, source, and V2DI temporary
24379 register. CODE is either FLOAT or UNSIGNED_FLOAT. */
24380
24381 void
24382 convert_int_to_float128 (rtx *operands, enum rtx_code code)
24383 {
24384 rtx dest = operands[0];
24385 rtx src = operands[1];
24386 rtx tmp = operands[2];
24387 rtx cvt;
24388 rtvec cvt_vec;
24389 rtx cvt_unspec;
24390 rtvec move_vec;
24391 rtx move_unspec;
24392 rtx unsigned_flag;
24393
24394 if (GET_CODE (tmp) == SCRATCH)
24395 tmp = gen_reg_rtx (V2DImode);
24396
24397 if (MEM_P (src))
24398 src = rs6000_address_for_fpconvert (src);
24399
24400 /* Generate the move of the integer into the Altivec register of the form:
24401 (set (tmp:V2DI) (unspec:V2DI [(src:SI)
24402 (const_int 0)] UNSPEC_IEEE128_MOVE)).
24403
24404 or:
24405 (set (tmp:V2DI) (unspec:V2DI [(src:DI)] UNSPEC_IEEE128_MOVE)). */
24406
24407 if (GET_MODE (src) == SImode)
24408 {
24409 unsigned_flag = (code == UNSIGNED_FLOAT) ? const1_rtx : const0_rtx;
24410 move_vec = gen_rtvec (2, src, unsigned_flag);
24411 }
24412 else
24413 move_vec = gen_rtvec (1, src);
24414
24415 move_unspec = gen_rtx_UNSPEC (V2DImode, move_vec, UNSPEC_IEEE128_MOVE);
24416 emit_insn (gen_rtx_SET (tmp, move_unspec));
24417
24418 /* Generate the actual convert insn of the form:
24419 (set (dest:KF) (float:KF (unspec:DI [(tmp:V2DI)]
24420 UNSPEC_IEEE128_CONVERT))). */
24421 cvt_vec = gen_rtvec (1, tmp);
24422 cvt_unspec = gen_rtx_UNSPEC (DImode, cvt_vec, UNSPEC_IEEE128_CONVERT);
24423 cvt = gen_rtx_fmt_e (code, GET_MODE (dest), cvt_unspec);
24424 emit_insn (gen_rtx_SET (dest, cvt));
24425 }
24426
24427 \f
24428 /* Emit the RTL for an sISEL pattern. */
24429
24430 void
24431 rs6000_emit_sISEL (machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
24432 {
24433 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
24434 }
24435
24436 /* Emit RTL that sets a register to zero if OP1 and OP2 are equal. SCRATCH
24437 can be used as that dest register. Return the dest register. */
24438
24439 rtx
24440 rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
24441 {
24442 if (op2 == const0_rtx)
24443 return op1;
24444
24445 if (GET_CODE (scratch) == SCRATCH)
24446 scratch = gen_reg_rtx (mode);
24447
24448 if (logical_operand (op2, mode))
24449 emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
24450 else
24451 emit_insn (gen_rtx_SET (scratch,
24452 gen_rtx_PLUS (mode, op1, negate_rtx (mode, op2))));
24453
24454 return scratch;
24455 }
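
/* Example of the helper above: comparing OP1 against the constant 5 (a
   logical_operand) emits scratch = op1 ^ 5, which is zero exactly when
   op1 == 5; constants that do not fit use scratch = op1 + (-5) instead.  */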
24456
24457 void
24458 rs6000_emit_sCOND (machine_mode mode, rtx operands[])
24459 {
24460 rtx condition_rtx;
24461 machine_mode op_mode;
24462 enum rtx_code cond_code;
24463 rtx result = operands[0];
24464
24465 condition_rtx = rs6000_generate_compare (operands[1], mode);
24466 cond_code = GET_CODE (condition_rtx);
24467
24468 if (FLOAT_MODE_P (mode)
24469 && !TARGET_FPRS && TARGET_HARD_FLOAT)
24470 {
24471 rtx t;
24472
24473 PUT_MODE (condition_rtx, SImode);
24474 t = XEXP (condition_rtx, 0);
24475
24476 gcc_assert (cond_code == NE || cond_code == EQ);
24477
24478 if (cond_code == NE)
24479 emit_insn (gen_e500_flip_gt_bit (t, t));
24480
24481 emit_insn (gen_move_from_CR_gt_bit (result, t));
24482 return;
24483 }
24484
24485 if (cond_code == NE
24486 || cond_code == GE || cond_code == LE
24487 || cond_code == GEU || cond_code == LEU
24488 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
24489 {
24490 rtx not_result = gen_reg_rtx (CCEQmode);
24491 rtx not_op, rev_cond_rtx;
24492 machine_mode cc_mode;
24493
24494 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
24495
24496 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
24497 SImode, XEXP (condition_rtx, 0), const0_rtx);
24498 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
24499 emit_insn (gen_rtx_SET (not_result, not_op));
24500 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
24501 }
24502
24503 op_mode = GET_MODE (XEXP (operands[1], 0));
24504 if (op_mode == VOIDmode)
24505 op_mode = GET_MODE (XEXP (operands[1], 1));
24506
24507 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
24508 {
24509 PUT_MODE (condition_rtx, DImode);
24510 convert_move (result, condition_rtx, 0);
24511 }
24512 else
24513 {
24514 PUT_MODE (condition_rtx, SImode);
24515 emit_insn (gen_rtx_SET (result, condition_rtx));
24516 }
24517 }
24518
24519 /* Emit a conditional branch. OPERANDS[0] is the comparison (done in
24520 MODE) and OPERANDS[3] is the destination label. */
24520
24521 void
24522 rs6000_emit_cbranch (machine_mode mode, rtx operands[])
24523 {
24524 rtx condition_rtx, loc_ref;
24525
24526 condition_rtx = rs6000_generate_compare (operands[0], mode);
24527 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
24528 emit_jump_insn (gen_rtx_SET (pc_rtx,
24529 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
24530 loc_ref, pc_rtx)));
24531 }
24532
24533 /* Return the string to output a conditional branch to LABEL, which is
24534 the operand template of the label, or NULL if the branch is really a
24535 conditional return.
24536
24537 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
24538 condition code register and its mode specifies what kind of
24539 comparison we made.
24540
24541 REVERSED is nonzero if we should reverse the sense of the comparison.
24542
24543 INSN is the insn. */
24544
24545 char *
24546 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
24547 {
24548 static char string[64];
24549 enum rtx_code code = GET_CODE (op);
24550 rtx cc_reg = XEXP (op, 0);
24551 machine_mode mode = GET_MODE (cc_reg);
24552 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
24553 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
24554 int really_reversed = reversed ^ need_longbranch;
24555 char *s = string;
24556 const char *ccode;
24557 const char *pred;
24558 rtx note;
24559
24560 validate_condition_mode (code, mode);
24561
24562 /* Work out which way this really branches. We could use
24563 reverse_condition_maybe_unordered here always but this
24564 makes the resulting assembler clearer. */
24565 if (really_reversed)
24566 {
24567 /* Reversal of FP compares takes care -- an ordered compare
24568 becomes an unordered compare and vice versa. */
24569 if (mode == CCFPmode)
24570 code = reverse_condition_maybe_unordered (code);
24571 else
24572 code = reverse_condition (code);
24573 }
24574
24575 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
24576 {
24577 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
24578 to the GT bit. */
24579 switch (code)
24580 {
24581 case EQ:
24582 /* Opposite of GT. */
24583 code = GT;
24584 break;
24585
24586 case NE:
24587 code = UNLE;
24588 break;
24589
24590 default:
24591 gcc_unreachable ();
24592 }
24593 }
24594
24595 switch (code)
24596 {
24597 /* Not all of these are actually distinct opcodes, but
24598 we distinguish them for clarity of the resulting assembler. */
24599 case NE: case LTGT:
24600 ccode = "ne"; break;
24601 case EQ: case UNEQ:
24602 ccode = "eq"; break;
24603 case GE: case GEU:
24604 ccode = "ge"; break;
24605 case GT: case GTU: case UNGT:
24606 ccode = "gt"; break;
24607 case LE: case LEU:
24608 ccode = "le"; break;
24609 case LT: case LTU: case UNLT:
24610 ccode = "lt"; break;
24611 case UNORDERED: ccode = "un"; break;
24612 case ORDERED: ccode = "nu"; break;
24613 case UNGE: ccode = "nl"; break;
24614 case UNLE: ccode = "ng"; break;
24615 default:
24616 gcc_unreachable ();
24617 }
24618
24619 /* Maybe we have a guess as to how likely the branch is. */
24620 pred = "";
24621 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
24622 if (note != NULL_RTX)
24623 {
24624 /* PROB is the difference from 50%. */
24625 int prob = XINT (note, 0) - REG_BR_PROB_BASE / 2;
24626
24627 /* Only hint for highly probable/improbable branches on newer cpus when
24628 we have real profile data, as static prediction overrides processor
24629 dynamic prediction. For older cpus we may as well always hint, but
24630 assume not taken for branches that are very close to 50% as a
24631 mispredicted taken branch is more expensive than a
24632 mispredicted not-taken branch. */
24633 if (rs6000_always_hint
24634 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
24635 && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
24636 && br_prob_note_reliable_p (note)))
24637 {
24638 if (abs (prob) > REG_BR_PROB_BASE / 20
24639 && ((prob > 0) ^ need_longbranch))
24640 pred = "+";
24641 else
24642 pred = "-";
24643 }
24644 }
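
  /* Worked example (REG_BR_PROB_BASE is 10000): a note value of 9900 (a
     99% taken branch) gives prob = 4900, above the 48% threshold of 4800,
     so with reliable profile data we hint; |prob| also exceeds 500, and
     for a taken, non-long branch the hint becomes "+".  */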
24645
24646 if (label == NULL)
24647 s += sprintf (s, "b%slr%s ", ccode, pred);
24648 else
24649 s += sprintf (s, "b%s%s ", ccode, pred);
24650
24651 /* We need to escape any '%' characters in the reg_names string.
24652 Assume they'd only be the first character.... */
24653 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
24654 *s++ = '%';
24655 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
24656
24657 if (label != NULL)
24658 {
24659 /* If the branch distance was too far, we may have to use an
24660 unconditional branch to go the distance. */
24661 if (need_longbranch)
24662 s += sprintf (s, ",$+8\n\tb %s", label);
24663 else
24664 s += sprintf (s, ",%s", label);
24665 }
24666
24667 return string;
24668 }
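
/* For illustration (CR field naming follows reg_names): an EQ test on cr6
   with a nearby target might yield "beq 6,.L5", a predicted-taken one
   "beq+ 6,.L5", and an out-of-range target the reversed long form
   "bne 6,$+8" followed by "b .L5".  */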
24669
24670 /* Return the string to flip the GT bit on a CR. */
24671 char *
24672 output_e500_flip_gt_bit (rtx dst, rtx src)
24673 {
24674 static char string[64];
24675 int a, b;
24676
24677 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
24678 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
24679
24680 /* GT bit. */
24681 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
24682 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
24683
24684 sprintf (string, "crnot %d,%d", a, b);
24685 return string;
24686 }
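
/* Example: the GT bit of CR field N is CR bit 4*N + 1, so flipping the GT
   bit of cr3 into cr2 yields a = 9 and b = 13, i.e. "crnot 9,13".  */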
24687
24688 /* Return insn for VSX or Altivec comparisons. */
24689
24690 static rtx
24691 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
24692 {
24693 rtx mask;
24694 machine_mode mode = GET_MODE (op0);
24695
24696 switch (code)
24697 {
24698 default:
24699 break;
24700
24701 case GE:
24702 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
24703 return NULL_RTX;
24704 /* FALLTHRU */
24705
24706 case EQ:
24707 case GT:
24708 case GTU:
24709 case ORDERED:
24710 case UNORDERED:
24711 case UNEQ:
24712 case LTGT:
24713 mask = gen_reg_rtx (mode);
24714 emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
24715 return mask;
24716 }
24717
24718 return NULL_RTX;
24719 }
24720
24721 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
24722 DMODE is expected destination mode. This is a recursive function. */
24723
24724 static rtx
24725 rs6000_emit_vector_compare (enum rtx_code rcode,
24726 rtx op0, rtx op1,
24727 machine_mode dmode)
24728 {
24729 rtx mask;
24730 bool swap_operands = false;
24731 bool try_again = false;
24732
24733 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
24734 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
24735
24736 /* See if the comparison works as is. */
24737 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
24738 if (mask)
24739 return mask;
24740
24741 switch (rcode)
24742 {
24743 case LT:
24744 rcode = GT;
24745 swap_operands = true;
24746 try_again = true;
24747 break;
24748 case LTU:
24749 rcode = GTU;
24750 swap_operands = true;
24751 try_again = true;
24752 break;
24753 case NE:
24754 case UNLE:
24755 case UNLT:
24756 case UNGE:
24757 case UNGT:
24758 /* Invert condition and try again.
24759 e.g., A != B becomes ~(A==B). */
24760 {
24761 enum rtx_code rev_code;
24762 enum insn_code nor_code;
24763 rtx mask2;
24764
24765 rev_code = reverse_condition_maybe_unordered (rcode);
24766 if (rev_code == UNKNOWN)
24767 return NULL_RTX;
24768
24769 nor_code = optab_handler (one_cmpl_optab, dmode);
24770 if (nor_code == CODE_FOR_nothing)
24771 return NULL_RTX;
24772
24773 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
24774 if (!mask2)
24775 return NULL_RTX;
24776
24777 mask = gen_reg_rtx (dmode);
24778 emit_insn (GEN_FCN (nor_code) (mask, mask2));
24779 return mask;
24780 }
24781 break;
24782 case GE:
24783 case GEU:
24784 case LE:
24785 case LEU:
24786 /* Try GT/GTU/LT/LTU OR EQ */
24787 {
24788 rtx c_rtx, eq_rtx;
24789 enum insn_code ior_code;
24790 enum rtx_code new_code;
24791
24792 switch (rcode)
24793 {
24794 case GE:
24795 new_code = GT;
24796 break;
24797
24798 case GEU:
24799 new_code = GTU;
24800 break;
24801
24802 case LE:
24803 new_code = LT;
24804 break;
24805
24806 case LEU:
24807 new_code = LTU;
24808 break;
24809
24810 default:
24811 gcc_unreachable ();
24812 }
24813
24814 ior_code = optab_handler (ior_optab, dmode);
24815 if (ior_code == CODE_FOR_nothing)
24816 return NULL_RTX;
24817
24818 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
24819 if (!c_rtx)
24820 return NULL_RTX;
24821
24822 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
24823 if (!eq_rtx)
24824 return NULL_RTX;
24825
24826 mask = gen_reg_rtx (dmode);
24827 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
24828 return mask;
24829 }
24830 break;
24831 default:
24832 return NULL_RTX;
24833 }
24834
24835 if (try_again)
24836 {
24837 if (swap_operands)
24838 std::swap (op0, op1);
24839
24840 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
24841 if (mask)
24842 return mask;
24843 }
24844
24845 /* You only get two chances. */
24846 return NULL_RTX;
24847 }
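
/* To illustrate the recursion above: a V4SI "a <= b" has no direct
   AltiVec/VSX pattern, so it is built as (a < b) | (a == b), where the LT
   half is itself done as GT with swapped operands; "a != b" is built as
   ~(a == b) via one_cmpl_optab.  */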
24848
24849 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
24850 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
24851 operands for the relation operation COND. */
24852
24853 int
24854 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
24855 rtx cond, rtx cc_op0, rtx cc_op1)
24856 {
24857 machine_mode dest_mode = GET_MODE (dest);
24858 machine_mode mask_mode = GET_MODE (cc_op0);
24859 enum rtx_code rcode = GET_CODE (cond);
24860 machine_mode cc_mode = CCmode;
24861 rtx mask;
24862 rtx cond2;
24863 rtx tmp;
24864 bool invert_move = false;
24865
24866 if (VECTOR_UNIT_NONE_P (dest_mode))
24867 return 0;
24868
24869 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
24870 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
24871
24872 switch (rcode)
24873 {
24874 /* These codes have no direct vector comparison; invert the
24875 condition and swap the move arms instead. */
24876 case NE:
24877 case UNLE:
24878 case UNLT:
24879 case UNGE:
24880 case UNGT:
24881 /* Invert condition and try again.
24882 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
24883 invert_move = true;
24884 rcode = reverse_condition_maybe_unordered (rcode);
24885 if (rcode == UNKNOWN)
24886 return 0;
24887 break;
24888
24889 /* Mark unsigned tests with CCUNSmode. */
24890 case GTU:
24891 case GEU:
24892 case LTU:
24893 case LEU:
24894 cc_mode = CCUNSmode;
24895 break;
24896
24897 default:
24898 break;
24899 }
24900
24901 /* Get the vector mask for the given relational operations. */
24902 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
24903
24904 if (!mask)
24905 return 0;
24906
24907 if (invert_move)
24908 {
24909 tmp = op_true;
24910 op_true = op_false;
24911 op_false = tmp;
24912 }
24913
24914 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
24915 CONST0_RTX (dest_mode));
24916 emit_insn (gen_rtx_SET (dest,
24917 gen_rtx_IF_THEN_ELSE (dest_mode,
24918 cond2,
24919 op_true,
24920 op_false)));
24921 return 1;
24922 }
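
/* E.g. a V4SF "a < b ? x : y" gets its mask from rs6000_emit_vector_compare
   (LT is rewritten there as GT with swapped operands); the select is then
   emitted as (if_then_else (ne mask 0) x y), which the vector select
   patterns implement with vsel/xxsel.  */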
24923
24924 /* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
24925 for SF/DF scalars. Move TRUE_COND to DEST if OP of the operands of the last
24926 comparison is nonzero/true, FALSE_COND if it is zero/false. Return 0 if the
24927 hardware has no such operation. */
24928
24929 static int
24930 rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
24931 {
24932 enum rtx_code code = GET_CODE (op);
24933 rtx op0 = XEXP (op, 0);
24934 rtx op1 = XEXP (op, 1);
24935 machine_mode compare_mode = GET_MODE (op0);
24936 machine_mode result_mode = GET_MODE (dest);
24937 bool max_p = false;
24938
24939 if (result_mode != compare_mode)
24940 return 0;
24941
24942 if (code == GE || code == GT)
24943 max_p = true;
24944 else if (code == LE || code == LT)
24945 max_p = false;
24946 else
24947 return 0;
24948
24949 if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
24950 ;
24951
24952 else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
24953 max_p = !max_p;
24954
24955 else
24956 return 0;
24957
24958 rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
24959 return 1;
24960 }
24961
24962 /* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
24963 XXSEL instructions for SF/DF scalars. Move TRUE_COND to DEST if OP of the
24964 operands of the last comparison is nonzero/true, FALSE_COND if it is
24965 zero/false. Return 0 if the hardware has no such operation. */
24966
24967 static int
24968 rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
24969 {
24970 enum rtx_code code = GET_CODE (op);
24971 rtx op0 = XEXP (op, 0);
24972 rtx op1 = XEXP (op, 1);
24973 machine_mode result_mode = GET_MODE (dest);
24974 rtx compare_rtx;
24975 rtx cmove_rtx;
24976 rtx clobber_rtx;
24977
24978 if (!can_create_pseudo_p ())
24979 return 0;
24980
24981 switch (code)
24982 {
24983 case EQ:
24984 case GE:
24985 case GT:
24986 break;
24987
24988 case NE:
24989 case LT:
24990 case LE:
24991 code = swap_condition (code);
24992 std::swap (op0, op1);
24993 break;
24994
24995 default:
24996 return 0;
24997 }
24998
24999 /* Generate: [(parallel [(set (dest)
25000 (if_then_else (op (cmp1) (cmp2))
25001 (true)
25002 (false)))
25003 (clobber (scratch))])]. */
25004
25005 compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
25006 cmove_rtx = gen_rtx_SET (dest,
25007 gen_rtx_IF_THEN_ELSE (result_mode,
25008 compare_rtx,
25009 true_cond,
25010 false_cond));
25011
25012 clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
25013 emit_insn (gen_rtx_PARALLEL (VOIDmode,
25014 gen_rtvec (2, cmove_rtx, clobber_rtx)));
25015
25016 return 1;
25017 }
25018
25019 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
25020 operands of the last comparison is nonzero/true, FALSE_COND if it
25021 is zero/false. Return 0 if the hardware has no such operation. */
25022
25023 int
25024 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
25025 {
25026 enum rtx_code code = GET_CODE (op);
25027 rtx op0 = XEXP (op, 0);
25028 rtx op1 = XEXP (op, 1);
25029 machine_mode compare_mode = GET_MODE (op0);
25030 machine_mode result_mode = GET_MODE (dest);
25031 rtx temp;
25032 bool is_against_zero;
25033
25034 /* These modes should always match. */
25035 if (GET_MODE (op1) != compare_mode
25036 /* In the isel case however, we can use a compare immediate, so
25037 op1 may be a small constant. */
25038 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
25039 return 0;
25040 if (GET_MODE (true_cond) != result_mode)
25041 return 0;
25042 if (GET_MODE (false_cond) != result_mode)
25043 return 0;
25044
25045 /* See if we can use the ISA 3.0 (power9) min/max/compare functions. */
25046 if (TARGET_P9_MINMAX
25047 && (compare_mode == SFmode || compare_mode == DFmode)
25048 && (result_mode == SFmode || result_mode == DFmode))
25049 {
25050 if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
25051 return 1;
25052
25053 if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
25054 return 1;
25055 }
25056
25057 /* Don't allow using floating point comparisons for integer results for
25058 now. */
25059 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
25060 return 0;
25061
25062 /* First, work out if the hardware can do this at all, or
25063 if it's too slow.... */
25064 if (!FLOAT_MODE_P (compare_mode))
25065 {
25066 if (TARGET_ISEL)
25067 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
25068 return 0;
25069 }
25070 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
25071 && SCALAR_FLOAT_MODE_P (compare_mode))
25072 return 0;
25073
25074 is_against_zero = op1 == CONST0_RTX (compare_mode);
25075
25076 /* A floating-point subtract might overflow, underflow, or produce
25077 an inexact result, thus changing the floating-point flags, so it
25078 can't be generated if we care about that. It's safe if one side
25079 of the construct is zero, since then no subtract will be
25080 generated. */
25081 if (SCALAR_FLOAT_MODE_P (compare_mode)
25082 && flag_trapping_math && ! is_against_zero)
25083 return 0;
25084
25085 /* Eliminate half of the comparisons by switching operands, this
25086 makes the remaining code simpler. */
25087 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
25088 || code == LTGT || code == LT || code == UNLE)
25089 {
25090 code = reverse_condition_maybe_unordered (code);
25091 temp = true_cond;
25092 true_cond = false_cond;
25093 false_cond = temp;
25094 }
25095
25096 /* UNEQ and LTGT take four instructions for a comparison with zero,
25097 it'll probably be faster to use a branch here too. */
25098 if (code == UNEQ && HONOR_NANS (compare_mode))
25099 return 0;
25100
25101 /* We're going to try to implement comparisons by performing
25102 a subtract, then comparing against zero. Unfortunately,
25103 Inf - Inf is NaN which is not zero, and so if we don't
25104 know that the operand is finite and the comparison
25105 would treat EQ differently from UNORDERED, we can't do it. */
25106 if (HONOR_INFINITIES (compare_mode)
25107 && code != GT && code != UNGE
25108 && (GET_CODE (op1) != CONST_DOUBLE
25109 || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
25110 /* Constructs of the form (a OP b ? a : b) are safe. */
25111 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
25112 || (! rtx_equal_p (op0, true_cond)
25113 && ! rtx_equal_p (op1, true_cond))))
25114 return 0;
25115
25116 /* At this point we know we can use fsel. */
25117
25118 /* Reduce the comparison to a comparison against zero. */
25119 if (! is_against_zero)
25120 {
25121 temp = gen_reg_rtx (compare_mode);
25122 emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
25123 op0 = temp;
25124 op1 = CONST0_RTX (compare_mode);
25125 }
25126
25127 /* If we don't care about NaNs we can reduce some of the comparisons
25128 down to faster ones. */
25129 if (! HONOR_NANS (compare_mode))
25130 switch (code)
25131 {
25132 case GT:
25133 code = LE;
25134 temp = true_cond;
25135 true_cond = false_cond;
25136 false_cond = temp;
25137 break;
25138 case UNGE:
25139 code = GE;
25140 break;
25141 case UNEQ:
25142 code = EQ;
25143 break;
25144 default:
25145 break;
25146 }
25147
25148 /* Now, reduce everything down to a GE. */
25149 switch (code)
25150 {
25151 case GE:
25152 break;
25153
25154 case LE:
25155 temp = gen_reg_rtx (compare_mode);
25156 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
25157 op0 = temp;
25158 break;
25159
25160 case ORDERED:
25161 temp = gen_reg_rtx (compare_mode);
25162 emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
25163 op0 = temp;
25164 break;
25165
25166 case EQ:
25167 temp = gen_reg_rtx (compare_mode);
25168 emit_insn (gen_rtx_SET (temp,
25169 gen_rtx_NEG (compare_mode,
25170 gen_rtx_ABS (compare_mode, op0))));
25171 op0 = temp;
25172 break;
25173
25174 case UNGE:
25175 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
25176 temp = gen_reg_rtx (result_mode);
25177 emit_insn (gen_rtx_SET (temp,
25178 gen_rtx_IF_THEN_ELSE (result_mode,
25179 gen_rtx_GE (VOIDmode,
25180 op0, op1),
25181 true_cond, false_cond)));
25182 false_cond = true_cond;
25183 true_cond = temp;
25184
25185 temp = gen_reg_rtx (compare_mode);
25186 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
25187 op0 = temp;
25188 break;
25189
25190 case GT:
25191 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
25192 temp = gen_reg_rtx (result_mode);
25193 emit_insn (gen_rtx_SET (temp,
25194 gen_rtx_IF_THEN_ELSE (result_mode,
25195 gen_rtx_GE (VOIDmode,
25196 op0, op1),
25197 true_cond, false_cond)));
25198 true_cond = false_cond;
25199 false_cond = temp;
25200
25201 temp = gen_reg_rtx (compare_mode);
25202 emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
25203 op0 = temp;
25204 break;
25205
25206 default:
25207 gcc_unreachable ();
25208 }
25209
25210 emit_insn (gen_rtx_SET (dest,
25211 gen_rtx_IF_THEN_ELSE (result_mode,
25212 gen_rtx_GE (VOIDmode,
25213 op0, op1),
25214 true_cond, false_cond)));
25215 return 1;
25216 }
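
/* A sketch of the fsel reduction above, assuming finite math (with NaNs or
   infinities honored the expansion may be rejected instead): "a <= b ? x : y"
   first becomes a compare of t = a - b against zero; LE is then turned into
   GE by negating t, so fsel directly implements "-t >= 0 ? x : y".  */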
25217
25218 /* Same as above, but for ints (isel). */
25219
25220 static int
25221 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
25222 {
25223 rtx condition_rtx, cr;
25224 machine_mode mode = GET_MODE (dest);
25225 enum rtx_code cond_code;
25226 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
25227 bool signedp;
25228
25229 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
25230 return 0;
25231
25232 /* We still have to do the compare, because isel doesn't do a
25233 compare, it just looks at the CRx bits set by a previous compare
25234 instruction. */
25235 condition_rtx = rs6000_generate_compare (op, mode);
25236 cond_code = GET_CODE (condition_rtx);
25237 cr = XEXP (condition_rtx, 0);
25238 signedp = GET_MODE (cr) == CCmode;
25239
25240 isel_func = (mode == SImode
25241 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
25242 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
25243
25244 switch (cond_code)
25245 {
25246 case LT: case GT: case LTU: case GTU: case EQ:
25247 /* isel handles these directly. */
25248 break;
25249
25250 default:
25251 /* We need to swap the sense of the comparison. */
25252 {
25253 std::swap (false_cond, true_cond);
25254 PUT_CODE (condition_rtx, reverse_condition (cond_code));
25255 }
25256 break;
25257 }
25258
25259 false_cond = force_reg (mode, false_cond);
25260 if (true_cond != const0_rtx)
25261 true_cond = force_reg (mode, true_cond);
25262
25263 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
25264
25265 return 1;
25266 }
25267
25268 const char *
25269 output_isel (rtx *operands)
25270 {
25271 enum rtx_code code;
25272
25273 code = GET_CODE (operands[1]);
25274
25275 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
25276 {
25277 gcc_assert (GET_CODE (operands[2]) == REG
25278 && GET_CODE (operands[3]) == REG);
25279 PUT_CODE (operands[1], reverse_condition (code));
25280 return "isel %0,%3,%2,%j1";
25281 }
25282
25283 return "isel %0,%2,%3,%j1";
25284 }
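
/* Example: isel handles only LT/GT/LTU/GTU/EQ against the CR bit directly,
   so a GE test is rewritten as LT and the two source operands are swapped
   in the template, giving "isel %0,%3,%2,%j1".  */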
25285
25286 void
25287 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
25288 {
25289 machine_mode mode = GET_MODE (op0);
25290 enum rtx_code c;
25291 rtx target;
25292
25293 /* VSX/altivec have direct min/max insns. */
25294 if ((code == SMAX || code == SMIN)
25295 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
25296 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
25297 {
25298 emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
25299 return;
25300 }
25301
25302 if (code == SMAX || code == SMIN)
25303 c = GE;
25304 else
25305 c = GEU;
25306
25307 if (code == SMAX || code == UMAX)
25308 target = emit_conditional_move (dest, c, op0, op1, mode,
25309 op0, op1, mode, 0);
25310 else
25311 target = emit_conditional_move (dest, c, op0, op1, mode,
25312 op1, op0, mode, 0);
25313 gcc_assert (target);
25314 if (target != dest)
25315 emit_move_insn (dest, target);
25316 }
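
/* Without a direct vector/VSX min-max pattern, the fallback above builds
   SMAX as a conditional move, roughly dest = (op0 >= op1) ? op0 : op1,
   and SMIN as dest = (op0 >= op1) ? op1 : op0.  */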
25317
25318 /* Split a signbit operation on 64-bit machines with direct move. Also allow
25319 for the value to come from memory or if it is already loaded into a GPR. */
25320
25321 void
25322 rs6000_split_signbit (rtx dest, rtx src)
25323 {
25324 machine_mode d_mode = GET_MODE (dest);
25325 machine_mode s_mode = GET_MODE (src);
25326 rtx dest_di = (d_mode == DImode) ? dest : gen_lowpart (DImode, dest);
25327 rtx shift_reg = dest_di;
25328
25329 gcc_assert (FLOAT128_IEEE_P (s_mode) && TARGET_POWERPC64);
25330
25331 if (MEM_P (src))
25332 {
25333 rtx mem = (WORDS_BIG_ENDIAN
25334 ? adjust_address (src, DImode, 0)
25335 : adjust_address (src, DImode, 8));
25336 emit_insn (gen_rtx_SET (dest_di, mem));
25337 }
25338
25339 else
25340 {
25341 unsigned int r = reg_or_subregno (src);
25342
25343 if (INT_REGNO_P (r))
25344 shift_reg = gen_rtx_REG (DImode, r + (BYTES_BIG_ENDIAN == 0));
25345
25346 else
25347 {
25348 /* Generate the special mfvsrd instruction to get it in a GPR. */
25349 gcc_assert (VSX_REGNO_P (r));
25350 if (s_mode == KFmode)
25351 emit_insn (gen_signbitkf2_dm2 (dest_di, src));
25352 else
25353 emit_insn (gen_signbittf2_dm2 (dest_di, src));
25354 }
25355 }
25356
25357 emit_insn (gen_lshrdi3 (dest_di, shift_reg, GEN_INT (63)));
25358 return;
25359 }
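
/* Sketch of the split above for a __float128 value: if it lives in a VSX
   register, a direct-move pattern (e.g. gen_signbitkf2_dm2) first gets the
   sign-carrying doubleword into a GPR; in every case the final step is a
   logical right shift by 63, leaving only the sign bit in DEST.  */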
25360
25361 /* A subroutine of the atomic operation splitters. Jump to LABEL if
25362 COND is true. Mark the jump as unlikely to be taken. */
25363
25364 static void
25365 emit_unlikely_jump (rtx cond, rtx label)
25366 {
25367 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
25368 rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
25369 rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
25370 add_int_reg_note (insn, REG_BR_PROB, very_unlikely);
25371 }
25372
25373 /* A subroutine of the atomic operation splitters. Emit a load-locked
25374 instruction in MODE. For QI/HImode, possibly use a pattern that includes
25375 the zero_extend operation. */
25376
25377 static void
25378 emit_load_locked (machine_mode mode, rtx reg, rtx mem)
25379 {
25380 rtx (*fn) (rtx, rtx) = NULL;
25381
25382 switch (mode)
25383 {
25384 case QImode:
25385 fn = gen_load_lockedqi;
25386 break;
25387 case HImode:
25388 fn = gen_load_lockedhi;
25389 break;
25390 case SImode:
25391 if (GET_MODE (mem) == QImode)
25392 fn = gen_load_lockedqi_si;
25393 else if (GET_MODE (mem) == HImode)
25394 fn = gen_load_lockedhi_si;
25395 else
25396 fn = gen_load_lockedsi;
25397 break;
25398 case DImode:
25399 fn = gen_load_lockeddi;
25400 break;
25401 case TImode:
25402 fn = gen_load_lockedti;
25403 break;
25404 default:
25405 gcc_unreachable ();
25406 }
25407 emit_insn (fn (reg, mem));
25408 }
25409
25410 /* A subroutine of the atomic operation splitters. Emit a store-conditional
25411 instruction in MODE. */
25412
25413 static void
25414 emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
25415 {
25416 rtx (*fn) (rtx, rtx, rtx) = NULL;
25417
25418 switch (mode)
25419 {
25420 case QImode:
25421 fn = gen_store_conditionalqi;
25422 break;
25423 case HImode:
25424 fn = gen_store_conditionalhi;
25425 break;
25426 case SImode:
25427 fn = gen_store_conditionalsi;
25428 break;
25429 case DImode:
25430 fn = gen_store_conditionaldi;
25431 break;
25432 case TImode:
25433 fn = gen_store_conditionalti;
25434 break;
25435 default:
25436 gcc_unreachable ();
25437 }
25438
25439 /* Emit sync before stwcx. to address PPC405 Erratum. */
25440 if (PPC405_ERRATUM77)
25441 emit_insn (gen_hwsync ());
25442
25443 emit_insn (fn (res, mem, val));
25444 }
25445
25446 /* Expand barriers before and after a load_locked/store_cond sequence. */
25447
25448 static rtx
25449 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
25450 {
25451 rtx addr = XEXP (mem, 0);
25452 int strict_p = (reload_in_progress || reload_completed);
25453
25454 if (!legitimate_indirect_address_p (addr, strict_p)
25455 && !legitimate_indexed_address_p (addr, strict_p))
25456 {
25457 addr = force_reg (Pmode, addr);
25458 mem = replace_equiv_address_nv (mem, addr);
25459 }
25460
25461 switch (model)
25462 {
25463 case MEMMODEL_RELAXED:
25464 case MEMMODEL_CONSUME:
25465 case MEMMODEL_ACQUIRE:
25466 break;
25467 case MEMMODEL_RELEASE:
25468 case MEMMODEL_ACQ_REL:
25469 emit_insn (gen_lwsync ());
25470 break;
25471 case MEMMODEL_SEQ_CST:
25472 emit_insn (gen_hwsync ());
25473 break;
25474 default:
25475 gcc_unreachable ();
25476 }
25477 return mem;
25478 }
25479
25480 static void
25481 rs6000_post_atomic_barrier (enum memmodel model)
25482 {
25483 switch (model)
25484 {
25485 case MEMMODEL_RELAXED:
25486 case MEMMODEL_CONSUME:
25487 case MEMMODEL_RELEASE:
25488 break;
25489 case MEMMODEL_ACQUIRE:
25490 case MEMMODEL_ACQ_REL:
25491 case MEMMODEL_SEQ_CST:
25492 emit_insn (gen_isync ());
25493 break;
25494 default:
25495 gcc_unreachable ();
25496 }
25497 }
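
/* For example, a seq_cst load-locked/store-conditional sequence is
   bracketed by hwsync before and isync after; acquire needs only the
   trailing isync, and release only the leading lwsync.  */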
25498
25499 /* A subroutine of the various atomic expanders. For sub-word operations,
25500 we must adjust things to operate on SImode. Given the original MEM,
25501 return a new aligned memory. Also build and return the quantities by
25502 which to shift and mask. */
25503
25504 static rtx
25505 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
25506 {
25507 rtx addr, align, shift, mask, mem;
25508 HOST_WIDE_INT shift_mask;
25509 machine_mode mode = GET_MODE (orig_mem);
25510
25511 /* For smaller modes, we have to implement this via SImode. */
25512 shift_mask = (mode == QImode ? 0x18 : 0x10);
25513
25514 addr = XEXP (orig_mem, 0);
25515 addr = force_reg (GET_MODE (addr), addr);
25516
25517 /* Aligned memory containing subword. Generate a new memory. We
25518 do not want any of the existing MEM_ATTR data, as we're now
25519 accessing memory outside the original object. */
25520 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
25521 NULL_RTX, 1, OPTAB_LIB_WIDEN);
25522 mem = gen_rtx_MEM (SImode, align);
25523 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
25524 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
25525 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
25526
25527 /* Shift amount for subword relative to aligned word. */
25528 shift = gen_reg_rtx (SImode);
25529 addr = gen_lowpart (SImode, addr);
25530 rtx tmp = gen_reg_rtx (SImode);
25531 emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
25532 emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
25533 if (BYTES_BIG_ENDIAN)
25534 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
25535 shift, 1, OPTAB_LIB_WIDEN);
25536 *pshift = shift;
25537
25538 /* Mask for insertion. */
25539 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
25540 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
25541 *pmask = mask;
25542
25543 return mem;
25544 }
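
/* Worked example (illustrative addresses): a QImode access at 0x1003 is
   rewritten as an SImode access at 0x1000.  The shift is
   (0x1003 << 3) & 0x18 = 24, XORed with 0x18 on big-endian targets to
   give 0, and the mask is then 0xff << shift.  */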
25545
25546 /* A subroutine of the various atomic expanders. For sub-word operands,
25547 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
25548
25549 static rtx
25550 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
25551 {
25552 rtx x;
25553
25554 x = gen_reg_rtx (SImode);
25555 emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
25556 gen_rtx_NOT (SImode, mask),
25557 oldval)));
25558
25559 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
25560
25561 return x;
25562 }
25563
25564 /* A subroutine of the various atomic expanders. For sub-word operands,
25565 extract WIDE to NARROW via SHIFT. */
25566
25567 static void
25568 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
25569 {
25570 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
25571 wide, 1, OPTAB_LIB_WIDEN);
25572 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
25573 }
25574
25575 /* Expand an atomic compare and swap operation. */
25576
25577 void
25578 rs6000_expand_atomic_compare_and_swap (rtx operands[])
25579 {
25580 rtx boolval, retval, mem, oldval, newval, cond;
25581 rtx label1, label2, x, mask, shift;
25582 machine_mode mode, orig_mode;
25583 enum memmodel mod_s, mod_f;
25584 bool is_weak;
25585
25586 boolval = operands[0];
25587 retval = operands[1];
25588 mem = operands[2];
25589 oldval = operands[3];
25590 newval = operands[4];
25591 is_weak = (INTVAL (operands[5]) != 0);
25592 mod_s = memmodel_base (INTVAL (operands[6]));
25593 mod_f = memmodel_base (INTVAL (operands[7]));
25594 orig_mode = mode = GET_MODE (mem);
25595
25596 mask = shift = NULL_RTX;
25597 if (mode == QImode || mode == HImode)
25598 {
25599 /* Before power8, we didn't have access to lbarx/lharx, so generate
25600 lwarx and shift/mask operations. With power8, we need to do the
25601 comparison in SImode, but the store is still done in QI/HImode. */
25602 oldval = convert_modes (SImode, mode, oldval, 1);
25603
25604 if (!TARGET_SYNC_HI_QI)
25605 {
25606 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
25607
25608 /* Shift and mask OLDVAL into position within the word. */
25609 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
25610 NULL_RTX, 1, OPTAB_LIB_WIDEN);
25611
25612 /* Shift and mask NEWVAL into position within the word. */
25613 newval = convert_modes (SImode, mode, newval, 1);
25614 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
25615 NULL_RTX, 1, OPTAB_LIB_WIDEN);
25616 }
25617
25618 /* Prepare to adjust the return value. */
25619 retval = gen_reg_rtx (SImode);
25620 mode = SImode;
25621 }
25622 else if (reg_overlap_mentioned_p (retval, oldval))
25623 oldval = copy_to_reg (oldval);
25624
25625 if (mode != TImode && !reg_or_short_operand (oldval, mode))
25626 oldval = copy_to_mode_reg (mode, oldval);
25627
25628 if (reg_overlap_mentioned_p (retval, newval))
25629 newval = copy_to_reg (newval);
25630
25631 mem = rs6000_pre_atomic_barrier (mem, mod_s);
25632
25633 label1 = NULL_RTX;
25634 if (!is_weak)
25635 {
25636 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
25637 emit_label (XEXP (label1, 0));
25638 }
25639 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
25640
25641 emit_load_locked (mode, retval, mem);
25642
25643 x = retval;
25644 if (mask)
25645 x = expand_simple_binop (SImode, AND, retval, mask,
25646 NULL_RTX, 1, OPTAB_LIB_WIDEN);
25647
25648 cond = gen_reg_rtx (CCmode);
25649 /* If we have TImode, synthesize a comparison. */
25650 if (mode != TImode)
25651 x = gen_rtx_COMPARE (CCmode, x, oldval);
25652 else
25653 {
25654 rtx xor1_result = gen_reg_rtx (DImode);
25655 rtx xor2_result = gen_reg_rtx (DImode);
25656 rtx or_result = gen_reg_rtx (DImode);
25657 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
25658 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
25659 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
25660 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
25661
25662 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
25663 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
25664 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
25665 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
25666 }
25667
25668 emit_insn (gen_rtx_SET (cond, x));
25669
25670 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
25671 emit_unlikely_jump (x, label2);
25672
25673 x = newval;
25674 if (mask)
25675 x = rs6000_mask_atomic_subword (retval, newval, mask);
25676
25677 emit_store_conditional (orig_mode, cond, mem, x);
25678
25679 if (!is_weak)
25680 {
25681 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
25682 emit_unlikely_jump (x, label1);
25683 }
25684
25685 if (!is_mm_relaxed (mod_f))
25686 emit_label (XEXP (label2, 0));
25687
25688 rs6000_post_atomic_barrier (mod_s);
25689
25690 if (is_mm_relaxed (mod_f))
25691 emit_label (XEXP (label2, 0));
25692
25693 if (shift)
25694 rs6000_finish_atomic_subword (operands[1], retval, shift);
25695 else if (mode != GET_MODE (operands[1]))
25696 convert_move (operands[1], retval, 1);
25697
25698 /* In all cases, CR0 contains EQ on success, and NE on failure. */
25699 x = gen_rtx_EQ (SImode, cond, const0_rtx);
25700 emit_insn (gen_rtx_SET (boolval, x));
25701 }
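
/* The expansion above yields the classic larx/stcx. retry loop; for a
   strong SImode compare-and-swap it is roughly:

	.L1:	lwarx   r,0,mem
		cmpw    cr0,r,oldval
		bne-    cr0,.L2
		stwcx.  newval,0,mem
		bne-    cr0,.L1
	.L2:

   bracketed by whatever barriers the memory model requires.  */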
25702
25703 /* Expand an atomic exchange operation. */
25704
25705 void
25706 rs6000_expand_atomic_exchange (rtx operands[])
25707 {
25708 rtx retval, mem, val, cond;
25709 machine_mode mode;
25710 enum memmodel model;
25711 rtx label, x, mask, shift;
25712
25713 retval = operands[0];
25714 mem = operands[1];
25715 val = operands[2];
25716 model = memmodel_base (INTVAL (operands[3]));
25717 mode = GET_MODE (mem);
25718
25719 mask = shift = NULL_RTX;
25720 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
25721 {
25722 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
25723
25724 /* Shift and mask VAL into position within the word. */
25725 val = convert_modes (SImode, mode, val, 1);
25726 val = expand_simple_binop (SImode, ASHIFT, val, shift,
25727 NULL_RTX, 1, OPTAB_LIB_WIDEN);
25728
25729 /* Prepare to adjust the return value. */
25730 retval = gen_reg_rtx (SImode);
25731 mode = SImode;
25732 }
25733
25734 mem = rs6000_pre_atomic_barrier (mem, model);
25735
25736 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
25737 emit_label (XEXP (label, 0));
25738
25739 emit_load_locked (mode, retval, mem);
25740
25741 x = val;
25742 if (mask)
25743 x = rs6000_mask_atomic_subword (retval, val, mask);
25744
25745 cond = gen_reg_rtx (CCmode);
25746 emit_store_conditional (mode, cond, mem, x);
25747
25748 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
25749 emit_unlikely_jump (x, label);
25750
25751 rs6000_post_atomic_barrier (model);
25752
25753 if (shift)
25754 rs6000_finish_atomic_subword (operands[0], retval, shift);
25755 }
25756
25757 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
25758 to perform. MEM is the memory on which to operate. VAL is the second
25759 operand of the binary operator. BEFORE and AFTER are optional locations to
25760 return the value of MEM either before or after the operation. MODEL_RTX
25761 is a CONST_INT containing the memory model to use. */
25762
25763 void
25764 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
25765 rtx orig_before, rtx orig_after, rtx model_rtx)
25766 {
25767 enum memmodel model = memmodel_base (INTVAL (model_rtx));
25768 machine_mode mode = GET_MODE (mem);
25769 machine_mode store_mode = mode;
25770 rtx label, x, cond, mask, shift;
25771 rtx before = orig_before, after = orig_after;
25772
25773 mask = shift = NULL_RTX;
25774 /* On power8, we want to use SImode for the operation. On previous systems,
25775 use the operation in a subword and shift/mask to get the proper byte or
25776 halfword. */
25777 if (mode == QImode || mode == HImode)
25778 {
25779 if (TARGET_SYNC_HI_QI)
25780 {
25781 val = convert_modes (SImode, mode, val, 1);
25782
25783 /* Prepare to adjust the return value. */
25784 before = gen_reg_rtx (SImode);
25785 if (after)
25786 after = gen_reg_rtx (SImode);
25787 mode = SImode;
25788 }
25789 else
25790 {
25791 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
25792
25793 /* Shift and mask VAL into position within the word. */
25794 val = convert_modes (SImode, mode, val, 1);
25795 val = expand_simple_binop (SImode, ASHIFT, val, shift,
25796 NULL_RTX, 1, OPTAB_LIB_WIDEN);
25797
25798 switch (code)
25799 {
25800 case IOR:
25801 case XOR:
25802 /* We've already zero-extended VAL. That is sufficient to
25803 make certain that it does not affect other bits. */
25804 mask = NULL;
25805 break;
25806
25807 case AND:
25808 /* If we make certain that all of the other bits in VAL are
25809 set, that will be sufficient to not affect other bits. */
25810 x = gen_rtx_NOT (SImode, mask);
25811 x = gen_rtx_IOR (SImode, x, val);
25812 emit_insn (gen_rtx_SET (val, x));
25813 mask = NULL;
25814 break;
25815
25816 case NOT:
25817 case PLUS:
25818 case MINUS:
25819 /* These will all affect bits outside the field and need
25820 adjustment via MASK within the loop. */
25821 break;
25822
25823 default:
25824 gcc_unreachable ();
25825 }
25826
25827 /* Prepare to adjust the return value. */
25828 before = gen_reg_rtx (SImode);
25829 if (after)
25830 after = gen_reg_rtx (SImode);
25831 store_mode = mode = SImode;
25832 }
25833 }
25834
25835 mem = rs6000_pre_atomic_barrier (mem, model);
25836
25837 label = gen_label_rtx ();
25838 emit_label (label);
25839 label = gen_rtx_LABEL_REF (VOIDmode, label);
25840
25841 if (before == NULL_RTX)
25842 before = gen_reg_rtx (mode);
25843
25844 emit_load_locked (mode, before, mem);
25845
25846 if (code == NOT)
25847 {
25848 x = expand_simple_binop (mode, AND, before, val,
25849 NULL_RTX, 1, OPTAB_LIB_WIDEN);
25850 after = expand_simple_unop (mode, NOT, x, after, 1);
25851 }
25852 else
25853 {
25854 after = expand_simple_binop (mode, code, before, val,
25855 after, 1, OPTAB_LIB_WIDEN);
25856 }
25857
25858 x = after;
25859 if (mask)
25860 {
25861 x = expand_simple_binop (SImode, AND, after, mask,
25862 NULL_RTX, 1, OPTAB_LIB_WIDEN);
25863 x = rs6000_mask_atomic_subword (before, x, mask);
25864 }
25865 else if (store_mode != mode)
25866 x = convert_modes (store_mode, mode, x, 1);
25867
25868 cond = gen_reg_rtx (CCmode);
25869 emit_store_conditional (store_mode, cond, mem, x);
25870
25871 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
25872 emit_unlikely_jump (x, label);
25873
25874 rs6000_post_atomic_barrier (model);
25875
25876 if (shift)
25877 {
25878 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
25879 then do the calculations in a SImode register. */
25880 if (orig_before)
25881 rs6000_finish_atomic_subword (orig_before, before, shift);
25882 if (orig_after)
25883 rs6000_finish_atomic_subword (orig_after, after, shift);
25884 }
25885 else if (store_mode != mode)
25886 {
25887 /* QImode/HImode on machines with lbarx/lharx where we do the native
25888 operation and then do the calculations in a SImode register. */
25889 if (orig_before)
25890 convert_move (orig_before, before, 1);
25891 if (orig_after)
25892 convert_move (orig_after, after, 1);
25893 }
25894 else if (orig_after && after != orig_after)
25895 emit_move_insn (orig_after, after);
25896 }
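/* A rough sketch of the loop the expander above emits for, e.g., a
   SImode __atomic_fetch_add (register names assumed; the barriers
   emitted depend on MODEL):

	<pre-barrier>
     L:	lwarx  rBEFORE,0,rMEM		# load-locked
	add    rAFTER,rBEFORE,rVAL	# the CODE operation
	stwcx. rAFTER,0,rMEM		# store-conditional
	bne-   L			# retry if reservation lost
	<post-barrier>			*/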
25897
25898 /* Emit instructions to move SRC to DST. Called by splitters for
25899 multi-register moves. It will emit at most one instruction for
25900 each register that is accessed; that is, it won't emit li/lis pairs
25901 (or equivalent for 64-bit code). One of SRC or DST must be a hard
25902 register. */
25903
25904 void
25905 rs6000_split_multireg_move (rtx dst, rtx src)
25906 {
25907 /* The register number of the first register being moved. */
25908 int reg;
25909 /* The mode that is to be moved. */
25910 machine_mode mode;
25911 /* The mode that the move is being done in, and its size. */
25912 machine_mode reg_mode;
25913 int reg_mode_size;
25914 /* The number of registers that will be moved. */
25915 int nregs;
25916
25917 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
25918 mode = GET_MODE (dst);
25919 nregs = hard_regno_nregs[reg][mode];
25920 if (FP_REGNO_P (reg))
25921 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
25922 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
25923 else if (ALTIVEC_REGNO_P (reg))
25924 reg_mode = V16QImode;
25925 else if (TARGET_E500_DOUBLE && FLOAT128_2REG_P (mode))
25926 reg_mode = DFmode;
25927 else
25928 reg_mode = word_mode;
25929 reg_mode_size = GET_MODE_SIZE (reg_mode);
25930
25931 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
25932
25933 /* TDmode residing in FP registers is special, since the ISA requires that
25934 the lower-numbered word of a register pair is always the most significant
25935 word, even in little-endian mode. This does not match the usual subreg
25936 semantics, so we cannot use simplify_gen_subreg in those cases. Access
25937 the appropriate constituent registers "by hand" in little-endian mode.
25938
25939 Note we do not need to check for destructive overlap here since TDmode
25940 can only reside in even/odd register pairs. */
25941 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
25942 {
25943 rtx p_src, p_dst;
25944 int i;
25945
25946 for (i = 0; i < nregs; i++)
25947 {
25948 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
25949 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
25950 else
25951 p_src = simplify_gen_subreg (reg_mode, src, mode,
25952 i * reg_mode_size);
25953
25954 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
25955 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
25956 else
25957 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
25958 i * reg_mode_size);
25959
25960 emit_insn (gen_rtx_SET (p_dst, p_src));
25961 }
25962
25963 return;
25964 }
25965
25966 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
25967 {
25968 /* Move register range backwards, if we might have destructive
25969 overlap. */
25970 int i;
25971 for (i = nregs - 1; i >= 0; i--)
25972 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
25973 i * reg_mode_size),
25974 simplify_gen_subreg (reg_mode, src, mode,
25975 i * reg_mode_size)));
25976 }
25977 else
25978 {
25979 int i;
25980 int j = -1;
25981 bool used_update = false;
25982 rtx restore_basereg = NULL_RTX;
25983
25984 if (MEM_P (src) && INT_REGNO_P (reg))
25985 {
25986 rtx breg;
25987
25988 if (GET_CODE (XEXP (src, 0)) == PRE_INC
25989 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
25990 {
25991 rtx delta_rtx;
25992 breg = XEXP (XEXP (src, 0), 0);
25993 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
25994 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
25995 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
25996 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
25997 src = replace_equiv_address (src, breg);
25998 }
25999 else if (! rs6000_offsettable_memref_p (src, reg_mode))
26000 {
26001 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
26002 {
26003 rtx basereg = XEXP (XEXP (src, 0), 0);
26004 if (TARGET_UPDATE)
26005 {
26006 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
26007 emit_insn (gen_rtx_SET (ndst,
26008 gen_rtx_MEM (reg_mode,
26009 XEXP (src, 0))));
26010 used_update = true;
26011 }
26012 else
26013 emit_insn (gen_rtx_SET (basereg,
26014 XEXP (XEXP (src, 0), 1)));
26015 src = replace_equiv_address (src, basereg);
26016 }
26017 else
26018 {
26019 rtx basereg = gen_rtx_REG (Pmode, reg);
26020 emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
26021 src = replace_equiv_address (src, basereg);
26022 }
26023 }
26024
26025 breg = XEXP (src, 0);
26026 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
26027 breg = XEXP (breg, 0);
26028
26029 /* If the base register we are using to address memory is
26030 also a destination reg, then change that register last. */
26031 if (REG_P (breg)
26032 && REGNO (breg) >= REGNO (dst)
26033 && REGNO (breg) < REGNO (dst) + nregs)
26034 j = REGNO (breg) - REGNO (dst);
26035 }
26036 else if (MEM_P (dst) && INT_REGNO_P (reg))
26037 {
26038 rtx breg;
26039
26040 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
26041 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
26042 {
26043 rtx delta_rtx;
26044 breg = XEXP (XEXP (dst, 0), 0);
26045 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
26046 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
26047 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
26048
26049 /* We have to update the breg before doing the store.
26050 Use store with update, if available. */
26051
26052 if (TARGET_UPDATE)
26053 {
26054 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
26055 emit_insn (TARGET_32BIT
26056 ? (TARGET_POWERPC64
26057 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
26058 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
26059 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
26060 used_update = true;
26061 }
26062 else
26063 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
26064 dst = replace_equiv_address (dst, breg);
26065 }
26066 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
26067 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
26068 {
26069 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
26070 {
26071 rtx basereg = XEXP (XEXP (dst, 0), 0);
26072 if (TARGET_UPDATE)
26073 {
26074 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
26075 emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
26076 XEXP (dst, 0)),
26077 nsrc));
26078 used_update = true;
26079 }
26080 else
26081 emit_insn (gen_rtx_SET (basereg,
26082 XEXP (XEXP (dst, 0), 1)));
26083 dst = replace_equiv_address (dst, basereg);
26084 }
26085 else
26086 {
26087 rtx basereg = XEXP (XEXP (dst, 0), 0);
26088 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
26089 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
26090 && REG_P (basereg)
26091 && REG_P (offsetreg)
26092 && REGNO (basereg) != REGNO (offsetreg));
26093 if (REGNO (basereg) == 0)
26094 {
26095 rtx tmp = offsetreg;
26096 offsetreg = basereg;
26097 basereg = tmp;
26098 }
26099 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
26100 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
26101 dst = replace_equiv_address (dst, basereg);
26102 }
26103 }
26104 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
26105 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
26106 }
26107
26108 for (i = 0; i < nregs; i++)
26109 {
26110 /* Calculate index to next subword. */
26111 ++j;
26112 if (j == nregs)
26113 j = 0;
26114
26115 /* If the compiler already emitted the move of the first word by
26116 a store with update, there is no need to do anything. */
26117 if (j == 0 && used_update)
26118 continue;
26119
26120 emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
26121 j * reg_mode_size),
26122 simplify_gen_subreg (reg_mode, src, mode,
26123 j * reg_mode_size)));
26124 }
26125 if (restore_basereg != NULL_RTX)
26126 emit_insn (restore_basereg);
26127 }
26128 }
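/* An illustration under assumed operands: splitting a TImode move
   between GPRs in 32-bit code, reg_mode is word_mode (SImode) and
   nregs is 4, so four SImode subreg moves are emitted, walking the
   range backwards when REGNO (src) < REGNO (dst) to avoid the
   destructive overlap handled above.  */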
26129
26130 \f
26131 /* This page contains routines that are used to determine what the
26132 function prologue and epilogue code will do and write them out. */
26133
26134 static inline bool
26135 save_reg_p (int r)
26136 {
26137 return !call_used_regs[r] && df_regs_ever_live_p (r);
26138 }
26139
26140 /* Determine whether the GP register REG is really used. */
26141
26142 static bool
26143 rs6000_reg_live_or_pic_offset_p (int reg)
26144 {
26145 /* We need to mark the PIC offset register live under the same conditions
26146 as it is set up, or otherwise it won't be saved before we clobber it. */
26147
26148 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM && !TARGET_SINGLE_PIC_BASE)
26149 {
26150 if (TARGET_TOC && TARGET_MINIMAL_TOC
26151 && (crtl->calls_eh_return
26152 || df_regs_ever_live_p (reg)
26153 || !constant_pool_empty_p ()))
26154 return true;
26155
26156 if ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
26157 && flag_pic)
26158 return true;
26159 }
26160
26161 /* If the function calls eh_return, claim that all the registers that
26162 would otherwise be checked for liveness are used. */
26163
26164 return ((crtl->calls_eh_return || df_regs_ever_live_p (reg))
26165 && !call_used_regs[reg]);
26166 }
26167
26168 /* Return the first fixed-point register that is required to be
26169 saved. 32 if none. */
26170
26171 int
26172 first_reg_to_save (void)
26173 {
26174 int first_reg;
26175
26176 /* Find lowest numbered live register. */
26177 for (first_reg = 13; first_reg <= 31; first_reg++)
26178 if (save_reg_p (first_reg))
26179 break;
26180
26181 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
26182 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
26183 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
26184 || (TARGET_TOC && TARGET_MINIMAL_TOC))
26185 && rs6000_reg_live_or_pic_offset_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
26186 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
26187
26188 #if TARGET_MACHO
26189 if (flag_pic
26190 && crtl->uses_pic_offset_table
26191 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
26192 return RS6000_PIC_OFFSET_TABLE_REGNUM;
26193 #endif
26194
26195 return first_reg;
26196 }
26197
26198 /* Similar, for FP regs. */
26199
26200 int
26201 first_fp_reg_to_save (void)
26202 {
26203 int first_reg;
26204
26205 /* Find lowest numbered live register. */
26206 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
26207 if (save_reg_p (first_reg))
26208 break;
26209
26210 return first_reg;
26211 }
26212
26213 /* Similar, for AltiVec regs. */
26214
26215 static int
26216 first_altivec_reg_to_save (void)
26217 {
26218 int i;
26219
26220 /* Stack frame remains as is unless we are in AltiVec ABI. */
26221 if (! TARGET_ALTIVEC_ABI)
26222 return LAST_ALTIVEC_REGNO + 1;
26223
26224 /* On Darwin, the unwind routines are compiled without
26225 TARGET_ALTIVEC, and use save_world to save/restore the
26226 altivec registers when necessary. */
26227 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
26228 && ! TARGET_ALTIVEC)
26229 return FIRST_ALTIVEC_REGNO + 20;
26230
26231 /* Find lowest numbered live register. */
26232 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
26233 if (save_reg_p (i))
26234 break;
26235
26236 return i;
26237 }
26238
26239 /* Return a 32-bit mask of the AltiVec registers we need to set in
26240 VRSAVE. Bit n of the return value is 1 if Vn is live; bits are
26241 numbered IBM-style, so bit 0 is the MSB of the 32-bit word. */
26242
26243 static unsigned int
26244 compute_vrsave_mask (void)
26245 {
26246 unsigned int i, mask = 0;
26247
26248 /* On Darwin, the unwind routines are compiled without
26249 TARGET_ALTIVEC, and use save_world to save/restore the
26250 call-saved altivec registers when necessary. */
26251 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
26252 && ! TARGET_ALTIVEC)
26253 mask |= 0xFFF;
26254
26255 /* First, find out if we use _any_ altivec registers. */
26256 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
26257 if (df_regs_ever_live_p (i))
26258 mask |= ALTIVEC_REG_BIT (i);
26259
26260 if (mask == 0)
26261 return mask;
26262
26263 /* Next, remove the argument registers from the set. These must
26264 be in the VRSAVE mask set by the caller, so we don't need to add
26265 them in again. More importantly, the mask we compute here is
26266 used to generate CLOBBERs in the set_vrsave insn, and we do not
26267 wish the argument registers to die. */
26268 for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
26269 mask &= ~ALTIVEC_REG_BIT (i);
26270
26271 /* Similarly, remove the return value from the set. */
26272 {
26273 bool yes = false;
26274 diddle_return_value (is_altivec_return_reg, &yes);
26275 if (yes)
26276 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
26277 }
26278
26279 return mask;
26280 }
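/* A worked example for a hypothetical function: if only V20..V31 are
   live and none of them is an argument or return-value register, the
   resulting mask is 0x00000fff, matching the 0xFFF used for the
   Darwin save_world case above.  */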
26281
26282 /* For a very restricted set of circumstances, we can cut down the
26283 size of prologues/epilogues by calling our own save/restore-the-world
26284 routines. */
26285
26286 static void
26287 compute_save_world_info (rs6000_stack_t *info)
26288 {
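/* Note: WORLD_SAVE_P examines info->world_save_p, so the field is
   seeded nonzero here before the real value is computed below.  */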
26289 info->world_save_p = 1;
26290 info->world_save_p
26291 = (WORLD_SAVE_P (info)
26292 && DEFAULT_ABI == ABI_DARWIN
26293 && !cfun->has_nonlocal_label
26294 && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
26295 && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
26296 && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
26297 && info->cr_save_p);
26298
26299 /* This will not work in conjunction with sibcalls. Make sure there
26300 are none. (This check is expensive, but seldom executed.) */
26301 if (WORLD_SAVE_P (info))
26302 {
26303 rtx_insn *insn;
26304 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
26305 if (CALL_P (insn) && SIBLING_CALL_P (insn))
26306 {
26307 info->world_save_p = 0;
26308 break;
26309 }
26310 }
26311
26312 if (WORLD_SAVE_P (info))
26313 {
26314 /* Even if we're not touching VRsave, make sure there's room on the
26315 stack for it, if it looks like we're calling SAVE_WORLD, which
26316 will attempt to save it. */
26317 info->vrsave_size = 4;
26318
26319 /* If we are going to save the world, we need to save the link register too. */
26320 info->lr_save_p = 1;
26321
26322 /* "Save" the VRsave register too if we're saving the world. */
26323 if (info->vrsave_mask == 0)
26324 info->vrsave_mask = compute_vrsave_mask ();
26325
26326 /* Because the Darwin register save/restore routines only handle
26327 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
26328 check. */
26329 gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
26330 && (info->first_altivec_reg_save
26331 >= FIRST_SAVED_ALTIVEC_REGNO));
26332 }
26333
26334 return;
26335 }
26336
26337
26338 static void
26339 is_altivec_return_reg (rtx reg, void *xyes)
26340 {
26341 bool *yes = (bool *) xyes;
26342 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
26343 *yes = true;
26344 }
26345
26346 \f
26347 /* Return whether REG is a global user reg or has been specified by
26348 -ffixed-REG. We should not restore these, and so cannot use
26349 lmw or out-of-line restore functions if there are any. We also
26350 can't save them (well, emit frame notes for them), because frame
26351 unwinding during exception handling will restore saved registers. */
26352
26353 static bool
26354 fixed_reg_p (int reg)
26355 {
26356 /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
26357 backend sets it, overriding anything the user might have given. */
26358 if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
26359 && ((DEFAULT_ABI == ABI_V4 && flag_pic)
26360 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
26361 || (TARGET_TOC && TARGET_MINIMAL_TOC)))
26362 return false;
26363
26364 return fixed_regs[reg];
26365 }
26366
26367 /* Determine the strategy for saving/restoring registers. */
26368
26369 enum {
26370 SAVE_MULTIPLE = 0x1,
26371 SAVE_INLINE_GPRS = 0x2,
26372 SAVE_INLINE_FPRS = 0x4,
26373 SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
26374 SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
26375 SAVE_INLINE_VRS = 0x20,
26376 REST_MULTIPLE = 0x100,
26377 REST_INLINE_GPRS = 0x200,
26378 REST_INLINE_FPRS = 0x400,
26379 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
26380 REST_INLINE_VRS = 0x1000
26381 };
26382
26383 static int
26384 rs6000_savres_strategy (rs6000_stack_t *info,
26385 bool using_static_chain_p)
26386 {
26387 int strategy = 0;
26388
26389 /* Select between in-line and out-of-line save and restore of regs.
26390 First, all the obvious cases where we don't use out-of-line. */
26391 if (crtl->calls_eh_return
26392 || cfun->machine->ra_need_lr)
26393 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
26394 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
26395 | SAVE_INLINE_VRS | REST_INLINE_VRS);
26396
26397 if (info->first_gp_reg_save == 32)
26398 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
26399
26400 if (info->first_fp_reg_save == 64
26401 /* The out-of-line FP routines use double-precision stores;
26402 we can't use those routines if we don't have such stores. */
26403 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT))
26404 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
26405
26406 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
26407 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
26408
26409 /* Define cutoff for using out-of-line functions to save registers. */
26410 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
26411 {
26412 if (!optimize_size)
26413 {
26414 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
26415 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
26416 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
26417 }
26418 else
26419 {
26420 /* Prefer an out-of-line restore when it can also handle the function exit. */
26421 if (info->first_fp_reg_save > 61)
26422 strategy |= SAVE_INLINE_FPRS;
26423 if (info->first_gp_reg_save > 29)
26424 {
26425 if (info->first_fp_reg_save == 64)
26426 strategy |= SAVE_INLINE_GPRS;
26427 else
26428 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
26429 }
26430 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
26431 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
26432 }
26433 }
26434 else if (DEFAULT_ABI == ABI_DARWIN)
26435 {
26436 if (info->first_fp_reg_save > 60)
26437 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
26438 if (info->first_gp_reg_save > 29)
26439 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
26440 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
26441 }
26442 else
26443 {
26444 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
26445 if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
26446 || info->first_fp_reg_save > 61)
26447 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
26448 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
26449 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
26450 }
26451
26452 /* Don't bother to try to save things out-of-line if r11 is occupied
26453 by the static chain. It would require too much fiddling and the
26454 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
26455 pointer on Darwin, and AIX uses r1 or r12. */
26456 if (using_static_chain_p
26457 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
26458 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
26459 | SAVE_INLINE_GPRS
26460 | SAVE_INLINE_VRS);
26461
26462 /* Saving CR interferes with the exit routines used on the SPE, so
26463 just punt here. */
26464 if (TARGET_SPE_ABI
26465 && info->spe_64bit_regs_used
26466 && info->cr_save_p)
26467 strategy |= REST_INLINE_GPRS;
26468
26469 /* We can only use the out-of-line routines to restore fprs if we've
26470 saved all the registers from first_fp_reg_save in the prologue.
26471 Otherwise, we risk loading garbage. Of course, if we have saved
26472 out-of-line then we know we haven't skipped any fprs. */
26473 if ((strategy & SAVE_INLINE_FPRS)
26474 && !(strategy & REST_INLINE_FPRS))
26475 {
26476 int i;
26477
26478 for (i = info->first_fp_reg_save; i < 64; i++)
26479 if (fixed_regs[i] || !save_reg_p (i))
26480 {
26481 strategy |= REST_INLINE_FPRS;
26482 break;
26483 }
26484 }
26485
26486 /* Similarly, for altivec regs. */
26487 if ((strategy & SAVE_INLINE_VRS)
26488 && !(strategy & REST_INLINE_VRS))
26489 {
26490 int i;
26491
26492 for (i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
26493 if (fixed_regs[i] || !save_reg_p (i))
26494 {
26495 strategy |= REST_INLINE_VRS;
26496 break;
26497 }
26498 }
26499
26500 /* info->lr_save_p isn't yet set if the only reason lr needs to be
26501 saved is an out-of-line save or restore. Set up the value for
26502 the next test (excluding out-of-line gprs). */
26503 bool lr_save_p = (info->lr_save_p
26504 || !(strategy & SAVE_INLINE_FPRS)
26505 || !(strategy & SAVE_INLINE_VRS)
26506 || !(strategy & REST_INLINE_FPRS)
26507 || !(strategy & REST_INLINE_VRS));
26508
26509 if (TARGET_MULTIPLE
26510 && !TARGET_POWERPC64
26511 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
26512 && info->first_gp_reg_save < 31
26513 && !(flag_shrink_wrap
26514 && flag_shrink_wrap_separate
26515 && optimize_function_for_speed_p (cfun)))
26516 {
26517 /* Prefer store multiple for saves over out-of-line routines,
26518 since the store-multiple instruction will always be smaller. */
26519 strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;
26520
26521 /* The situation is more complicated with load multiple. We'd
26522 prefer to use the out-of-line routines for restores, since the
26523 "exit" out-of-line routines can handle the restore of LR and the
26524 frame teardown. However, it doesn't make sense to use the
26525 out-of-line routine if that is the only reason we'd need to save
26526 LR, and we can't use the "exit" out-of-line gpr restore if we
26527 have saved some fprs; in those cases it is advantageous to use
26528 load multiple when available. */
26529 if (info->first_fp_reg_save != 64 || !lr_save_p)
26530 strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
26531 }
26532
26533 /* Using the "exit" out-of-line routine does not improve code size
26534 if using it would require lr to be saved and we are only saving
26535 one or two gprs. */
26536 else if (!lr_save_p && info->first_gp_reg_save > 29)
26537 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
26538
26539 /* We can only use load multiple or the out-of-line routines to
26540 restore gprs if we've saved all the registers from
26541 first_gp_reg_save. Otherwise, we risk loading garbage.
26542 Of course, if we have saved out-of-line or used stmw then we know
26543 we haven't skipped any gprs. */
26544 if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
26545 && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
26546 {
26547 int i;
26548
26549 for (i = info->first_gp_reg_save; i < 32; i++)
26550 if (fixed_reg_p (i) || !save_reg_p (i))
26551 {
26552 strategy |= REST_INLINE_GPRS;
26553 strategy &= ~REST_MULTIPLE;
26554 break;
26555 }
26556 }
26557
26558 if (TARGET_ELF && TARGET_64BIT)
26559 {
26560 if (!(strategy & SAVE_INLINE_FPRS))
26561 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
26562 else if (!(strategy & SAVE_INLINE_GPRS)
26563 && info->first_fp_reg_save == 64)
26564 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
26565 }
26566 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
26567 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
26568
26569 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
26570 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
26571
26572 return strategy;
26573 }
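/* Reading the result: the flags above combine as a bitmask, so for
   example (strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE))
   == SAVE_INLINE_GPRS means "inline GPR saves without stmw", the
   same idiom used when validating REST_MULTIPLE above.  */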
26574
26575 /* Calculate the stack information for the current function. This is
26576 complicated by having two separate calling sequences, the AIX calling
26577 sequence and the V.4 calling sequence.
26578
26579 AIX (and Darwin/Mac OS X) stack frames look like:
26580 32-bit 64-bit
26581 SP----> +---------------------------------------+
26582 | back chain to caller | 0 0
26583 +---------------------------------------+
26584 | saved CR | 4 8 (8-11)
26585 +---------------------------------------+
26586 | saved LR | 8 16
26587 +---------------------------------------+
26588 | reserved for compilers | 12 24
26589 +---------------------------------------+
26590 | reserved for binders | 16 32
26591 +---------------------------------------+
26592 | saved TOC pointer | 20 40
26593 +---------------------------------------+
26594 | Parameter save area (+padding*) (P) | 24 48
26595 +---------------------------------------+
26596 | Alloca space (A) | 24+P etc.
26597 +---------------------------------------+
26598 | Local variable space (L) | 24+P+A
26599 +---------------------------------------+
26600 | Float/int conversion temporary (X) | 24+P+A+L
26601 +---------------------------------------+
26602 | Save area for AltiVec registers (W) | 24+P+A+L+X
26603 +---------------------------------------+
26604 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
26605 +---------------------------------------+
26606 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
26607 +---------------------------------------+
26608 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
26609 +---------------------------------------+
26610 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
26611 +---------------------------------------+
26612 old SP->| back chain to caller's caller |
26613 +---------------------------------------+
26614
26615 * If the alloca area is present, the parameter save area is
26616 padded so that the alloca area starts 16-byte aligned.
26617
26618 The required alignment for AIX configurations is two words (i.e., 8
26619 or 16 bytes).
26620
26621 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
26622
26623 SP----> +---------------------------------------+
26624 | Back chain to caller | 0
26625 +---------------------------------------+
26626 | Save area for CR | 8
26627 +---------------------------------------+
26628 | Saved LR | 16
26629 +---------------------------------------+
26630 | Saved TOC pointer | 24
26631 +---------------------------------------+
26632 | Parameter save area (+padding*) (P) | 32
26633 +---------------------------------------+
26634 | Alloca space (A) | 32+P
26635 +---------------------------------------+
26636 | Local variable space (L) | 32+P+A
26637 +---------------------------------------+
26638 | Save area for AltiVec registers (W) | 32+P+A+L
26639 +---------------------------------------+
26640 | AltiVec alignment padding (Y) | 32+P+A+L+W
26641 +---------------------------------------+
26642 | Save area for GP registers (G) | 32+P+A+L+W+Y
26643 +---------------------------------------+
26644 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
26645 +---------------------------------------+
26646 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
26647 +---------------------------------------+
26648
26649 * If the alloca area is present, the parameter save area is
26650 padded so that the alloca area starts 16-byte aligned.
26651
26652 V.4 stack frames look like:
26653
26654 SP----> +---------------------------------------+
26655 | back chain to caller | 0
26656 +---------------------------------------+
26657 | caller's saved LR | 4
26658 +---------------------------------------+
26659 | Parameter save area (+padding*) (P) | 8
26660 +---------------------------------------+
26661 | Alloca space (A) | 8+P
26662 +---------------------------------------+
26663 | Varargs save area (V) | 8+P+A
26664 +---------------------------------------+
26665 | Local variable space (L) | 8+P+A+V
26666 +---------------------------------------+
26667 | Float/int conversion temporary (X) | 8+P+A+V+L
26668 +---------------------------------------+
26669 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
26670 +---------------------------------------+
26671 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
26672 +---------------------------------------+
26673 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
26674 +---------------------------------------+
26675 | SPE: area for 64-bit GP registers |
26676 +---------------------------------------+
26677 | SPE alignment padding |
26678 +---------------------------------------+
26679 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
26680 +---------------------------------------+
26681 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
26682 +---------------------------------------+
26683 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
26684 +---------------------------------------+
26685 old SP->| back chain to caller's caller |
26686 +---------------------------------------+
26687
26688 * If the alloca area is present and the required alignment is
26689 16 bytes, the parameter save area is padded so that the
26690 alloca area starts 16-byte aligned.
26691
26692 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
26693 given. (But note below and in sysv4.h that we require only 8 and
26694 may round up the size of our stack frame anyway. The historical
26695 reason is early versions of powerpc-linux which didn't properly
26696 align the stack at program startup. A happy side-effect is that
26697 -mno-eabi libraries can be used with -meabi programs.)
26698
26699 The EABI configuration defaults to the V.4 layout. However,
26700 the stack alignment requirements may differ. If -mno-eabi is not
26701 given, the required stack alignment is 8 bytes; if -mno-eabi is
26702 given, the required alignment is 16 bytes. (But see V.4 comment
26703 above.) */
26704
26705 #ifndef ABI_STACK_BOUNDARY
26706 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
26707 #endif
26708
26709 static rs6000_stack_t *
26710 rs6000_stack_info (void)
26711 {
26712 /* We should never be called for thunks; we are not set up for that. */
26713 gcc_assert (!cfun->is_thunk);
26714
26715 rs6000_stack_t *info = &stack_info;
26716 int reg_size = TARGET_32BIT ? 4 : 8;
26717 int ehrd_size;
26718 int ehcr_size;
26719 int save_align;
26720 int first_gp;
26721 HOST_WIDE_INT non_fixed_size;
26722 bool using_static_chain_p;
26723
26724 if (reload_completed && info->reload_completed)
26725 return info;
26726
26727 memset (info, 0, sizeof (*info));
26728 info->reload_completed = reload_completed;
26729
26730 if (TARGET_SPE)
26731 {
26732 /* Cache the value so we don't rescan the instruction chain over and over. */
26733 if (cfun->machine->spe_insn_chain_scanned_p == 0)
26734 cfun->machine->spe_insn_chain_scanned_p
26735 = spe_func_has_64bit_regs_p () + 1;
26736 info->spe_64bit_regs_used = cfun->machine->spe_insn_chain_scanned_p - 1;
26737 }
26738
26739 /* Select which calling sequence. */
26740 info->abi = DEFAULT_ABI;
26741
26742 /* Calculate which registers need to be saved & save area size. */
26743 info->first_gp_reg_save = first_reg_to_save ();
26744 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
26745 even if it currently looks like we won't. Reload may need it to
26746 get at a constant; if so, it will have already created a constant
26747 pool entry for it. */
26748 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
26749 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
26750 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
26751 && crtl->uses_const_pool
26752 && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
26753 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
26754 else
26755 first_gp = info->first_gp_reg_save;
26756
26757 info->gp_size = reg_size * (32 - first_gp);
26758
26759 /* For the SPE, we have an additional upper 32-bits on each GPR.
26760 Ideally we should save the entire 64-bits only when the upper
26761 half is used in SIMD instructions. Since we only record
26762 registers live (not the size they are used in), this proves
26763 difficult because we'd have to traverse the instruction chain at
26764 the right time, taking reload into account. This is a real pain,
25765 so we opt to always save the GPRs in 64-bits if even one register
25766 gets used in 64-bits. Otherwise, all the registers in the frame
25767 get saved in 32-bits.
25768
25769 So, when we save all GPRs (except the SP) in 64-bits, the
25770 traditional GP save area will be empty. */
26771 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
26772 info->gp_size = 0;
26773
26774 info->first_fp_reg_save = first_fp_reg_to_save ();
26775 info->fp_size = 8 * (64 - info->first_fp_reg_save);
26776
26777 info->first_altivec_reg_save = first_altivec_reg_to_save ();
26778 info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
26779 - info->first_altivec_reg_save);
26780
26781 /* Does this function call anything? */
26782 info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);
26783
26784 /* Determine if we need to save the condition code registers. */
26785 if (save_reg_p (CR2_REGNO)
26786 || save_reg_p (CR3_REGNO)
26787 || save_reg_p (CR4_REGNO))
26788 {
26789 info->cr_save_p = 1;
26790 if (DEFAULT_ABI == ABI_V4)
26791 info->cr_size = reg_size;
26792 }
26793
26794 /* If the current function calls __builtin_eh_return, then we need
26795 to allocate stack space for registers that will hold data for
26796 the exception handler. */
26797 if (crtl->calls_eh_return)
26798 {
26799 unsigned int i;
26800 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
26801 continue;
26802
26803 /* SPE saves EH registers in 64-bits. */
26804 ehrd_size = i * (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0
26805 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
26806 }
26807 else
26808 ehrd_size = 0;
26809
26810 /* In the ELFv2 ABI, we also need to allocate space for separate
26811 CR field save areas if the function calls __builtin_eh_return. */
26812 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
26813 {
26814 /* This hard-codes that we have three call-saved CR fields. */
26815 ehcr_size = 3 * reg_size;
26816 /* We do *not* use the regular CR save mechanism. */
26817 info->cr_save_p = 0;
26818 }
26819 else
26820 ehcr_size = 0;
26821
26822 /* Determine various sizes. */
26823 info->reg_size = reg_size;
26824 info->fixed_size = RS6000_SAVE_AREA;
26825 info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
26826 if (cfun->calls_alloca)
26827 info->parm_size =
26828 RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
26829 STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
26830 else
26831 info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
26832 TARGET_ALTIVEC ? 16 : 8);
26833 if (FRAME_GROWS_DOWNWARD)
26834 info->vars_size
26835 += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
26836 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
26837 - (info->fixed_size + info->vars_size + info->parm_size);
26838
26839 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
26840 info->spe_gp_size = 8 * (32 - first_gp);
26841
26842 if (TARGET_ALTIVEC_ABI)
26843 info->vrsave_mask = compute_vrsave_mask ();
26844
26845 if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
26846 info->vrsave_size = 4;
26847
26848 compute_save_world_info (info);
26849
26850 /* Calculate the offsets. */
26851 switch (DEFAULT_ABI)
26852 {
26853 case ABI_NONE:
26854 default:
26855 gcc_unreachable ();
26856
26857 case ABI_AIX:
26858 case ABI_ELFv2:
26859 case ABI_DARWIN:
26860 info->fp_save_offset = -info->fp_size;
26861 info->gp_save_offset = info->fp_save_offset - info->gp_size;
26862
26863 if (TARGET_ALTIVEC_ABI)
26864 {
26865 info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;
26866
26867 /* Align stack so vector save area is on a quadword boundary.
26868 The padding goes above the vectors. */
26869 if (info->altivec_size != 0)
26870 info->altivec_padding_size = info->vrsave_save_offset & 0xF;
26871
26872 info->altivec_save_offset = info->vrsave_save_offset
26873 - info->altivec_padding_size
26874 - info->altivec_size;
26875 gcc_assert (info->altivec_size == 0
26876 || info->altivec_save_offset % 16 == 0);
26877
26878 /* Adjust for AltiVec case. */
26879 info->ehrd_offset = info->altivec_save_offset - ehrd_size;
26880 }
26881 else
26882 info->ehrd_offset = info->gp_save_offset - ehrd_size;
26883
26884 info->ehcr_offset = info->ehrd_offset - ehcr_size;
26885 info->cr_save_offset = reg_size; /* first word when 64-bit. */
26886 info->lr_save_offset = 2*reg_size;
26887 break;
26888
26889 case ABI_V4:
26890 info->fp_save_offset = -info->fp_size;
26891 info->gp_save_offset = info->fp_save_offset - info->gp_size;
26892 info->cr_save_offset = info->gp_save_offset - info->cr_size;
26893
26894 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
26895 {
26896 /* Align stack so SPE GPR save area is aligned on a
26897 double-word boundary. */
26898 if (info->spe_gp_size != 0 && info->cr_save_offset != 0)
26899 info->spe_padding_size = 8 - (-info->cr_save_offset % 8);
26900 else
26901 info->spe_padding_size = 0;
26902
26903 info->spe_gp_save_offset = info->cr_save_offset
26904 - info->spe_padding_size
26905 - info->spe_gp_size;
26906
26907 /* Adjust for SPE case. */
26908 info->ehrd_offset = info->spe_gp_save_offset;
26909 }
26910 else if (TARGET_ALTIVEC_ABI)
26911 {
26912 info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;
26913
26914 /* Align stack so vector save area is on a quadword boundary. */
26915 if (info->altivec_size != 0)
26916 info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);
26917
26918 info->altivec_save_offset = info->vrsave_save_offset
26919 - info->altivec_padding_size
26920 - info->altivec_size;
26921
26922 /* Adjust for AltiVec case. */
26923 info->ehrd_offset = info->altivec_save_offset;
26924 }
26925 else
26926 info->ehrd_offset = info->cr_save_offset;
26927
26928 info->ehrd_offset -= ehrd_size;
26929 info->lr_save_offset = reg_size;
26930 }
26931
26932 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
26933 info->save_size = RS6000_ALIGN (info->fp_size
26934 + info->gp_size
26935 + info->altivec_size
26936 + info->altivec_padding_size
26937 + info->spe_gp_size
26938 + info->spe_padding_size
26939 + ehrd_size
26940 + ehcr_size
26941 + info->cr_size
26942 + info->vrsave_size,
26943 save_align);
26944
26945 non_fixed_size = info->vars_size + info->parm_size + info->save_size;
26946
26947 info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
26948 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
26949
26950 /* Determine if we need to save the link register. */
26951 if (info->calls_p
26952 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26953 && crtl->profile
26954 && !TARGET_PROFILE_KERNEL)
26955 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
26956 #ifdef TARGET_RELOCATABLE
26957 || (DEFAULT_ABI == ABI_V4
26958 && (TARGET_RELOCATABLE || flag_pic > 1)
26959 && !constant_pool_empty_p ())
26960 #endif
26961 || rs6000_ra_ever_killed ())
26962 info->lr_save_p = 1;
26963
26964 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
26965 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
26966 && call_used_regs[STATIC_CHAIN_REGNUM]);
26967 info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);
26968
26969 if (!(info->savres_strategy & SAVE_INLINE_GPRS)
26970 || !(info->savres_strategy & SAVE_INLINE_FPRS)
26971 || !(info->savres_strategy & SAVE_INLINE_VRS)
26972 || !(info->savres_strategy & REST_INLINE_GPRS)
26973 || !(info->savres_strategy & REST_INLINE_FPRS)
26974 || !(info->savres_strategy & REST_INLINE_VRS))
26975 info->lr_save_p = 1;
26976
26977 if (info->lr_save_p)
26978 df_set_regs_ever_live (LR_REGNO, true);
26979
26980 /* Determine if we need to allocate any stack frame:
26981
26982 For AIX we need to push the stack if a frame pointer is needed
26983 (because the stack might be dynamically adjusted), if we are
26984 debugging, if we make calls, or if the sum of fp_save, gp_save,
26985 and local variables is more than the space needed to save all
26986 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
26987 + 18*8 = 288 (GPR13 reserved).
26988
26989 For V.4 we don't have the stack cushion that AIX uses, but assume
26990 that the debugger can handle stackless frames. */
26991
26992 if (info->calls_p)
26993 info->push_p = 1;
26994
26995 else if (DEFAULT_ABI == ABI_V4)
26996 info->push_p = non_fixed_size != 0;
26997
26998 else if (frame_pointer_needed)
26999 info->push_p = 1;
27000
27001 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
27002 info->push_p = 1;
27003
27004 else
27005 info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
27006
27007 return info;
27008 }
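/* A worked example under stated assumptions: an ELFv2 function that
   saves only f30/f31 and no GPRs has fp_size == 16 and gp_size == 0,
   giving fp_save_offset == -16 and gp_save_offset == -16; with no
   AltiVec or VRSAVE state to save, ehrd_offset then sits just below
   at -16 - ehrd_size.  */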
27009
27010 /* Return true if the current function uses any GPRs in 64-bit SIMD
27011 mode. */
27012
27013 static bool
27014 spe_func_has_64bit_regs_p (void)
27015 {
27016 rtx_insn *insns, *insn;
27017
27018 /* Functions that save and restore all the call-saved registers will
27019 need to save/restore the registers in 64-bits. */
27020 if (crtl->calls_eh_return
27021 || cfun->calls_setjmp
27022 || crtl->has_nonlocal_goto)
27023 return true;
27024
27025 insns = get_insns ();
27026
27027 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
27028 {
27029 if (INSN_P (insn))
27030 {
27031 rtx i;
27032
27033 /* FIXME: This should be implemented with attributes...
27034
27035 (set_attr "spe64" "true")....then,
27036 if (get_spe64(insn)) return true;
27037
27038 It's the only reliable way to do the stuff below. */
27039
27040 i = PATTERN (insn);
27041 if (GET_CODE (i) == SET)
27042 {
27043 machine_mode mode = GET_MODE (SET_SRC (i));
27044
27045 if (SPE_VECTOR_MODE (mode))
27046 return true;
27047 if (TARGET_E500_DOUBLE
27048 && (mode == DFmode || FLOAT128_2REG_P (mode)))
27049 return true;
27050 }
27051 }
27052 }
27053
27054 return false;
27055 }
27056
27057 static void
27058 debug_stack_info (rs6000_stack_t *info)
27059 {
27060 const char *abi_string;
27061
27062 if (! info)
27063 info = rs6000_stack_info ();
27064
27065 fprintf (stderr, "\nStack information for function %s:\n",
27066 ((current_function_decl && DECL_NAME (current_function_decl))
27067 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
27068 : "<unknown>"));
27069
27070 switch (info->abi)
27071 {
27072 default: abi_string = "Unknown"; break;
27073 case ABI_NONE: abi_string = "NONE"; break;
27074 case ABI_AIX: abi_string = "AIX"; break;
27075 case ABI_ELFv2: abi_string = "ELFv2"; break;
27076 case ABI_DARWIN: abi_string = "Darwin"; break;
27077 case ABI_V4: abi_string = "V.4"; break;
27078 }
27079
27080 fprintf (stderr, "\tABI = %5s\n", abi_string);
27081
27082 if (TARGET_ALTIVEC_ABI)
27083 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
27084
27085 if (TARGET_SPE_ABI)
27086 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
27087
27088 if (info->first_gp_reg_save != 32)
27089 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
27090
27091 if (info->first_fp_reg_save != 64)
27092 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
27093
27094 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
27095 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
27096 info->first_altivec_reg_save);
27097
27098 if (info->lr_save_p)
27099 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
27100
27101 if (info->cr_save_p)
27102 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
27103
27104 if (info->vrsave_mask)
27105 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
27106
27107 if (info->push_p)
27108 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
27109
27110 if (info->calls_p)
27111 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
27112
27113 if (info->gp_size)
27114 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
27115
27116 if (info->fp_size)
27117 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
27118
27119 if (info->altivec_size)
27120 fprintf (stderr, "\taltivec_save_offset = %5d\n",
27121 info->altivec_save_offset);
27122
27123 if (info->spe_gp_size)
27124 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
27125 info->spe_gp_save_offset);
27126
27127 if (info->vrsave_size)
27128 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
27129 info->vrsave_save_offset);
27130
27131 if (info->lr_save_p)
27132 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
27133
27134 if (info->cr_save_p)
27135 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
27136
27137 if (info->varargs_save_offset)
27138 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
27139
27140 if (info->total_size)
27141 fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC"\n",
27142 info->total_size);
27143
27144 if (info->vars_size)
27145 fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC"\n",
27146 info->vars_size);
27147
27148 if (info->parm_size)
27149 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
27150
27151 if (info->fixed_size)
27152 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
27153
27154 if (info->gp_size)
27155 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
27156
27157 if (info->spe_gp_size)
27158 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
27159
27160 if (info->fp_size)
27161 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
27162
27163 if (info->altivec_size)
27164 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
27165
27166 if (info->vrsave_size)
27167 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
27168
27169 if (info->altivec_padding_size)
27170 fprintf (stderr, "\taltivec_padding_size= %5d\n",
27171 info->altivec_padding_size);
27172
27173 if (info->spe_padding_size)
27174 fprintf (stderr, "\tspe_padding_size = %5d\n",
27175 info->spe_padding_size);
27176
27177 if (info->cr_size)
27178 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
27179
27180 if (info->save_size)
27181 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
27182
27183 if (info->reg_size != 4)
27184 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
27185
27186 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
27187
27188 fprintf (stderr, "\n");
27189 }
27190
27191 rtx
27192 rs6000_return_addr (int count, rtx frame)
27193 {
27194 /* Currently we don't optimize very well between prologue and body
27195 code, and for PIC code the result can actually be quite bad, so
27196 don't try to be too clever here. */
27197 if (count != 0
27198 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
27199 {
27200 cfun->machine->ra_needs_full_frame = 1;
27201
27202 return
27203 gen_rtx_MEM
27204 (Pmode,
27205 memory_address
27206 (Pmode,
27207 plus_constant (Pmode,
27208 copy_to_reg
27209 (gen_rtx_MEM (Pmode,
27210 memory_address (Pmode, frame))),
27211 RETURN_ADDRESS_OFFSET)));
27212 }
27213
27214 cfun->machine->ra_need_lr = 1;
27215 return get_hard_reg_initial_val (Pmode, LR_REGNO);
27216 }
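/* For example, __builtin_return_address (1) arrives here with
   COUNT == 1: the code above chains through the back-chain word at
   *FRAME and then loads the saved LR slot at RETURN_ADDRESS_OFFSET
   from it, after flagging that a full frame is required.  */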
27217
27218 /* Say whether a function is a candidate for sibcall handling or not. */
27219
27220 static bool
27221 rs6000_function_ok_for_sibcall (tree decl, tree exp)
27222 {
27223 tree fntype;
27224
27225 if (decl)
27226 fntype = TREE_TYPE (decl);
27227 else
27228 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
27229
27230 /* We can't do it if the called function has more vector parameters
27231 than the current function; there's nowhere to put the VRsave code. */
27232 if (TARGET_ALTIVEC_ABI
27233 && TARGET_ALTIVEC_VRSAVE
27234 && !(decl && decl == current_function_decl))
27235 {
27236 function_args_iterator args_iter;
27237 tree type;
27238 int nvreg = 0;
27239
27240 /* Functions with vector parameters are required to have a
27241 prototype, so the argument type info must be available
27242 here. */
27243 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
27244 if (TREE_CODE (type) == VECTOR_TYPE
27245 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
27246 nvreg++;
27247
27248 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
27249 if (TREE_CODE (type) == VECTOR_TYPE
27250 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
27251 nvreg--;
27252
27253 if (nvreg > 0)
27254 return false;
27255 }
27256
27257 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
27258 functions, because the callee may have a different TOC pointer from
27259 the caller and there's no way to ensure we restore the TOC when
27260 we return. With the secure-plt SYSV ABI we can't make non-local
27261 calls under -fpic/-fPIC because the PLT call stubs use r30. */
27262 if (DEFAULT_ABI == ABI_DARWIN
27263 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
27264 && decl
27265 && !DECL_EXTERNAL (decl)
27266 && !DECL_WEAK (decl)
27267 && (*targetm.binds_local_p) (decl))
27268 || (DEFAULT_ABI == ABI_V4
27269 && (!TARGET_SECURE_PLT
27270 || !flag_pic
27271 || (decl
27272 && (*targetm.binds_local_p) (decl)))))
27273 {
27274 tree attr_list = TYPE_ATTRIBUTES (fntype);
27275
27276 if (!lookup_attribute ("longcall", attr_list)
27277 || lookup_attribute ("shortcall", attr_list))
27278 return true;
27279 }
27280
27281 return false;
27282 }
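/* An illustration with a hypothetical callee: under ELFv2, a call to
   a non-weak function defined in the same unit binds locally and may
   be a sibcall, while a call that could go through the PLT may not,
   since the TOC pointer could differ as described above.  */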
27283
27284 static int
27285 rs6000_ra_ever_killed (void)
27286 {
27287 rtx_insn *top;
27288 rtx reg;
27289 rtx_insn *insn;
27290
27291 if (cfun->is_thunk)
27292 return 0;
27293
27294 if (cfun->machine->lr_save_state)
27295 return cfun->machine->lr_save_state - 1;
27296
27297 /* regs_ever_live has LR marked as used if any sibcalls are present,
27298 but this should not force saving and restoring in the
27299 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
27300 clobbers LR, so that is inappropriate. */
27301
27302 /* Also, the prologue can generate a store into LR that
27303 doesn't really count, like this:
27304
27305 move LR->R0
27306 bcl to set PIC register
27307 move LR->R31
27308 move R0->LR
27309
27310 When we're called from the epilogue, we need to avoid counting
27311 this as a store. */
27312
27313 push_topmost_sequence ();
27314 top = get_insns ();
27315 pop_topmost_sequence ();
27316 reg = gen_rtx_REG (Pmode, LR_REGNO);
27317
27318 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
27319 {
27320 if (INSN_P (insn))
27321 {
27322 if (CALL_P (insn))
27323 {
27324 if (!SIBLING_CALL_P (insn))
27325 return 1;
27326 }
27327 else if (find_regno_note (insn, REG_INC, LR_REGNO))
27328 return 1;
27329 else if (set_of (reg, insn) != NULL_RTX
27330 && !prologue_epilogue_contains (insn))
27331 return 1;
27332 }
27333 }
27334 return 0;
27335 }
27336 \f
27337 /* Emit instructions needed to load the TOC register.
27338 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set and
27339 there is a constant pool, or for SVR4 -fpic. */
27340
27341 void
27342 rs6000_emit_load_toc_table (int fromprolog)
27343 {
27344 rtx dest;
27345 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
27346
27347 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
27348 {
27349 char buf[30];
27350 rtx lab, tmp1, tmp2, got;
27351
27352 lab = gen_label_rtx ();
27353 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
27354 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
27355 if (flag_pic == 2)
27356 {
27357 got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
27358 need_toc_init = 1;
27359 }
27360 else
27361 got = rs6000_got_sym ();
27362 tmp1 = tmp2 = dest;
27363 if (!fromprolog)
27364 {
27365 tmp1 = gen_reg_rtx (Pmode);
27366 tmp2 = gen_reg_rtx (Pmode);
27367 }
27368 emit_insn (gen_load_toc_v4_PIC_1 (lab));
27369 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
27370 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
27371 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
27372 }
27373 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
27374 {
27375 emit_insn (gen_load_toc_v4_pic_si ());
27376 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
27377 }
27378 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
27379 {
27380 char buf[30];
27381 rtx temp0 = (fromprolog
27382 ? gen_rtx_REG (Pmode, 0)
27383 : gen_reg_rtx (Pmode));
27384
27385 if (fromprolog)
27386 {
27387 rtx symF, symL;
27388
27389 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
27390 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
27391
27392 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
27393 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
27394
27395 emit_insn (gen_load_toc_v4_PIC_1 (symF));
27396 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
27397 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
27398 }
27399 else
27400 {
27401 rtx tocsym, lab;
27402
27403 tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
27404 need_toc_init = 1;
27405 lab = gen_label_rtx ();
27406 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
27407 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
27408 if (TARGET_LINK_STACK)
27409 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
27410 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
27411 }
27412 emit_insn (gen_addsi3 (dest, temp0, dest));
27413 }
27414 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
27415 {
27416 /* This is for AIX code running in non-PIC ELF32. */
27417 rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
27418
27419 need_toc_init = 1;
27420 emit_insn (gen_elf_high (dest, realsym));
27421 emit_insn (gen_elf_low (dest, dest, realsym));
27422 }
27423 else
27424 {
27425 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
27426
27427 if (TARGET_32BIT)
27428 emit_insn (gen_load_toc_aix_si (dest));
27429 else
27430 emit_insn (gen_load_toc_aix_di (dest));
27431 }
27432 }
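/* A rough sketch (assembly approximate, label names assumed) of the
   TARGET_SECURE_PLT flag_pic == 2 sequence the load_toc_v4_PIC_*
   patterns in the first branch above expand to:

	bcl 20,31,.LCF0
     .LCF0:
	mflr 30
	addis 30,30,.LCTOC1-.LCF0@ha
	addi 30,30,.LCTOC1-.LCF0@l	*/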
27433
27434 /* Emit instructions to restore the link register after determining where
27435 its value has been stored. */
27436
27437 void
27438 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
27439 {
27440 rs6000_stack_t *info = rs6000_stack_info ();
27441 rtx operands[2];
27442
27443 operands[0] = source;
27444 operands[1] = scratch;
27445
27446 if (info->lr_save_p)
27447 {
27448 rtx frame_rtx = stack_pointer_rtx;
27449 HOST_WIDE_INT sp_offset = 0;
27450 rtx tmp;
27451
27452 if (frame_pointer_needed
27453 || cfun->calls_alloca
27454 || info->total_size > 32767)
27455 {
27456 tmp = gen_frame_mem (Pmode, frame_rtx);
27457 emit_move_insn (operands[1], tmp);
27458 frame_rtx = operands[1];
27459 }
27460 else if (info->push_p)
27461 sp_offset = info->total_size;
27462
27463 tmp = plus_constant (Pmode, frame_rtx,
27464 info->lr_save_offset + sp_offset);
27465 tmp = gen_frame_mem (Pmode, tmp);
27466 emit_move_insn (tmp, operands[0]);
27467 }
27468 else
27469 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
27470
27471 /* Freeze lr_save_p. We've just emitted rtl that depends on the
27472 state of lr_save_p so any change from here on would be a bug. In
27473 particular, stop rs6000_ra_ever_killed from considering the SET
27474 of lr we may have added just above. */
27475 cfun->machine->lr_save_state = info->lr_save_p + 1;
27476 }
27477
27478 static GTY(()) alias_set_type set = -1;
27479
27480 alias_set_type
27481 get_TOC_alias_set (void)
27482 {
27483 if (set == -1)
27484 set = new_alias_set ();
27485 return set;
27486 }
27487
27488 /* This returns nonzero if the current function uses the TOC. This is
27489 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
27490 is generated by the ABI_V4 load_toc_* patterns. */
27491 #if TARGET_ELF
27492 static int
27493 uses_TOC (void)
27494 {
27495 rtx_insn *insn;
27496
27497 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
27498 if (INSN_P (insn))
27499 {
27500 rtx pat = PATTERN (insn);
27501 int i;
27502
27503 if (GET_CODE (pat) == PARALLEL)
27504 for (i = 0; i < XVECLEN (pat, 0); i++)
27505 {
27506 rtx sub = XVECEXP (pat, 0, i);
27507 if (GET_CODE (sub) == USE)
27508 {
27509 sub = XEXP (sub, 0);
27510 if (GET_CODE (sub) == UNSPEC
27511 && XINT (sub, 1) == UNSPEC_TOC)
27512 return 1;
27513 }
27514 }
27515 }
27516 return 0;
27517 }
27518 #endif
27519
27520 rtx
27521 create_TOC_reference (rtx symbol, rtx largetoc_reg)
27522 {
27523 rtx tocrel, tocreg, hi;
27524
27525 if (TARGET_DEBUG_ADDR)
27526 {
27527 if (GET_CODE (symbol) == SYMBOL_REF)
27528 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
27529 XSTR (symbol, 0));
27530 else
27531 {
27532 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
27533 GET_RTX_NAME (GET_CODE (symbol)));
27534 debug_rtx (symbol);
27535 }
27536 }
27537
27538 if (!can_create_pseudo_p ())
27539 df_set_regs_ever_live (TOC_REGISTER, true);
27540
27541 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
27542 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
27543 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
27544 return tocrel;
27545
27546 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
27547 if (largetoc_reg != NULL)
27548 {
27549 emit_move_insn (largetoc_reg, hi);
27550 hi = largetoc_reg;
27551 }
27552 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
27553 }
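
/* For illustration: under the medium and large code models the
   HIGH/LO_SUM pair built above typically becomes the familiar two-insn
   TOC access, roughly

       addis rN,2,sym@toc@ha
       ld    rN,sym@toc@l(rN)

   while CMODEL_SMALL keeps the single UNSPEC_TOCREL reference.  */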
27554
27555 /* Issue assembly directives that create a reference to the given DWARF
27556 FRAME_TABLE_LABEL from the current function section. */
27557 void
27558 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
27559 {
27560 fprintf (asm_out_file, "\t.ref %s\n",
27561 (* targetm.strip_name_encoding) (frame_table_label));
27562 }
27563 \f
27564 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
27565 and the change to the stack pointer. */
27566
27567 static void
27568 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
27569 {
27570 rtvec p;
27571 int i;
27572 rtx regs[3];
27573
27574 i = 0;
27575 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
27576 if (hard_frame_needed)
27577 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
27578 if (!(REGNO (fp) == STACK_POINTER_REGNUM
27579 || (hard_frame_needed
27580 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
27581 regs[i++] = fp;
27582
27583 p = rtvec_alloc (i);
27584 while (--i >= 0)
27585 {
27586 rtx mem = gen_frame_mem (BLKmode, regs[i]);
27587 RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
27588 }
27589
27590 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
27591 }
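
/* For illustration: with both r1 and r31 in use the insn emitted above
   is roughly

       (parallel [(set (mem:BLK (reg 1)) (const_int 0))
                  (set (mem:BLK (reg 31)) (const_int 0))])

   which stores nothing; it exists only to create a dependence that
   keeps the scheduler from moving stack pointer updates past frame
   accesses.  */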
27592
27593 /* Emit the correct code for allocating stack space, as insns.
27594 If COPY_REG, leave a copy of the old stack pointer in it, offset by COPY_OFF.
27595 The generated code may use hard register 0 as a temporary. */
27596
27597 static rtx_insn *
27598 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
27599 {
27600 rtx_insn *insn;
27601 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
27602 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
27603 rtx todec = gen_int_mode (-size, Pmode);
27604 rtx par, set, mem;
27605
27606 if (INTVAL (todec) != -size)
27607 {
27608 warning (0, "stack frame too large");
27609 emit_insn (gen_trap ());
27610 return 0;
27611 }
27612
27613 if (crtl->limit_stack)
27614 {
27615 if (REG_P (stack_limit_rtx)
27616 && REGNO (stack_limit_rtx) > 1
27617 && REGNO (stack_limit_rtx) <= 31)
27618 {
27619 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
27620 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
27621 const0_rtx));
27622 }
27623 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
27624 && TARGET_32BIT
27625 && DEFAULT_ABI == ABI_V4)
27626 {
27627 rtx toload = gen_rtx_CONST (VOIDmode,
27628 gen_rtx_PLUS (Pmode,
27629 stack_limit_rtx,
27630 GEN_INT (size)));
27631
27632 emit_insn (gen_elf_high (tmp_reg, toload));
27633 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
27634 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
27635 const0_rtx));
27636 }
27637 else
27638 warning (0, "stack limit expression is not supported");
27639 }
27640
27641 if (copy_reg)
27642 {
27643 if (copy_off != 0)
27644 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
27645 else
27646 emit_move_insn (copy_reg, stack_reg);
27647 }
27648
27649 if (size > 32767)
27650 {
27651 /* Need a note here so that try_split doesn't get confused. */
27652 if (get_last_insn () == NULL_RTX)
27653 emit_note (NOTE_INSN_DELETED);
27654 insn = emit_move_insn (tmp_reg, todec);
27655 try_split (PATTERN (insn), insn, 0);
27656 todec = tmp_reg;
27657 }
27658
27659 insn = emit_insn (TARGET_32BIT
27660 ? gen_movsi_update_stack (stack_reg, stack_reg,
27661 todec, stack_reg)
27662 : gen_movdi_di_update_stack (stack_reg, stack_reg,
27663 todec, stack_reg));
27664 /* Since we didn't use gen_frame_mem to generate the MEM, grab
27665 it now and set the alias set/attributes. The above gen_*_update
27666 calls will generate a PARALLEL with the MEM set being the first
27667 operation. */
27668 par = PATTERN (insn);
27669 gcc_assert (GET_CODE (par) == PARALLEL);
27670 set = XVECEXP (par, 0, 0);
27671 gcc_assert (GET_CODE (set) == SET);
27672 mem = SET_DEST (set);
27673 gcc_assert (MEM_P (mem));
27674 MEM_NOTRAP_P (mem) = 1;
27675 set_mem_alias_set (mem, get_frame_alias_set ());
27676
27677 RTX_FRAME_RELATED_P (insn) = 1;
27678 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
27679 gen_rtx_SET (stack_reg, gen_rtx_PLUS (Pmode, stack_reg,
27680 GEN_INT (-size))));
27681 return insn;
27682 }
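
/* For illustration: a small frame of, say, 208 bytes is allocated above
   with a single "stwu 1,-208(1)" (32-bit) or "stdu 1,-208(1)" (64-bit);
   for frames larger than 32767 bytes the negated size is first loaded
   into r0 and the indexed form "stwux 1,1,0" / "stdux 1,1,0" is used.  */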
27683
27684 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
27685
27686 #if PROBE_INTERVAL > 32768
27687 #error Cannot use indexed addressing mode for stack probing
27688 #endif
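
/* With the usual STACK_CHECK_PROBE_INTERVAL_EXP default of 12, probes
   are emitted every 4096 bytes, comfortably below the 32768-byte limit
   checked above.  */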
27689
27690 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
27691 inclusive. These are offsets from the current stack pointer. */
27692
27693 static void
27694 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
27695 {
27696 /* See if we have a constant small number of probes to generate. If so,
27697 that's the easy case. */
27698 if (first + size <= 32768)
27699 {
27700 HOST_WIDE_INT i;
27701
27702 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
27703 it exceeds SIZE. If only one probe is needed, this will not
27704 generate any code. Then probe at FIRST + SIZE. */
27705 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
27706 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
27707 -(first + i)));
27708
27709 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
27710 -(first + size)));
27711 }
27712
27713 /* Otherwise, do the same as above, but in a loop. Note that we must be
27714 extra careful with variables wrapping around because we might be at
27715 the very top (or the very bottom) of the address space and we have
27716 to be able to handle this case properly; in particular, we use an
27717 equality test for the loop condition. */
27718 else
27719 {
27720 HOST_WIDE_INT rounded_size;
27721 rtx r12 = gen_rtx_REG (Pmode, 12);
27722 rtx r0 = gen_rtx_REG (Pmode, 0);
27723
27724 /* Sanity check for the addressing mode we're going to use. */
27725 gcc_assert (first <= 32768);
27726
27727 /* Step 1: round SIZE to the previous multiple of the interval. */
27728
27729 rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);
27730
27731
27732 /* Step 2: compute initial and final value of the loop counter. */
27733
27734 /* TEST_ADDR = SP + FIRST. */
27735 emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
27736 -first)));
27737
27738 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
27739 if (rounded_size > 32768)
27740 {
27741 emit_move_insn (r0, GEN_INT (-rounded_size));
27742 emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
27743 }
27744 else
27745 emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
27746 -rounded_size)));
27747
27748
27749 /* Step 3: the loop
27750
27751 do
27752 {
27753 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
27754 probe at TEST_ADDR
27755 }
27756 while (TEST_ADDR != LAST_ADDR)
27757
27758 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
27759 until it is equal to ROUNDED_SIZE. */
27760
27761 if (TARGET_64BIT)
27762 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
27763 else
27764 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
27765
27766
27767 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
27768 that SIZE is equal to ROUNDED_SIZE. */
27769
27770 if (size != rounded_size)
27771 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
27772 }
27773 }
27774
27775 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
27776 absolute addresses. */
27777
27778 const char *
27779 output_probe_stack_range (rtx reg1, rtx reg2)
27780 {
27781 static int labelno = 0;
27782 char loop_lab[32];
27783 rtx xops[2];
27784
27785 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);
27786
27787 /* Loop. */
27788 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
27789
27790 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
27791 xops[0] = reg1;
27792 xops[1] = GEN_INT (-PROBE_INTERVAL);
27793 output_asm_insn ("addi %0,%0,%1", xops);
27794
27795 /* Probe at TEST_ADDR. */
27796 xops[1] = gen_rtx_REG (Pmode, 0);
27797 output_asm_insn ("stw %1,0(%0)", xops);
27798
27799 /* Test if TEST_ADDR == LAST_ADDR. */
27800 xops[1] = reg2;
27801 if (TARGET_64BIT)
27802 output_asm_insn ("cmpd 0,%0,%1", xops);
27803 else
27804 output_asm_insn ("cmpw 0,%0,%1", xops);
27805
27806 /* Branch. */
27807 fputs ("\tbne 0,", asm_out_file);
27808 assemble_name_raw (asm_out_file, loop_lab);
27809 fputc ('\n', asm_out_file);
27810
27811 return "";
27812 }
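
/* For illustration: with the default 4096-byte interval the loop
   emitted above assembles, on a 32-bit target, to roughly

   .LPSRL0:
       addi 12,12,-4096
       stw  0,0(12)
       cmpw 0,12,0
       bne  0,.LPSRL0  */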
27813
27814 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
27815 with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
27816 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
27817 deduce these equivalences by itself, so that it wouldn't be necessary
27818 to hold its hand so much. Don't be tempted to always supply d2_f_d_e
27819 with the actual cfa register, i.e. r31 when we are using a hard frame
27820 pointer. That fails when saving regs off r1, and sched moves the
27821 r31 setup past the reg saves. */
27822
27823 static rtx_insn *
27824 rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
27825 rtx reg2, rtx repl2)
27826 {
27827 rtx repl;
27828
27829 if (REGNO (reg) == STACK_POINTER_REGNUM)
27830 {
27831 gcc_checking_assert (val == 0);
27832 repl = NULL_RTX;
27833 }
27834 else
27835 repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
27836 GEN_INT (val));
27837
27838 rtx pat = PATTERN (insn);
27839 if (!repl && !reg2)
27840 {
27841 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
27842 if (GET_CODE (pat) == PARALLEL)
27843 for (int i = 0; i < XVECLEN (pat, 0); i++)
27844 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
27845 {
27846 rtx set = XVECEXP (pat, 0, i);
27847
27848 /* If this PARALLEL has been emitted for out-of-line
27849 register save functions, or store multiple, then omit
27850 eh_frame info for any user-defined global regs. If
27851 eh_frame info is supplied, frame unwinding will
27852 restore a user reg. */
27853 if (!REG_P (SET_SRC (set))
27854 || !fixed_reg_p (REGNO (SET_SRC (set))))
27855 RTX_FRAME_RELATED_P (set) = 1;
27856 }
27857 RTX_FRAME_RELATED_P (insn) = 1;
27858 return insn;
27859 }
27860
27861 /* We expect that 'pat' is either a SET or a PARALLEL containing
27862 SETs (and possibly other stuff). In a PARALLEL, all the SETs
27863 are important so they all have to be marked RTX_FRAME_RELATED_P.
27864 Call simplify_replace_rtx on the SETs rather than the whole insn
27865 so as to leave the other stuff alone (for example USE of r12). */
27866
27867 set_used_flags (pat);
27868 if (GET_CODE (pat) == SET)
27869 {
27870 if (repl)
27871 pat = simplify_replace_rtx (pat, reg, repl);
27872 if (reg2)
27873 pat = simplify_replace_rtx (pat, reg2, repl2);
27874 }
27875 else if (GET_CODE (pat) == PARALLEL)
27876 {
27877 pat = shallow_copy_rtx (pat);
27878 XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));
27879
27880 for (int i = 0; i < XVECLEN (pat, 0); i++)
27881 if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
27882 {
27883 rtx set = XVECEXP (pat, 0, i);
27884
27885 if (repl)
27886 set = simplify_replace_rtx (set, reg, repl);
27887 if (reg2)
27888 set = simplify_replace_rtx (set, reg2, repl2);
27889 XVECEXP (pat, 0, i) = set;
27890
27891 /* Omit eh_frame info for any user-defined global regs. */
27892 if (!REG_P (SET_SRC (set))
27893 || !fixed_reg_p (REGNO (SET_SRC (set))))
27894 RTX_FRAME_RELATED_P (set) = 1;
27895 }
27896 }
27897 else
27898 gcc_unreachable ();
27899
27900 RTX_FRAME_RELATED_P (insn) = 1;
27901 add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));
27902
27903 return insn;
27904 }
27905
27906 /* Returns an insn that has a vrsave set operation with the
27907 appropriate CLOBBERs. */
27908
27909 static rtx
27910 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
27911 {
27912 int nclobs, i;
27913 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
27914 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
27915
27916 clobs[0]
27917 = gen_rtx_SET (vrsave,
27918 gen_rtx_UNSPEC_VOLATILE (SImode,
27919 gen_rtvec (2, reg, vrsave),
27920 UNSPECV_SET_VRSAVE));
27921
27922 nclobs = 1;
27923
27924 /* We need to clobber the registers in the mask so the scheduler
27925 does not move sets to VRSAVE before sets of AltiVec registers.
27926
27927 However, if the function receives nonlocal gotos, reload will set
27928 all call saved registers live. We will end up with:
27929
27930 (set (reg 999) (mem))
27931 (parallel [ (set (reg vrsave) (unspec blah))
27932 (clobber (reg 999))])
27933
27934 The clobber will cause the store into reg 999 to be dead, and
27935 flow will attempt to delete an epilogue insn. In this case, we
27936 need an unspec use/set of the register. */
27937
27938 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
27939 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
27940 {
27941 if (!epiloguep || call_used_regs [i])
27942 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
27943 gen_rtx_REG (V4SImode, i));
27944 else
27945 {
27946 rtx reg = gen_rtx_REG (V4SImode, i);
27947
27948 clobs[nclobs++]
27949 = gen_rtx_SET (reg,
27950 gen_rtx_UNSPEC (V4SImode,
27951 gen_rtvec (1, reg), 27));
27952 }
27953 }
27954
27955 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
27956
27957 for (i = 0; i < nclobs; ++i)
27958 XVECEXP (insn, 0, i) = clobs[i];
27959
27960 return insn;
27961 }
27962
27963 static rtx
27964 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
27965 {
27966 rtx addr, mem;
27967
27968 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
27969 mem = gen_frame_mem (GET_MODE (reg), addr);
27970 return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
27971 }
27972
27973 static rtx
27974 gen_frame_load (rtx reg, rtx frame_reg, int offset)
27975 {
27976 return gen_frame_set (reg, frame_reg, offset, false);
27977 }
27978
27979 static rtx
27980 gen_frame_store (rtx reg, rtx frame_reg, int offset)
27981 {
27982 return gen_frame_set (reg, frame_reg, offset, true);
27983 }
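
/* E.g. gen_frame_store (reg, r1, -8) for a DImode REG produces roughly
   (set (mem:DI (plus:DI (reg 1) (const_int -8))) (reg:DI ...)), with
   the MEM already carrying the frame alias set.  */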
27984
27985 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
27986 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
27987
27988 static rtx_insn *
27989 emit_frame_save (rtx frame_reg, machine_mode mode,
27990 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
27991 {
27992 rtx reg;
27993
27994 /* Assert that we were not given a case that needs register indexed addressing. */
27995 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
27996 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
27997 || (TARGET_E500_DOUBLE && mode == DFmode)
27998 || (TARGET_SPE_ABI
27999 && SPE_VECTOR_MODE (mode)
28000 && !SPE_CONST_OFFSET_OK (offset))));
28001
28002 reg = gen_rtx_REG (mode, regno);
28003 rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
28004 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
28005 NULL_RTX, NULL_RTX);
28006 }
28007
28008 /* Return an offset memory reference suitable for a frame store, loading
28009 the offset into a scratch register first when needed for a valid address. */
28010
28011 static rtx
28012 gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
28013 {
28014 rtx int_rtx, offset_rtx;
28015
28016 int_rtx = GEN_INT (offset);
28017
28018 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
28019 || (TARGET_E500_DOUBLE && mode == DFmode))
28020 {
28021 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
28022 emit_move_insn (offset_rtx, int_rtx);
28023 }
28024 else
28025 offset_rtx = int_rtx;
28026
28027 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
28028 }
28029
28030 #ifndef TARGET_FIX_AND_CONTINUE
28031 #define TARGET_FIX_AND_CONTINUE 0
28032 #endif
28033
28034 /* The first saved register is GPR 13 or 14 (ABI-dependent), FPR 14, or VR 20; we need the smallest. */
28035 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
28036 #define LAST_SAVRES_REGISTER 31
28037 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
28038
28039 enum {
28040 SAVRES_LR = 0x1,
28041 SAVRES_SAVE = 0x2,
28042 SAVRES_REG = 0x0c,
28043 SAVRES_GPR = 0,
28044 SAVRES_FPR = 4,
28045 SAVRES_VR = 8
28046 };
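
/* A SEL value combines one of SAVRES_{GPR,FPR,VR} with SAVRES_SAVE
   and/or SAVRES_LR; e.g. an FPR save that also saves LR is
   SAVRES_SAVE | SAVRES_FPR | SAVRES_LR == 0x7.  The possible values
   0..11 index the second dimension of savres_routine_syms below.  */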
28047
28048 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
28049
28050 /* Temporary holding space for an out-of-line register save/restore
28051 routine name. */
28052 static char savres_routine_name[30];
28053
28054 /* Return the name for an out-of-line register save/restore routine.
28055 SEL selects save vs. restore, the register class, and LR handling. */
28056
28057 static char *
28058 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
28059 {
28060 const char *prefix = "";
28061 const char *suffix = "";
28062
28063 /* Different targets are supposed to define
28064 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
28065 routine name could be defined with:
28066
28067 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
28068
28069 This is a nice idea in theory, but in reality, things are
28070 complicated in several ways:
28071
28072 - ELF targets have save/restore routines for GPRs.
28073
28074 - SPE targets use different prefixes for 32/64-bit registers, and
28075 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
28076
28077 - PPC64 ELF targets have routines for save/restore of GPRs that
28078 differ in what they do with the link register, so having a set
28079 prefix doesn't work. (We only use one of the save routines at
28080 the moment, though.)
28081
28082 - PPC32 elf targets have "exit" versions of the restore routines
28083 that restore the link register and can save some extra space.
28084 These require an extra suffix. (There are also "tail" versions
28085 of the restore routines and "GOT" versions of the save routines,
28086 but we don't generate those at present. Same problems apply,
28087 though.)
28088
28089 We deal with all this by synthesizing our own prefix/suffix and
28090 using that for the simple sprintf call shown above. */
28091 if (TARGET_SPE)
28092 {
28093 /* No floating point saves on the SPE. */
28094 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
28095
28096 if ((sel & SAVRES_SAVE))
28097 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
28098 else
28099 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
28100
28101 if ((sel & SAVRES_LR))
28102 suffix = "_x";
28103 }
28104 else if (DEFAULT_ABI == ABI_V4)
28105 {
28106 if (TARGET_64BIT)
28107 goto aix_names;
28108
28109 if ((sel & SAVRES_REG) == SAVRES_GPR)
28110 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
28111 else if ((sel & SAVRES_REG) == SAVRES_FPR)
28112 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
28113 else if ((sel & SAVRES_REG) == SAVRES_VR)
28114 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
28115 else
28116 abort ();
28117
28118 if ((sel & SAVRES_LR))
28119 suffix = "_x";
28120 }
28121 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28122 {
28123 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
28124 /* No out-of-line save/restore routines for GPRs on AIX. */
28125 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
28126 #endif
28127
28128 aix_names:
28129 if ((sel & SAVRES_REG) == SAVRES_GPR)
28130 prefix = ((sel & SAVRES_SAVE)
28131 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
28132 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
28133 else if ((sel & SAVRES_REG) == SAVRES_FPR)
28134 {
28135 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
28136 if ((sel & SAVRES_LR))
28137 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
28138 else
28139 #endif
28140 {
28141 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
28142 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
28143 }
28144 }
28145 else if ((sel & SAVRES_REG) == SAVRES_VR)
28146 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
28147 else
28148 abort ();
28149 }
28150
28151 if (DEFAULT_ABI == ABI_DARWIN)
28152 {
28153 /* The Darwin approach is (slightly) different, in order to be
28154 compatible with code generated by the system toolchain. There is a
28155 single symbol for the start of the save sequence, and the code here
28156 computes an offset into that sequence based on the first register
28157 to be saved. */
28158 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
28159 if ((sel & SAVRES_REG) == SAVRES_GPR)
28160 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
28161 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
28162 (regno - 13) * 4, prefix, regno);
28163 else if ((sel & SAVRES_REG) == SAVRES_FPR)
28164 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
28165 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
28166 else if ((sel & SAVRES_REG) == SAVRES_VR)
28167 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
28168 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
28169 else
28170 abort ();
28171 }
28172 else
28173 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
28174
28175 return savres_routine_name;
28176 }
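
/* For example, by the rules above, restoring GPRs starting at r29 on
   32-bit SVR4 with the "exit" (LR-restoring) variant yields
   "_restgpr_29_x", while the same restore on 64-bit ELF yields
   "_restgpr0_29".  */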
28177
28178 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
28179 SEL selects save vs. restore, the register class, and LR handling. */
28180
28181 static rtx
28182 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
28183 {
28184 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
28185 ? info->first_gp_reg_save
28186 : (sel & SAVRES_REG) == SAVRES_FPR
28187 ? info->first_fp_reg_save - 32
28188 : (sel & SAVRES_REG) == SAVRES_VR
28189 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
28190 : -1);
28191 rtx sym;
28192 int select = sel;
28193
28194 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
28195 versions of the gpr routines. */
28196 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
28197 && info->spe_64bit_regs_used)
28198 select ^= SAVRES_FPR ^ SAVRES_GPR;
28199
28200 /* Don't generate bogus routine names. */
28201 gcc_assert (FIRST_SAVRES_REGISTER <= regno
28202 && regno <= LAST_SAVRES_REGISTER
28203 && select >= 0 && select <= 12);
28204
28205 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
28206
28207 if (sym == NULL)
28208 {
28209 char *name;
28210
28211 name = rs6000_savres_routine_name (info, regno, sel);
28212
28213 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
28214 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
28215 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
28216 }
28217
28218 return sym;
28219 }
28220
28221 /* Emit a sequence of insns, including a stack tie if needed, for
28222 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
28223 reset the stack pointer, but move the base of the frame into
28224 reg UPDT_REGNO for use by out-of-line register restore routines. */
28225
28226 static rtx
28227 rs6000_emit_stack_reset (rs6000_stack_t *info,
28228 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
28229 unsigned updt_regno)
28230 {
28231 /* If there is nothing to do, don't do anything. */
28232 if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
28233 return NULL_RTX;
28234
28235 rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
28236
28237 /* This blockage is needed so that sched doesn't decide to move
28238 the sp change before the register restores. */
28239 if (DEFAULT_ABI == ABI_V4
28240 || (TARGET_SPE_ABI
28241 && info->spe_64bit_regs_used != 0
28242 && info->first_gp_reg_save != 32))
28243 return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
28244 GEN_INT (frame_off)));
28245
28246 /* If we are restoring registers out-of-line, we will be using the
28247 "exit" variants of the restore routines, which will reset the
28248 stack for us. But we do need to point updt_reg into the
28249 right place for those routines. */
28250 if (frame_off != 0)
28251 return emit_insn (gen_add3_insn (updt_reg_rtx,
28252 frame_reg_rtx, GEN_INT (frame_off)));
28253 else
28254 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
28257 }
28258
28259 /* Return the register number used as a pointer by out-of-line
28260 save/restore functions. */
28261
28262 static inline unsigned
28263 ptr_regno_for_savres (int sel)
28264 {
28265 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28266 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
28267 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
28268 }
28269
28270 /* Construct a parallel rtx describing the effect of a call to an
28271 out-of-line register save/restore routine, and emit the insn
28272 or jump_insn as appropriate. */
28273
28274 static rtx_insn *
28275 rs6000_emit_savres_rtx (rs6000_stack_t *info,
28276 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
28277 machine_mode reg_mode, int sel)
28278 {
28279 int i;
28280 int offset, start_reg, end_reg, n_regs, use_reg;
28281 int reg_size = GET_MODE_SIZE (reg_mode);
28282 rtx sym;
28283 rtvec p;
28284 rtx par;
28285 rtx_insn *insn;
28286
28287 offset = 0;
28288 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
28289 ? info->first_gp_reg_save
28290 : (sel & SAVRES_REG) == SAVRES_FPR
28291 ? info->first_fp_reg_save
28292 : (sel & SAVRES_REG) == SAVRES_VR
28293 ? info->first_altivec_reg_save
28294 : -1);
28295 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
28296 ? 32
28297 : (sel & SAVRES_REG) == SAVRES_FPR
28298 ? 64
28299 : (sel & SAVRES_REG) == SAVRES_VR
28300 ? LAST_ALTIVEC_REGNO + 1
28301 : -1);
28302 n_regs = end_reg - start_reg;
28303 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
28304 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
28305 + n_regs);
28306
28307 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
28308 RTVEC_ELT (p, offset++) = ret_rtx;
28309
28310 RTVEC_ELT (p, offset++)
28311 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
28312
28313 sym = rs6000_savres_routine_sym (info, sel);
28314 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
28315
28316 use_reg = ptr_regno_for_savres (sel);
28317 if ((sel & SAVRES_REG) == SAVRES_VR)
28318 {
28319 /* Vector regs are saved/restored using [reg+reg] addressing. */
28320 RTVEC_ELT (p, offset++)
28321 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
28322 RTVEC_ELT (p, offset++)
28323 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
28324 }
28325 else
28326 RTVEC_ELT (p, offset++)
28327 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
28328
28329 for (i = 0; i < end_reg - start_reg; i++)
28330 RTVEC_ELT (p, i + offset)
28331 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
28332 frame_reg_rtx, save_area_offset + reg_size * i,
28333 (sel & SAVRES_SAVE) != 0);
28334
28335 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
28336 RTVEC_ELT (p, i + offset)
28337 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
28338
28339 par = gen_rtx_PARALLEL (VOIDmode, p);
28340
28341 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
28342 {
28343 insn = emit_jump_insn (par);
28344 JUMP_LABEL (insn) = ret_rtx;
28345 }
28346 else
28347 insn = emit_insn (par);
28348 return insn;
28349 }
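
/* For illustration: a GPR "exit" restore (SAVRES_GPR | SAVRES_LR
   without SAVRES_SAVE) builds a jump_insn whose PARALLEL is roughly

       (parallel [(return)
                  (clobber (reg LR))
                  (use (symbol_ref "_restgpr_29_x"))
                  (use (reg 11))
                  (set (reg 29) (mem ...)) ...])  */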
28350
28351 /* Emit code to store CR fields that need to be saved into REG. */
28352
28353 static void
28354 rs6000_emit_move_from_cr (rtx reg)
28355 {
28356 /* Only the ELFv2 ABI allows storing only selected fields. */
28357 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
28358 {
28359 int i, cr_reg[8], count = 0;
28360
28361 /* Collect CR fields that must be saved. */
28362 for (i = 0; i < 8; i++)
28363 if (save_reg_p (CR0_REGNO + i))
28364 cr_reg[count++] = i;
28365
28366 /* If it's just a single one, use mfcrf. */
28367 if (count == 1)
28368 {
28369 rtvec p = rtvec_alloc (1);
28370 rtvec r = rtvec_alloc (2);
28371 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
28372 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
28373 RTVEC_ELT (p, 0)
28374 = gen_rtx_SET (reg,
28375 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
28376
28377 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28378 return;
28379 }
28380
28381 /* ??? It might be better to handle count == 2 / 3 cases here
28382 as well, using logical operations to combine the values. */
28383 }
28384
28385 emit_insn (gen_movesi_from_cr (reg));
28386 }
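
/* E.g. if only CR2 must be saved, count == 1 above and the single-field
   form is used, assembling to roughly "mfcrf rN,0x20" (0x20 being
   1 << (7 - 2), the mask bit for field 2); otherwise a full "mfcr rN"
   is emitted.  */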
28387
28388 /* Return whether the split-stack arg pointer (r12) is used. */
28389
28390 static bool
28391 split_stack_arg_pointer_used_p (void)
28392 {
28393 /* If the pseudo holding the arg pointer is no longer a pseudo,
28394 then the arg pointer is used. */
28395 if (cfun->machine->split_stack_arg_pointer != NULL_RTX
28396 && (!REG_P (cfun->machine->split_stack_arg_pointer)
28397 || (REGNO (cfun->machine->split_stack_arg_pointer)
28398 < FIRST_PSEUDO_REGISTER)))
28399 return true;
28400
28401 /* Unfortunately we also need to do some code scanning, since
28402 r12 may have been substituted for the pseudo. */
28403 rtx_insn *insn;
28404 basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
28405 FOR_BB_INSNS (bb, insn)
28406 if (NONDEBUG_INSN_P (insn))
28407 {
28408 /* A call destroys r12. */
28409 if (CALL_P (insn))
28410 return false;
28411
28412 df_ref use;
28413 FOR_EACH_INSN_USE (use, insn)
28414 {
28415 rtx x = DF_REF_REG (use);
28416 if (REG_P (x) && REGNO (x) == 12)
28417 return true;
28418 }
28419 df_ref def;
28420 FOR_EACH_INSN_DEF (def, insn)
28421 {
28422 rtx x = DF_REF_REG (def);
28423 if (REG_P (x) && REGNO (x) == 12)
28424 return false;
28425 }
28426 }
28427 return bitmap_bit_p (DF_LR_OUT (bb), 12);
28428 }
28429
28430 /* Return whether we need to emit an ELFv2 global entry point prologue. */
28431
28432 static bool
28433 rs6000_global_entry_point_needed_p (void)
28434 {
28435 /* Only needed for the ELFv2 ABI. */
28436 if (DEFAULT_ABI != ABI_ELFv2)
28437 return false;
28438
28439 /* With -msingle-pic-base, we assume the whole program shares the same
28440 TOC, so no global entry point prologues are needed anywhere. */
28441 if (TARGET_SINGLE_PIC_BASE)
28442 return false;
28443
28444 /* Ensure we have a global entry point for thunks. ??? We could
28445 avoid that if the target routine doesn't need a global entry point,
28446 but we do not know whether this is the case at this point. */
28447 if (cfun->is_thunk)
28448 return true;
28449
28450 /* For regular functions, rs6000_emit_prologue sets this flag if the
28451 routine ever uses the TOC pointer. */
28452 return cfun->machine->r2_setup_needed;
28453 }
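
/* For illustration: when this returns true, the global entry point
   emitted for the function is the standard ELFv2 TOC setup, roughly

   0:  addis 2,12,.TOC.-0b@ha
       addi  2,2,.TOC.-0b@l
       .localentry name,.-name  */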
28454
28455 /* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS. */
28456 static sbitmap
28457 rs6000_get_separate_components (void)
28458 {
28459 rs6000_stack_t *info = rs6000_stack_info ();
28460
28461 if (WORLD_SAVE_P (info))
28462 return NULL;
28463
28464 if (TARGET_SPE_ABI)
28465 return NULL;
28466
28467 sbitmap components = sbitmap_alloc (32);
28468 bitmap_clear (components);
28469
28470 gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
28471 && !(info->savres_strategy & REST_MULTIPLE));
28472
28473 /* The GPRs we need saved to the frame. */
28474 if ((info->savres_strategy & SAVE_INLINE_GPRS)
28475 && (info->savres_strategy & REST_INLINE_GPRS))
28476 {
28477 int reg_size = TARGET_32BIT ? 4 : 8;
28478 int offset = info->gp_save_offset;
28479 if (info->push_p)
28480 offset += info->total_size;
28481
28482 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
28483 {
28484 if (IN_RANGE (offset, -0x8000, 0x7fff)
28485 && rs6000_reg_live_or_pic_offset_p (regno))
28486 bitmap_set_bit (components, regno);
28487
28488 offset += reg_size;
28489 }
28490 }
28491
28492 /* Don't mess with the hard frame pointer. */
28493 if (frame_pointer_needed)
28494 bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);
28495
28496 /* Don't mess with the fixed TOC register. */
28497 if ((TARGET_TOC && TARGET_MINIMAL_TOC)
28498 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
28499 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
28500 bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);
28501
28502 /* Optimize LR save and restore if we can. This is component 0. Any
28503 out-of-line register save/restore routines need LR. */
28504 if (info->lr_save_p
28505 && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
28506 && (info->savres_strategy & SAVE_INLINE_GPRS)
28507 && (info->savres_strategy & REST_INLINE_GPRS)
28508 && (info->savres_strategy & SAVE_INLINE_FPRS)
28509 && (info->savres_strategy & REST_INLINE_FPRS)
28510 && (info->savres_strategy & SAVE_INLINE_VRS)
28511 && (info->savres_strategy & REST_INLINE_VRS))
28512 {
28513 int offset = info->lr_save_offset;
28514 if (info->push_p)
28515 offset += info->total_size;
28516 if (IN_RANGE (offset, -0x8000, 0x7fff))
28517 bitmap_set_bit (components, 0);
28518 }
28519
28520 return components;
28521 }
28522
28523 /* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB. */
28524 static sbitmap
28525 rs6000_components_for_bb (basic_block bb)
28526 {
28527 rs6000_stack_t *info = rs6000_stack_info ();
28528
28529 bitmap in = DF_LIVE_IN (bb);
28530 bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
28531 bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;
28532
28533 sbitmap components = sbitmap_alloc (32);
28534 bitmap_clear (components);
28535
28536 /* GPRs are used in a bb if they are in the IN, GEN, or KILL sets. */
28537 for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
28538 if (bitmap_bit_p (in, regno)
28539 || bitmap_bit_p (gen, regno)
28540 || bitmap_bit_p (kill, regno))
28541 bitmap_set_bit (components, regno);
28542
28543 /* LR needs to be saved around a bb if it is killed in that bb. */
28544 if (bitmap_bit_p (in, LR_REGNO)
28545 || bitmap_bit_p (gen, LR_REGNO)
28546 || bitmap_bit_p (kill, LR_REGNO))
28547 bitmap_set_bit (components, 0);
28548
28549 return components;
28550 }
28551
28552 /* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS. */
28553 static void
28554 rs6000_disqualify_components (sbitmap components, edge e,
28555 sbitmap edge_components, bool /*is_prologue*/)
28556 {
28557 /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
28558 live where we want to place that code. */
28559 if (bitmap_bit_p (edge_components, 0)
28560 && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
28561 {
28562 if (dump_file)
28563 fprintf (dump_file, "Disqualifying LR because GPR0 is live "
28564 "on entry to bb %d\n", e->dest->index);
28565 bitmap_clear_bit (components, 0);
28566 }
28567 }
28568
28569 /* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS. */
28570 static void
28571 rs6000_emit_prologue_components (sbitmap components)
28572 {
28573 rs6000_stack_t *info = rs6000_stack_info ();
28574 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
28575 ? HARD_FRAME_POINTER_REGNUM
28576 : STACK_POINTER_REGNUM);
28577 int reg_size = TARGET_32BIT ? 4 : 8;
28578
28579 /* Prologue for LR. */
28580 if (bitmap_bit_p (components, 0))
28581 {
28582 rtx reg = gen_rtx_REG (Pmode, 0);
28583 rtx_insn *insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
28584 RTX_FRAME_RELATED_P (insn) = 1;
28585 add_reg_note (insn, REG_CFA_REGISTER, NULL);
28586
28587 int offset = info->lr_save_offset;
28588 if (info->push_p)
28589 offset += info->total_size;
28590
28591 insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
28592 RTX_FRAME_RELATED_P (insn) = 1;
28593 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
28594 rtx mem = copy_rtx (SET_DEST (single_set (insn)));
28595 add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
28596 }
28597
28598 /* Prologue for the GPRs. */
28599 int offset = info->gp_save_offset;
28600 if (info->push_p)
28601 offset += info->total_size;
28602
28603 for (int i = info->first_gp_reg_save; i < 32; i++)
28604 {
28605 if (bitmap_bit_p (components, i))
28606 {
28607 rtx reg = gen_rtx_REG (Pmode, i);
28608 rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
28609 RTX_FRAME_RELATED_P (insn) = 1;
28610 rtx set = copy_rtx (single_set (insn));
28611 add_reg_note (insn, REG_CFA_OFFSET, set);
28612 }
28613
28614 offset += reg_size;
28615 }
28616 }
28617
28618 /* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS. */
28619 static void
28620 rs6000_emit_epilogue_components (sbitmap components)
28621 {
28622 rs6000_stack_t *info = rs6000_stack_info ();
28623 rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
28624 ? HARD_FRAME_POINTER_REGNUM
28625 : STACK_POINTER_REGNUM);
28626 int reg_size = TARGET_32BIT ? 4 : 8;
28627
28628 /* Epilogue for the GPRs. */
28629 int offset = info->gp_save_offset;
28630 if (info->push_p)
28631 offset += info->total_size;
28632
28633 for (int i = info->first_gp_reg_save; i < 32; i++)
28634 {
28635 if (bitmap_bit_p (components, i))
28636 {
28637 rtx reg = gen_rtx_REG (Pmode, i);
28638 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
28639 RTX_FRAME_RELATED_P (insn) = 1;
28640 add_reg_note (insn, REG_CFA_RESTORE, reg);
28641 }
28642
28643 offset += reg_size;
28644 }
28645
28646 /* Epilogue for LR. */
28647 if (bitmap_bit_p (components, 0))
28648 {
28649 int offset = info->lr_save_offset;
28650 if (info->push_p)
28651 offset += info->total_size;
28652
28653 rtx reg = gen_rtx_REG (Pmode, 0);
28654 rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
28655
28656 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
28657 insn = emit_move_insn (lr, reg);
28658 RTX_FRAME_RELATED_P (insn) = 1;
28659 add_reg_note (insn, REG_CFA_RESTORE, lr);
28660 }
28661 }
28662
28663 /* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS. */
28664 static void
28665 rs6000_set_handled_components (sbitmap components)
28666 {
28667 rs6000_stack_t *info = rs6000_stack_info ();
28668
28669 for (int i = info->first_gp_reg_save; i < 32; i++)
28670 if (bitmap_bit_p (components, i))
28671 cfun->machine->gpr_is_wrapped_separately[i] = true;
28672
28673 if (bitmap_bit_p (components, 0))
28674 cfun->machine->lr_is_wrapped_separately = true;
28675 }
28676
28677 /* Emit function prologue as insns. */
28678
28679 void
28680 rs6000_emit_prologue (void)
28681 {
28682 rs6000_stack_t *info = rs6000_stack_info ();
28683 machine_mode reg_mode = Pmode;
28684 int reg_size = TARGET_32BIT ? 4 : 8;
28685 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
28686 rtx frame_reg_rtx = sp_reg_rtx;
28687 unsigned int cr_save_regno;
28688 rtx cr_save_rtx = NULL_RTX;
28689 rtx_insn *insn;
28690 int strategy;
28691 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
28692 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
28693 && call_used_regs[STATIC_CHAIN_REGNUM]);
28694 int using_split_stack = (flag_split_stack
28695 && (lookup_attribute ("no_split_stack",
28696 DECL_ATTRIBUTES (cfun->decl))
28697 == NULL));
28698
28699 /* Offset to top of frame for frame_reg and sp respectively. */
28700 HOST_WIDE_INT frame_off = 0;
28701 HOST_WIDE_INT sp_off = 0;
28702 /* sp_adjust is the stack adjusting instruction, tracked so that the
28703 insn setting up the split-stack arg pointer can be emitted just
28704 prior to it, when r12 is not used here for other purposes. */
28705 rtx_insn *sp_adjust = 0;
28706
28707 #if CHECKING_P
28708 /* Track and check usage of r0, r11, r12. */
28709 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
28710 #define START_USE(R) do \
28711 { \
28712 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
28713 reg_inuse |= 1 << (R); \
28714 } while (0)
28715 #define END_USE(R) do \
28716 { \
28717 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
28718 reg_inuse &= ~(1 << (R)); \
28719 } while (0)
28720 #define NOT_INUSE(R) do \
28721 { \
28722 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
28723 } while (0)
28724 #else
28725 #define START_USE(R) do {} while (0)
28726 #define END_USE(R) do {} while (0)
28727 #define NOT_INUSE(R) do {} while (0)
28728 #endif
28729
28730 if (DEFAULT_ABI == ABI_ELFv2
28731 && !TARGET_SINGLE_PIC_BASE)
28732 {
28733 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
28734
28735 /* With -mminimal-toc we may generate an extra use of r2 below. */
28736 if (TARGET_TOC && TARGET_MINIMAL_TOC
28737 && !constant_pool_empty_p ())
28738 cfun->machine->r2_setup_needed = true;
28739 }
28740
28741
28742 if (flag_stack_usage_info)
28743 current_function_static_stack_size = info->total_size;
28744
28745 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
28746 {
28747 HOST_WIDE_INT size = info->total_size;
28748
28749 if (crtl->is_leaf && !cfun->calls_alloca)
28750 {
28751 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
28752 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
28753 size - STACK_CHECK_PROTECT);
28754 }
28755 else if (size > 0)
28756 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
28757 }
28758
28759 if (TARGET_FIX_AND_CONTINUE)
28760 {
28761 /* gdb on darwin arranges to forward a function from the old
28762 address by modifying the first 5 instructions of the function
28763 to branch to the overriding function. This is necessary to
28764 permit function pointers that point to the old function to
28765 actually forward to the new function. */
28766 emit_insn (gen_nop ());
28767 emit_insn (gen_nop ());
28768 emit_insn (gen_nop ());
28769 emit_insn (gen_nop ());
28770 emit_insn (gen_nop ());
28771 }
28772
28773 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
28774 {
28775 reg_mode = V2SImode;
28776 reg_size = 8;
28777 }
28778
28779 /* Handle world saves specially here. */
28780 if (WORLD_SAVE_P (info))
28781 {
28782 int i, j, sz;
28783 rtx treg;
28784 rtvec p;
28785 rtx reg0;
28786
28787 /* save_world expects lr in r0. */
28788 reg0 = gen_rtx_REG (Pmode, 0);
28789 if (info->lr_save_p)
28790 {
28791 insn = emit_move_insn (reg0,
28792 gen_rtx_REG (Pmode, LR_REGNO));
28793 RTX_FRAME_RELATED_P (insn) = 1;
28794 }
28795
28796 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
28797 assumptions about the offsets of various bits of the stack
28798 frame. */
28799 gcc_assert (info->gp_save_offset == -220
28800 && info->fp_save_offset == -144
28801 && info->lr_save_offset == 8
28802 && info->cr_save_offset == 4
28803 && info->push_p
28804 && info->lr_save_p
28805 && (!crtl->calls_eh_return
28806 || info->ehrd_offset == -432)
28807 && info->vrsave_save_offset == -224
28808 && info->altivec_save_offset == -416);
28809
28810 treg = gen_rtx_REG (SImode, 11);
28811 emit_move_insn (treg, GEN_INT (-info->total_size));
28812
28813 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
28814 in R11. It also clobbers R12, so beware! */
28815
28816 /* Preserve CR2 for save_world prologues. */
28817 sz = 5;
28818 sz += 32 - info->first_gp_reg_save;
28819 sz += 64 - info->first_fp_reg_save;
28820 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
28821 p = rtvec_alloc (sz);
28822 j = 0;
28823 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
28824 gen_rtx_REG (SImode,
28825 LR_REGNO));
28826 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
28827 gen_rtx_SYMBOL_REF (Pmode,
28828 "*save_world"));
28829 /* We do floats first so that the instruction pattern matches
28830 properly. */
28831 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28832 RTVEC_ELT (p, j++)
28833 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
28834 ? DFmode : SFmode,
28835 info->first_fp_reg_save + i),
28836 frame_reg_rtx,
28837 info->fp_save_offset + frame_off + 8 * i);
28838 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
28839 RTVEC_ELT (p, j++)
28840 = gen_frame_store (gen_rtx_REG (V4SImode,
28841 info->first_altivec_reg_save + i),
28842 frame_reg_rtx,
28843 info->altivec_save_offset + frame_off + 16 * i);
28844 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
28845 RTVEC_ELT (p, j++)
28846 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
28847 frame_reg_rtx,
28848 info->gp_save_offset + frame_off + reg_size * i);
28849
28850 /* CR register traditionally saved as CR2. */
28851 RTVEC_ELT (p, j++)
28852 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
28853 frame_reg_rtx, info->cr_save_offset + frame_off);
28854 /* Explain the use of R0. */
28855 if (info->lr_save_p)
28856 RTVEC_ELT (p, j++)
28857 = gen_frame_store (reg0,
28858 frame_reg_rtx, info->lr_save_offset + frame_off);
28859 /* Explain what happens to the stack pointer. */
28860 {
28861 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
28862 RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
28863 }
28864
28865 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
28866 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
28867 treg, GEN_INT (-info->total_size));
28868 sp_off = frame_off = info->total_size;
28869 }
28870
28871 strategy = info->savres_strategy;
28872
28873 /* For V.4, update stack before we do any saving and set back pointer. */
28874 if (! WORLD_SAVE_P (info)
28875 && info->push_p
28876 && (DEFAULT_ABI == ABI_V4
28877 || crtl->calls_eh_return))
28878 {
28879 bool need_r11 = (TARGET_SPE
28880 ? (!(strategy & SAVE_INLINE_GPRS)
28881 && info->spe_64bit_regs_used == 0)
28882 : (!(strategy & SAVE_INLINE_FPRS)
28883 || !(strategy & SAVE_INLINE_GPRS)
28884 || !(strategy & SAVE_INLINE_VRS)));
28885 int ptr_regno = -1;
28886 rtx ptr_reg = NULL_RTX;
28887 int ptr_off = 0;
28888
28889 if (info->total_size < 32767)
28890 frame_off = info->total_size;
28891 else if (need_r11)
28892 ptr_regno = 11;
28893 else if (info->cr_save_p
28894 || info->lr_save_p
28895 || info->first_fp_reg_save < 64
28896 || info->first_gp_reg_save < 32
28897 || info->altivec_size != 0
28898 || info->vrsave_size != 0
28899 || crtl->calls_eh_return)
28900 ptr_regno = 12;
28901 else
28902 {
28903 /* The prologue won't be saving any regs so there is no need
28904 to set up a frame register to access any frame save area.
28905 We also won't be using frame_off anywhere below, but set
28906 the correct value anyway to protect against future
28907 changes to this function. */
28908 frame_off = info->total_size;
28909 }
28910 if (ptr_regno != -1)
28911 {
28912 /* Set up the frame offset to that needed by the first
28913 out-of-line save function. */
28914 START_USE (ptr_regno);
28915 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
28916 frame_reg_rtx = ptr_reg;
28917 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
28918 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
28919 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
28920 ptr_off = info->gp_save_offset + info->gp_size;
28921 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
28922 ptr_off = info->altivec_save_offset + info->altivec_size;
28923 frame_off = -ptr_off;
28924 }
28925 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
28926 ptr_reg, ptr_off);
28927 if (REGNO (frame_reg_rtx) == 12)
28928 sp_adjust = 0;
28929 sp_off = info->total_size;
28930 if (frame_reg_rtx != sp_reg_rtx)
28931 rs6000_emit_stack_tie (frame_reg_rtx, false);
28932 }
28933
28934 /* If we use the link register, get it into r0. */
28935 if (!WORLD_SAVE_P (info) && info->lr_save_p
28936 && !cfun->machine->lr_is_wrapped_separately)
28937 {
28938 rtx addr, reg, mem;
28939
28940 reg = gen_rtx_REG (Pmode, 0);
28941 START_USE (0);
28942 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
28943 RTX_FRAME_RELATED_P (insn) = 1;
28944
28945 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
28946 | SAVE_NOINLINE_FPRS_SAVES_LR)))
28947 {
28948 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
28949 GEN_INT (info->lr_save_offset + frame_off));
28950 mem = gen_rtx_MEM (Pmode, addr);
28951 /* This should not be of rs6000_sr_alias_set, because of
28952 __builtin_return_address. */
28953
28954 insn = emit_move_insn (mem, reg);
28955 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
28956 NULL_RTX, NULL_RTX);
28957 END_USE (0);
28958 }
28959 }
28960
28961 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
28962 r12 will be needed by the out-of-line gpr save. */
28963 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28964 && !(strategy & (SAVE_INLINE_GPRS
28965 | SAVE_NOINLINE_GPRS_SAVES_LR))
28966 ? 11 : 12);
28967 if (!WORLD_SAVE_P (info)
28968 && info->cr_save_p
28969 && REGNO (frame_reg_rtx) != cr_save_regno
28970 && !(using_static_chain_p && cr_save_regno == 11)
28971 && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
28972 {
28973 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
28974 START_USE (cr_save_regno);
28975 rs6000_emit_move_from_cr (cr_save_rtx);
28976 }
28977
28978 /* Do any required saving of FPRs. If only one or two to save, do
28979 it ourselves. Otherwise, call an out-of-line save function. */
28980 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
28981 {
28982 int i;
28983 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
28984 if (save_reg_p (info->first_fp_reg_save + i))
28985 emit_frame_save (frame_reg_rtx,
28986 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
28987 ? DFmode : SFmode),
28988 info->first_fp_reg_save + i,
28989 info->fp_save_offset + frame_off + 8 * i,
28990 sp_off - frame_off);
28991 }
28992 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
28993 {
28994 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
28995 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
28996 unsigned ptr_regno = ptr_regno_for_savres (sel);
28997 rtx ptr_reg = frame_reg_rtx;
28998
28999 if (REGNO (frame_reg_rtx) == ptr_regno)
29000 gcc_checking_assert (frame_off == 0);
29001 else
29002 {
29003 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
29004 NOT_INUSE (ptr_regno);
29005 emit_insn (gen_add3_insn (ptr_reg,
29006 frame_reg_rtx, GEN_INT (frame_off)));
29007 }
29008 insn = rs6000_emit_savres_rtx (info, ptr_reg,
29009 info->fp_save_offset,
29010 info->lr_save_offset,
29011 DFmode, sel);
29012 rs6000_frame_related (insn, ptr_reg, sp_off,
29013 NULL_RTX, NULL_RTX);
29014 if (lr)
29015 END_USE (0);
29016 }
29017
29018 /* Save GPRs. This is done as a PARALLEL if we are using
29019 the store-multiple instructions. */
29020 if (!WORLD_SAVE_P (info)
29021 && TARGET_SPE_ABI
29022 && info->spe_64bit_regs_used != 0
29023 && info->first_gp_reg_save != 32)
29024 {
29025 int i;
29026 rtx spe_save_area_ptr;
29027 HOST_WIDE_INT save_off;
29028 int ool_adjust = 0;
29029
29030 /* Determine whether we can address all of the registers that need
29031 to be saved with an offset from frame_reg_rtx that fits in
29032 the small const field for SPE memory instructions. */
29033 int spe_regs_addressable
29034 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
29035 + reg_size * (32 - info->first_gp_reg_save - 1))
29036 && (strategy & SAVE_INLINE_GPRS));
29037
29038 if (spe_regs_addressable)
29039 {
29040 spe_save_area_ptr = frame_reg_rtx;
29041 save_off = frame_off;
29042 }
29043 else
29044 {
29045 /* Make r11 point to the start of the SPE save area. We need
29046 to be careful here if r11 is holding the static chain. If
29047 it is, then temporarily save it in r0. */
29048 HOST_WIDE_INT offset;
29049
29050 if (!(strategy & SAVE_INLINE_GPRS))
29051 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
29052 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
29053 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
29054 save_off = frame_off - offset;
29055
29056 if (using_static_chain_p)
29057 {
29058 rtx r0 = gen_rtx_REG (Pmode, 0);
29059
29060 START_USE (0);
29061 gcc_assert (info->first_gp_reg_save > 11);
29062
29063 emit_move_insn (r0, spe_save_area_ptr);
29064 }
29065 else if (REGNO (frame_reg_rtx) != 11)
29066 START_USE (11);
29067
29068 emit_insn (gen_addsi3 (spe_save_area_ptr,
29069 frame_reg_rtx, GEN_INT (offset)));
29070 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
29071 frame_off = -info->spe_gp_save_offset + ool_adjust;
29072 }
29073
29074 if ((strategy & SAVE_INLINE_GPRS))
29075 {
29076 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
29077 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
29078 emit_frame_save (spe_save_area_ptr, reg_mode,
29079 info->first_gp_reg_save + i,
29080 (info->spe_gp_save_offset + save_off
29081 + reg_size * i),
29082 sp_off - save_off);
29083 }
29084 else
29085 {
29086 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
29087 info->spe_gp_save_offset + save_off,
29088 0, reg_mode,
29089 SAVRES_SAVE | SAVRES_GPR);
29090
29091 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
29092 NULL_RTX, NULL_RTX);
29093 }
29094
29095 /* Move the static chain pointer back. */
29096 if (!spe_regs_addressable)
29097 {
29098 if (using_static_chain_p)
29099 {
29100 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
29101 END_USE (0);
29102 }
29103 else if (REGNO (frame_reg_rtx) != 11)
29104 END_USE (11);
29105 }
29106 }
29107 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
29108 {
29109 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
29110 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
29111 unsigned ptr_regno = ptr_regno_for_savres (sel);
29112 rtx ptr_reg = frame_reg_rtx;
29113 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
29114 int end_save = info->gp_save_offset + info->gp_size;
29115 int ptr_off;
29116
29117 if (ptr_regno == 12)
29118 sp_adjust = 0;
29119 if (!ptr_set_up)
29120 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
29121
29122 /* Need to adjust r11 (r12) if we saved any FPRs. */
29123 if (end_save + frame_off != 0)
29124 {
29125 rtx offset = GEN_INT (end_save + frame_off);
29126
29127 if (ptr_set_up)
29128 frame_off = -end_save;
29129 else
29130 NOT_INUSE (ptr_regno);
29131 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
29132 }
29133 else if (!ptr_set_up)
29134 {
29135 NOT_INUSE (ptr_regno);
29136 emit_move_insn (ptr_reg, frame_reg_rtx);
29137 }
29138 ptr_off = -end_save;
29139 insn = rs6000_emit_savres_rtx (info, ptr_reg,
29140 info->gp_save_offset + ptr_off,
29141 info->lr_save_offset + ptr_off,
29142 reg_mode, sel);
29143 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
29144 NULL_RTX, NULL_RTX);
29145 if (lr)
29146 END_USE (0);
29147 }
29148 else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
29149 {
29150 rtvec p;
29151 int i;
29152 p = rtvec_alloc (32 - info->first_gp_reg_save);
29153 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
29154 RTVEC_ELT (p, i)
29155 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
29156 frame_reg_rtx,
29157 info->gp_save_offset + frame_off + reg_size * i);
29158 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
29159 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
29160 NULL_RTX, NULL_RTX);
29161 }
29162 else if (!WORLD_SAVE_P (info))
29163 {
29164 int offset = info->gp_save_offset + frame_off;
29165 for (int i = info->first_gp_reg_save; i < 32; i++)
29166 {
29167 if (rs6000_reg_live_or_pic_offset_p (i)
29168 && !cfun->machine->gpr_is_wrapped_separately[i])
29169 emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
29170 sp_off - frame_off);
29171
29172 offset += reg_size;
29173 }
29174 }
29175
29176 if (crtl->calls_eh_return)
29177 {
29178 unsigned int i;
29179 rtvec p;
29180
29181 for (i = 0; ; ++i)
29182 {
29183 unsigned int regno = EH_RETURN_DATA_REGNO (i);
29184 if (regno == INVALID_REGNUM)
29185 break;
29186 }
29187
29188 p = rtvec_alloc (i);
29189
29190 for (i = 0; ; ++i)
29191 {
29192 unsigned int regno = EH_RETURN_DATA_REGNO (i);
29193 if (regno == INVALID_REGNUM)
29194 break;
29195
29196 rtx set
29197 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
29198 sp_reg_rtx,
29199 info->ehrd_offset + sp_off + reg_size * (int) i);
29200 RTVEC_ELT (p, i) = set;
29201 RTX_FRAME_RELATED_P (set) = 1;
29202 }
29203
29204 insn = emit_insn (gen_blockage ());
29205 RTX_FRAME_RELATED_P (insn) = 1;
29206 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
29207 }
29208
29209 /* In AIX ABI we need to make sure r2 is really saved. */
29210 if (TARGET_AIX && crtl->calls_eh_return)
29211 {
29212 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
29213 rtx join_insn, note;
29214 rtx_insn *save_insn;
29215 long toc_restore_insn;
29216
29217 tmp_reg = gen_rtx_REG (Pmode, 11);
29218 tmp_reg_si = gen_rtx_REG (SImode, 11);
29219 if (using_static_chain_p)
29220 {
29221 START_USE (0);
29222 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
29223 }
29224 else
29225 START_USE (11);
29226 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
29227 /* Peek at the instruction to which this function returns. If it's
29228 restoring r2, then we know we've already saved r2. We can't
29229 unconditionally save r2 because the value we have will already
29230 be updated if we arrived at this function via a PLT call or
29231 TOC-adjusting stub. */
29232 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
29233 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
29234 + RS6000_TOC_SAVE_SLOT);
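/* For reference (a hedged note, derived from the PowerPC encodings):
   0xE8410000 is the base encoding of "ld 2,0(1)" and 0x80410000 of
   "lwz 2,0(1)"; adding RS6000_TOC_SAVE_SLOT yields the exact
   instruction word, e.g. "ld 2,24(1)" for ELFv2, that a caller uses
   to restore r2 after a call through a TOC-adjusting stub.  */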
29235 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
29236 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
29237 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
29238 validate_condition_mode (EQ, CCUNSmode);
29239 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
29240 emit_insn (gen_rtx_SET (compare_result,
29241 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
29242 toc_save_done = gen_label_rtx ();
29243 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29244 gen_rtx_EQ (VOIDmode, compare_result,
29245 const0_rtx),
29246 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
29247 pc_rtx);
29248 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29249 JUMP_LABEL (jump) = toc_save_done;
29250 LABEL_NUSES (toc_save_done) += 1;
29251
29252 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
29253 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
29254 sp_off - frame_off);
29255
29256 emit_label (toc_save_done);
29257
29258 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
29259 have a CFG that has different saves along different paths.
29260 Move the note to a dummy blockage insn, which describes that
29261 R2 is unconditionally saved after the label. */
29262 /* ??? An alternate representation might be a special insn pattern
29263 containing both the branch and the store. That might give the
29264 code that minimizes the number of DW_CFA_advance opcodes more
29265 freedom in placing the annotations. */
29266 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
29267 if (note)
29268 remove_note (save_insn, note);
29269 else
29270 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
29271 copy_rtx (PATTERN (save_insn)), NULL_RTX);
29272 RTX_FRAME_RELATED_P (save_insn) = 0;
29273
29274 join_insn = emit_insn (gen_blockage ());
29275 REG_NOTES (join_insn) = note;
29276 RTX_FRAME_RELATED_P (join_insn) = 1;
29277
29278 if (using_static_chain_p)
29279 {
29280 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
29281 END_USE (0);
29282 }
29283 else
29284 END_USE (11);
29285 }
29286
29287 /* Save CR if we use any that must be preserved. */
29288 if (!WORLD_SAVE_P (info) && info->cr_save_p)
29289 {
29290 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
29291 GEN_INT (info->cr_save_offset + frame_off));
29292 rtx mem = gen_frame_mem (SImode, addr);
29293
29294 /* If we didn't copy cr before, do so now using r0. */
29295 if (cr_save_rtx == NULL_RTX)
29296 {
29297 START_USE (0);
29298 cr_save_rtx = gen_rtx_REG (SImode, 0);
29299 rs6000_emit_move_from_cr (cr_save_rtx);
29300 }
29301
29302 /* Saving CR requires a two-instruction sequence: one instruction
29303 to move the CR to a general-purpose register, and a second
29304 instruction that stores the GPR to memory.
29305
29306 We do not emit any DWARF CFI records for the first of these,
29307 because we cannot properly represent the fact that CR is saved in
29308 a register. One reason is that we cannot express that multiple
29309 CR fields are saved; another reason is that on 64-bit, the size
29310 of the CR register in DWARF (4 bytes) differs from the size of
29311 a general-purpose register.
29312
29313 This means if any intervening instruction were to clobber one of
29314 the call-saved CR fields, we'd have incorrect CFI. To prevent
29315 this from happening, we mark the store to memory as a use of
29316 those CR fields, which prevents any such instruction from being
29317 scheduled in between the two instructions. */
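/* Illustrative sketch of the PARALLEL built below, assuming CR2
   (reg:CC 70) is the only call-saved field in use:
     (parallel [(set (mem:SI ...) (reg:SI 0))
                (use (reg:CC 70))])
   i.e. a single store whose extra USEs pin the live CR fields so
   nothing that clobbers them can be scheduled before the store.  */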
29318 rtx crsave_v[9];
29319 int n_crsave = 0;
29320 int i;
29321
29322 crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
29323 for (i = 0; i < 8; i++)
29324 if (save_reg_p (CR0_REGNO + i))
29325 crsave_v[n_crsave++]
29326 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
29327
29328 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
29329 gen_rtvec_v (n_crsave, crsave_v)));
29330 END_USE (REGNO (cr_save_rtx));
29331
29332 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
29333 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
29334 so we need to construct a frame expression manually. */
29335 RTX_FRAME_RELATED_P (insn) = 1;
29336
29337 /* Update address to be stack-pointer relative, like
29338 rs6000_frame_related would do. */
29339 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
29340 GEN_INT (info->cr_save_offset + sp_off));
29341 mem = gen_frame_mem (SImode, addr);
29342
29343 if (DEFAULT_ABI == ABI_ELFv2)
29344 {
29345 /* In the ELFv2 ABI we generate separate CFI records for each
29346 CR field that was actually saved. They all point to the
29347 same 32-bit stack slot. */
29348 rtx crframe[8];
29349 int n_crframe = 0;
29350
29351 for (i = 0; i < 8; i++)
29352 if (save_reg_p (CR0_REGNO + i))
29353 {
29354 crframe[n_crframe]
29355 = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));
29356
29357 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
29358 n_crframe++;
29359 }
29360
29361 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
29362 gen_rtx_PARALLEL (VOIDmode,
29363 gen_rtvec_v (n_crframe, crframe)));
29364 }
29365 else
29366 {
29367 /* In other ABIs, by convention, we use a single CR regnum to
29368 represent the fact that all call-saved CR fields are saved.
29369 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
29370 rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
29371 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
29372 }
29373 }
29374
29375 /* In the ELFv2 ABI we need to save all call-saved CR fields into
29376 *separate* slots if the routine calls __builtin_eh_return, so
29377 that they can be independently restored by the unwinder. */
29378 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
29379 {
29380 int i, cr_off = info->ehcr_offset;
29381 rtx crsave;
29382
29383 /* ??? We might get better performance by using multiple mfocrf
29384 instructions. */
29385 crsave = gen_rtx_REG (SImode, 0);
29386 emit_insn (gen_movesi_from_cr (crsave));
29387
29388 for (i = 0; i < 8; i++)
29389 if (!call_used_regs[CR0_REGNO + i])
29390 {
29391 rtvec p = rtvec_alloc (2);
29392 RTVEC_ELT (p, 0)
29393 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
29394 RTVEC_ELT (p, 1)
29395 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
29396
29397 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
29398
29399 RTX_FRAME_RELATED_P (insn) = 1;
29400 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
29401 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
29402 sp_reg_rtx, cr_off + sp_off));
29403
29404 cr_off += reg_size;
29405 }
29406 }
29407
29408 /* Update stack and set back pointer unless this is V.4,
29409 for which it was done previously. */
29410 if (!WORLD_SAVE_P (info) && info->push_p
29411 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
29412 {
29413 rtx ptr_reg = NULL;
29414 int ptr_off = 0;
29415
29416 /* If saving AltiVec regs, we need to be able to address all save
29417 locations using a 16-bit offset. */
29418 if ((strategy & SAVE_INLINE_VRS) == 0
29419 || (info->altivec_size != 0
29420 && (info->altivec_save_offset + info->altivec_size - 16
29421 + info->total_size - frame_off) > 32767)
29422 || (info->vrsave_size != 0
29423 && (info->vrsave_save_offset
29424 + info->total_size - frame_off) > 32767))
29425 {
29426 int sel = SAVRES_SAVE | SAVRES_VR;
29427 unsigned ptr_regno = ptr_regno_for_savres (sel);
29428
29429 if (using_static_chain_p
29430 && ptr_regno == STATIC_CHAIN_REGNUM)
29431 ptr_regno = 12;
29432 if (REGNO (frame_reg_rtx) != ptr_regno)
29433 START_USE (ptr_regno);
29434 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
29435 frame_reg_rtx = ptr_reg;
29436 ptr_off = info->altivec_save_offset + info->altivec_size;
29437 frame_off = -ptr_off;
29438 }
29439 else if (REGNO (frame_reg_rtx) == 1)
29440 frame_off = info->total_size;
29441 sp_adjust = rs6000_emit_allocate_stack (info->total_size,
29442 ptr_reg, ptr_off);
29443 if (REGNO (frame_reg_rtx) == 12)
29444 sp_adjust = 0;
29445 sp_off = info->total_size;
29446 if (frame_reg_rtx != sp_reg_rtx)
29447 rs6000_emit_stack_tie (frame_reg_rtx, false);
29448 }
29449
29450 /* Set frame pointer, if needed. */
29451 if (frame_pointer_needed)
29452 {
29453 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
29454 sp_reg_rtx);
29455 RTX_FRAME_RELATED_P (insn) = 1;
29456 }
29457
29458 /* Save AltiVec registers if needed. Save here because the red zone does
29459 not always include AltiVec registers. */
29460 if (!WORLD_SAVE_P (info)
29461 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
29462 {
29463 int end_save = info->altivec_save_offset + info->altivec_size;
29464 int ptr_off;
29465 /* Oddly, the vector save/restore functions point r0 at the end
29466 of the save area, then use r11 or r12 to load offsets for
29467 [reg+reg] addressing. */
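/* So an out-of-line VR save ends up looking roughly like this
   (routine name illustrative, for ELF targets):
     addi 0,rFRAME,end_save    # r0 -> just past the VR save area
     bl _savevr_20             # uses r11 or r12 for [reg+reg]  */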
29468 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
29469 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
29470 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
29471
29472 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
29473 NOT_INUSE (0);
29474 if (scratch_regno == 12)
29475 sp_adjust = 0;
29476 if (end_save + frame_off != 0)
29477 {
29478 rtx offset = GEN_INT (end_save + frame_off);
29479
29480 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
29481 }
29482 else
29483 emit_move_insn (ptr_reg, frame_reg_rtx);
29484
29485 ptr_off = -end_save;
29486 insn = rs6000_emit_savres_rtx (info, scratch_reg,
29487 info->altivec_save_offset + ptr_off,
29488 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
29489 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
29490 NULL_RTX, NULL_RTX);
29491 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
29492 {
29493 /* The oddity mentioned above clobbered our frame reg. */
29494 emit_move_insn (frame_reg_rtx, ptr_reg);
29495 frame_off = ptr_off;
29496 }
29497 }
29498 else if (!WORLD_SAVE_P (info)
29499 && info->altivec_size != 0)
29500 {
29501 int i;
29502
29503 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
29504 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
29505 {
29506 rtx areg, savereg, mem;
29507 HOST_WIDE_INT offset;
29508
29509 offset = (info->altivec_save_offset + frame_off
29510 + 16 * (i - info->first_altivec_reg_save));
29511
29512 savereg = gen_rtx_REG (V4SImode, i);
29513
29514 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
29515 {
29516 mem = gen_frame_mem (V4SImode,
29517 gen_rtx_PLUS (Pmode, frame_reg_rtx,
29518 GEN_INT (offset)));
29519 insn = emit_insn (gen_rtx_SET (mem, savereg));
29520 areg = NULL_RTX;
29521 }
29522 else
29523 {
29524 NOT_INUSE (0);
29525 areg = gen_rtx_REG (Pmode, 0);
29526 emit_move_insn (areg, GEN_INT (offset));
29527
29528 /* AltiVec addressing mode is [reg+reg]. */
29529 mem = gen_frame_mem (V4SImode,
29530 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
29531
29532 /* Rather than emitting a generic move, force use of the stvx
29533 instruction, which we always want on ISA 2.07 (power8) systems.
29534 In particular we don't want xxpermdi/stxvd2x for little
29535 endian. */
29536 insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
29537 }
29538
29539 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
29540 areg, GEN_INT (offset));
29541 }
29542 }
29543
29544 /* VRSAVE is a bit vector representing which AltiVec registers
29545 are used. The OS uses this to determine which vector
29546 registers to save on a context switch. We need to save
29547 VRSAVE on the stack frame, add whatever AltiVec registers we
29548 used in this function, and do the corresponding magic in the
29549 epilogue. */
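/* A rough sketch of the sequence emitted below (non-Darwin case;
   VRSAVE is SPR 256):
     mfspr rN,256        # copy VRSAVE into a free GPR
     stw rN,OFF(frame)   # save the original value
     oris/ori rN,...     # or in this function's vector regs
     mtspr 256,rN        # update VRSAVE  */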
29550
29551 if (!WORLD_SAVE_P (info)
29552 && info->vrsave_size != 0)
29553 {
29554 rtx reg, vrsave;
29555 int offset;
29556 int save_regno;
29557
29558 /* Get VRSAVE onto a GPR. Note that ABI_V4 and ABI_DARWIN might
29559 be using r12 as frame_reg_rtx and r11 as the static chain
29560 pointer for nested functions. */
29561 save_regno = 12;
29562 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29563 && !using_static_chain_p)
29564 save_regno = 11;
29565 else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
29566 {
29567 save_regno = 11;
29568 if (using_static_chain_p)
29569 save_regno = 0;
29570 }
29571
29572 NOT_INUSE (save_regno);
29573 reg = gen_rtx_REG (SImode, save_regno);
29574 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
29575 if (TARGET_MACHO)
29576 emit_insn (gen_get_vrsave_internal (reg));
29577 else
29578 emit_insn (gen_rtx_SET (reg, vrsave));
29579
29580 /* Save VRSAVE. */
29581 offset = info->vrsave_save_offset + frame_off;
29582 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
29583
29584 /* Include the registers in the mask. */
29585 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
29586
29587 insn = emit_insn (generate_set_vrsave (reg, info, 0));
29588 }
29589
29590 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
29591 if (!TARGET_SINGLE_PIC_BASE
29592 && ((TARGET_TOC && TARGET_MINIMAL_TOC
29593 && !constant_pool_empty_p ())
29594 || (DEFAULT_ABI == ABI_V4
29595 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
29596 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
29597 {
29598 /* If emit_load_toc_table will use the link register, we need to save
29599 it. We use R12 for this purpose because emit_load_toc_table
29600 can use register 0. This allows us to use a plain 'blr' to return
29601 from the procedure more often. */
29602 int save_LR_around_toc_setup = (TARGET_ELF
29603 && DEFAULT_ABI == ABI_V4
29604 && flag_pic
29605 && ! info->lr_save_p
29606 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
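/* When the guard below is true, the emitted code is essentially:
     mflr 12
     ...TOC setup, which may clobber r0 and LR...
     mtlr 12
   (a sketch; the actual moves go through Pmode registers).  */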
29607 if (save_LR_around_toc_setup)
29608 {
29609 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
29610 rtx tmp = gen_rtx_REG (Pmode, 12);
29611
29612 sp_adjust = 0;
29613 insn = emit_move_insn (tmp, lr);
29614 RTX_FRAME_RELATED_P (insn) = 1;
29615
29616 rs6000_emit_load_toc_table (TRUE);
29617
29618 insn = emit_move_insn (lr, tmp);
29619 add_reg_note (insn, REG_CFA_RESTORE, lr);
29620 RTX_FRAME_RELATED_P (insn) = 1;
29621 }
29622 else
29623 rs6000_emit_load_toc_table (TRUE);
29624 }
29625
29626 #if TARGET_MACHO
29627 if (!TARGET_SINGLE_PIC_BASE
29628 && DEFAULT_ABI == ABI_DARWIN
29629 && flag_pic && crtl->uses_pic_offset_table)
29630 {
29631 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
29632 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
29633
29634 /* Save and restore LR locally around this call (in R0). */
29635 if (!info->lr_save_p)
29636 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
29637
29638 emit_insn (gen_load_macho_picbase (src));
29639
29640 emit_move_insn (gen_rtx_REG (Pmode,
29641 RS6000_PIC_OFFSET_TABLE_REGNUM),
29642 lr);
29643
29644 if (!info->lr_save_p)
29645 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
29646 }
29647 #endif
29648
29649 /* If we need to, save the TOC register after doing the stack setup.
29650 Do not emit eh frame info for this save. The unwinder wants info,
29651 conceptually attached to instructions in this function, about
29652 register values in the caller of this function. This R2 may have
29653 already been changed from the value in the caller.
29654 We don't attempt to write accurate DWARF EH frame info for R2
29655 because code emitted by gcc for a (non-pointer) function call
29656 doesn't save and restore R2. Instead, R2 is managed out-of-line
29657 by a linker-generated PLT call stub when the function resides in
29658 a shared library. This behavior is costly to describe in DWARF,
29659 both in terms of the size of DWARF info and the time taken in the
29660 unwinder to interpret it. R2 changes, apart from the
29661 calls_eh_return case earlier in this function, are handled by
29662 linux-unwind.h frob_update_context. */
29663 if (rs6000_save_toc_in_prologue_p ())
29664 {
29665 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
29666 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
29667 }
29668
29669 if (using_split_stack && split_stack_arg_pointer_used_p ())
29670 {
29671 /* Set up the arg pointer (r12) for -fsplit-stack code. If
29672 __morestack was called, it left the arg pointer to the old
29673 stack in r29. Otherwise, the arg pointer is the top of the
29674 current frame. */
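/* Hedged sketch of the push_p case below: cr7 was set by the
   split-stack prologue check, so we emit roughly
     bge 7,0f        # taken if __morestack was not called
     mr 12,29        # arg pointer = old stack left by __morestack
   0:  */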
29675 cfun->machine->split_stack_argp_used = true;
29676 if (sp_adjust)
29677 {
29678 rtx r12 = gen_rtx_REG (Pmode, 12);
29679 rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
29680 emit_insn_before (set_r12, sp_adjust);
29681 }
29682 else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
29683 {
29684 rtx r12 = gen_rtx_REG (Pmode, 12);
29685 if (frame_off == 0)
29686 emit_move_insn (r12, frame_reg_rtx);
29687 else
29688 emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
29689 }
29690 if (info->push_p)
29691 {
29692 rtx r12 = gen_rtx_REG (Pmode, 12);
29693 rtx r29 = gen_rtx_REG (Pmode, 29);
29694 rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
29695 rtx not_more = gen_label_rtx ();
29696 rtx jump;
29697
29698 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
29699 gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
29700 gen_rtx_LABEL_REF (VOIDmode, not_more),
29701 pc_rtx);
29702 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
29703 JUMP_LABEL (jump) = not_more;
29704 LABEL_NUSES (not_more) += 1;
29705 emit_move_insn (r12, r29);
29706 emit_label (not_more);
29707 }
29708 }
29709 }
29710
29711 /* Output .extern statements for the save/restore routines we use. */
29712
29713 static void
29714 rs6000_output_savres_externs (FILE *file)
29715 {
29716 rs6000_stack_t *info = rs6000_stack_info ();
29717
29718 if (TARGET_DEBUG_STACK)
29719 debug_stack_info (info);
29720
29721 /* Write .extern for any function we will call to save and restore
29722 fp values. */
29723 if (info->first_fp_reg_save < 64
29724 && !TARGET_MACHO
29725 && !TARGET_ELF)
29726 {
29727 char *name;
29728 int regno = info->first_fp_reg_save - 32;
29729
29730 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
29731 {
29732 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
29733 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
29734 name = rs6000_savres_routine_name (info, regno, sel);
29735 fprintf (file, "\t.extern %s\n", name);
29736 }
29737 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
29738 {
29739 bool lr = (info->savres_strategy
29740 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
29741 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
29742 name = rs6000_savres_routine_name (info, regno, sel);
29743 fprintf (file, "\t.extern %s\n", name);
29744 }
29745 }
29746 }
29747
29748 /* Write function prologue. */
29749
29750 static void
29751 rs6000_output_function_prologue (FILE *file,
29752 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
29753 {
29754 if (!cfun->is_thunk)
29755 rs6000_output_savres_externs (file);
29756
29757 /* ELFv2 ABI r2 setup code and local entry point. This must follow
29758 immediately after the global entry point label. */
29759 if (rs6000_global_entry_point_needed_p ())
29760 {
29761 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
29762
29763 (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);
29764
29765 if (TARGET_CMODEL != CMODEL_LARGE)
29766 {
29767 /* In the small and medium code models, we assume the TOC is less
29768 than 2 GB away from the text section, so it can be computed via
29769 the following two-instruction sequence. */
29770 char buf[256];
29771
29772 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
29773 fprintf (file, "0:\taddis 2,12,.TOC.-");
29774 assemble_name (file, buf);
29775 fprintf (file, "@ha\n");
29776 fprintf (file, "\taddi 2,2,.TOC.-");
29777 assemble_name (file, buf);
29778 fprintf (file, "@l\n");
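/* The resulting assembly looks like (label name illustrative):
   0:   addis 2,12,.TOC.-.LCF0@ha
        addi 2,2,.TOC.-.LCF0@l
   computing the TOC pointer in r2 from the global entry address
   in r12.  */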
29779 }
29780 else
29781 {
29782 /* In the large code model, we allow arbitrary offsets between the
29783 TOC and the text section, so we have to load the offset from
29784 memory. The data field is emitted directly before the global
29785 entry point in rs6000_elf_declare_function_name. */
29786 char buf[256];
29787
29788 #ifdef HAVE_AS_ENTRY_MARKERS
29789 /* If supported by the linker, emit a marker relocation. If the
29790 total code size of the final executable or shared library
29791 happens to fit into 2 GB after all, the linker will replace
29792 this code sequence with the sequence for the small or medium
29793 code model. */
29794 fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
29795 #endif
29796 fprintf (file, "\tld 2,");
29797 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
29798 assemble_name (file, buf);
29799 fprintf (file, "-");
29800 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
29801 assemble_name (file, buf);
29802 fprintf (file, "(12)\n");
29803 fprintf (file, "\tadd 2,2,12\n");
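/* The resulting assembly looks like (label names illustrative):
        .reloc .,R_PPC64_ENTRY   # if the assembler supports it
        ld 2,.LCL0-.LCF0(12)
        add 2,2,12
   loading the TOC-to-entry-point offset from the .LCL data word.  */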
29804 }
29805
29806 fputs ("\t.localentry\t", file);
29807 assemble_name (file, name);
29808 fputs (",.-", file);
29809 assemble_name (file, name);
29810 fputs ("\n", file);
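/* E.g. ".localentry foo,.-foo", recording how far the local entry
   point is from the global entry point.  */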
29811 }
29812
29813 /* Output -mprofile-kernel code. This needs to be done here instead of
29814 in output_function_profile since it must go after the ELFv2 ABI
29815 local entry point. */
29816 if (TARGET_PROFILE_KERNEL && crtl->profile)
29817 {
29818 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
29819 gcc_assert (!TARGET_32BIT);
29820
29821 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
29822
29823 /* In the ELFv2 ABI we have no compiler stack word. It is the
29824 responsibility of _mcount to preserve the static chain
29825 register if required. */
29826 if (DEFAULT_ABI != ABI_ELFv2
29827 && cfun->static_chain_decl != NULL)
29828 {
29829 asm_fprintf (file, "\tstd %s,24(%s)\n",
29830 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
29831 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
29832 asm_fprintf (file, "\tld %s,24(%s)\n",
29833 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
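/* Together with the mflr above this emits, as a sketch (the
   _mcount name comes from RS6000_MCOUNT and varies by target):
     mflr 0
     std 11,24(1)
     bl _mcount
     ld 11,24(1)  */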
29834 }
29835 else
29836 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
29837 }
29838
29839 rs6000_pic_labelno++;
29840 }
29841
29842 /* -mprofile-kernel code calls mcount before the function prologue,
29843 so a profiled leaf function should stay a leaf function. */
29844 static bool
29845 rs6000_keep_leaf_when_profiled ()
29846 {
29847 return TARGET_PROFILE_KERNEL;
29848 }
29849
29850 /* Non-zero if vmx regs are restored before the frame pop, zero if
29851 we restore after the pop when possible. */
29852 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
29853
29854 /* Restoring cr is a two step process: loading a reg from the frame
29855 save, then moving the reg to cr. For ABI_V4 we must let the
29856 unwinder know that the stack location is no longer valid at or
29857 before the stack deallocation, but we can't emit a cfa_restore for
29858 cr at the stack deallocation like we do for other registers.
29859 The trouble is that it is possible for the move to cr to be
29860 scheduled after the stack deallocation. So say exactly where cr
29861 is located on each of the two insns. */
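/* Hedged sketch of the two insns involved:
     lwz rN,OFF(frame)   # load_cr_save; REG_CFA_REGISTER goes here
     mtcrf MASK,rN       # restore_saved_cr; cfa_restores go here  */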
29862
29863 static rtx
29864 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
29865 {
29866 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
29867 rtx reg = gen_rtx_REG (SImode, regno);
29868 rtx_insn *insn = emit_move_insn (reg, mem);
29869
29870 if (!exit_func && DEFAULT_ABI == ABI_V4)
29871 {
29872 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
29873 rtx set = gen_rtx_SET (reg, cr);
29874
29875 add_reg_note (insn, REG_CFA_REGISTER, set);
29876 RTX_FRAME_RELATED_P (insn) = 1;
29877 }
29878 return reg;
29879 }
29880
29881 /* Reload CR from REG. */
29882
29883 static void
29884 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
29885 {
29886 int count = 0;
29887 int i;
29888
29889 if (using_mfcr_multiple)
29890 {
29891 for (i = 0; i < 8; i++)
29892 if (save_reg_p (CR0_REGNO + i))
29893 count++;
29894 gcc_assert (count);
29895 }
29896
29897 if (using_mfcr_multiple && count > 1)
29898 {
29899 rtx_insn *insn;
29900 rtvec p;
29901 int ndx;
29902
29903 p = rtvec_alloc (count);
29904
29905 ndx = 0;
29906 for (i = 0; i < 8; i++)
29907 if (save_reg_p (CR0_REGNO + i))
29908 {
29909 rtvec r = rtvec_alloc (2);
29910 RTVEC_ELT (r, 0) = reg;
29911 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
29912 RTVEC_ELT (p, ndx) =
29913 gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
29914 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
29915 ndx++;
29916 }
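/* The PARALLEL below assembles to a single mtcrf; each element's
   mask 1 << (7-i) selects CR field i, since mtcrf's FXM operand
   numbers CR0 from the most significant bit.  */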
29917 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
29918 gcc_assert (ndx == count);
29919
29920 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
29921 CR field separately. */
29922 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
29923 {
29924 for (i = 0; i < 8; i++)
29925 if (save_reg_p (CR0_REGNO + i))
29926 add_reg_note (insn, REG_CFA_RESTORE,
29927 gen_rtx_REG (SImode, CR0_REGNO + i));
29928
29929 RTX_FRAME_RELATED_P (insn) = 1;
29930 }
29931 }
29932 else
29933 for (i = 0; i < 8; i++)
29934 if (save_reg_p (CR0_REGNO + i))
29935 {
29936 rtx insn = emit_insn (gen_movsi_to_cr_one
29937 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
29938
29939 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
29940 CR field separately, attached to the insn that in fact
29941 restores this particular CR field. */
29942 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
29943 {
29944 add_reg_note (insn, REG_CFA_RESTORE,
29945 gen_rtx_REG (SImode, CR0_REGNO + i));
29946
29947 RTX_FRAME_RELATED_P (insn) = 1;
29948 }
29949 }
29950
29951 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
29952 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
29953 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
29954 {
29955 rtx_insn *insn = get_last_insn ();
29956 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
29957
29958 add_reg_note (insn, REG_CFA_RESTORE, cr);
29959 RTX_FRAME_RELATED_P (insn) = 1;
29960 }
29961 }
29962
29963 /* Like cr, the move to lr instruction can be scheduled after the
29964 stack deallocation, but unlike cr, its stack frame save is still
29965 valid. So we only need to emit the cfa_restore on the correct
29966 instruction. */
29967
29968 static void
29969 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
29970 {
29971 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
29972 rtx reg = gen_rtx_REG (Pmode, regno);
29973
29974 emit_move_insn (reg, mem);
29975 }
29976
29977 static void
29978 restore_saved_lr (int regno, bool exit_func)
29979 {
29980 rtx reg = gen_rtx_REG (Pmode, regno);
29981 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
29982 rtx_insn *insn = emit_move_insn (lr, reg);
29983
29984 if (!exit_func && flag_shrink_wrap)
29985 {
29986 add_reg_note (insn, REG_CFA_RESTORE, lr);
29987 RTX_FRAME_RELATED_P (insn) = 1;
29988 }
29989 }
29990
29991 static rtx
29992 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
29993 {
29994 if (DEFAULT_ABI == ABI_ELFv2)
29995 {
29996 int i;
29997 for (i = 0; i < 8; i++)
29998 if (save_reg_p (CR0_REGNO + i))
29999 {
30000 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
30001 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
30002 cfa_restores);
30003 }
30004 }
30005 else if (info->cr_save_p)
30006 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
30007 gen_rtx_REG (SImode, CR2_REGNO),
30008 cfa_restores);
30009
30010 if (info->lr_save_p)
30011 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
30012 gen_rtx_REG (Pmode, LR_REGNO),
30013 cfa_restores);
30014 return cfa_restores;
30015 }
30016
30017 /* Return true if OFFSET from stack pointer can be clobbered by signals.
30018 V.4 doesn't have any stack cushion; the AIX ABIs have 220 (32-bit)
30019 or 288 (64-bit) bytes below the stack pointer not clobbered by signals. */
30020
30021 static inline bool
30022 offset_below_red_zone_p (HOST_WIDE_INT offset)
30023 {
30024 return offset < (DEFAULT_ABI == ABI_V4
30025 ? 0
30026 : TARGET_32BIT ? -220 : -288);
30027 }
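/* For example, on 64-bit AIX/ELF a save slot more than 288 bytes
   below the stack pointer is outside the red zone: a signal
   arriving after the frame is popped could clobber it, so such
   slots must be reloaded before the stack pointer is reset.  */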
30028
30029 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
30030
30031 static void
30032 emit_cfa_restores (rtx cfa_restores)
30033 {
30034 rtx_insn *insn = get_last_insn ();
30035 rtx *loc = &REG_NOTES (insn);
30036
30037 while (*loc)
30038 loc = &XEXP (*loc, 1);
30039 *loc = cfa_restores;
30040 RTX_FRAME_RELATED_P (insn) = 1;
30041 }
30042
30043 /* Emit function epilogue as insns. */
30044
30045 void
30046 rs6000_emit_epilogue (int sibcall)
30047 {
30048 rs6000_stack_t *info;
30049 int restoring_GPRs_inline;
30050 int restoring_FPRs_inline;
30051 int using_load_multiple;
30052 int using_mtcr_multiple;
30053 int use_backchain_to_restore_sp;
30054 int restore_lr;
30055 int strategy;
30056 HOST_WIDE_INT frame_off = 0;
30057 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
30058 rtx frame_reg_rtx = sp_reg_rtx;
30059 rtx cfa_restores = NULL_RTX;
30060 rtx insn;
30061 rtx cr_save_reg = NULL_RTX;
30062 machine_mode reg_mode = Pmode;
30063 int reg_size = TARGET_32BIT ? 4 : 8;
30064 int i;
30065 bool exit_func;
30066 unsigned ptr_regno;
30067
30068 info = rs6000_stack_info ();
30069
30070 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
30071 {
30072 reg_mode = V2SImode;
30073 reg_size = 8;
30074 }
30075
30076 strategy = info->savres_strategy;
30077 using_load_multiple = strategy & REST_MULTIPLE;
30078 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
30079 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
30080 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
30081 || rs6000_cpu == PROCESSOR_PPC603
30082 || rs6000_cpu == PROCESSOR_PPC750
30083 || optimize_size);
30084 /* Restore via the backchain when we have a large frame, since this
30085 is more efficient than an addis, addi pair. The second condition
30086 here will not trigger at the moment; we don't actually need a
30087 frame pointer for alloca, but the generic parts of the compiler
30088 give us one anyway. */
30089 use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
30090 ? info->lr_save_offset
30091 : 0) > 32767
30092 || (cfun->calls_alloca
30093 && !frame_pointer_needed));
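/* In other words, for a large frame we reload the caller's SP with
   a single load of the back chain word, roughly "ld 11,0(1)",
   instead of reconstructing it with an addis/addi pair.  */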
30094 restore_lr = (info->lr_save_p
30095 && (restoring_FPRs_inline
30096 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
30097 && (restoring_GPRs_inline
30098 || info->first_fp_reg_save < 64)
30099 && !cfun->machine->lr_is_wrapped_separately);
30100
30101
30102 if (WORLD_SAVE_P (info))
30103 {
30104 int i, j;
30105 char rname[30];
30106 const char *alloc_rname;
30107 rtvec p;
30108
30109 /* eh_rest_world_r10 will return to the location saved in the LR
30110 stack slot (which is not likely to be our caller).
30111 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
30112 rest_world is similar, except any R10 parameter is ignored.
30113 The exception-handling stuff that was here in 2.95 is no
30114 longer necessary. */
30115
30116 p = rtvec_alloc (9
30117 + 32 - info->first_gp_reg_save
30118 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
30119 + 63 + 1 - info->first_fp_reg_save);
30120
30121 strcpy (rname, ((crtl->calls_eh_return) ?
30122 "*eh_rest_world_r10" : "*rest_world"));
30123 alloc_rname = ggc_strdup (rname);
30124
30125 j = 0;
30126 RTVEC_ELT (p, j++) = ret_rtx;
30127 RTVEC_ELT (p, j++)
30128 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
30129 /* The instruction pattern requires a clobber here;
30130 it is shared with the restVEC helper. */
30131 RTVEC_ELT (p, j++)
30132 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
30133
30134 {
30135 /* CR register traditionally saved as CR2. */
30136 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
30137 RTVEC_ELT (p, j++)
30138 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
30139 if (flag_shrink_wrap)
30140 {
30141 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
30142 gen_rtx_REG (Pmode, LR_REGNO),
30143 cfa_restores);
30144 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
30145 }
30146 }
30147
30148 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
30149 {
30150 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
30151 RTVEC_ELT (p, j++)
30152 = gen_frame_load (reg,
30153 frame_reg_rtx, info->gp_save_offset + reg_size * i);
30154 if (flag_shrink_wrap)
30155 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
30156 }
30157 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
30158 {
30159 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
30160 RTVEC_ELT (p, j++)
30161 = gen_frame_load (reg,
30162 frame_reg_rtx, info->altivec_save_offset + 16 * i);
30163 if (flag_shrink_wrap)
30164 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
30165 }
30166 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
30167 {
30168 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
30169 ? DFmode : SFmode),
30170 info->first_fp_reg_save + i);
30171 RTVEC_ELT (p, j++)
30172 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
30173 if (flag_shrink_wrap)
30174 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
30175 }
30176 RTVEC_ELT (p, j++)
30177 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
30178 RTVEC_ELT (p, j++)
30179 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
30180 RTVEC_ELT (p, j++)
30181 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
30182 RTVEC_ELT (p, j++)
30183 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
30184 RTVEC_ELT (p, j++)
30185 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
30186 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
30187
30188 if (flag_shrink_wrap)
30189 {
30190 REG_NOTES (insn) = cfa_restores;
30191 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
30192 RTX_FRAME_RELATED_P (insn) = 1;
30193 }
30194 return;
30195 }
30196
30197 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
30198 if (info->push_p)
30199 frame_off = info->total_size;
30200
30201 /* Restore AltiVec registers if we must do so before adjusting the
30202 stack. */
30203 if (info->altivec_size != 0
30204 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
30205 || (DEFAULT_ABI != ABI_V4
30206 && offset_below_red_zone_p (info->altivec_save_offset))))
30207 {
30208 int i;
30209 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
30210
30211 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
30212 if (use_backchain_to_restore_sp)
30213 {
30214 int frame_regno = 11;
30215
30216 if ((strategy & REST_INLINE_VRS) == 0)
30217 {
30218 /* Of r11 and r12, select the one not clobbered by an
30219 out-of-line restore function for the frame register. */
30220 frame_regno = 11 + 12 - scratch_regno;
30221 }
30222 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
30223 emit_move_insn (frame_reg_rtx,
30224 gen_rtx_MEM (Pmode, sp_reg_rtx));
30225 frame_off = 0;
30226 }
30227 else if (frame_pointer_needed)
30228 frame_reg_rtx = hard_frame_pointer_rtx;
30229
30230 if ((strategy & REST_INLINE_VRS) == 0)
30231 {
30232 int end_save = info->altivec_save_offset + info->altivec_size;
30233 int ptr_off;
30234 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
30235 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
30236
30237 if (end_save + frame_off != 0)
30238 {
30239 rtx offset = GEN_INT (end_save + frame_off);
30240
30241 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
30242 }
30243 else
30244 emit_move_insn (ptr_reg, frame_reg_rtx);
30245
30246 ptr_off = -end_save;
30247 insn = rs6000_emit_savres_rtx (info, scratch_reg,
30248 info->altivec_save_offset + ptr_off,
30249 0, V4SImode, SAVRES_VR);
30250 }
30251 else
30252 {
30253 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
30254 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
30255 {
30256 rtx addr, areg, mem, insn;
30257 rtx reg = gen_rtx_REG (V4SImode, i);
30258 HOST_WIDE_INT offset
30259 = (info->altivec_save_offset + frame_off
30260 + 16 * (i - info->first_altivec_reg_save));
30261
30262 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
30263 {
30264 mem = gen_frame_mem (V4SImode,
30265 gen_rtx_PLUS (Pmode, frame_reg_rtx,
30266 GEN_INT (offset)));
30267 insn = gen_rtx_SET (reg, mem);
30268 }
30269 else
30270 {
30271 areg = gen_rtx_REG (Pmode, 0);
30272 emit_move_insn (areg, GEN_INT (offset));
30273
30274 /* AltiVec addressing mode is [reg+reg]. */
30275 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
30276 mem = gen_frame_mem (V4SImode, addr);
30277
30278 /* Rather than emitting a generic move, force use of the
30279 lvx instruction, which we always want. In particular we
30280 don't want lxvd2x/xxpermdi for little endian. */
30281 insn = gen_altivec_lvx_v4si_internal (reg, mem);
30282 }
30283
30284 (void) emit_insn (insn);
30285 }
30286 }
30287
30288 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
30289 if (((strategy & REST_INLINE_VRS) == 0
30290 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
30291 && (flag_shrink_wrap
30292 || (offset_below_red_zone_p
30293 (info->altivec_save_offset
30294 + 16 * (i - info->first_altivec_reg_save)))))
30295 {
30296 rtx reg = gen_rtx_REG (V4SImode, i);
30297 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
30298 }
30299 }
30300
30301 /* Restore VRSAVE if we must do so before adjusting the stack. */
30302 if (info->vrsave_size != 0
30303 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
30304 || (DEFAULT_ABI != ABI_V4
30305 && offset_below_red_zone_p (info->vrsave_save_offset))))
30306 {
30307 rtx reg;
30308
30309 if (frame_reg_rtx == sp_reg_rtx)
30310 {
30311 if (use_backchain_to_restore_sp)
30312 {
30313 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
30314 emit_move_insn (frame_reg_rtx,
30315 gen_rtx_MEM (Pmode, sp_reg_rtx));
30316 frame_off = 0;
30317 }
30318 else if (frame_pointer_needed)
30319 frame_reg_rtx = hard_frame_pointer_rtx;
30320 }
30321
30322 reg = gen_rtx_REG (SImode, 12);
30323 emit_insn (gen_frame_load (reg, frame_reg_rtx,
30324 info->vrsave_save_offset + frame_off));
30325
30326 emit_insn (generate_set_vrsave (reg, info, 1));
30327 }
30328
30329 insn = NULL_RTX;
30330 /* If we have a large stack frame, restore the old stack pointer
30331 using the backchain. */
30332 if (use_backchain_to_restore_sp)
30333 {
30334 if (frame_reg_rtx == sp_reg_rtx)
30335 {
30336 /* Under V.4, don't reset the stack pointer until after we're done
30337 loading the saved registers. */
30338 if (DEFAULT_ABI == ABI_V4)
30339 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
30340
30341 insn = emit_move_insn (frame_reg_rtx,
30342 gen_rtx_MEM (Pmode, sp_reg_rtx));
30343 frame_off = 0;
30344 }
30345 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
30346 && DEFAULT_ABI == ABI_V4)
30347 /* frame_reg_rtx has been set up by the altivec restore. */
30348 ;
30349 else
30350 {
30351 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
30352 frame_reg_rtx = sp_reg_rtx;
30353 }
30354 }
30355 /* If we have a frame pointer, we can restore the old stack pointer
30356 from it. */
30357 else if (frame_pointer_needed)
30358 {
30359 frame_reg_rtx = sp_reg_rtx;
30360 if (DEFAULT_ABI == ABI_V4)
30361 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
30362 /* Prevent reordering memory accesses against stack pointer restore. */
30363 else if (cfun->calls_alloca
30364 || offset_below_red_zone_p (-info->total_size))
30365 rs6000_emit_stack_tie (frame_reg_rtx, true);
30366
30367 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
30368 GEN_INT (info->total_size)));
30369 frame_off = 0;
30370 }
30371 else if (info->push_p
30372 && DEFAULT_ABI != ABI_V4
30373 && !crtl->calls_eh_return)
30374 {
30375 /* Prevent reordering memory accesses against stack pointer restore. */
30376 if (cfun->calls_alloca
30377 || offset_below_red_zone_p (-info->total_size))
30378 rs6000_emit_stack_tie (frame_reg_rtx, false);
30379 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
30380 GEN_INT (info->total_size)));
30381 frame_off = 0;
30382 }
30383 if (insn && frame_reg_rtx == sp_reg_rtx)
30384 {
30385 if (cfa_restores)
30386 {
30387 REG_NOTES (insn) = cfa_restores;
30388 cfa_restores = NULL_RTX;
30389 }
30390 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
30391 RTX_FRAME_RELATED_P (insn) = 1;
30392 }
30393
30394 /* Restore AltiVec registers if we have not done so already. */
30395 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
30396 && info->altivec_size != 0
30397 && (DEFAULT_ABI == ABI_V4
30398 || !offset_below_red_zone_p (info->altivec_save_offset)))
30399 {
30400 int i;
30401
30402 if ((strategy & REST_INLINE_VRS) == 0)
30403 {
30404 int end_save = info->altivec_save_offset + info->altivec_size;
30405 int ptr_off;
30406 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
30407 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
30408 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
30409
30410 if (end_save + frame_off != 0)
30411 {
30412 rtx offset = GEN_INT (end_save + frame_off);
30413
30414 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
30415 }
30416 else
30417 emit_move_insn (ptr_reg, frame_reg_rtx);
30418
30419 ptr_off = -end_save;
30420 insn = rs6000_emit_savres_rtx (info, scratch_reg,
30421 info->altivec_save_offset + ptr_off,
30422 0, V4SImode, SAVRES_VR);
30423 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
30424 {
30425 /* Frame reg was clobbered by out-of-line save. Restore it
30426 from ptr_reg, and if we are calling out-of-line gpr or
30427 fpr restore set up the correct pointer and offset. */
30428 unsigned newptr_regno = 1;
30429 if (!restoring_GPRs_inline)
30430 {
30431 bool lr = info->gp_save_offset + info->gp_size == 0;
30432 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
30433 newptr_regno = ptr_regno_for_savres (sel);
30434 end_save = info->gp_save_offset + info->gp_size;
30435 }
30436 else if (!restoring_FPRs_inline)
30437 {
30438 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
30439 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
30440 newptr_regno = ptr_regno_for_savres (sel);
30441 end_save = info->fp_save_offset + info->fp_size;
30442 }
30443
30444 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
30445 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
30446
30447 if (end_save + ptr_off != 0)
30448 {
30449 rtx offset = GEN_INT (end_save + ptr_off);
30450
30451 frame_off = -end_save;
30452 if (TARGET_32BIT)
30453 emit_insn (gen_addsi3_carry (frame_reg_rtx,
30454 ptr_reg, offset));
30455 else
30456 emit_insn (gen_adddi3_carry (frame_reg_rtx,
30457 ptr_reg, offset));
30458 }
30459 else
30460 {
30461 frame_off = ptr_off;
30462 emit_move_insn (frame_reg_rtx, ptr_reg);
30463 }
30464 }
30465 }
30466 else
30467 {
30468 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
30469 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
30470 {
30471 rtx addr, areg, mem, insn;
30472 rtx reg = gen_rtx_REG (V4SImode, i);
30473 HOST_WIDE_INT offset
30474 = (info->altivec_save_offset + frame_off
30475 + 16 * (i - info->first_altivec_reg_save));
30476
30477 if (TARGET_P9_DFORM_VECTOR && quad_address_offset_p (offset))
30478 {
30479 mem = gen_frame_mem (V4SImode,
30480 gen_rtx_PLUS (Pmode, frame_reg_rtx,
30481 GEN_INT (offset)));
30482 insn = gen_rtx_SET (reg, mem);
30483 }
30484 else
30485 {
30486 areg = gen_rtx_REG (Pmode, 0);
30487 emit_move_insn (areg, GEN_INT (offset));
30488
30489 /* AltiVec addressing mode is [reg+reg]. */
30490 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
30491 mem = gen_frame_mem (V4SImode, addr);
30492
30493 /* Rather than emitting a generic move, force use of the
30494 lvx instruction, which we always want. In particular we
30495 don't want lxvd2x/xxpermdi for little endian. */
30496 insn = gen_altivec_lvx_v4si_internal (reg, mem);
30497 }
30498
30499 (void) emit_insn (insn);
30500 }
30501 }
30502
30503 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
30504 if (((strategy & REST_INLINE_VRS) == 0
30505 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
30506 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
30507 {
30508 rtx reg = gen_rtx_REG (V4SImode, i);
30509 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
30510 }
30511 }
30512
30513 /* Restore VRSAVE if we have not done so already. */
30514 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
30515 && info->vrsave_size != 0
30516 && (DEFAULT_ABI == ABI_V4
30517 || !offset_below_red_zone_p (info->vrsave_save_offset)))
30518 {
30519 rtx reg;
30520
30521 reg = gen_rtx_REG (SImode, 12);
30522 emit_insn (gen_frame_load (reg, frame_reg_rtx,
30523 info->vrsave_save_offset + frame_off));
30524
30525 emit_insn (generate_set_vrsave (reg, info, 1));
30526 }
30527
30528 /* If we exit by an out-of-line restore function on ABI_V4 then that
30529 function will deallocate the stack, so we don't need to worry
30530 about the unwinder restoring cr from an invalid stack frame
30531 location. */
30532 exit_func = (!restoring_FPRs_inline
30533 || (!restoring_GPRs_inline
30534 && info->first_fp_reg_save == 64));
30535
30536 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
30537 *separate* slots if the routine calls __builtin_eh_return, so
30538 that they can be independently restored by the unwinder. */
30539 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
30540 {
30541 int i, cr_off = info->ehcr_offset;
30542
30543 for (i = 0; i < 8; i++)
30544 if (!call_used_regs[CR0_REGNO + i])
30545 {
30546 rtx reg = gen_rtx_REG (SImode, 0);
30547 emit_insn (gen_frame_load (reg, frame_reg_rtx,
30548 cr_off + frame_off));
30549
30550 insn = emit_insn (gen_movsi_to_cr_one
30551 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
30552
30553 if (!exit_func && flag_shrink_wrap)
30554 {
30555 add_reg_note (insn, REG_CFA_RESTORE,
30556 gen_rtx_REG (SImode, CR0_REGNO + i));
30557
30558 RTX_FRAME_RELATED_P (insn) = 1;
30559 }
30560
30561 cr_off += reg_size;
30562 }
30563 }
30564
30565 /* Get the old lr if we saved it. If we are restoring registers
30566 out-of-line, then the out-of-line routines can do this for us. */
30567 if (restore_lr && restoring_GPRs_inline)
30568 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
30569
30570 /* Get the old cr if we saved it. */
30571 if (info->cr_save_p)
30572 {
30573 unsigned cr_save_regno = 12;
30574
30575 if (!restoring_GPRs_inline)
30576 {
30577 /* Ensure we don't use the register used by the out-of-line
30578 gpr register restore below. */
30579 bool lr = info->gp_save_offset + info->gp_size == 0;
30580 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
30581 int gpr_ptr_regno = ptr_regno_for_savres (sel);
30582
30583 if (gpr_ptr_regno == 12)
30584 cr_save_regno = 11;
30585 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
30586 }
30587 else if (REGNO (frame_reg_rtx) == 12)
30588 cr_save_regno = 11;
30589
30590 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
30591 info->cr_save_offset + frame_off,
30592 exit_func);
30593 }
30594
30595 /* Set LR here to try to overlap restores below. */
30596 if (restore_lr && restoring_GPRs_inline)
30597 restore_saved_lr (0, exit_func);
30598
30599 /* Load exception handler data registers, if needed. */
30600 if (crtl->calls_eh_return)
30601 {
30602 unsigned int i, regno;
30603
30604 if (TARGET_AIX)
30605 {
30606 rtx reg = gen_rtx_REG (reg_mode, 2);
30607 emit_insn (gen_frame_load (reg, frame_reg_rtx,
30608 frame_off + RS6000_TOC_SAVE_SLOT));
30609 }
30610
30611 for (i = 0; ; ++i)
30612 {
30613 rtx mem;
30614
30615 regno = EH_RETURN_DATA_REGNO (i);
30616 if (regno == INVALID_REGNUM)
30617 break;
30618
30619 /* Note: possible use of r0 here to address SPE regs. */
30620 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
30621 info->ehrd_offset + frame_off
30622 + reg_size * (int) i);
30623
30624 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
30625 }
30626 }
30627
30628 /* Restore GPRs. This is done as a PARALLEL if we are using
30629 the load-multiple instructions. */
30630 if (TARGET_SPE_ABI
30631 && info->spe_64bit_regs_used
30632 && info->first_gp_reg_save != 32)
30633 {
30634 /* Determine whether we can address all of the registers that need
30635 to be saved with an offset from frame_reg_rtx that fits in
30636 the small const field for SPE memory instructions. */
30637 int spe_regs_addressable
30638 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
30639 + reg_size * (32 - info->first_gp_reg_save - 1))
30640 && restoring_GPRs_inline);
30641
30642 if (!spe_regs_addressable)
30643 {
30644 int ool_adjust = 0;
30645 rtx old_frame_reg_rtx = frame_reg_rtx;
30646 /* Make r11 point to the start of the SPE save area. We worried about
30647 not clobbering it when we were saving registers in the prologue.
30648 There's no need to worry here because the static chain is passed
30649 anew to every function. */
30650
30651 if (!restoring_GPRs_inline)
30652 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
30653 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
30654 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
30655 GEN_INT (info->spe_gp_save_offset
30656 + frame_off
30657 - ool_adjust)));
30658 /* Keep the invariant that frame_reg_rtx + frame_off points
30659 at the top of the stack frame. */
30660 frame_off = -info->spe_gp_save_offset + ool_adjust;
30661 }
30662
30663 if (restoring_GPRs_inline)
30664 {
30665 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
30666
30667 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
30668 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
30669 {
30670 rtx offset, addr, mem, reg;
30671
30672 /* We're doing all this to ensure that the immediate offset
30673 fits into the immediate field of 'evldd'. */
30674 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
30675
30676 offset = GEN_INT (spe_offset + reg_size * i);
30677 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
30678 mem = gen_rtx_MEM (V2SImode, addr);
30679 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
30680
30681 emit_move_insn (reg, mem);
30682 }
30683 }
30684 else
30685 rs6000_emit_savres_rtx (info, frame_reg_rtx,
30686 info->spe_gp_save_offset + frame_off,
30687 info->lr_save_offset + frame_off,
30688 reg_mode,
30689 SAVRES_GPR | SAVRES_LR);
30690 }
30691 else if (!restoring_GPRs_inline)
30692 {
30693 /* We are jumping to an out-of-line function. */
30694 rtx ptr_reg;
30695 int end_save = info->gp_save_offset + info->gp_size;
30696 bool can_use_exit = end_save == 0;
30697 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
30698 int ptr_off;
30699
30700 /* Emit stack reset code if we need it. */
30701 ptr_regno = ptr_regno_for_savres (sel);
30702 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
30703 if (can_use_exit)
30704 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
30705 else if (end_save + frame_off != 0)
30706 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
30707 GEN_INT (end_save + frame_off)));
30708 else if (REGNO (frame_reg_rtx) != ptr_regno)
30709 emit_move_insn (ptr_reg, frame_reg_rtx);
30710 if (REGNO (frame_reg_rtx) == ptr_regno)
30711 frame_off = -end_save;
30712
30713 if (can_use_exit && info->cr_save_p)
30714 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
30715
30716 ptr_off = -end_save;
30717 rs6000_emit_savres_rtx (info, ptr_reg,
30718 info->gp_save_offset + ptr_off,
30719 info->lr_save_offset + ptr_off,
30720 reg_mode, sel);
30721 }
30722 else if (using_load_multiple)
30723 {
30724 rtvec p;
30725 p = rtvec_alloc (32 - info->first_gp_reg_save);
30726 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
30727 RTVEC_ELT (p, i)
30728 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
30729 frame_reg_rtx,
30730 info->gp_save_offset + frame_off + reg_size * i);
30731 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
30732 }
30733 else
30734 {
30735 int offset = info->gp_save_offset + frame_off;
30736 for (i = info->first_gp_reg_save; i < 32; i++)
30737 {
30738 if (rs6000_reg_live_or_pic_offset_p (i)
30739 && !cfun->machine->gpr_is_wrapped_separately[i])
30740 {
30741 rtx reg = gen_rtx_REG (reg_mode, i);
30742 emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
30743 }
30744
30745 offset += reg_size;
30746 }
30747 }
30748
30749 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
30750 {
30751 /* If the frame pointer was used then we can't delay emitting
30752 a REG_CFA_DEF_CFA note. This must happen on the insn that
30753 restores the frame pointer, r31. We may have already emitted
30754 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
30755 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
30756 be harmless if emitted. */
30757 if (frame_pointer_needed)
30758 {
30759 insn = get_last_insn ();
30760 add_reg_note (insn, REG_CFA_DEF_CFA,
30761 plus_constant (Pmode, frame_reg_rtx, frame_off));
30762 RTX_FRAME_RELATED_P (insn) = 1;
30763 }
30764
30765 /* Set up cfa_restores. We always need these when
30766 shrink-wrapping. If not shrink-wrapping then we only need
30767 the cfa_restore when the stack location is no longer valid.
30768 The cfa_restores must be emitted on or before the insn that
30769 invalidates the stack, and of course must not be emitted
30770 before the insn that actually does the restore. The latter
30771 is why it is a bad idea to emit the cfa_restores as a group
30772 on the last instruction here that actually does a restore:
30773 That insn may be reordered with respect to others doing
30774 restores. */
30775 if (flag_shrink_wrap
30776 && !restoring_GPRs_inline
30777 && info->first_fp_reg_save == 64)
30778 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
30779
30780 for (i = info->first_gp_reg_save; i < 32; i++)
30781 if (!restoring_GPRs_inline
30782 || using_load_multiple
30783 || rs6000_reg_live_or_pic_offset_p (i))
30784 {
30785 if (cfun->machine->gpr_is_wrapped_separately[i])
30786 continue;
30787
30788 rtx reg = gen_rtx_REG (reg_mode, i);
30789 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
30790 }
30791 }
30792
30793 if (!restoring_GPRs_inline
30794 && info->first_fp_reg_save == 64)
30795 {
30796 /* We are jumping to an out-of-line function. */
30797 if (cfa_restores)
30798 emit_cfa_restores (cfa_restores);
30799 return;
30800 }
30801
30802 if (restore_lr && !restoring_GPRs_inline)
30803 {
30804 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
30805 restore_saved_lr (0, exit_func);
30806 }
30807
30808 /* Restore fpr's if we need to do it without calling a function. */
30809 if (restoring_FPRs_inline)
30810 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
30811 if (save_reg_p (info->first_fp_reg_save + i))
30812 {
30813 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
30814 ? DFmode : SFmode),
30815 info->first_fp_reg_save + i);
30816 emit_insn (gen_frame_load (reg, frame_reg_rtx,
30817 info->fp_save_offset + frame_off + 8 * i));
30818 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
30819 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
30820 }
30821
30822 /* If we saved cr, restore it here. Just those that were used. */
30823 if (info->cr_save_p)
30824 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
30825
30826 /* If this is V.4, unwind the stack pointer after all of the loads
30827 have been done, or set up r11 if we are restoring fp out of line. */
30828 ptr_regno = 1;
30829 if (!restoring_FPRs_inline)
30830 {
30831 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
30832 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
30833 ptr_regno = ptr_regno_for_savres (sel);
30834 }
30835
30836 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
30837 if (REGNO (frame_reg_rtx) == ptr_regno)
30838 frame_off = 0;
30839
30840 if (insn && restoring_FPRs_inline)
30841 {
30842 if (cfa_restores)
30843 {
30844 REG_NOTES (insn) = cfa_restores;
30845 cfa_restores = NULL_RTX;
30846 }
30847 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
30848 RTX_FRAME_RELATED_P (insn) = 1;
30849 }
30850
30851 if (crtl->calls_eh_return)
30852 {
30853 rtx sa = EH_RETURN_STACKADJ_RTX;
30854 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
30855 }
30856
30857 if (!sibcall && restoring_FPRs_inline)
30858 {
30859 if (cfa_restores)
30860 {
30861 /* We can't hang the cfa_restores off a simple return,
30862 since the shrink-wrap code sometimes uses an existing
30863 return. This means there might be a path from
30864 pre-prologue code to this return, and dwarf2cfi code
30865 wants the eh_frame unwinder state to be the same on
30866 all paths to any point. So we need to emit the
30867 cfa_restores before the return. For -m64 we really
30868 don't need epilogue cfa_restores at all, except for
30869 this irritating dwarf2cfi-with-shrink-wrap
30870 requirement; the stack red-zone means eh_frame info
30871 from the prologue telling the unwinder to restore
30872 from the stack is perfectly good right to the end of
30873 the function. */
30874 emit_insn (gen_blockage ());
30875 emit_cfa_restores (cfa_restores);
30876 cfa_restores = NULL_RTX;
30877 }
30878
30879 emit_jump_insn (targetm.gen_simple_return ());
30880 }
30881
30882 if (!sibcall && !restoring_FPRs_inline)
30883 {
30884 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
30885 rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
30886 int elt = 0;
30887 RTVEC_ELT (p, elt++) = ret_rtx;
30888 if (lr)
30889 RTVEC_ELT (p, elt++)
30890 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
30891
30892 /* We have to restore more than two FP registers, so branch to the
30893 restore function. It will return to our caller. */
30894 int i;
30895 int reg;
30896 rtx sym;
30897
30898 if (flag_shrink_wrap)
30899 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
30900
30901 sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
30902 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
30903 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
30904 RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
30905
30906 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
30907 {
30908 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
30909
30910 RTVEC_ELT (p, elt++)
30911 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
30912 if (flag_shrink_wrap)
30913 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
30914 }
30915
30916 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
30917 }
30918
30919 if (cfa_restores)
30920 {
30921 if (sibcall)
30922 /* Ensure the cfa_restores are hung off an insn that won't
30923 be reordered above other restores. */
30924 emit_insn (gen_blockage ());
30925
30926 emit_cfa_restores (cfa_restores);
30927 }
30928 }
30929
30930 /* Write function epilogue. */
30931
30932 static void
30933 rs6000_output_function_epilogue (FILE *file,
30934 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
30935 {
30936 #if TARGET_MACHO
30937 macho_branch_islands ();
30938
30939 {
30940 rtx_insn *insn = get_last_insn ();
30941 rtx_insn *deleted_debug_label = NULL;
30942
30943 /* Mach-O doesn't support labels at the end of objects, so if
30944 it looks like we might want one, take special action.
30945
30946 First, collect any sequence of deleted debug labels. */
30947 while (insn
30948 && NOTE_P (insn)
30949 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
30950 {
30951 /* For NOTE_INSN_DELETED_DEBUG_LABEL notes only, don't insert
30952 a nop; instead set their CODE_LABEL_NUMBER to -1. Otherwise
30953 there would be code generation differences
30954 between -g and -g0. */
30955 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
30956 deleted_debug_label = insn;
30957 insn = PREV_INSN (insn);
30958 }
30959
30960 /* Second, if we have:
30961 label:
30962 barrier
30963 then this needs to be detected, so skip past the barrier. */
30964
30965 if (insn && BARRIER_P (insn))
30966 insn = PREV_INSN (insn);
30967
30968 /* Up to now we've only seen notes or barriers. */
30969 if (insn)
30970 {
30971 if (LABEL_P (insn)
30972 || (NOTE_P (insn)
30973 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
30974 /* Trailing label: <barrier>. */
30975 fputs ("\tnop\n", file);
30976 else
30977 {
30978 /* Lastly, see if we have a completely empty function body. */
30979 while (insn && ! INSN_P (insn))
30980 insn = PREV_INSN (insn);
30981 /* If we don't find any insns, we've got an empty function body;
30982 i.e. completely empty, without a return or branch. This is
30983 taken as the case where a function body has been removed
30984 because it contains an inline __builtin_unreachable(). GCC
30985 states that reaching __builtin_unreachable() means UB so we're
30986 not obliged to do anything special; however, we want
30987 non-zero-sized function bodies. To meet this, and help the
30988 user out, let's trap the case. */
30989 if (insn == NULL)
30990 fputs ("\ttrap\n", file);
30991 }
30992 }
30993 else if (deleted_debug_label)
30994 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
30995 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
30996 CODE_LABEL_NUMBER (insn) = -1;
30997 }
30998 #endif
30999
31000 /* Output a traceback table here. See /usr/include/sys/debug.h for info
31001 on its format.
31002
31003 We don't output a traceback table if -finhibit-size-directive was
31004 used. The documentation for -finhibit-size-directive reads
31005 ``don't output a @code{.size} assembler directive, or anything
31006 else that would cause trouble if the function is split in the
31007 middle, and the two halves are placed at locations far apart in
31008 memory.'' The traceback table has this property, since it
31009 includes the offset from the start of the function to the
31010 traceback table itself.
31011
31012 System V.4 PowerPC (and the embedded ABI derived from it) uses a
31013 different traceback table. */
31014 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
31015 && ! flag_inhibit_size_directive
31016 && rs6000_traceback != traceback_none && !cfun->is_thunk)
31017 {
31018 const char *fname = NULL;
31019 const char *language_string = lang_hooks.name;
31020 int fixed_parms = 0, float_parms = 0, parm_info = 0;
31021 int i;
31022 int optional_tbtab;
31023 rs6000_stack_t *info = rs6000_stack_info ();
31024
31025 if (rs6000_traceback == traceback_full)
31026 optional_tbtab = 1;
31027 else if (rs6000_traceback == traceback_part)
31028 optional_tbtab = 0;
31029 else
31030 optional_tbtab = !optimize_size && !TARGET_ELF;
31031
31032 if (optional_tbtab)
31033 {
31034 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
31035 while (*fname == '.') /* V.4 encodes . in the name */
31036 fname++;
31037
31038 /* Need label immediately before tbtab, so we can compute
31039 its offset from the function start. */
31040 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
31041 ASM_OUTPUT_LABEL (file, fname);
31042 }
31043
31044 /* The .tbtab pseudo-op can only be used for the first eight
31045 expressions, since it can't handle the possibly variable
31046 length fields that follow. However, if you omit the optional
31047 fields, the assembler outputs zeros for all optional fields
31048 anyway, giving each variable length field its minimum length
31049 (as defined in sys/debug.h). Thus we cannot use the .tbtab
31050 pseudo-op at all. */
31051
31052 /* An all-zero word flags the start of the tbtab, for debuggers
31053 that have to find it by searching forward from the entry
31054 point or from the current pc. */
31055 fputs ("\t.long 0\n", file);
31056
31057 /* Tbtab format type. Use format type 0. */
31058 fputs ("\t.byte 0,", file);
31059
31060 /* Language type. Unfortunately, there does not seem to be any
31061 official way to discover the language being compiled, so we
31062 use language_string.
31063 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
31064 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
31065 a number, so for now use 9. LTO, Go and JIT aren't assigned numbers
31066 either, so for now use 0. */
31067 if (lang_GNU_C ()
31068 || ! strcmp (language_string, "GNU GIMPLE")
31069 || ! strcmp (language_string, "GNU Go")
31070 || ! strcmp (language_string, "libgccjit"))
31071 i = 0;
31072 else if (! strcmp (language_string, "GNU F77")
31073 || lang_GNU_Fortran ())
31074 i = 1;
31075 else if (! strcmp (language_string, "GNU Pascal"))
31076 i = 2;
31077 else if (! strcmp (language_string, "GNU Ada"))
31078 i = 3;
31079 else if (lang_GNU_CXX ()
31080 || ! strcmp (language_string, "GNU Objective-C++"))
31081 i = 9;
31082 else if (! strcmp (language_string, "GNU Java"))
31083 i = 13;
31084 else if (! strcmp (language_string, "GNU Objective-C"))
31085 i = 14;
31086 else
31087 gcc_unreachable ();
31088 fprintf (file, "%d,", i);
31089
31090 /* 8 single bit fields: global linkage (not set for C extern linkage,
31091 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
31092 from start of procedure stored in tbtab, internal function, function
31093 has controlled storage, function has no toc, function uses fp,
31094 function logs/aborts fp operations. */
31095 /* Assume that fp operations are used if any fp reg must be saved. */
31096 fprintf (file, "%d,",
31097 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
31098
31099 /* 6 bitfields: function is interrupt handler, name present in
31100 proc table, function calls alloca, on condition directives
31101 (controls stack walks, 3 bits), saves condition reg, saves
31102 link reg. */
31103 /* The `function calls alloca' bit seems to be set whenever reg 31 is
31104 set up as a frame pointer, even when there is no alloca call. */
31105 fprintf (file, "%d,",
31106 ((optional_tbtab << 6)
31107 | ((optional_tbtab & frame_pointer_needed) << 5)
31108 | (info->cr_save_p << 1)
31109 | (info->lr_save_p)));
31110
31111 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
31112 (6 bits). */
31113 fprintf (file, "%d,",
31114 (info->push_p << 7) | (64 - info->first_fp_reg_save));
31115
31116 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
31117 fprintf (file, "%d,", (32 - first_reg_to_save ()));
31118
31119 if (optional_tbtab)
31120 {
31121 /* Compute the parameter info from the function decl argument
31122 list. */
31123 tree decl;
31124 int next_parm_info_bit = 31;
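/* parm_info is filled from the most significant bit downward: two
   bits for each floating point register parameter and one bit for
   each word of fixed point parameters, matching the loop below. */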
31125
31126 for (decl = DECL_ARGUMENTS (current_function_decl);
31127 decl; decl = DECL_CHAIN (decl))
31128 {
31129 rtx parameter = DECL_INCOMING_RTL (decl);
31130 machine_mode mode = GET_MODE (parameter);
31131
31132 if (GET_CODE (parameter) == REG)
31133 {
31134 if (SCALAR_FLOAT_MODE_P (mode))
31135 {
31136 int bits;
31137
31138 float_parms++;
31139
31140 switch (mode)
31141 {
31142 case SFmode:
31143 case SDmode:
31144 bits = 0x2;
31145 break;
31146
31147 case DFmode:
31148 case DDmode:
31149 case TFmode:
31150 case TDmode:
31151 case IFmode:
31152 case KFmode:
31153 bits = 0x3;
31154 break;
31155
31156 default:
31157 gcc_unreachable ();
31158 }
31159
31160 /* If only one bit will fit, don't or in this entry. */
31161 if (next_parm_info_bit > 0)
31162 parm_info |= (bits << (next_parm_info_bit - 1));
31163 next_parm_info_bit -= 2;
31164 }
31165 else
31166 {
31167 fixed_parms += ((GET_MODE_SIZE (mode)
31168 + (UNITS_PER_WORD - 1))
31169 / UNITS_PER_WORD);
31170 next_parm_info_bit -= 1;
31171 }
31172 }
31173 }
31174 }
31175
31176 /* Number of fixed point parameters. */
31177 /* This is actually the number of words of fixed point parameters; thus
31178 an 8 byte struct counts as 2, and thus the maximum value is 8. */
31179 fprintf (file, "%d,", fixed_parms);
31180
31181 /* 2 bitfields: number of floating point parameters (7 bits), parameters
31182 all on stack. */
31183 /* This is actually the number of fp registers that hold parameters;
31184 and thus the maximum value is 13. */
31185 /* Set parameters on stack bit if parameters are not in their original
31186 registers, regardless of whether they are on the stack? Xlc
31187 seems to set the bit when not optimizing. */
31188 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
31189
31190 if (optional_tbtab)
31191 {
31192 /* Optional fields follow. Some are variable length. */
31193
31194 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
31195 float, 11 double float. */
31196 /* There is an entry for each parameter in a register, in the order
31197 that they occur in the parameter list. Any intervening arguments
31198 on the stack are ignored. If the list overflows a long (max
31199 possible length 34 bits) then completely leave off all elements
31200 that don't fit. */
31201 /* Only emit this long if there was at least one parameter. */
31202 if (fixed_parms || float_parms)
31203 fprintf (file, "\t.long %d\n", parm_info);
31204
31205 /* Offset from start of code to tb table. */
31206 fputs ("\t.long ", file);
31207 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
31208 RS6000_OUTPUT_BASENAME (file, fname);
31209 putc ('-', file);
31210 rs6000_output_function_entry (file, fname);
31211 putc ('\n', file);
31212
31213 /* Interrupt handler mask. */
31214 /* Omit this long, since we never set the interrupt handler bit
31215 above. */
31216
31217 /* Number of CTL (controlled storage) anchors. */
31218 /* Omit this long, since the has_ctl bit is never set above. */
31219
31220 /* Displacement into stack of each CTL anchor. */
31221 /* Omit this list of longs, because there are no CTL anchors. */
31222
31223 /* Length of function name. */
31224 if (*fname == '*')
31225 ++fname;
31226 fprintf (file, "\t.short %d\n", (int) strlen (fname));
31227
31228 /* Function name. */
31229 assemble_string (fname, strlen (fname));
31230
31231 /* Register for alloca automatic storage; this is always reg 31.
31232 Only emit this if the alloca bit was set above. */
31233 if (frame_pointer_needed)
31234 fputs ("\t.byte 31\n", file);
31235
31236 fputs ("\t.align 2\n", file);
31237 }
31238 }
31239
31240 /* Arrange to define .LCTOC1 label, if not already done. */
31241 if (need_toc_init)
31242 {
31243 need_toc_init = 0;
31244 if (!toc_initialized)
31245 {
31246 switch_to_section (toc_section);
31247 switch_to_section (current_function_section ());
31248 }
31249 }
31250 }
31251
31252 /* -fsplit-stack support. */
31253
31254 /* A SYMBOL_REF for __morestack. */
31255 static GTY(()) rtx morestack_ref;
31256
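/* Return an insn pattern for RT = RA + C, using the add appropriate
   to the target word size. */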
31257 static rtx
31258 gen_add3_const (rtx rt, rtx ra, long c)
31259 {
31260 if (TARGET_64BIT)
31261 return gen_adddi3 (rt, ra, GEN_INT (c));
31262 else
31263 return gen_addsi3 (rt, ra, GEN_INT (c));
31264 }
31265
31266 /* Emit -fsplit-stack prologue, which goes before the regular function
31267 prologue (at local entry point in the case of ELFv2). */
31268
31269 void
31270 rs6000_expand_split_stack_prologue (void)
31271 {
31272 rs6000_stack_t *info = rs6000_stack_info ();
31273 unsigned HOST_WIDE_INT allocate;
31274 long alloc_hi, alloc_lo;
31275 rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
31276 rtx_insn *insn;
31277
31278 gcc_assert (flag_split_stack && reload_completed);
31279
31280 if (!info->push_p)
31281 return;
31282
31283 if (global_regs[29])
31284 {
31285 error ("-fsplit-stack uses register r29");
31286 inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
31287 "conflicts with %qD", global_regs_decl[29]);
31288 }
31289
31290 allocate = info->total_size;
31291 if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
31292 {
31293 sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
31294 return;
31295 }
31296 if (morestack_ref == NULL_RTX)
31297 {
31298 morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
31299 SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
31300 | SYMBOL_FLAG_FUNCTION);
31301 }
31302
31303 r0 = gen_rtx_REG (Pmode, 0);
31304 r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
31305 r12 = gen_rtx_REG (Pmode, 12);
31306 emit_insn (gen_load_split_stack_limit (r0));
31307 /* Always emit two insns here to calculate the requested stack,
31308 so that the linker can edit them when adjusting size for calling
31309 non-split-stack code. */
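/* Split the negated allocation into a high part for addis and a
   sign-extended low 16 bits for addi; the 0x8000 bias keeps alloc_lo
   within [-0x8000, 0x7fff]. */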
31310 alloc_hi = (-allocate + 0x8000) & ~0xffffL;
31311 alloc_lo = -allocate - alloc_hi;
31312 if (alloc_hi != 0)
31313 {
31314 emit_insn (gen_add3_const (r12, r1, alloc_hi));
31315 if (alloc_lo != 0)
31316 emit_insn (gen_add3_const (r12, r12, alloc_lo));
31317 else
31318 emit_insn (gen_nop ());
31319 }
31320 else
31321 {
31322 emit_insn (gen_add3_const (r12, r1, alloc_lo));
31323 emit_insn (gen_nop ());
31324 }
31325
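/* r12 now holds the prospective new stack pointer; compare it against
   the limit in r0 and skip the __morestack call when there is still
   room in the current stack segment. */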
31326 compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
31327 emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
31328 ok_label = gen_label_rtx ();
31329 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
31330 gen_rtx_GEU (VOIDmode, compare, const0_rtx),
31331 gen_rtx_LABEL_REF (VOIDmode, ok_label),
31332 pc_rtx);
31333 insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
31334 JUMP_LABEL (insn) = ok_label;
31335 /* Mark the jump as very likely to be taken. */
31336 add_int_reg_note (insn, REG_BR_PROB,
31337 REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100);
31338
31339 lr = gen_rtx_REG (Pmode, LR_REGNO);
31340 insn = emit_move_insn (r0, lr);
31341 RTX_FRAME_RELATED_P (insn) = 1;
31342 insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
31343 RTX_FRAME_RELATED_P (insn) = 1;
31344
31345 insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
31346 const0_rtx, const0_rtx));
31347 call_fusage = NULL_RTX;
31348 use_reg (&call_fusage, r12);
31349 /* Say the call uses r0, even though it doesn't, to stop regrename
31350 from twiddling with the insns saving lr, trashing args for cfun.
31351 The insns restoring lr are similarly protected by making
31352 split_stack_return use r0. */
31353 use_reg (&call_fusage, r0);
31354 add_function_usage_to (insn, call_fusage);
31355 emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
31356 insn = emit_move_insn (lr, r0);
31357 add_reg_note (insn, REG_CFA_RESTORE, lr);
31358 RTX_FRAME_RELATED_P (insn) = 1;
31359 emit_insn (gen_split_stack_return ());
31360
31361 emit_label (ok_label);
31362 LABEL_NUSES (ok_label) = 1;
31363 }
31364
31365 /* Return the internal arg pointer used for function incoming
31366 arguments. With -fsplit-stack, the arg pointer is r12, so we need
31367 to copy it to a pseudo in order for it to be preserved over calls
31368 and suchlike. We'd really like to use a pseudo here for the
31369 internal arg pointer but data-flow analysis is not prepared to
31370 accept pseudos as live at the beginning of a function. */
31371
31372 static rtx
31373 rs6000_internal_arg_pointer (void)
31374 {
31375 if (flag_split_stack
31376 && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
31377 == NULL))
31378 {
31380 if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
31381 {
31382 rtx pat;
31383
31384 cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
31385 REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;
31386
31387 /* Put the pseudo initialization right after the note at the
31388 beginning of the function. */
31389 pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
31390 gen_rtx_REG (Pmode, 12));
31391 push_topmost_sequence ();
31392 emit_insn_after (pat, get_insns ());
31393 pop_topmost_sequence ();
31394 }
31395 return plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
31396 FIRST_PARM_OFFSET (current_function_decl));
31397 }
31398 return virtual_incoming_args_rtx;
31399 }
31400
31401 /* We may have to tell the dataflow pass that the split stack prologue
31402 is initializing a register. */
31403
31404 static void
31405 rs6000_live_on_entry (bitmap regs)
31406 {
31407 if (flag_split_stack)
31408 bitmap_set_bit (regs, 12);
31409 }
31410
31411 /* Emit -fsplit-stack dynamic stack allocation space check. */
31412
31413 void
31414 rs6000_split_stack_space_check (rtx size, rtx label)
31415 {
31416 rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
31417 rtx limit = gen_reg_rtx (Pmode);
31418 rtx requested = gen_reg_rtx (Pmode);
31419 rtx cmp = gen_reg_rtx (CCUNSmode);
31420 rtx jump;
31421
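/* Compute SP - SIZE and branch to LABEL when the result is still above
   the split-stack limit, i.e. when the dynamic allocation fits in the
   current stack segment. */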
31422 emit_insn (gen_load_split_stack_limit (limit));
31423 if (CONST_INT_P (size))
31424 emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
31425 else
31426 {
31427 size = force_reg (Pmode, size);
31428 emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
31429 }
31430 emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
31431 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
31432 gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
31433 gen_rtx_LABEL_REF (VOIDmode, label),
31434 pc_rtx);
31435 jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
31436 JUMP_LABEL (jump) = label;
31437 }
31438 \f
31439 /* A C compound statement that outputs the assembler code for a thunk
31440 function, used to implement C++ virtual function calls with
31441 multiple inheritance. The thunk acts as a wrapper around a virtual
31442 function, adjusting the implicit object parameter before handing
31443 control off to the real function.
31444
31445 First, emit code to add the integer DELTA to the location that
31446 contains the incoming first argument. Assume that this argument
31447 contains a pointer, and is the one used to pass the `this' pointer
31448 in C++. This is the incoming argument *before* the function
31449 prologue, e.g. `%o0' on a sparc. The addition must preserve the
31450 values of all other incoming arguments.
31451
31452 After the addition, emit code to jump to FUNCTION, which is a
31453 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
31454 not touch the return address. Hence returning from FUNCTION will
31455 return to whoever called the current `thunk'.
31456
31457 The effect must be as if FUNCTION had been called directly with the
31458 adjusted first argument. This macro is responsible for emitting
31459 all of the code for a thunk function; output_function_prologue()
31460 and output_function_epilogue() are not invoked.
31461
31462 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
31463 been extracted from it.) It might possibly be useful on some
31464 targets, but probably not.
31465
31466 If you do not define this macro, the target-independent code in the
31467 C++ frontend will generate a less efficient heavyweight thunk that
31468 calls FUNCTION instead of jumping to it. The generic approach does
31469 not support varargs. */
31470
31471 static void
31472 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
31473 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
31474 tree function)
31475 {
31476 rtx this_rtx, funexp;
31477 rtx_insn *insn;
31478
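/* The thunk's RTL is emitted as final code, so pretend that reload and
   the epilogue pass have already run. */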
31479 reload_completed = 1;
31480 epilogue_completed = 1;
31481
31482 /* Mark the end of the (empty) prologue. */
31483 emit_note (NOTE_INSN_PROLOGUE_END);
31484
31485 /* Find the "this" pointer. If the function returns a structure,
31486 the structure return pointer is in r3. */
31487 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
31488 this_rtx = gen_rtx_REG (Pmode, 4);
31489 else
31490 this_rtx = gen_rtx_REG (Pmode, 3);
31491
31492 /* Apply the constant offset, if required. */
31493 if (delta)
31494 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
31495
31496 /* Apply the offset from the vtable, if required. */
31497 if (vcall_offset)
31498 {
31499 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
31500 rtx tmp = gen_rtx_REG (Pmode, 12);
31501
31502 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
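/* If the vtable offset does not fit in a 16-bit signed displacement,
   add it to the vtable pointer with a separate instruction before
   loading through it. */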
31503 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
31504 {
31505 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
31506 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
31507 }
31508 else
31509 {
31510 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
31511
31512 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
31513 }
31514 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
31515 }
31516
31517 /* Generate a tail call to the target function. */
31518 if (!TREE_USED (function))
31519 {
31520 assemble_external (function);
31521 TREE_USED (function) = 1;
31522 }
31523 funexp = XEXP (DECL_RTL (function), 0);
31524 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
31525
31526 #if TARGET_MACHO
31527 if (MACHOPIC_INDIRECT)
31528 funexp = machopic_indirect_call_target (funexp);
31529 #endif
31530
31531 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
31532 generate sibcall RTL explicitly. */
31533 insn = emit_call_insn (
31534 gen_rtx_PARALLEL (VOIDmode,
31535 gen_rtvec (3,
31536 gen_rtx_CALL (VOIDmode,
31537 funexp, const0_rtx),
31538 gen_rtx_USE (VOIDmode, const0_rtx),
31539 simple_return_rtx)));
31540 SIBLING_CALL_P (insn) = 1;
31541 emit_barrier ();
31542
31543 /* Run just enough of rest_of_compilation to get the insns emitted.
31544 There's not really enough bulk here to make other passes such as
31545 instruction scheduling worth while. Note that use_thunk calls
31546 assemble_start_function and assemble_end_function. */
31547 insn = get_insns ();
31548 shorten_branches (insn);
31549 final_start_function (insn, file, 1);
31550 final (insn, file, 1);
31551 final_end_function ();
31552
31553 reload_completed = 0;
31554 epilogue_completed = 0;
31555 }
31556 \f
31557 /* A quick summary of the various types of 'constant-pool tables'
31558 under PowerPC:
31559
31560 Target          Flags           Name            One table per
31561 AIX             (none)          AIX TOC         object file
31562 AIX             -mfull-toc      AIX TOC         object file
31563 AIX             -mminimal-toc   AIX minimal TOC translation unit
31564 SVR4/EABI       (none)          SVR4 SDATA      object file
31565 SVR4/EABI       -fpic           SVR4 pic        object file
31566 SVR4/EABI       -fPIC           SVR4 PIC        translation unit
31567 SVR4/EABI       -mrelocatable   EABI TOC        function
31568 SVR4/EABI       -maix           AIX TOC         object file
31569 SVR4/EABI       -maix -mminimal-toc
31570                                 AIX minimal TOC translation unit
31571
31572 Name                    Reg.    Set by  entries contains:
31573                                 made by addrs?  fp?     sum?
31574
31575 AIX TOC                 2       crt0    as      Y       option  option
31576 AIX minimal TOC         30      prolog  gcc     Y       Y       option
31577 SVR4 SDATA              13      crt0    gcc     N       Y       N
31578 SVR4 pic                30      prolog  ld      Y       not yet N
31579 SVR4 PIC                30      prolog  gcc     Y       option  option
31580 EABI TOC                30      prolog  gcc     Y       option  option
31581
31582 */
31583
31584 /* Hash functions for the hash table. */
31585
31586 static unsigned
31587 rs6000_hash_constant (rtx k)
31588 {
31589 enum rtx_code code = GET_CODE (k);
31590 machine_mode mode = GET_MODE (k);
31591 unsigned result = (code << 3) ^ mode;
31592 const char *format;
31593 int flen, fidx;
31594
31595 format = GET_RTX_FORMAT (code);
31596 flen = strlen (format);
31597 fidx = 0;
31598
31599 switch (code)
31600 {
31601 case LABEL_REF:
31602 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
31603
31604 case CONST_WIDE_INT:
31605 {
31606 int i;
31607 flen = CONST_WIDE_INT_NUNITS (k);
31608 for (i = 0; i < flen; i++)
31609 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
31610 return result;
31611 }
31612
31613 case CONST_DOUBLE:
31614 if (mode != VOIDmode)
31615 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
31616 flen = 2;
31617 break;
31618
31619 case CODE_LABEL:
31620 fidx = 3;
31621 break;
31622
31623 default:
31624 break;
31625 }
31626
31627 for (; fidx < flen; fidx++)
31628 switch (format[fidx])
31629 {
31630 case 's':
31631 {
31632 unsigned i, len;
31633 const char *str = XSTR (k, fidx);
31634 len = strlen (str);
31635 result = result * 613 + len;
31636 for (i = 0; i < len; i++)
31637 result = result * 613 + (unsigned) str[i];
31638 break;
31639 }
31640 case 'u':
31641 case 'e':
31642 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
31643 break;
31644 case 'i':
31645 case 'n':
31646 result = result * 613 + (unsigned) XINT (k, fidx);
31647 break;
31648 case 'w':
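/* Hash a HOST_WIDE_INT one `unsigned'-sized chunk at a time when it
   is wider than `unsigned'. */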
31649 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
31650 result = result * 613 + (unsigned) XWINT (k, fidx);
31651 else
31652 {
31653 size_t i;
31654 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
31655 result = result * 613 + (unsigned) (XWINT (k, fidx)
31656 >> CHAR_BIT * i);
31657 }
31658 break;
31659 case '0':
31660 break;
31661 default:
31662 gcc_unreachable ();
31663 }
31664
31665 return result;
31666 }
31667
31668 hashval_t
31669 toc_hasher::hash (toc_hash_struct *thc)
31670 {
31671 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
31672 }
31673
31674 /* Compare H1 and H2 for equivalence. */
31675
31676 bool
31677 toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
31678 {
31679 rtx r1 = h1->key;
31680 rtx r2 = h2->key;
31681
31682 if (h1->key_mode != h2->key_mode)
31683 return 0;
31684
31685 return rtx_equal_p (r1, r2);
31686 }
31687
31688 /* These are the names given by the C++ front-end to vtables, and
31689 vtable-like objects. Ideally, this logic should not be here;
31690 instead, there should be some programmatic way of inquiring as
31691 to whether or not an object is a vtable. */
31692
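/* Note that the NAME argument is unused; each test below refers to the
   caller's local variable `name'. */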
31693 #define VTABLE_NAME_P(NAME) \
31694 (strncmp ("_vt.", name, strlen ("_vt.")) == 0 \
31695 || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0 \
31696 || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0 \
31697 || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0 \
31698 || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
31699
31700 #ifdef NO_DOLLAR_IN_LABEL
31701 /* Return a GGC-allocated character string translating dollar signs in
31702 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
31703
31704 const char *
31705 rs6000_xcoff_strip_dollar (const char *name)
31706 {
31707 char *strip, *p;
31708 const char *q;
31709 size_t len;
31710
31711 q = (const char *) strchr (name, '$');
31712
31713 if (q == 0 || q == name)
31714 return name;
31715
31716 len = strlen (name);
31717 strip = XALLOCAVEC (char, len + 1);
31718 strcpy (strip, name);
31719 p = strip + (q - name);
31720 while (p)
31721 {
31722 *p = '_';
31723 p = strchr (p + 1, '$');
31724 }
31725
31726 return ggc_alloc_string (strip, len);
31727 }
31728 #endif
31729
31730 void
31731 rs6000_output_symbol_ref (FILE *file, rtx x)
31732 {
31733 const char *name = XSTR (x, 0);
31734
31735 /* Currently C++ toc references to vtables can be emitted before it
31736 is decided whether the vtable is public or private. If this is
31737 the case, then the linker will eventually complain that there is
31738 a reference to an unknown section. Thus, for vtables only,
31739 we emit the TOC reference to reference the identifier and not the
31740 symbol. */
31741 if (VTABLE_NAME_P (name))
31742 {
31743 RS6000_OUTPUT_BASENAME (file, name);
31744 }
31745 else
31746 assemble_name (file, name);
31747 }
31748
31749 /* Output a TOC entry. We derive the entry name from what is being
31750 written. */
31751
31752 void
31753 output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
31754 {
31755 char buf[256];
31756 const char *name = buf;
31757 rtx base = x;
31758 HOST_WIDE_INT offset = 0;
31759
31760 gcc_assert (!TARGET_NO_TOC);
31761
31762 /* When the linker won't eliminate them, don't output duplicate
31763 TOC entries (this happens on AIX if there is any kind of TOC,
31764 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
31765 CODE_LABELs. */
31766 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
31767 {
31768 struct toc_hash_struct *h;
31769
31770 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
31771 time because GGC is not initialized at that point. */
31772 if (toc_hash_table == NULL)
31773 toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);
31774
31775 h = ggc_alloc<toc_hash_struct> ();
31776 h->key = x;
31777 h->key_mode = mode;
31778 h->labelno = labelno;
31779
31780 toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
31781 if (*found == NULL)
31782 *found = h;
31783 else /* This is indeed a duplicate.
31784 Set this label equal to that label. */
31785 {
31786 fputs ("\t.set ", file);
31787 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
31788 fprintf (file, "%d,", labelno);
31789 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
31790 fprintf (file, "%d\n", ((*found)->labelno));
31791
31792 #ifdef HAVE_AS_TLS
31793 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
31794 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
31795 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
31796 {
31797 fputs ("\t.set ", file);
31798 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
31799 fprintf (file, "%d,", labelno);
31800 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
31801 fprintf (file, "%d\n", ((*found)->labelno));
31802 }
31803 #endif
31804 return;
31805 }
31806 }
31807
31808 /* If we're going to put a double constant in the TOC, make sure it's
31809 aligned properly when strict alignment is on. */
31810 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
31811 && STRICT_ALIGNMENT
31812 && GET_MODE_BITSIZE (mode) >= 64
31813 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
31814 ASM_OUTPUT_ALIGN (file, 3);
31815 }
31816
31817 (*targetm.asm_out.internal_label) (file, "LC", labelno);
31818
31819 /* Handle FP constants specially. Note that if we have a minimal
31820 TOC, things we put here aren't actually in the TOC, so we can allow
31821 FP constants. */
31822 if (GET_CODE (x) == CONST_DOUBLE &&
31823 (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
31824 || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
31825 {
31826 long k[4];
31827
31828 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
31829 REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
31830 else
31831 REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
31832
31833 if (TARGET_64BIT)
31834 {
31835 if (TARGET_ELF || TARGET_MINIMAL_TOC)
31836 fputs (DOUBLE_INT_ASM_OP, file);
31837 else
31838 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
31839 k[0] & 0xffffffff, k[1] & 0xffffffff,
31840 k[2] & 0xffffffff, k[3] & 0xffffffff);
31841 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
31842 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
31843 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
31844 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
31845 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
31846 return;
31847 }
31848 else
31849 {
31850 if (TARGET_ELF || TARGET_MINIMAL_TOC)
31851 fputs ("\t.long ", file);
31852 else
31853 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
31854 k[0] & 0xffffffff, k[1] & 0xffffffff,
31855 k[2] & 0xffffffff, k[3] & 0xffffffff);
31856 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
31857 k[0] & 0xffffffff, k[1] & 0xffffffff,
31858 k[2] & 0xffffffff, k[3] & 0xffffffff);
31859 return;
31860 }
31861 }
31862 else if (GET_CODE (x) == CONST_DOUBLE
31863 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
31864 {
31865 long k[2];
31866
31867 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
31868 REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
31869 else
31870 REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);
31871
31872 if (TARGET_64BIT)
31873 {
31874 if (TARGET_ELF || TARGET_MINIMAL_TOC)
31875 fputs (DOUBLE_INT_ASM_OP, file);
31876 else
31877 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
31878 k[0] & 0xffffffff, k[1] & 0xffffffff);
31879 fprintf (file, "0x%lx%08lx\n",
31880 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
31881 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
31882 return;
31883 }
31884 else
31885 {
31886 if (TARGET_ELF || TARGET_MINIMAL_TOC)
31887 fputs ("\t.long ", file);
31888 else
31889 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
31890 k[0] & 0xffffffff, k[1] & 0xffffffff);
31891 fprintf (file, "0x%lx,0x%lx\n",
31892 k[0] & 0xffffffff, k[1] & 0xffffffff);
31893 return;
31894 }
31895 }
31896 else if (GET_CODE (x) == CONST_DOUBLE
31897 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
31898 {
31899 long l;
31900
31901 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
31902 REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
31903 else
31904 REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);
31905
31906 if (TARGET_64BIT)
31907 {
31908 if (TARGET_ELF || TARGET_MINIMAL_TOC)
31909 fputs (DOUBLE_INT_ASM_OP, file);
31910 else
31911 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
31912 if (WORDS_BIG_ENDIAN)
31913 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
31914 else
31915 fprintf (file, "0x%lx\n", l & 0xffffffff);
31916 return;
31917 }
31918 else
31919 {
31920 if (TARGET_ELF || TARGET_MINIMAL_TOC)
31921 fputs ("\t.long ", file);
31922 else
31923 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
31924 fprintf (file, "0x%lx\n", l & 0xffffffff);
31925 return;
31926 }
31927 }
31928 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
31929 {
31930 unsigned HOST_WIDE_INT low;
31931 HOST_WIDE_INT high;
31932
31933 low = INTVAL (x) & 0xffffffff;
31934 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
31935
31936 /* TOC entries are always Pmode-sized, so when big-endian
31937 smaller integer constants in the TOC need to be padded.
31938 (This is still a win over putting the constants in
31939 a separate constant pool, because then we'd have
31940 to have both a TOC entry _and_ the actual constant.)
31941
31942 For a 32-bit target, CONST_INT values are loaded and shifted
31943 entirely within `low' and can be stored in one TOC entry. */
31944
31945 /* It would be easy to make this work, but it doesn't now. */
31946 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
31947
31948 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
31949 {
31950 low |= high << 32;
31951 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
31952 high = (HOST_WIDE_INT) low >> 32;
31953 low &= 0xffffffff;
31954 }
31955
31956 if (TARGET_64BIT)
31957 {
31958 if (TARGET_ELF || TARGET_MINIMAL_TOC)
31959 fputs (DOUBLE_INT_ASM_OP, file);
31960 else
31961 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
31962 (long) high & 0xffffffff, (long) low & 0xffffffff);
31963 fprintf (file, "0x%lx%08lx\n",
31964 (long) high & 0xffffffff, (long) low & 0xffffffff);
31965 return;
31966 }
31967 else
31968 {
31969 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
31970 {
31971 if (TARGET_ELF || TARGET_MINIMAL_TOC)
31972 fputs ("\t.long ", file);
31973 else
31974 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
31975 (long) high & 0xffffffff, (long) low & 0xffffffff);
31976 fprintf (file, "0x%lx,0x%lx\n",
31977 (long) high & 0xffffffff, (long) low & 0xffffffff);
31978 }
31979 else
31980 {
31981 if (TARGET_ELF || TARGET_MINIMAL_TOC)
31982 fputs ("\t.long ", file);
31983 else
31984 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
31985 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
31986 }
31987 return;
31988 }
31989 }
31990
31991 if (GET_CODE (x) == CONST)
31992 {
31993 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
31994 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
31995
31996 base = XEXP (XEXP (x, 0), 0);
31997 offset = INTVAL (XEXP (XEXP (x, 0), 1));
31998 }
31999
32000 switch (GET_CODE (base))
32001 {
32002 case SYMBOL_REF:
32003 name = XSTR (base, 0);
32004 break;
32005
32006 case LABEL_REF:
32007 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
32008 CODE_LABEL_NUMBER (XEXP (base, 0)));
32009 break;
32010
32011 case CODE_LABEL:
32012 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
32013 break;
32014
32015 default:
32016 gcc_unreachable ();
32017 }
32018
32019 if (TARGET_ELF || TARGET_MINIMAL_TOC)
32020 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
32021 else
32022 {
32023 fputs ("\t.tc ", file);
32024 RS6000_OUTPUT_BASENAME (file, name);
32025
32026 if (offset < 0)
32027 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
32028 else if (offset)
32029 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
32030
32031 /* Mark large TOC symbols on AIX with [TE] so they are mapped
32032 after other TOC symbols, reducing overflow of small TOC access
32033 to [TC] symbols. */
32034 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
32035 ? "[TE]," : "[TC],", file);
32036 }
32037
32038 /* Currently C++ toc references to vtables can be emitted before it
32039 is decided whether the vtable is public or private. If this is
32040 the case, then the linker will eventually complain that there is
32041 a TOC reference to an unknown section. Thus, for vtables only,
32042 we emit the TOC reference to reference the symbol and not the
32043 section. */
32044 if (VTABLE_NAME_P (name))
32045 {
32046 RS6000_OUTPUT_BASENAME (file, name);
32047 if (offset < 0)
32048 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
32049 else if (offset > 0)
32050 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
32051 }
32052 else
32053 output_addr_const (file, x);
32054
32055 #if HAVE_AS_TLS
32056 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
32057 {
32058 switch (SYMBOL_REF_TLS_MODEL (base))
32059 {
32060 case 0:
32061 break;
32062 case TLS_MODEL_LOCAL_EXEC:
32063 fputs ("@le", file);
32064 break;
32065 case TLS_MODEL_INITIAL_EXEC:
32066 fputs ("@ie", file);
32067 break;
32068 /* Use global-dynamic for local-dynamic. */
32069 case TLS_MODEL_GLOBAL_DYNAMIC:
32070 case TLS_MODEL_LOCAL_DYNAMIC:
32071 putc ('\n', file);
32072 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
32073 fputs ("\t.tc .", file);
32074 RS6000_OUTPUT_BASENAME (file, name);
32075 fputs ("[TC],", file);
32076 output_addr_const (file, x);
32077 fputs ("@m", file);
32078 break;
32079 default:
32080 gcc_unreachable ();
32081 }
32082 }
32083 #endif
32084
32085 putc ('\n', file);
32086 }
32087 \f
32088 /* Output an assembler pseudo-op to write an ASCII string of N characters
32089 starting at P to FILE.
32090
32091 On the RS/6000, we have to do this using the .byte operation and
32092 write out special characters outside the quoted string.
32093 Also, the assembler is broken; very long strings are truncated,
32094 so we must artificially break them up early. */
32095
32096 void
32097 output_ascii (FILE *file, const char *p, int n)
32098 {
32099 char c;
32100 int i, count_string;
32101 const char *for_string = "\t.byte \"";
32102 const char *for_decimal = "\t.byte ";
32103 const char *to_close = NULL;
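/* for_string and for_decimal hold whatever must be printed before the
   next quoted character or decimal byte, respectively; to_close is the
   text needed to finish the current .byte line, if any. */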
32104
32105 count_string = 0;
32106 for (i = 0; i < n; i++)
32107 {
32108 c = *p++;
32109 if (c >= ' ' && c < 0177)
32110 {
32111 if (for_string)
32112 fputs (for_string, file);
32113 putc (c, file);
32114
32115 /* Write two quotes to get one. */
32116 if (c == '"')
32117 {
32118 putc (c, file);
32119 ++count_string;
32120 }
32121
32122 for_string = NULL;
32123 for_decimal = "\"\n\t.byte ";
32124 to_close = "\"\n";
32125 ++count_string;
32126
32127 if (count_string >= 512)
32128 {
32129 fputs (to_close, file);
32130
32131 for_string = "\t.byte \"";
32132 for_decimal = "\t.byte ";
32133 to_close = NULL;
32134 count_string = 0;
32135 }
32136 }
32137 else
32138 {
32139 if (for_decimal)
32140 fputs (for_decimal, file);
32141 fprintf (file, "%d", c);
32142
32143 for_string = "\n\t.byte \"";
32144 for_decimal = ", ";
32145 to_close = "\n";
32146 count_string = 0;
32147 }
32148 }
32149
32150 /* Now close the string if we have written one. Then end the line. */
32151 if (to_close)
32152 fputs (to_close, file);
32153 }
32154 \f
32155 /* Generate a unique section name for FILENAME for a section type
32156 represented by SECTION_DESC. Output goes into BUF.
32157
32158 SECTION_DESC can be any string, as long as it is different for each
32159 possible section type.
32160
32161 We name the section in the same manner as xlc. The name begins with an
32162 underscore followed by the filename (after stripping any leading directory
32163 names) with the last period replaced by the string SECTION_DESC. If
32164 FILENAME does not contain a period, SECTION_DESC is appended to the end of
32165 the name. */
32166
32167 void
32168 rs6000_gen_section_name (char **buf, const char *filename,
32169 const char *section_desc)
32170 {
32171 const char *q, *after_last_slash, *last_period = 0;
32172 char *p;
32173 int len;
32174
32175 after_last_slash = filename;
32176 for (q = filename; *q; q++)
32177 {
32178 if (*q == '/')
32179 after_last_slash = q + 1;
32180 else if (*q == '.')
32181 last_period = q;
32182 }
32183
32184 len = strlen (after_last_slash) + strlen (section_desc) + 2;
32185 *buf = (char *) xmalloc (len);
32186
32187 p = *buf;
32188 *p++ = '_';
32189
32190 for (q = after_last_slash; *q; q++)
32191 {
32192 if (q == last_period)
32193 {
32194 strcpy (p, section_desc);
32195 p += strlen (section_desc);
32196 break;
32197 }
32198
32199 else if (ISALNUM (*q))
32200 *p++ = *q;
32201 }
32202
32203 if (last_period == 0)
32204 strcpy (p, section_desc);
32205 else
32206 *p = '\0';
32207 }
32208 \f
32209 /* Emit profile function. */
32210
32211 void
32212 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
32213 {
32214 /* Non-standard profiling for kernels, which just saves LR then calls
32215 _mcount without worrying about arg saves. The idea is to change
32216 the function prologue as little as possible as it isn't easy to
32217 account for arg save/restore code added just for _mcount. */
32218 if (TARGET_PROFILE_KERNEL)
32219 return;
32220
32221 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
32222 {
32223 #ifndef NO_PROFILE_COUNTERS
32224 # define NO_PROFILE_COUNTERS 0
32225 #endif
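/* With NO_PROFILE_COUNTERS nonzero, _mcount is called with no extra
   argument; otherwise we pass the address of the local LP label that
   holds the per-function counter. */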
32226 if (NO_PROFILE_COUNTERS)
32227 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
32228 LCT_NORMAL, VOIDmode, 0);
32229 else
32230 {
32231 char buf[30];
32232 const char *label_name;
32233 rtx fun;
32234
32235 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
32236 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
32237 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
32238
32239 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
32240 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
32241 }
32242 }
32243 else if (DEFAULT_ABI == ABI_DARWIN)
32244 {
32245 const char *mcount_name = RS6000_MCOUNT;
32246 int caller_addr_regno = LR_REGNO;
32247
32248 /* Be conservative and always set this, at least for now. */
32249 crtl->uses_pic_offset_table = 1;
32250
32251 #if TARGET_MACHO
32252 /* For PIC code, set up a stub and collect the caller's address
32253 from r0, which is where the prologue puts it. */
32254 if (MACHOPIC_INDIRECT
32255 && crtl->uses_pic_offset_table)
32256 caller_addr_regno = 0;
32257 #endif
32258 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
32259 LCT_NORMAL, VOIDmode, 1,
32260 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
32261 }
32262 }
32263
32264 /* Write function profiler code. */
32265
32266 void
32267 output_function_profiler (FILE *file, int labelno)
32268 {
32269 char buf[100];
32270
32271 switch (DEFAULT_ABI)
32272 {
32273 default:
32274 gcc_unreachable ();
32275
32276 case ABI_V4:
32277 if (!TARGET_32BIT)
32278 {
32279 warning (0, "no profiling of 64-bit code for this ABI");
32280 return;
32281 }
32282 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
32283 fprintf (file, "\tmflr %s\n", reg_names[0]);
32284 if (NO_PROFILE_COUNTERS)
32285 {
32286 asm_fprintf (file, "\tstw %s,4(%s)\n",
32287 reg_names[0], reg_names[1]);
32288 }
32289 else if (TARGET_SECURE_PLT && flag_pic)
32290 {
32291 if (TARGET_LINK_STACK)
32292 {
32293 char name[32];
32294 get_ppc476_thunk_name (name);
32295 asm_fprintf (file, "\tbl %s\n", name);
32296 }
32297 else
32298 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
32299 asm_fprintf (file, "\tstw %s,4(%s)\n",
32300 reg_names[0], reg_names[1]);
32301 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
32302 asm_fprintf (file, "\taddis %s,%s,",
32303 reg_names[12], reg_names[12]);
32304 assemble_name (file, buf);
32305 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
32306 assemble_name (file, buf);
32307 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
32308 }
32309 else if (flag_pic == 1)
32310 {
32311 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
32312 asm_fprintf (file, "\tstw %s,4(%s)\n",
32313 reg_names[0], reg_names[1]);
32314 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
32315 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
32316 assemble_name (file, buf);
32317 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
32318 }
32319 else if (flag_pic > 1)
32320 {
32321 asm_fprintf (file, "\tstw %s,4(%s)\n",
32322 reg_names[0], reg_names[1]);
32323 /* Now, we need to get the address of the label. */
32324 if (TARGET_LINK_STACK)
32325 {
32326 char name[32];
32327 get_ppc476_thunk_name (name);
32328 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
32329 assemble_name (file, buf);
32330 fputs ("-.\n1:", file);
32331 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
32332 asm_fprintf (file, "\taddi %s,%s,4\n",
32333 reg_names[11], reg_names[11]);
32334 }
32335 else
32336 {
32337 fputs ("\tbcl 20,31,1f\n\t.long ", file);
32338 assemble_name (file, buf);
32339 fputs ("-.\n1:", file);
32340 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
32341 }
32342 asm_fprintf (file, "\tlwz %s,0(%s)\n",
32343 reg_names[0], reg_names[11]);
32344 asm_fprintf (file, "\tadd %s,%s,%s\n",
32345 reg_names[0], reg_names[0], reg_names[11]);
32346 }
32347 else
32348 {
32349 asm_fprintf (file, "\tlis %s,", reg_names[12]);
32350 assemble_name (file, buf);
32351 fputs ("@ha\n", file);
32352 asm_fprintf (file, "\tstw %s,4(%s)\n",
32353 reg_names[0], reg_names[1]);
32354 asm_fprintf (file, "\tla %s,", reg_names[0]);
32355 assemble_name (file, buf);
32356 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
32357 }
32358
32359 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
32360 fprintf (file, "\tbl %s%s\n",
32361 RS6000_MCOUNT, flag_pic ? "@plt" : "");
32362 break;
32363
32364 case ABI_AIX:
32365 case ABI_ELFv2:
32366 case ABI_DARWIN:
32367 /* Don't do anything, done in output_profile_hook (). */
32368 break;
32369 }
32370 }
32371
32372 \f
32373
32374 /* The following variable holds the last issued insn. */
32375
32376 static rtx_insn *last_scheduled_insn;
32377
32378 /* The following variable helps to balance issuing of load and
32379 store instructions. */
32380
32381 static int load_store_pendulum;
32382
32383 /* The following variable helps pair divide insns during scheduling. */
32384 static int divide_cnt;
32385 /* The following variable helps pair and alternate vector and vector load
32386 insns during scheduling. */
32387 static int vec_load_pendulum;
32388
32389
32390 /* Power4 load update and store update instructions are cracked into a
32391 load or store and an integer insn which are executed in the same cycle.
32392 Branches have their own dispatch slot which does not count against the
32393 GCC issue rate, but it changes the program flow so there are no other
32394 instructions to issue in this cycle. */
32395
32396 static int
32397 rs6000_variable_issue_1 (rtx_insn *insn, int more)
32398 {
32399 last_scheduled_insn = insn;
32400 if (GET_CODE (PATTERN (insn)) == USE
32401 || GET_CODE (PATTERN (insn)) == CLOBBER)
32402 {
32403 cached_can_issue_more = more;
32404 return cached_can_issue_more;
32405 }
32406
32407 if (insn_terminates_group_p (insn, current_group))
32408 {
32409 cached_can_issue_more = 0;
32410 return cached_can_issue_more;
32411 }
32412
32413 /* If the insn has no reservation but we reach here anyway, just return the default. */
32414 if (recog_memoized (insn) < 0)
32415 return more;
32416
32417 if (rs6000_sched_groups)
32418 {
32419 if (is_microcoded_insn (insn))
32420 cached_can_issue_more = 0;
32421 else if (is_cracked_insn (insn))
32422 cached_can_issue_more = more > 2 ? more - 2 : 0;
32423 else
32424 cached_can_issue_more = more - 1;
32425
32426 return cached_can_issue_more;
32427 }
32428
32429 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
32430 return 0;
32431
32432 cached_can_issue_more = more - 1;
32433 return cached_can_issue_more;
32434 }
32435
32436 static int
32437 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
32438 {
32439 int r = rs6000_variable_issue_1 (insn, more);
32440 if (verbose)
32441 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
32442 return r;
32443 }
32444
32445 /* Adjust the cost of a scheduling dependency. Return the new cost of
32446 a dependency of type DEP_TYPE of INSN on DEP_INSN. COST is the current cost. */
32447
32448 static int
32449 rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
32450 unsigned int)
32451 {
32452 enum attr_type attr_type;
32453
32454 if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
32455 return cost;
32456
32457 switch (dep_type)
32458 {
32459 case REG_DEP_TRUE:
32460 {
32461 /* Data dependency; DEP_INSN writes a register that INSN reads
32462 some cycles later. */
32463
32464 /* Separate a load from a narrower, dependent store. */
32465 if ((rs6000_sched_groups || rs6000_cpu_attr == CPU_POWER9)
32466 && GET_CODE (PATTERN (insn)) == SET
32467 && GET_CODE (PATTERN (dep_insn)) == SET
32468 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
32469 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
32470 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
32471 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
32472 return cost + 14;
32473
32474 attr_type = get_attr_type (insn);
32475
32476 switch (attr_type)
32477 {
32478 case TYPE_JMPREG:
32479 /* Tell the first scheduling pass about the latency between
32480 a mtctr and bctr (and mtlr and br/blr). The first
32481 scheduling pass will not know about this latency since
32482 the mtctr instruction, which has the latency associated
32483 to it, will be generated by reload. */
32484 return 4;
32485 case TYPE_BRANCH:
32486 /* Leave some extra cycles between a compare and its
32487 dependent branch, to inhibit expensive mispredicts. */
32488 if ((rs6000_cpu_attr == CPU_PPC603
32489 || rs6000_cpu_attr == CPU_PPC604
32490 || rs6000_cpu_attr == CPU_PPC604E
32491 || rs6000_cpu_attr == CPU_PPC620
32492 || rs6000_cpu_attr == CPU_PPC630
32493 || rs6000_cpu_attr == CPU_PPC750
32494 || rs6000_cpu_attr == CPU_PPC7400
32495 || rs6000_cpu_attr == CPU_PPC7450
32496 || rs6000_cpu_attr == CPU_PPCE5500
32497 || rs6000_cpu_attr == CPU_PPCE6500
32498 || rs6000_cpu_attr == CPU_POWER4
32499 || rs6000_cpu_attr == CPU_POWER5
32500 || rs6000_cpu_attr == CPU_POWER7
32501 || rs6000_cpu_attr == CPU_POWER8
32502 || rs6000_cpu_attr == CPU_POWER9
32503 || rs6000_cpu_attr == CPU_CELL)
32504 && recog_memoized (dep_insn)
32505 && (INSN_CODE (dep_insn) >= 0))
32506
32507 switch (get_attr_type (dep_insn))
32508 {
32509 case TYPE_CMP:
32510 case TYPE_FPCOMPARE:
32511 case TYPE_CR_LOGICAL:
32512 case TYPE_DELAYED_CR:
32513 return cost + 2;
32514 case TYPE_EXTS:
32515 case TYPE_MUL:
32516 if (get_attr_dot (dep_insn) == DOT_YES)
32517 return cost + 2;
32518 else
32519 break;
32520 case TYPE_SHIFT:
32521 if (get_attr_dot (dep_insn) == DOT_YES
32522 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
32523 return cost + 2;
32524 else
32525 break;
32526 default:
32527 break;
32528 }
32529 break;
32530
32531 case TYPE_STORE:
32532 case TYPE_FPSTORE:
32533 if ((rs6000_cpu == PROCESSOR_POWER6)
32534 && recog_memoized (dep_insn)
32535 && (INSN_CODE (dep_insn) >= 0))
32536 {
32537
32538 if (GET_CODE (PATTERN (insn)) != SET)
32539 /* If this happens, we have to extend this to schedule
32540 optimally. Return default for now. */
32541 return cost;
32542
32543 /* Adjust the cost for the case where the value written
32544 by a fixed point operation is used as the address
32545 gen value on a store. */
32546 switch (get_attr_type (dep_insn))
32547 {
32548 case TYPE_LOAD:
32549 case TYPE_CNTLZ:
32550 {
32551 if (! store_data_bypass_p (dep_insn, insn))
32552 return get_attr_sign_extend (dep_insn)
32553 == SIGN_EXTEND_YES ? 6 : 4;
32554 break;
32555 }
32556 case TYPE_SHIFT:
32557 {
32558 if (! store_data_bypass_p (dep_insn, insn))
32559 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
32560 6 : 3;
32561 break;
32562 }
32563 case TYPE_INTEGER:
32564 case TYPE_ADD:
32565 case TYPE_LOGICAL:
32566 case TYPE_EXTS:
32567 case TYPE_INSERT:
32568 {
32569 if (! store_data_bypass_p (dep_insn, insn))
32570 return 3;
32571 break;
32572 }
32573 case TYPE_STORE:
32574 case TYPE_FPLOAD:
32575 case TYPE_FPSTORE:
32576 {
32577 if (get_attr_update (dep_insn) == UPDATE_YES
32578 && ! store_data_bypass_p (dep_insn, insn))
32579 return 3;
32580 break;
32581 }
32582 case TYPE_MUL:
32583 {
32584 if (! store_data_bypass_p (dep_insn, insn))
32585 return 17;
32586 break;
32587 }
32588 case TYPE_DIV:
32589 {
32590 if (! store_data_bypass_p (dep_insn, insn))
32591 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
32592 break;
32593 }
32594 default:
32595 break;
32596 }
32597 }
32598 break;
32599
32600 case TYPE_LOAD:
32601 if ((rs6000_cpu == PROCESSOR_POWER6)
32602 && recog_memoized (dep_insn)
32603 && (INSN_CODE (dep_insn) >= 0))
32604 {
32605
32606 /* Adjust the cost for the case where the value written
32607 by a fixed point instruction is used within the address
32608 gen portion of a subsequent load(u)(x) */
32609 switch (get_attr_type (dep_insn))
32610 {
32611 case TYPE_LOAD:
32612 case TYPE_CNTLZ:
32613 {
32614 if (set_to_load_agen (dep_insn, insn))
32615 return get_attr_sign_extend (dep_insn)
32616 == SIGN_EXTEND_YES ? 6 : 4;
32617 break;
32618 }
32619 case TYPE_SHIFT:
32620 {
32621 if (set_to_load_agen (dep_insn, insn))
32622 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
32623 6 : 3;
32624 break;
32625 }
32626 case TYPE_INTEGER:
32627 case TYPE_ADD:
32628 case TYPE_LOGICAL:
32629 case TYPE_EXTS:
32630 case TYPE_INSERT:
32631 {
32632 if (set_to_load_agen (dep_insn, insn))
32633 return 3;
32634 break;
32635 }
32636 case TYPE_STORE:
32637 case TYPE_FPLOAD:
32638 case TYPE_FPSTORE:
32639 {
32640 if (get_attr_update (dep_insn) == UPDATE_YES
32641 && set_to_load_agen (dep_insn, insn))
32642 return 3;
32643 break;
32644 }
32645 case TYPE_MUL:
32646 {
32647 if (set_to_load_agen (dep_insn, insn))
32648 return 17;
32649 break;
32650 }
32651 case TYPE_DIV:
32652 {
32653 if (set_to_load_agen (dep_insn, insn))
32654 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
32655 break;
32656 }
32657 default:
32658 break;
32659 }
32660 }
32661 break;
32662
32663 case TYPE_FPLOAD:
32664 if ((rs6000_cpu == PROCESSOR_POWER6)
32665 && get_attr_update (insn) == UPDATE_NO
32666 && recog_memoized (dep_insn)
32667 && (INSN_CODE (dep_insn) >= 0)
32668 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
32669 return 2;
32670
32671 default:
32672 break;
32673 }
32674
32675 /* Fall out to return default cost. */
32676 }
32677 break;
32678
32679 case REG_DEP_OUTPUT:
32680 /* Output dependency; DEP_INSN writes a register that INSN writes some
32681 cycles later. */
32682 if ((rs6000_cpu == PROCESSOR_POWER6)
32683 && recog_memoized (dep_insn)
32684 && (INSN_CODE (dep_insn) >= 0))
32685 {
32686 attr_type = get_attr_type (insn);
32687
32688 switch (attr_type)
32689 {
32690 case TYPE_FP:
32691 case TYPE_FPSIMPLE:
32692 if (get_attr_type (dep_insn) == TYPE_FP
32693 || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
32694 return 1;
32695 break;
32696 case TYPE_FPLOAD:
32697 if (get_attr_update (insn) == UPDATE_NO
32698 && get_attr_type (dep_insn) == TYPE_MFFGPR)
32699 return 2;
32700 break;
32701 default:
32702 break;
32703 }
32704 }
32705 /* Fall through, no cost for output dependency. */
32706 /* FALLTHRU */
32707
32708 case REG_DEP_ANTI:
32709 /* Anti dependency; DEP_INSN reads a register that INSN writes some
32710 cycles later. */
32711 return 0;
32712
32713 default:
32714 gcc_unreachable ();
32715 }
32716
32717 return cost;
32718 }
32719
32720 /* Debug version of rs6000_adjust_cost. */
32721
32722 static int
32723 rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
32724 int cost, unsigned int dw)
32725 {
32726 int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);
32727
32728 if (ret != cost)
32729 {
32730 const char *dep;
32731
32732 switch (dep_type)
32733 {
32734 default: dep = "unknown dependency"; break;
32735 case REG_DEP_TRUE: dep = "data dependency"; break;
32736 case REG_DEP_OUTPUT: dep = "output dependency"; break;
32737 case REG_DEP_ANTI: dep = "anti dependency"; break;
32738 }
32739
32740 fprintf (stderr,
32741 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
32742 "%s, insn:\n", ret, cost, dep);
32743
32744 debug_rtx (insn);
32745 }
32746
32747 return ret;
32748 }
32749
32750 /* The function returns true if INSN is microcoded.
32751 Return false otherwise. */
32752
32753 static bool
32754 is_microcoded_insn (rtx_insn *insn)
32755 {
32756 if (!insn || !NONDEBUG_INSN_P (insn)
32757 || GET_CODE (PATTERN (insn)) == USE
32758 || GET_CODE (PATTERN (insn)) == CLOBBER)
32759 return false;
32760
32761 if (rs6000_cpu_attr == CPU_CELL)
32762 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
32763
32764 if (rs6000_sched_groups
32765 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
32766 {
32767 enum attr_type type = get_attr_type (insn);
32768 if ((type == TYPE_LOAD
32769 && get_attr_update (insn) == UPDATE_YES
32770 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
32771 || ((type == TYPE_LOAD || type == TYPE_STORE)
32772 && get_attr_update (insn) == UPDATE_YES
32773 && get_attr_indexed (insn) == INDEXED_YES)
32774 || type == TYPE_MFCR)
32775 return true;
32776 }
32777
32778 return false;
32779 }
32780
32781 /* The function returns true if INSN is cracked into 2 instructions
32782 by the processor (and therefore occupies 2 issue slots). */
32783
32784 static bool
32785 is_cracked_insn (rtx_insn *insn)
32786 {
32787 if (!insn || !NONDEBUG_INSN_P (insn)
32788 || GET_CODE (PATTERN (insn)) == USE
32789 || GET_CODE (PATTERN (insn)) == CLOBBER)
32790 return false;
32791
32792 if (rs6000_sched_groups
32793 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
32794 {
32795 enum attr_type type = get_attr_type (insn);
32796 if ((type == TYPE_LOAD
32797 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
32798 && get_attr_update (insn) == UPDATE_NO)
32799 || (type == TYPE_LOAD
32800 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
32801 && get_attr_update (insn) == UPDATE_YES
32802 && get_attr_indexed (insn) == INDEXED_NO)
32803 || (type == TYPE_STORE
32804 && get_attr_update (insn) == UPDATE_YES
32805 && get_attr_indexed (insn) == INDEXED_NO)
32806 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
32807 && get_attr_update (insn) == UPDATE_YES)
32808 || type == TYPE_DELAYED_CR
32809 || (type == TYPE_EXTS
32810 && get_attr_dot (insn) == DOT_YES)
32811 || (type == TYPE_SHIFT
32812 && get_attr_dot (insn) == DOT_YES
32813 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
32814 || (type == TYPE_MUL
32815 && get_attr_dot (insn) == DOT_YES)
32816 || type == TYPE_DIV
32817 || (type == TYPE_INSERT
32818 && get_attr_size (insn) == SIZE_32))
32819 return true;
32820 }
32821
32822 return false;
32823 }
32824
32825 /* The function returns true if INSN can be issued only from
32826 the branch slot. */
32827
32828 static bool
32829 is_branch_slot_insn (rtx_insn *insn)
32830 {
32831 if (!insn || !NONDEBUG_INSN_P (insn)
32832 || GET_CODE (PATTERN (insn)) == USE
32833 || GET_CODE (PATTERN (insn)) == CLOBBER)
32834 return false;
32835
32836 if (rs6000_sched_groups)
32837 {
32838 enum attr_type type = get_attr_type (insn);
32839 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
32840 return true;
32841 return false;
32842 }
32843
32844 return false;
32845 }
32846
32847 /* The function returns true if OUT_INSN sets a value that is
32848 used in the address generation computation of IN_INSN. */
32849 static bool
32850 set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
32851 {
32852 rtx out_set, in_set;
32853
32854 /* For performance reasons, only handle the simple case where
32855 both loads are a single_set. */
32856 out_set = single_set (out_insn);
32857 if (out_set)
32858 {
32859 in_set = single_set (in_insn);
32860 if (in_set)
32861 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
32862 }
32863
32864 return false;
32865 }
32866
32867 /* Try to determine base/offset/size parts of the given MEM.
32868 Return true if successful, false if all the values couldn't
32869 be determined.
32870
32871 This function only looks for REG or REG+CONST address forms.
32872 REG+REG address form will return false. */
32873
32874 static bool
32875 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
32876 HOST_WIDE_INT *size)
32877 {
32878 rtx addr_rtx;
32879 if (MEM_SIZE_KNOWN_P (mem))
32880 *size = MEM_SIZE (mem);
32881 else
32882 return false;
32883
32884 addr_rtx = (XEXP (mem, 0));
32885 if (GET_CODE (addr_rtx) == PRE_MODIFY)
32886 addr_rtx = XEXP (addr_rtx, 1);
32887
32888 *offset = 0;
32889 while (GET_CODE (addr_rtx) == PLUS
32890 && CONST_INT_P (XEXP (addr_rtx, 1)))
32891 {
32892 *offset += INTVAL (XEXP (addr_rtx, 1));
32893 addr_rtx = XEXP (addr_rtx, 0);
32894 }
32895 if (!REG_P (addr_rtx))
32896 return false;
32897
32898 *base = addr_rtx;
32899 return true;
32900 }
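
/* Editorial illustration (not part of GCC): how the loop above peels a
   REG+CONST address chain. For a MEM whose address is
   (plus (plus (reg r9) (const_int 32)) (const_int 8)), the two PLUS
   levels fold into *offset = 40 with *base = r9; a REG+REG form such
   as (plus (reg r9) (reg r10)) fails the CONST_INT_P test, so the
   function returns false, as the comment above it says. */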
32901
32902 /* The function returns true if the target storage location of
32903 MEM1 is adjacent to the target storage location of MEM2. */
32905
32906 static bool
32907 adjacent_mem_locations (rtx mem1, rtx mem2)
32908 {
32909 rtx reg1, reg2;
32910 HOST_WIDE_INT off1, size1, off2, size2;
32911
32912 if (get_memref_parts (mem1, &reg1, &off1, &size1)
32913 && get_memref_parts (mem2, &reg2, &off2, &size2))
32914 return ((REGNO (reg1) == REGNO (reg2))
32915 && ((off1 + size1 == off2)
32916 || (off2 + size2 == off1)));
32917
32918 return false;
32919 }
32920
32921 /* This function returns true if it can be determined that the two MEM
32922 locations overlap by at least 1 byte based on base reg/offset/size. */
32923
32924 static bool
32925 mem_locations_overlap (rtx mem1, rtx mem2)
32926 {
32927 rtx reg1, reg2;
32928 HOST_WIDE_INT off1, size1, off2, size2;
32929
32930 if (get_memref_parts (mem1, &reg1, &off1, &size1)
32931 && get_memref_parts (mem2, &reg2, &off2, &size2))
32932 return ((REGNO (reg1) == REGNO (reg2))
32933 && (((off1 <= off2) && (off1 + size1 > off2))
32934 || ((off2 <= off1) && (off2 + size2 > off1))));
32935
32936 return false;
32937 }
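
/* Editorial sketch (not part of GCC): once the base registers compare
   equal, the two predicates above reduce to half-open interval
   arithmetic on [off, off + size). A stand-alone rendering, kept
   disabled: */
#if 0
static bool
intervals_adjacent (HOST_WIDE_INT off1, HOST_WIDE_INT size1,
                    HOST_WIDE_INT off2, HOST_WIDE_INT size2)
{
  /* One access ends exactly where the other begins. */
  return off1 + size1 == off2 || off2 + size2 == off1;
}

static bool
intervals_overlap (HOST_WIDE_INT off1, HOST_WIDE_INT size1,
                   HOST_WIDE_INT off2, HOST_WIDE_INT size2)
{
  /* The access that starts first extends past the start of the other. */
  return (off1 <= off2 && off1 + size1 > off2)
         || (off2 <= off1 && off2 + size2 > off1);
}
#endif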
32938
32939 /* Update the integer scheduling priority INSN_PRIORITY (INSN).
32940 Increase the priority to execute INSN earlier, reduce the priority
32941 to execute INSN later. Do not define this hook if you do not need
32942 to adjust the scheduling priorities of insns. */
32944
32945 static int
32946 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
32947 {
32948 rtx load_mem, str_mem;
32949 /* On machines (like the 750) which have asymmetric integer units,
32950 where one integer unit can do multiply and divides and the other
32951 can't, reduce the priority of multiply/divide so it is scheduled
32952 before other integer operations. */
32953
32954 #if 0
32955 if (! INSN_P (insn))
32956 return priority;
32957
32958 if (GET_CODE (PATTERN (insn)) == USE)
32959 return priority;
32960
32961 switch (rs6000_cpu_attr) {
32962 case CPU_PPC750:
32963 switch (get_attr_type (insn))
32964 {
32965 default:
32966 break;
32967
32968 case TYPE_MUL:
32969 case TYPE_DIV:
32970 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
32971 priority, priority);
32972 if (priority >= 0 && priority < 0x01000000)
32973 priority >>= 3;
32974 break;
32975 }
32976 }
32977 #endif
32978
32979 if (insn_must_be_first_in_group (insn)
32980 && reload_completed
32981 && current_sched_info->sched_max_insns_priority
32982 && rs6000_sched_restricted_insns_priority)
32983 {
32984
32985 /* Prioritize insns that can be dispatched only in the first
32986 dispatch slot. */
32987 if (rs6000_sched_restricted_insns_priority == 1)
32988 /* Attach highest priority to insn. This means that in
32989 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
32990 precede 'priority' (critical path) considerations. */
32991 return current_sched_info->sched_max_insns_priority;
32992 else if (rs6000_sched_restricted_insns_priority == 2)
32993 /* Increase priority of insn by a minimal amount. This means that in
32994 haifa-sched.c:ready_sort(), only 'priority' (critical path)
32995 considerations precede dispatch-slot restriction considerations. */
32996 return (priority + 1);
32997 }
32998
32999 if (rs6000_cpu == PROCESSOR_POWER6
33000 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
33001 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
33002 /* Attach highest priority to insn if the scheduler has just issued two
33003 stores and this instruction is a load, or two loads and this instruction
33004 is a store. Power6 wants loads and stores scheduled alternately
33005 when possible. */
33006 return current_sched_info->sched_max_insns_priority;
33007
33008 return priority;
33009 }
33010
33011 /* Return true if the instruction is nonpipelined on the Cell. */
33012 static bool
33013 is_nonpipeline_insn (rtx_insn *insn)
33014 {
33015 enum attr_type type;
33016 if (!insn || !NONDEBUG_INSN_P (insn)
33017 || GET_CODE (PATTERN (insn)) == USE
33018 || GET_CODE (PATTERN (insn)) == CLOBBER)
33019 return false;
33020
33021 type = get_attr_type (insn);
33022 if (type == TYPE_MUL
33023 || type == TYPE_DIV
33024 || type == TYPE_SDIV
33025 || type == TYPE_DDIV
33026 || type == TYPE_SSQRT
33027 || type == TYPE_DSQRT
33028 || type == TYPE_MFCR
33029 || type == TYPE_MFCRF
33030 || type == TYPE_MFJMPR)
33031 {
33032 return true;
33033 }
33034 return false;
33035 }
33036
33037
33038 /* Return how many instructions the machine can issue per cycle. */
33039
33040 static int
33041 rs6000_issue_rate (void)
33042 {
33043 /* Unless scheduling for register pressure, use issue rate of 1 for
33044 first scheduling pass to decrease degradation. */
33045 if (!reload_completed && !flag_sched_pressure)
33046 return 1;
33047
33048 switch (rs6000_cpu_attr) {
33049 case CPU_RS64A:
33050 case CPU_PPC601: /* ? */
33051 case CPU_PPC7450:
33052 return 3;
33053 case CPU_PPC440:
33054 case CPU_PPC603:
33055 case CPU_PPC750:
33056 case CPU_PPC7400:
33057 case CPU_PPC8540:
33058 case CPU_PPC8548:
33059 case CPU_CELL:
33060 case CPU_PPCE300C2:
33061 case CPU_PPCE300C3:
33062 case CPU_PPCE500MC:
33063 case CPU_PPCE500MC64:
33064 case CPU_PPCE5500:
33065 case CPU_PPCE6500:
33066 case CPU_TITAN:
33067 return 2;
33068 case CPU_PPC476:
33069 case CPU_PPC604:
33070 case CPU_PPC604E:
33071 case CPU_PPC620:
33072 case CPU_PPC630:
33073 return 4;
33074 case CPU_POWER4:
33075 case CPU_POWER5:
33076 case CPU_POWER6:
33077 case CPU_POWER7:
33078 return 5;
33079 case CPU_POWER8:
33080 return 7;
33081 case CPU_POWER9:
33082 return 6;
33083 default:
33084 return 1;
33085 }
33086 }
33087
33088 /* Return how many instructions to look ahead for better insn
33089 scheduling. */
33090
33091 static int
33092 rs6000_use_sched_lookahead (void)
33093 {
33094 switch (rs6000_cpu_attr)
33095 {
33096 case CPU_PPC8540:
33097 case CPU_PPC8548:
33098 return 4;
33099
33100 case CPU_CELL:
33101 return (reload_completed ? 8 : 0);
33102
33103 default:
33104 return 0;
33105 }
33106 }
33107
33108 /* We are choosing insn from the ready queue. Return zero if INSN can be
33109 chosen. */
33110 static int
33111 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
33112 {
33113 if (ready_index == 0)
33114 return 0;
33115
33116 if (rs6000_cpu_attr != CPU_CELL)
33117 return 0;
33118
33119 gcc_assert (insn != NULL_RTX && INSN_P (insn));
33120
33121 if (!reload_completed
33122 || is_nonpipeline_insn (insn)
33123 || is_microcoded_insn (insn))
33124 return 1;
33125
33126 return 0;
33127 }
33128
33129 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
33130 and return true. */
33131
33132 static bool
33133 find_mem_ref (rtx pat, rtx *mem_ref)
33134 {
33135 const char * fmt;
33136 int i, j;
33137
33138 /* stack_tie does not produce any real memory traffic. */
33139 if (tie_operand (pat, VOIDmode))
33140 return false;
33141
33142 if (GET_CODE (pat) == MEM)
33143 {
33144 *mem_ref = pat;
33145 return true;
33146 }
33147
33148 /* Recursively process the pattern. */
33149 fmt = GET_RTX_FORMAT (GET_CODE (pat));
33150
33151 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
33152 {
33153 if (fmt[i] == 'e')
33154 {
33155 if (find_mem_ref (XEXP (pat, i), mem_ref))
33156 return true;
33157 }
33158 else if (fmt[i] == 'E')
33159 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
33160 {
33161 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
33162 return true;
33163 }
33164 }
33165
33166 return false;
33167 }
33168
33169 /* Determine if PAT is a PATTERN of a load insn. */
33170
33171 static bool
33172 is_load_insn1 (rtx pat, rtx *load_mem)
33173 {
33174 if (!pat)
33175 return false;
33176
33177 if (GET_CODE (pat) == SET)
33178 return find_mem_ref (SET_SRC (pat), load_mem);
33179
33180 if (GET_CODE (pat) == PARALLEL)
33181 {
33182 int i;
33183
33184 for (i = 0; i < XVECLEN (pat, 0); i++)
33185 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
33186 return true;
33187 }
33188
33189 return false;
33190 }
33191
33192 /* Determine if INSN loads from memory. */
33193
33194 static bool
33195 is_load_insn (rtx insn, rtx *load_mem)
33196 {
33197 if (!insn || !INSN_P (insn))
33198 return false;
33199
33200 if (CALL_P (insn))
33201 return false;
33202
33203 return is_load_insn1 (PATTERN (insn), load_mem);
33204 }
33205
33206 /* Determine if PAT is a PATTERN of a store insn. */
33207
33208 static bool
33209 is_store_insn1 (rtx pat, rtx *str_mem)
33210 {
33211 if (!pat)
33212 return false;
33213
33214 if (GET_CODE (pat) == SET)
33215 return find_mem_ref (SET_DEST (pat), str_mem);
33216
33217 if (GET_CODE (pat) == PARALLEL)
33218 {
33219 int i;
33220
33221 for (i = 0; i < XVECLEN (pat, 0); i++)
33222 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
33223 return true;
33224 }
33225
33226 return false;
33227 }
33228
33229 /* Determine if INSN stores to memory. */
33230
33231 static bool
33232 is_store_insn (rtx insn, rtx *str_mem)
33233 {
33234 if (!insn || !INSN_P (insn))
33235 return false;
33236
33237 return is_store_insn1 (PATTERN (insn), str_mem);
33238 }
33239
33240 /* Return whether TYPE is a Power9 pairable vector instruction type. */
33241
33242 static bool
33243 is_power9_pairable_vec_type (enum attr_type type)
33244 {
33245 switch (type)
33246 {
33247 case TYPE_VECSIMPLE:
33248 case TYPE_VECCOMPLEX:
33249 case TYPE_VECDIV:
33250 case TYPE_VECCMP:
33251 case TYPE_VECPERM:
33252 case TYPE_VECFLOAT:
33253 case TYPE_VECFDIV:
33254 case TYPE_VECDOUBLE:
33255 return true;
33256 default:
33257 break;
33258 }
33259 return false;
33260 }
33261
33262 /* Returns whether the dependence between INSN and NEXT is considered
33263 costly by the given target. */
33264
33265 static bool
33266 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
33267 {
33268 rtx insn;
33269 rtx next;
33270 rtx load_mem, str_mem;
33271
33272 /* If the flag is not enabled - no dependence is considered costly;
33273 allow all dependent insns in the same group.
33274 This is the most aggressive option. */
33275 if (rs6000_sched_costly_dep == no_dep_costly)
33276 return false;
33277
33278 /* If the flag is set to 1 - a dependence is always considered costly;
33279 do not allow dependent instructions in the same group.
33280 This is the most conservative option. */
33281 if (rs6000_sched_costly_dep == all_deps_costly)
33282 return true;
33283
33284 insn = DEP_PRO (dep);
33285 next = DEP_CON (dep);
33286
33287 if (rs6000_sched_costly_dep == store_to_load_dep_costly
33288 && is_load_insn (next, &load_mem)
33289 && is_store_insn (insn, &str_mem))
33290 /* Prevent load after store in the same group. */
33291 return true;
33292
33293 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
33294 && is_load_insn (next, &load_mem)
33295 && is_store_insn (insn, &str_mem)
33296 && DEP_TYPE (dep) == REG_DEP_TRUE
33297 && mem_locations_overlap(str_mem, load_mem))
33298 /* Prevent load after store in the same group if it is a true
33299 dependence. */
33300 return true;
33301
33302 /* The flag is set to X; dependences with latency >= X are considered costly,
33303 and will not be scheduled in the same group. */
33304 if (rs6000_sched_costly_dep <= max_dep_latency
33305 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
33306 return true;
33307
33308 return false;
33309 }
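
/* Editorial note (hedged): the rs6000_sched_costly_dep value tested
   above comes from the -msched-costly-dep= option; each enum case
   corresponds to one option setting, and any plain numeric setting
   falls through to the final latency-threshold comparison. */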
33310
33311 /* Return the next insn after INSN that is found before TAIL is reached,
33312 skipping any "non-active" insns - insns that will not actually occupy
33313 an issue slot. Return NULL_RTX if such an insn is not found. */
33314
33315 static rtx_insn *
33316 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
33317 {
33318 if (insn == NULL_RTX || insn == tail)
33319 return NULL;
33320
33321 while (1)
33322 {
33323 insn = NEXT_INSN (insn);
33324 if (insn == NULL_RTX || insn == tail)
33325 return NULL;
33326
33327 if (CALL_P (insn)
33328 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
33329 || (NONJUMP_INSN_P (insn)
33330 && GET_CODE (PATTERN (insn)) != USE
33331 && GET_CODE (PATTERN (insn)) != CLOBBER
33332 && INSN_CODE (insn) != CODE_FOR_stack_tie))
33333 break;
33334 }
33335 return insn;
33336 }
33337
33338 /* Do Power9 specific sched_reorder2 reordering of ready list. */
33339
33340 static int
33341 power9_sched_reorder2 (rtx_insn **ready, int lastpos)
33342 {
33343 int pos;
33344 int i;
33345 rtx_insn *tmp;
33346 enum attr_type type;
33347
33348 type = get_attr_type (last_scheduled_insn);
33349
33350 /* Try to issue fixed point divides back-to-back in pairs so they will be
33351 routed to separate execution units and execute in parallel. */
33352 if (type == TYPE_DIV && divide_cnt == 0)
33353 {
33354 /* First divide has been scheduled. */
33355 divide_cnt = 1;
33356
33357 /* Scan the ready list looking for another divide, if found move it
33358 to the end of the list so it is chosen next. */
33359 pos = lastpos;
33360 while (pos >= 0)
33361 {
33362 if (recog_memoized (ready[pos]) >= 0
33363 && get_attr_type (ready[pos]) == TYPE_DIV)
33364 {
33365 tmp = ready[pos];
33366 for (i = pos; i < lastpos; i++)
33367 ready[i] = ready[i + 1];
33368 ready[lastpos] = tmp;
33369 break;
33370 }
33371 pos--;
33372 }
33373 }
33374 else
33375 {
33376 /* Last insn was the 2nd divide or not a divide, reset the counter. */
33377 divide_cnt = 0;
33378
33379 /* Power9 can execute 2 vector operations and 2 vector loads in a single
33380 cycle. So try to pair up and alternate groups of vector and vector
33381 load instructions.
33382
33383 To aid this formation, a counter is maintained to keep track of
33384 vec/vecload insns issued. The value of vec_load_pendulum maintains
33385 the current state with the following values:
33386
33387 0 : Initial state, no vec/vecload group has been started.
33388
33389 -1 : 1 vector load has been issued and another has been found on
33390 the ready list and moved to the end.
33391
33392 -2 : 2 vector loads have been issued and a vector operation has
33393 been found and moved to the end of the ready list.
33394
33395 -3 : 2 vector loads and a vector insn have been issued and a
33396 vector operation has been found and moved to the end of the
33397 ready list.
33398
33399 1 : 1 vector insn has been issued and another has been found and
33400 moved to the end of the ready list.
33401
33402 2 : 2 vector insns have been issued and a vector load has been
33403 found and moved to the end of the ready list.
33404
33405 3 : 2 vector insns and a vector load have been issued and another
33406 vector load has been found and moved to the end of the ready
33407 list. */
33408 if (type == TYPE_VECLOAD)
33409 {
33410 /* Issued a vecload. */
33411 if (vec_load_pendulum == 0)
33412 {
33413 /* We issued a single vecload, look for another and move it to
33414 the end of the ready list so it will be scheduled next.
33415 Set pendulum if found. */
33416 pos = lastpos;
33417 while (pos >= 0)
33418 {
33419 if (recog_memoized (ready[pos]) >= 0
33420 && get_attr_type (ready[pos]) == TYPE_VECLOAD)
33421 {
33422 tmp = ready[pos];
33423 for (i = pos; i < lastpos; i++)
33424 ready[i] = ready[i + 1];
33425 ready[lastpos] = tmp;
33426 vec_load_pendulum = -1;
33427 return cached_can_issue_more;
33428 }
33429 pos--;
33430 }
33431 }
33432 else if (vec_load_pendulum == -1)
33433 {
33434 /* This is the second vecload we've issued, search the ready
33435 list for a vector operation so we can try to schedule a
33436 pair of those next. If found move to the end of the ready
33437 list so it is scheduled next and set the pendulum. */
33438 pos = lastpos;
33439 while (pos >= 0)
33440 {
33441 if (recog_memoized (ready[pos]) >= 0
33442 && is_power9_pairable_vec_type (
33443 get_attr_type (ready[pos])))
33444 {
33445 tmp = ready[pos];
33446 for (i = pos; i < lastpos; i++)
33447 ready[i] = ready[i + 1];
33448 ready[lastpos] = tmp;
33449 vec_load_pendulum = -2;
33450 return cached_can_issue_more;
33451 }
33452 pos--;
33453 }
33454 }
33455 else if (vec_load_pendulum == 2)
33456 {
33457 /* Two vector ops have been issued and we've just issued a
33458 vecload, look for another vecload and move to end of ready
33459 list if found. */
33460 pos = lastpos;
33461 while (pos >= 0)
33462 {
33463 if (recog_memoized (ready[pos]) >= 0
33464 && get_attr_type (ready[pos]) == TYPE_VECLOAD)
33465 {
33466 tmp = ready[pos];
33467 for (i = pos; i < lastpos; i++)
33468 ready[i] = ready[i + 1];
33469 ready[lastpos] = tmp;
33470 /* Set pendulum so that next vecload will be seen as
33471 finishing a group, not start of one. */
33472 vec_load_pendulum = 3;
33473 return cached_can_issue_more;
33474 }
33475 pos--;
33476 }
33477 }
33478 }
33479 else if (is_power9_pairable_vec_type (type))
33480 {
33481 /* Issued a vector operation. */
33482 if (vec_load_pendulum == 0)
33483 /* We issued a single vec op, look for another and move it
33484 to the end of the ready list so it will be scheduled next.
33485 Set pendulum if found. */
33486 {
33487 pos = lastpos;
33488 while (pos >= 0)
33489 {
33490 if (recog_memoized (ready[pos]) >= 0
33491 && is_power9_pairable_vec_type (
33492 get_attr_type (ready[pos])))
33493 {
33494 tmp = ready[pos];
33495 for (i = pos; i < lastpos; i++)
33496 ready[i] = ready[i + 1];
33497 ready[lastpos] = tmp;
33498 vec_load_pendulum = 1;
33499 return cached_can_issue_more;
33500 }
33501 pos--;
33502 }
33503 }
33504 else if (vec_load_pendulum == 1)
33505 {
33506 /* This is the second vec op we've issued, search the ready
33507 list for a vecload operation so we can try to schedule a
33508 pair of those next. If found move to the end of the ready
33509 list so it is scheduled next and set the pendulum. */
33510 pos = lastpos;
33511 while (pos >= 0)
33512 {
33513 if (recog_memoized (ready[pos]) >= 0
33514 && get_attr_type (ready[pos]) == TYPE_VECLOAD)
33515 {
33516 tmp = ready[pos];
33517 for (i = pos; i < lastpos; i++)
33518 ready[i] = ready[i + 1];
33519 ready[lastpos] = tmp;
33520 vec_load_pendulum = 2;
33521 return cached_can_issue_more;
33522 }
33523 pos--;
33524 }
33525 }
33526 else if (vec_load_pendulum == -2)
33527 {
33528 /* Two vecload ops have been issued and we've just issued a
33529 vec op, look for another vec op and move to end of ready
33530 list if found. */
33531 pos = lastpos;
33532 while (pos >= 0)
33533 {
33534 if (recog_memoized (ready[pos]) >= 0
33535 && is_power9_pairable_vec_type (
33536 get_attr_type (ready[pos])))
33537 {
33538 tmp = ready[pos];
33539 for (i = pos; i < lastpos; i++)
33540 ready[i] = ready[i + 1];
33541 ready[lastpos] = tmp;
33542 /* Set pendulum so that next vec op will be seen as
33543 finishing a group, not start of one. */
33544 vec_load_pendulum = -3;
33545 return cached_can_issue_more;
33546 }
33547 pos--;
33548 }
33549 }
33550 }
33551
33552 /* We've either finished a vec/vecload group, couldn't find an insn to
33553 continue the current group, or the last insn had nothing to do
33554 with a group. In any case, reset the pendulum. */
33555 vec_load_pendulum = 0;
33556 }
33557
33558 return cached_can_issue_more;
33559 }
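
/* Editorial sketch (not part of GCC): each search in the function
   above repeats the same rotate-to-end idiom. The scheduler keeps the
   next insn to issue at the END of the ready array, so moving an insn
   to index LASTPOS schedules it next. Factored out, kept disabled: */
#if 0
static void
move_to_end_of_ready (rtx_insn **ready, int pos, int lastpos)
{
  rtx_insn *tmp = ready[pos];
  for (int i = pos; i < lastpos; i++)
    ready[i] = ready[i + 1];
  ready[lastpos] = tmp;
}
#endif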
33560
33561 /* We are about to begin issuing insns for this clock cycle. */
33562
33563 static int
33564 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
33565 rtx_insn **ready ATTRIBUTE_UNUSED,
33566 int *pn_ready ATTRIBUTE_UNUSED,
33567 int clock_var ATTRIBUTE_UNUSED)
33568 {
33569 int n_ready = *pn_ready;
33570
33571 if (sched_verbose)
33572 fprintf (dump, "// rs6000_sched_reorder :\n");
33573
33574 /* Reorder the ready list if the next insn to be issued (the last
33575 entry in the ready array) is a nonpipelined insn. */
33576 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
33577 {
33578 if (is_nonpipeline_insn (ready[n_ready - 1])
33579 && (recog_memoized (ready[n_ready - 2]) > 0))
33580 /* Simply swap the two insns at the head of the queue. */
33581 std::swap (ready[n_ready - 1], ready[n_ready - 2]);
33582 }
33583
33584 if (rs6000_cpu == PROCESSOR_POWER6)
33585 load_store_pendulum = 0;
33586
33587 return rs6000_issue_rate ();
33588 }
33589
33590 /* Like rs6000_sched_reorder, but called after issuing each insn. */
33591
33592 static int
33593 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
33594 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
33595 {
33596 if (sched_verbose)
33597 fprintf (dump, "// rs6000_sched_reorder2 :\n");
33598
33599 /* For Power6, we need to handle some special cases to try to keep the
33600 store queue from overflowing and triggering expensive flushes.
33601
33602 This code monitors how load and store instructions are being issued
33603 and skews the ready list one way or the other to increase the likelihood
33604 that a desired instruction is issued at the proper time.
33605
33606 A couple of things are done. First, we maintain a "load_store_pendulum"
33607 to track the current state of load/store issue.
33608
33609 - If the pendulum is at zero, then no loads or stores have been
33610 issued in the current cycle so we do nothing.
33611
33612 - If the pendulum is 1, then a single load has been issued in this
33613 cycle and we attempt to locate another load in the ready list to
33614 issue with it.
33615
33616 - If the pendulum is -2, then two stores have already been
33617 issued in this cycle, so we increase the priority of the first load
33618 in the ready list to increase its likelihood of being chosen first
33619 in the next cycle.
33620
33621 - If the pendulum is -1, then a single store has been issued in this
33622 cycle and we attempt to locate another store in the ready list to
33623 issue with it, preferring a store to an adjacent memory location to
33624 facilitate store pairing in the store queue.
33625
33626 - If the pendulum is 2, then two loads have already been
33627 issued in this cycle, so we increase the priority of the first store
33628 in the ready list to increase its likelihood of being chosen first
33629 in the next cycle.
33630
33631 - If the pendulum < -2 or > 2, then do nothing.
33632
33633 Note: This code covers the most common scenarios. There exist non
33634 load/store instructions which make use of the LSU and which
33635 would need to be accounted for to strictly model the behavior
33636 of the machine. Those instructions are currently unaccounted
33637 for to help minimize compile time overhead of this code.
33638 */
33639 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
33640 {
33641 int pos;
33642 int i;
33643 rtx_insn *tmp;
33644 rtx load_mem, str_mem;
33645
33646 if (is_store_insn (last_scheduled_insn, &str_mem))
33647 /* Issuing a store, swing the load_store_pendulum to the left */
33648 load_store_pendulum--;
33649 else if (is_load_insn (last_scheduled_insn, &load_mem))
33650 /* Issuing a load, swing the load_store_pendulum to the right */
33651 load_store_pendulum++;
33652 else
33653 return cached_can_issue_more;
33654
33655 /* If the pendulum is balanced, or there is only one instruction on
33656 the ready list, then all is well, so return. */
33657 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
33658 return cached_can_issue_more;
33659
33660 if (load_store_pendulum == 1)
33661 {
33662 /* A load has been issued in this cycle. Scan the ready list
33663 for another load to issue with it */
33664 pos = *pn_ready - 1;
33665
33666 while (pos >= 0)
33667 {
33668 if (is_load_insn (ready[pos], &load_mem))
33669 {
33670 /* Found a load. Move it to the head of the ready list,
33671 and adjust its priority so that it is more likely to
33672 stay there */
33673 tmp = ready[pos];
33674 for (i = pos; i < *pn_ready - 1; i++)
33675 ready[i] = ready[i + 1];
33676 ready[*pn_ready-1] = tmp;
33677
33678 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
33679 INSN_PRIORITY (tmp)++;
33680 break;
33681 }
33682 pos--;
33683 }
33684 }
33685 else if (load_store_pendulum == -2)
33686 {
33687 /* Two stores have been issued in this cycle. Increase the
33688 priority of the first load in the ready list to favor it for
33689 issuing in the next cycle. */
33690 pos = *pn_ready - 1;
33691
33692 while (pos >= 0)
33693 {
33694 if (is_load_insn (ready[pos], &load_mem)
33695 && !sel_sched_p ()
33696 && INSN_PRIORITY_KNOWN (ready[pos]))
33697 {
33698 INSN_PRIORITY (ready[pos])++;
33699
33700 /* Adjust the pendulum to account for the fact that a load
33701 was found and increased in priority. This is to prevent
33702 increasing the priority of multiple loads */
33703 load_store_pendulum--;
33704
33705 break;
33706 }
33707 pos--;
33708 }
33709 }
33710 else if (load_store_pendulum == -1)
33711 {
33712 /* A store has been issued in this cycle. Scan the ready list for
33713 another store to issue with it, preferring a store to an adjacent
33714 memory location */
33715 int first_store_pos = -1;
33716
33717 pos = *pn_ready - 1;
33718
33719 while (pos >= 0)
33720 {
33721 if (is_store_insn (ready[pos], &str_mem))
33722 {
33723 rtx str_mem2;
33724 /* Maintain the index of the first store found on the
33725 list */
33726 if (first_store_pos == -1)
33727 first_store_pos = pos;
33728
33729 if (is_store_insn (last_scheduled_insn, &str_mem2)
33730 && adjacent_mem_locations (str_mem, str_mem2))
33731 {
33732 /* Found an adjacent store. Move it to the head of the
33733 ready list, and adjust its priority so that it is
33734 more likely to stay there */
33735 tmp = ready[pos];
33736 for (i = pos; i < *pn_ready - 1; i++)
33737 ready[i] = ready[i + 1];
33738 ready[*pn_ready-1] = tmp;
33739
33740 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
33741 INSN_PRIORITY (tmp)++;
33742
33743 first_store_pos = -1;
33744
33745 break;
33746 }
33747 }
33748 pos--;
33749 }
33750
33751 if (first_store_pos >= 0)
33752 {
33753 /* An adjacent store wasn't found, but a non-adjacent store was,
33754 so move the non-adjacent store to the front of the ready
33755 list, and adjust its priority so that it is more likely to
33756 stay there. */
33757 tmp = ready[first_store_pos];
33758 for (i = first_store_pos; i < *pn_ready - 1; i++)
33759 ready[i] = ready[i + 1];
33760 ready[*pn_ready-1] = tmp;
33761 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
33762 INSN_PRIORITY (tmp)++;
33763 }
33764 }
33765 else if (load_store_pendulum == 2)
33766 {
33767 /* Two loads have been issued in this cycle. Increase the priority
33768 of the first store in the ready list to favor it for issuing in
33769 the next cycle. */
33770 pos = *pn_ready - 1;
33771
33772 while (pos >= 0)
33773 {
33774 if (is_store_insn (ready[pos], &str_mem)
33775 && !sel_sched_p ()
33776 && INSN_PRIORITY_KNOWN (ready[pos]))
33777 {
33778 INSN_PRIORITY (ready[pos])++;
33779
33780 /* Adjust the pendulum to account for the fact that a store
33781 was found and increased in priority. This is to prevent
33782 increasing the priority of multiple stores */
33783 load_store_pendulum++;
33784
33785 break;
33786 }
33787 pos--;
33788 }
33789 }
33790 }
33791
33792 /* Do Power9 dependent reordering if necessary. */
33793 if (rs6000_cpu == PROCESSOR_POWER9 && last_scheduled_insn
33794 && recog_memoized (last_scheduled_insn) >= 0)
33795 return power9_sched_reorder2 (ready, *pn_ready - 1);
33796
33797 return cached_can_issue_more;
33798 }
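
/* Editorial summary of the Power6 pendulum handled above: positive
   values count loads issued in the current cycle, negative values
   count stores. 0 means nothing issued yet; +1/-1 mean one load/store
   issued, so try to pair a second of the same kind (for stores,
   preferably one adjacent in memory); +2/-2 mean a full pair issued,
   so bump the priority of the first insn of the opposite kind to make
   the LSU alternate next cycle. */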
33799
33800 /* Return whether the presence of INSN causes a dispatch group termination
33801 of group WHICH_GROUP.
33802
33803 If WHICH_GROUP == current_group, this function will return true if INSN
33804 causes the termination of the current group (i.e, the dispatch group to
33805 which INSN belongs). This means that INSN will be the last insn in the
33806 group it belongs to.
33807
33808 If WHICH_GROUP == previous_group, this function will return true if INSN
33809 causes the termination of the previous group (i.e, the dispatch group that
33810 precedes the group to which INSN belongs). This means that INSN will be
33811 the first insn in the group it belongs to). */
33812
33813 static bool
33814 insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
33815 {
33816 bool first, last;
33817
33818 if (! insn)
33819 return false;
33820
33821 first = insn_must_be_first_in_group (insn);
33822 last = insn_must_be_last_in_group (insn);
33823
33824 if (first && last)
33825 return true;
33826
33827 if (which_group == current_group)
33828 return last;
33829 else if (which_group == previous_group)
33830 return first;
33831
33832 return false;
33833 }
33834
33835
33836 static bool
33837 insn_must_be_first_in_group (rtx_insn *insn)
33838 {
33839 enum attr_type type;
33840
33841 if (!insn
33842 || NOTE_P (insn)
33843 || DEBUG_INSN_P (insn)
33844 || GET_CODE (PATTERN (insn)) == USE
33845 || GET_CODE (PATTERN (insn)) == CLOBBER)
33846 return false;
33847
33848 switch (rs6000_cpu)
33849 {
33850 case PROCESSOR_POWER5:
33851 if (is_cracked_insn (insn))
33852 return true;
33853 /* FALLTHRU */
33854 case PROCESSOR_POWER4:
33855 if (is_microcoded_insn (insn))
33856 return true;
33857
33858 if (!rs6000_sched_groups)
33859 return false;
33860
33861 type = get_attr_type (insn);
33862
33863 switch (type)
33864 {
33865 case TYPE_MFCR:
33866 case TYPE_MFCRF:
33867 case TYPE_MTCR:
33868 case TYPE_DELAYED_CR:
33869 case TYPE_CR_LOGICAL:
33870 case TYPE_MTJMPR:
33871 case TYPE_MFJMPR:
33872 case TYPE_DIV:
33873 case TYPE_LOAD_L:
33874 case TYPE_STORE_C:
33875 case TYPE_ISYNC:
33876 case TYPE_SYNC:
33877 return true;
33878 default:
33879 break;
33880 }
33881 break;
33882 case PROCESSOR_POWER6:
33883 type = get_attr_type (insn);
33884
33885 switch (type)
33886 {
33887 case TYPE_EXTS:
33888 case TYPE_CNTLZ:
33889 case TYPE_TRAP:
33890 case TYPE_MUL:
33891 case TYPE_INSERT:
33892 case TYPE_FPCOMPARE:
33893 case TYPE_MFCR:
33894 case TYPE_MTCR:
33895 case TYPE_MFJMPR:
33896 case TYPE_MTJMPR:
33897 case TYPE_ISYNC:
33898 case TYPE_SYNC:
33899 case TYPE_LOAD_L:
33900 case TYPE_STORE_C:
33901 return true;
33902 case TYPE_SHIFT:
33903 if (get_attr_dot (insn) == DOT_NO
33904 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
33905 return true;
33906 else
33907 break;
33908 case TYPE_DIV:
33909 if (get_attr_size (insn) == SIZE_32)
33910 return true;
33911 else
33912 break;
33913 case TYPE_LOAD:
33914 case TYPE_STORE:
33915 case TYPE_FPLOAD:
33916 case TYPE_FPSTORE:
33917 if (get_attr_update (insn) == UPDATE_YES)
33918 return true;
33919 else
33920 break;
33921 default:
33922 break;
33923 }
33924 break;
33925 case PROCESSOR_POWER7:
33926 type = get_attr_type (insn);
33927
33928 switch (type)
33929 {
33930 case TYPE_CR_LOGICAL:
33931 case TYPE_MFCR:
33932 case TYPE_MFCRF:
33933 case TYPE_MTCR:
33934 case TYPE_DIV:
33935 case TYPE_ISYNC:
33936 case TYPE_LOAD_L:
33937 case TYPE_STORE_C:
33938 case TYPE_MFJMPR:
33939 case TYPE_MTJMPR:
33940 return true;
33941 case TYPE_MUL:
33942 case TYPE_SHIFT:
33943 case TYPE_EXTS:
33944 if (get_attr_dot (insn) == DOT_YES)
33945 return true;
33946 else
33947 break;
33948 case TYPE_LOAD:
33949 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
33950 || get_attr_update (insn) == UPDATE_YES)
33951 return true;
33952 else
33953 break;
33954 case TYPE_STORE:
33955 case TYPE_FPLOAD:
33956 case TYPE_FPSTORE:
33957 if (get_attr_update (insn) == UPDATE_YES)
33958 return true;
33959 else
33960 break;
33961 default:
33962 break;
33963 }
33964 break;
33965 case PROCESSOR_POWER8:
33966 type = get_attr_type (insn);
33967
33968 switch (type)
33969 {
33970 case TYPE_CR_LOGICAL:
33971 case TYPE_DELAYED_CR:
33972 case TYPE_MFCR:
33973 case TYPE_MFCRF:
33974 case TYPE_MTCR:
33975 case TYPE_SYNC:
33976 case TYPE_ISYNC:
33977 case TYPE_LOAD_L:
33978 case TYPE_STORE_C:
33979 case TYPE_VECSTORE:
33980 case TYPE_MFJMPR:
33981 case TYPE_MTJMPR:
33982 return true;
33983 case TYPE_SHIFT:
33984 case TYPE_EXTS:
33985 case TYPE_MUL:
33986 if (get_attr_dot (insn) == DOT_YES)
33987 return true;
33988 else
33989 break;
33990 case TYPE_LOAD:
33991 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
33992 || get_attr_update (insn) == UPDATE_YES)
33993 return true;
33994 else
33995 break;
33996 case TYPE_STORE:
33997 if (get_attr_update (insn) == UPDATE_YES
33998 && get_attr_indexed (insn) == INDEXED_YES)
33999 return true;
34000 else
34001 break;
34002 default:
34003 break;
34004 }
34005 break;
34006 default:
34007 break;
34008 }
34009
34010 return false;
34011 }
34012
34013 static bool
34014 insn_must_be_last_in_group (rtx_insn *insn)
34015 {
34016 enum attr_type type;
34017
34018 if (!insn
34019 || NOTE_P (insn)
34020 || DEBUG_INSN_P (insn)
34021 || GET_CODE (PATTERN (insn)) == USE
34022 || GET_CODE (PATTERN (insn)) == CLOBBER)
34023 return false;
34024
34025 switch (rs6000_cpu) {
34026 case PROCESSOR_POWER4:
34027 case PROCESSOR_POWER5:
34028 if (is_microcoded_insn (insn))
34029 return true;
34030
34031 if (is_branch_slot_insn (insn))
34032 return true;
34033
34034 break;
34035 case PROCESSOR_POWER6:
34036 type = get_attr_type (insn);
34037
34038 switch (type)
34039 {
34040 case TYPE_EXTS:
34041 case TYPE_CNTLZ:
34042 case TYPE_TRAP:
34043 case TYPE_MUL:
34044 case TYPE_FPCOMPARE:
34045 case TYPE_MFCR:
34046 case TYPE_MTCR:
34047 case TYPE_MFJMPR:
34048 case TYPE_MTJMPR:
34049 case TYPE_ISYNC:
34050 case TYPE_SYNC:
34051 case TYPE_LOAD_L:
34052 case TYPE_STORE_C:
34053 return true;
34054 case TYPE_SHIFT:
34055 if (get_attr_dot (insn) == DOT_NO
34056 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
34057 return true;
34058 else
34059 break;
34060 case TYPE_DIV:
34061 if (get_attr_size (insn) == SIZE_32)
34062 return true;
34063 else
34064 break;
34065 default:
34066 break;
34067 }
34068 break;
34069 case PROCESSOR_POWER7:
34070 type = get_attr_type (insn);
34071
34072 switch (type)
34073 {
34074 case TYPE_ISYNC:
34075 case TYPE_SYNC:
34076 case TYPE_LOAD_L:
34077 case TYPE_STORE_C:
34078 return true;
34079 case TYPE_LOAD:
34080 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
34081 && get_attr_update (insn) == UPDATE_YES)
34082 return true;
34083 else
34084 break;
34085 case TYPE_STORE:
34086 if (get_attr_update (insn) == UPDATE_YES
34087 && get_attr_indexed (insn) == INDEXED_YES)
34088 return true;
34089 else
34090 break;
34091 default:
34092 break;
34093 }
34094 break;
34095 case PROCESSOR_POWER8:
34096 type = get_attr_type (insn);
34097
34098 switch (type)
34099 {
34100 case TYPE_MFCR:
34101 case TYPE_MTCR:
34102 case TYPE_ISYNC:
34103 case TYPE_SYNC:
34104 case TYPE_LOAD_L:
34105 case TYPE_STORE_C:
34106 return true;
34107 case TYPE_LOAD:
34108 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
34109 && get_attr_update (insn) == UPDATE_YES)
34110 return true;
34111 else
34112 break;
34113 case TYPE_STORE:
34114 if (get_attr_update (insn) == UPDATE_YES
34115 && get_attr_indexed (insn) == INDEXED_YES)
34116 return true;
34117 else
34118 break;
34119 default:
34120 break;
34121 }
34122 break;
34123 default:
34124 break;
34125 }
34126
34127 return false;
34128 }
34129
34130 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
34131 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
34132
34133 static bool
34134 is_costly_group (rtx *group_insns, rtx next_insn)
34135 {
34136 int i;
34137 int issue_rate = rs6000_issue_rate ();
34138
34139 for (i = 0; i < issue_rate; i++)
34140 {
34141 sd_iterator_def sd_it;
34142 dep_t dep;
34143 rtx insn = group_insns[i];
34144
34145 if (!insn)
34146 continue;
34147
34148 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
34149 {
34150 rtx next = DEP_CON (dep);
34151
34152 if (next == next_insn
34153 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
34154 return true;
34155 }
34156 }
34157
34158 return false;
34159 }
34160
34161 /* Utility function used by redefine_groups.
34162 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
34163 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
34164 to keep it "far" (in a separate group) from GROUP_INSNS, following
34165 one of the following schemes, depending on the value of the flag
34166 -minsert-sched-nops = X:
34167 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
34168 in order to force NEXT_INSN into a separate group.
34169 (2) X < sched_finish_regroup_exact: insert exactly X nops.
34170 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
34171 insertion (has a group just ended, how many vacant issue slots remain in the
34172 last group, and how many dispatch groups were encountered so far). */
34173
34174 static int
34175 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
34176 rtx_insn *next_insn, bool *group_end, int can_issue_more,
34177 int *group_count)
34178 {
34179 rtx nop;
34180 bool force;
34181 int issue_rate = rs6000_issue_rate ();
34182 bool end = *group_end;
34183 int i;
34184
34185 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
34186 return can_issue_more;
34187
34188 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
34189 return can_issue_more;
34190
34191 force = is_costly_group (group_insns, next_insn);
34192 if (!force)
34193 return can_issue_more;
34194
34195 if (sched_verbose > 6)
34196 fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
34197 *group_count, can_issue_more);
34198
34199 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
34200 {
34201 if (*group_end)
34202 can_issue_more = 0;
34203
34204 /* Since only a branch can be issued in the last issue_slot, it is
34205 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
34206 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
34207 in this case the last nop will start a new group and the branch
34208 will be forced to the new group. */
34209 if (can_issue_more && !is_branch_slot_insn (next_insn))
34210 can_issue_more--;
34211
34212 /* Do we have a special group ending nop? */
34213 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
34214 || rs6000_cpu_attr == CPU_POWER8)
34215 {
34216 nop = gen_group_ending_nop ();
34217 emit_insn_before (nop, next_insn);
34218 can_issue_more = 0;
34219 }
34220 else
34221 while (can_issue_more > 0)
34222 {
34223 nop = gen_nop ();
34224 emit_insn_before (nop, next_insn);
34225 can_issue_more--;
34226 }
34227
34228 *group_end = true;
34229 return 0;
34230 }
34231
34232 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
34233 {
34234 int n_nops = rs6000_sched_insert_nops;
34235
34236 /* Nops can't be issued from the branch slot, so the effective
34237 issue_rate for nops is 'issue_rate - 1'. */
34238 if (can_issue_more == 0)
34239 can_issue_more = issue_rate;
34240 can_issue_more--;
34241 if (can_issue_more == 0)
34242 {
34243 can_issue_more = issue_rate - 1;
34244 (*group_count)++;
34245 end = true;
34246 for (i = 0; i < issue_rate; i++)
34247 {
34248 group_insns[i] = 0;
34249 }
34250 }
34251
34252 while (n_nops > 0)
34253 {
34254 nop = gen_nop ();
34255 emit_insn_before (nop, next_insn);
34256 if (can_issue_more == issue_rate - 1) /* new group begins */
34257 end = false;
34258 can_issue_more--;
34259 if (can_issue_more == 0)
34260 {
34261 can_issue_more = issue_rate - 1;
34262 (*group_count)++;
34263 end = true;
34264 for (i = 0; i < issue_rate; i++)
34265 {
34266 group_insns[i] = 0;
34267 }
34268 }
34269 n_nops--;
34270 }
34271
34272 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
34273 can_issue_more++;
34274
34275 /* Is next_insn going to start a new group? */
34276 *group_end
34277 = (end
34278 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
34279 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
34280 || (can_issue_more < issue_rate &&
34281 insn_terminates_group_p (next_insn, previous_group)));
34282 if (*group_end && end)
34283 (*group_count)--;
34284
34285 if (sched_verbose > 6)
34286 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
34287 *group_count, can_issue_more);
34288 return can_issue_more;
34289 }
34290
34291 return can_issue_more;
34292 }
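
/* Editorial worked example (hedged): with issue_rate == 5 and
   -minsert-sched-nops=2, entering the second scheme above with
   can_issue_more == 3 first drops to 2 (the branch slot cannot hold a
   nop); the two emitted nops then exhaust the current group, and after
   the final scale-back NEXT_INSN begins a fresh group. */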
34293
34294 /* This function tries to synch the dispatch groups that the compiler "sees"
34295 with the dispatch groups that the processor dispatcher is expected to
34296 form in practice. It tries to achieve this synchronization by forcing the
34297 estimated processor grouping on the compiler (as opposed to the function
34298 'pad_groups' which tries to force the scheduler's grouping on the processor).
34299
34300 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
34301 examines the (estimated) dispatch groups that will be formed by the processor
34302 dispatcher. It marks these group boundaries to reflect the estimated
34303 processor grouping, overriding the grouping that the scheduler had marked.
34304 Depending on the value of the flag '-minsert-sched-nops' this function can
34305 force certain insns into separate groups or force a certain distance between
34306 them by inserting nops, for example, if there exists a "costly dependence"
34307 between the insns.
34308
34309 The function estimates the group boundaries that the processor will form as
34310 follows: It keeps track of how many vacant issue slots are available after
34311 each insn. A subsequent insn will start a new group if one of the following
34312 4 cases applies:
34313 - no more vacant issue slots remain in the current dispatch group.
34314 - only the last issue slot, which is the branch slot, is vacant, but the next
34315 insn is not a branch.
34316 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
34317 which means that a cracked insn (which occupies two issue slots) can't be
34318 issued in this group.
34319 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
34320 start a new group. */
34321
34322 static int
34323 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
34324 rtx_insn *tail)
34325 {
34326 rtx_insn *insn, *next_insn;
34327 int issue_rate;
34328 int can_issue_more;
34329 int slot, i;
34330 bool group_end;
34331 int group_count = 0;
34332 rtx *group_insns;
34333
34334 /* Initialize. */
34335 issue_rate = rs6000_issue_rate ();
34336 group_insns = XALLOCAVEC (rtx, issue_rate);
34337 for (i = 0; i < issue_rate; i++)
34338 {
34339 group_insns[i] = 0;
34340 }
34341 can_issue_more = issue_rate;
34342 slot = 0;
34343 insn = get_next_active_insn (prev_head_insn, tail);
34344 group_end = false;
34345
34346 while (insn != NULL_RTX)
34347 {
34348 slot = (issue_rate - can_issue_more);
34349 group_insns[slot] = insn;
34350 can_issue_more =
34351 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
34352 if (insn_terminates_group_p (insn, current_group))
34353 can_issue_more = 0;
34354
34355 next_insn = get_next_active_insn (insn, tail);
34356 if (next_insn == NULL_RTX)
34357 return group_count + 1;
34358
34359 /* Is next_insn going to start a new group? */
34360 group_end
34361 = (can_issue_more == 0
34362 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
34363 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
34364 || (can_issue_more < issue_rate &&
34365 insn_terminates_group_p (next_insn, previous_group)));
34366
34367 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
34368 next_insn, &group_end, can_issue_more,
34369 &group_count);
34370
34371 if (group_end)
34372 {
34373 group_count++;
34374 can_issue_more = 0;
34375 for (i = 0; i < issue_rate; i++)
34376 {
34377 group_insns[i] = 0;
34378 }
34379 }
34380
34381 if (GET_MODE (next_insn) == TImode && can_issue_more)
34382 PUT_MODE (next_insn, VOIDmode);
34383 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
34384 PUT_MODE (next_insn, TImode);
34385
34386 insn = next_insn;
34387 if (can_issue_more == 0)
34388 can_issue_more = issue_rate;
34389 } /* while */
34390
34391 return group_count;
34392 }
34393
34394 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
34395 dispatch group boundaries that the scheduler had marked. Pad with nops
34396 any dispatch groups which have vacant issue slots, in order to force the
34397 scheduler's grouping on the processor dispatcher. The function
34398 returns the number of dispatch groups found. */
34399
34400 static int
34401 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
34402 rtx_insn *tail)
34403 {
34404 rtx_insn *insn, *next_insn;
34405 rtx nop;
34406 int issue_rate;
34407 int can_issue_more;
34408 int group_end;
34409 int group_count = 0;
34410
34411 /* Initialize issue_rate. */
34412 issue_rate = rs6000_issue_rate ();
34413 can_issue_more = issue_rate;
34414
34415 insn = get_next_active_insn (prev_head_insn, tail);
34416 next_insn = get_next_active_insn (insn, tail);
34417
34418 while (insn != NULL_RTX)
34419 {
34420 can_issue_more =
34421 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
34422
34423 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
34424
34425 if (next_insn == NULL_RTX)
34426 break;
34427
34428 if (group_end)
34429 {
34430 /* If the scheduler had marked group termination at this location
34431 (between insn and next_insn), and neither insn nor next_insn will
34432 force group termination, pad the group with nops to force group
34433 termination. */
34434 if (can_issue_more
34435 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
34436 && !insn_terminates_group_p (insn, current_group)
34437 && !insn_terminates_group_p (next_insn, previous_group))
34438 {
34439 if (!is_branch_slot_insn (next_insn))
34440 can_issue_more--;
34441
34442 while (can_issue_more)
34443 {
34444 nop = gen_nop ();
34445 emit_insn_before (nop, next_insn);
34446 can_issue_more--;
34447 }
34448 }
34449
34450 can_issue_more = issue_rate;
34451 group_count++;
34452 }
34453
34454 insn = next_insn;
34455 next_insn = get_next_active_insn (insn, tail);
34456 }
34457
34458 return group_count;
34459 }
34460
34461 /* We're beginning a new block. Initialize data structures as necessary. */
34462
34463 static void
34464 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
34465 int sched_verbose ATTRIBUTE_UNUSED,
34466 int max_ready ATTRIBUTE_UNUSED)
34467 {
34468 last_scheduled_insn = NULL;
34469 load_store_pendulum = 0;
34470 divide_cnt = 0;
34471 vec_load_pendulum = 0;
34472 }
34473
34474 /* The following function is called at the end of scheduling BB.
34475 After reload, it inserts nops to enforce insn group bundling. */
34476
34477 static void
34478 rs6000_sched_finish (FILE *dump, int sched_verbose)
34479 {
34480 int n_groups;
34481
34482 if (sched_verbose)
34483 fprintf (dump, "=== Finishing schedule.\n");
34484
34485 if (reload_completed && rs6000_sched_groups)
34486 {
34487 /* Do not run sched_finish hook when selective scheduling enabled. */
34488 if (sel_sched_p ())
34489 return;
34490
34491 if (rs6000_sched_insert_nops == sched_finish_none)
34492 return;
34493
34494 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
34495 n_groups = pad_groups (dump, sched_verbose,
34496 current_sched_info->prev_head,
34497 current_sched_info->next_tail);
34498 else
34499 n_groups = redefine_groups (dump, sched_verbose,
34500 current_sched_info->prev_head,
34501 current_sched_info->next_tail);
34502
34503 if (sched_verbose >= 6)
34504 {
34505 fprintf (dump, "ngroups = %d\n", n_groups);
34506 print_rtl (dump, current_sched_info->prev_head);
34507 fprintf (dump, "Done finish_sched\n");
34508 }
34509 }
34510 }
34511
34512 struct rs6000_sched_context
34513 {
34514 short cached_can_issue_more;
34515 rtx_insn *last_scheduled_insn;
34516 int load_store_pendulum;
34517 int divide_cnt;
34518 int vec_load_pendulum;
34519 };
34520
34521 typedef struct rs6000_sched_context rs6000_sched_context_def;
34522 typedef rs6000_sched_context_def *rs6000_sched_context_t;
34523
34524 /* Allocate store for new scheduling context. */
34525 static void *
34526 rs6000_alloc_sched_context (void)
34527 {
34528 return xmalloc (sizeof (rs6000_sched_context_def));
34529 }
34530
34531 /* If CLEAN_P is true, initialize _SC with clean data;
34532 otherwise, initialize it from the global context. */
34533 static void
34534 rs6000_init_sched_context (void *_sc, bool clean_p)
34535 {
34536 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
34537
34538 if (clean_p)
34539 {
34540 sc->cached_can_issue_more = 0;
34541 sc->last_scheduled_insn = NULL;
34542 sc->load_store_pendulum = 0;
34543 sc->divide_cnt = 0;
34544 sc->vec_load_pendulum = 0;
34545 }
34546 else
34547 {
34548 sc->cached_can_issue_more = cached_can_issue_more;
34549 sc->last_scheduled_insn = last_scheduled_insn;
34550 sc->load_store_pendulum = load_store_pendulum;
34551 sc->divide_cnt = divide_cnt;
34552 sc->vec_load_pendulum = vec_load_pendulum;
34553 }
34554 }
34555
34556 /* Sets the global scheduling context to the one pointed to by _SC. */
34557 static void
34558 rs6000_set_sched_context (void *_sc)
34559 {
34560 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
34561
34562 gcc_assert (sc != NULL);
34563
34564 cached_can_issue_more = sc->cached_can_issue_more;
34565 last_scheduled_insn = sc->last_scheduled_insn;
34566 load_store_pendulum = sc->load_store_pendulum;
34567 divide_cnt = sc->divide_cnt;
34568 vec_load_pendulum = sc->vec_load_pendulum;
34569 }
34570
34571 /* Free _SC. */
34572 static void
34573 rs6000_free_sched_context (void *_sc)
34574 {
34575 gcc_assert (_sc != NULL);
34576
34577 free (_sc);
34578 }
34579
34580 \f
34581 /* Length in units of the trampoline for entering a nested function. */
34582
34583 int
34584 rs6000_trampoline_size (void)
34585 {
34586 int ret = 0;
34587
34588 switch (DEFAULT_ABI)
34589 {
34590 default:
34591 gcc_unreachable ();
34592
34593 case ABI_AIX:
34594 ret = (TARGET_32BIT) ? 12 : 24;
34595 break;
34596
34597 case ABI_ELFv2:
34598 gcc_assert (!TARGET_32BIT);
34599 ret = 32;
34600 break;
34601
34602 case ABI_DARWIN:
34603 case ABI_V4:
34604 ret = (TARGET_32BIT) ? 40 : 48;
34605 break;
34606 }
34607
34608 return ret;
34609 }
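
/* Editorial note: e.g. in 64-bit AIX mode the 24 bytes above hold the
   three doublewords of a function descriptor (code address, TOC
   pointer, static chain), matching the three stores emitted by
   rs6000_trampoline_init below. */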
34610
34611 /* Emit RTL insns to initialize the variable parts of a trampoline.
34612 FNDECL is the declaration of the function, whose code address is
34613 loaded into the trampoline. CXT is an RTX for the static chain
34614 value for the function. */
34614
34615 static void
34616 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
34617 {
34618 int regsize = (TARGET_32BIT) ? 4 : 8;
34619 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
34620 rtx ctx_reg = force_reg (Pmode, cxt);
34621 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
34622
34623 switch (DEFAULT_ABI)
34624 {
34625 default:
34626 gcc_unreachable ();
34627
34628 /* Under AIX, just build the 3-word function descriptor. */
34629 case ABI_AIX:
34630 {
34631 rtx fnmem, fn_reg, toc_reg;
34632
34633 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
34634 error ("You cannot take the address of a nested function if you use "
34635 "the -mno-pointers-to-nested-functions option.");
34636
34637 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
34638 fn_reg = gen_reg_rtx (Pmode);
34639 toc_reg = gen_reg_rtx (Pmode);
34640
34641 /* Macro to shorten the code expansions below. */
34642 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
34643
34644 m_tramp = replace_equiv_address (m_tramp, addr);
34645
34646 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
34647 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
34648 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
34649 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
34650 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
34651
34652 # undef MEM_PLUS
34653 }
34654 break;
34655
34656 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
34657 case ABI_ELFv2:
34658 case ABI_DARWIN:
34659 case ABI_V4:
34660 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
34661 LCT_NORMAL, VOIDmode, 4,
34662 addr, Pmode,
34663 GEN_INT (rs6000_trampoline_size ()), SImode,
34664 fnaddr, Pmode,
34665 ctx_reg, Pmode);
34666 break;
34667 }
34668 }
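/* For illustration, the AIX trampoline built above is a 3-word
   function descriptor (word size REGSIZE = 4 or 8 bytes):
       word 0: entry address (copied from the target's descriptor)
       word 1: TOC pointer   (copied from the target's descriptor)
       word 2: static chain  (CXT)
   which matches the 12/24-byte sizes from rs6000_trampoline_size. */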
34669
34670 \f
34671 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
34672 identifier as an argument, so the front end shouldn't look it up. */
34673
34674 static bool
34675 rs6000_attribute_takes_identifier_p (const_tree attr_id)
34676 {
34677 return is_attribute_p ("altivec", attr_id);
34678 }
34679
34680 /* Handle the "altivec" attribute. The attribute may have
34681 arguments as follows:
34682
34683 __attribute__((altivec(vector__)))
34684 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
34685 __attribute__((altivec(bool__))) (always followed by 'unsigned')
34686
34687 and may appear more than once (e.g., 'vector bool char') in a
34688 given declaration. */
34689
34690 static tree
34691 rs6000_handle_altivec_attribute (tree *node,
34692 tree name ATTRIBUTE_UNUSED,
34693 tree args,
34694 int flags ATTRIBUTE_UNUSED,
34695 bool *no_add_attrs)
34696 {
34697 tree type = *node, result = NULL_TREE;
34698 machine_mode mode;
34699 int unsigned_p;
34700 char altivec_type
34701 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
34702 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
34703 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
34704 : '?');
34705
34706 while (POINTER_TYPE_P (type)
34707 || TREE_CODE (type) == FUNCTION_TYPE
34708 || TREE_CODE (type) == METHOD_TYPE
34709 || TREE_CODE (type) == ARRAY_TYPE)
34710 type = TREE_TYPE (type);
34711
34712 mode = TYPE_MODE (type);
34713
34714 /* Check for invalid AltiVec type qualifiers. */
34715 if (type == long_double_type_node)
34716 error ("use of %<long double%> in AltiVec types is invalid");
34717 else if (type == boolean_type_node)
34718 error ("use of boolean types in AltiVec types is invalid");
34719 else if (TREE_CODE (type) == COMPLEX_TYPE)
34720 error ("use of %<complex%> in AltiVec types is invalid");
34721 else if (DECIMAL_FLOAT_MODE_P (mode))
34722 error ("use of decimal floating point types in AltiVec types is invalid");
34723 else if (!TARGET_VSX)
34724 {
34725 if (type == long_unsigned_type_node || type == long_integer_type_node)
34726 {
34727 if (TARGET_64BIT)
34728 error ("use of %<long%> in AltiVec types is invalid for "
34729 "64-bit code without -mvsx");
34730 else if (rs6000_warn_altivec_long)
34731 warning (0, "use of %<long%> in AltiVec types is deprecated; "
34732 "use %<int%>");
34733 }
34734 else if (type == long_long_unsigned_type_node
34735 || type == long_long_integer_type_node)
34736 error ("use of %<long long%> in AltiVec types is invalid without "
34737 "-mvsx");
34738 else if (type == double_type_node)
34739 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
34740 }
34741
34742 switch (altivec_type)
34743 {
34744 case 'v':
34745 unsigned_p = TYPE_UNSIGNED (type);
34746 switch (mode)
34747 {
34748 case TImode:
34749 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
34750 break;
34751 case DImode:
34752 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
34753 break;
34754 case SImode:
34755 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
34756 break;
34757 case HImode:
34758 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
34759 break;
34760 case QImode:
34761 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
34762 break;
34763 case SFmode: result = V4SF_type_node; break;
34764 case DFmode: result = V2DF_type_node; break;
34765 /* If the user says 'vector int bool', we may be handed the 'bool'
34766 attribute _before_ the 'vector' attribute, in which case the
34767 proper type was already selected in the 'b' case below. */
34768 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
34769 case V2DImode: case V2DFmode:
34770 result = type; break;
34771 default: break;
34772 }
34773 break;
34774 case 'b':
34775 switch (mode)
34776 {
34777 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
34778 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
34779 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
34780 case QImode: case V16QImode: result = bool_V16QI_type_node; break;
34781 default: break;
34782 }
34783 break;
34784 case 'p':
34785 switch (mode)
34786 {
34787 case V8HImode: result = pixel_V8HI_type_node; break;
34788 default: break;
34789 }
34790 default: break;
34791 }
34792
34793 /* Propagate qualifiers attached to the element type
34794 onto the vector type. */
34795 if (result && result != type && TYPE_QUALS (type))
34796 result = build_qualified_type (result, TYPE_QUALS (type));
34797
34798 *no_add_attrs = true; /* No need to hang on to the attribute. */
34799
34800 if (result)
34801 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
34802
34803 return NULL_TREE;
34804 }
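/* A minimal sketch of how these attribute forms are reached in
   practice, assuming keyword macros along the lines the AltiVec
   headers use (the exact definitions live elsewhere):

     #define vector __attribute__ ((altivec (vector__)))
     #define bool   __attribute__ ((altivec (bool__))) unsigned

     vector unsigned int v;   maps to unsigned_V4SI_type_node
     vector bool int b;       maps to bool_V4SI_type_node (the 'b'
                              attribute is seen first, then 'vector'
                              keeps the already-vector type). */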
34805
34806 /* AltiVec defines four built-in scalar types that serve as vector
34807 elements; we must teach the compiler how to mangle them. */
34808
34809 static const char *
34810 rs6000_mangle_type (const_tree type)
34811 {
34812 type = TYPE_MAIN_VARIANT (type);
34813
34814 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
34815 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
34816 return NULL;
34817
34818 if (type == bool_char_type_node) return "U6__boolc";
34819 if (type == bool_short_type_node) return "U6__bools";
34820 if (type == pixel_type_node) return "u7__pixel";
34821 if (type == bool_int_type_node) return "U6__booli";
34822 if (type == bool_long_type_node) return "U6__booll";
34823
34824 /* Use a unique name for __float128 rather than trying to use "e" or "g".
34825 Use "g" for IBM extended double, regardless of whether it is long double
34826 (using -mabi=ibmlongdouble) or the distinct __ibm128 type. */
34827 if (TARGET_FLOAT128_TYPE)
34828 {
34829 if (type == ieee128_float_type_node)
34830 return "U10__float128";
34831
34832 if (type == ibm128_float_type_node)
34833 return "g";
34834
34835 if (type == long_double_type_node && TARGET_LONG_DOUBLE_128)
34836 return (TARGET_IEEEQUAD) ? "U10__float128" : "g";
34837 }
34838
34839 /* Mangle IBM extended float long double as `g' (__float128) on
34840 powerpc*-linux where long-double-64 previously was the default. */
34841 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
34842 && TARGET_ELF
34843 && TARGET_LONG_DOUBLE_128
34844 && !TARGET_IEEEQUAD)
34845 return "g";
34846
34847 /* For all other types, use normal C++ mangling. */
34848 return NULL;
34849 }
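/* Examples of the resulting mangling: the element type of
   "vector bool int" mangles as "U6__booli", "vector pixel" as
   "u7__pixel", and when TARGET_FLOAT128_TYPE is set a __float128
   argument mangles as "U10__float128" while __ibm128 stays "g". */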
34850
34851 /* Handle a "longcall" or "shortcall" attribute; arguments as in
34852 struct attribute_spec.handler. */
34853
34854 static tree
34855 rs6000_handle_longcall_attribute (tree *node, tree name,
34856 tree args ATTRIBUTE_UNUSED,
34857 int flags ATTRIBUTE_UNUSED,
34858 bool *no_add_attrs)
34859 {
34860 if (TREE_CODE (*node) != FUNCTION_TYPE
34861 && TREE_CODE (*node) != FIELD_DECL
34862 && TREE_CODE (*node) != TYPE_DECL)
34863 {
34864 warning (OPT_Wattributes, "%qE attribute only applies to functions",
34865 name);
34866 *no_add_attrs = true;
34867 }
34868
34869 return NULL_TREE;
34870 }
34871
34872 /* Set longcall attributes on all functions declared when
34873 rs6000_default_long_calls is true. */
34874 static void
34875 rs6000_set_default_type_attributes (tree type)
34876 {
34877 if (rs6000_default_long_calls
34878 && (TREE_CODE (type) == FUNCTION_TYPE
34879 || TREE_CODE (type) == METHOD_TYPE))
34880 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
34881 NULL_TREE,
34882 TYPE_ATTRIBUTES (type));
34883
34884 #if TARGET_MACHO
34885 darwin_set_default_type_attributes (type);
34886 #endif
34887 }
34888
34889 /* Return a reference suitable for calling a function with the
34890 longcall attribute. */
34891
34892 rtx
34893 rs6000_longcall_ref (rtx call_ref)
34894 {
34895 const char *call_name;
34896 tree node;
34897
34898 if (GET_CODE (call_ref) != SYMBOL_REF)
34899 return call_ref;
34900
34901 /* System V adds '.' to the internal name, so skip any leading periods. */
34902 call_name = XSTR (call_ref, 0);
34903 if (*call_name == '.')
34904 {
34905 while (*call_name == '.')
34906 call_name++;
34907
34908 node = get_identifier (call_name);
34909 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
34910 }
34911
34912 return force_reg (Pmode, call_ref);
34913 }
34914 \f
34915 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
34916 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
34917 #endif
34918
34919 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
34920 struct attribute_spec.handler. */
34921 static tree
34922 rs6000_handle_struct_attribute (tree *node, tree name,
34923 tree args ATTRIBUTE_UNUSED,
34924 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
34925 {
34926 tree *type = NULL;
34927 if (DECL_P (*node))
34928 {
34929 if (TREE_CODE (*node) == TYPE_DECL)
34930 type = &TREE_TYPE (*node);
34931 }
34932 else
34933 type = node;
34934
34935 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
34936 || TREE_CODE (*type) == UNION_TYPE)))
34937 {
34938 warning (OPT_Wattributes, "%qE attribute ignored", name);
34939 *no_add_attrs = true;
34940 }
34941
34942 else if ((is_attribute_p ("ms_struct", name)
34943 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
34944 || ((is_attribute_p ("gcc_struct", name)
34945 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
34946 {
34947 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
34948 name);
34949 *no_add_attrs = true;
34950 }
34951
34952 return NULL_TREE;
34953 }
34954
34955 static bool
34956 rs6000_ms_bitfield_layout_p (const_tree record_type)
34957 {
34958 return (TARGET_USE_MS_BITFIELD_LAYOUT
34959 && !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
34960 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
34961 }
34962 \f
34963 #ifdef USING_ELFOS_H
34964
34965 /* A get_unnamed_section callback, used for switching to toc_section. */
34966
34967 static void
34968 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
34969 {
34970 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
34971 && TARGET_MINIMAL_TOC)
34972 {
34973 if (!toc_initialized)
34974 {
34975 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
34976 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
34977 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
34978 fprintf (asm_out_file, "\t.tc ");
34979 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
34980 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
34981 fprintf (asm_out_file, "\n");
34982
34983 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
34984 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
34985 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
34986 fprintf (asm_out_file, " = .+32768\n");
34987 toc_initialized = 1;
34988 }
34989 else
34990 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
34991 }
34992 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
34993 {
34994 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
34995 if (!toc_initialized)
34996 {
34997 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
34998 toc_initialized = 1;
34999 }
35000 }
35001 else
35002 {
35003 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
35004 if (!toc_initialized)
35005 {
35006 ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
35007 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
35008 fprintf (asm_out_file, " = .+32768\n");
35009 toc_initialized = 1;
35010 }
35011 }
35012 }
35013
35014 /* Implement TARGET_ASM_INIT_SECTIONS. */
35015
35016 static void
35017 rs6000_elf_asm_init_sections (void)
35018 {
35019 toc_section
35020 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
35021
35022 sdata2_section
35023 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
35024 SDATA2_SECTION_ASM_OP);
35025 }
35026
35027 /* Implement TARGET_SELECT_RTX_SECTION. */
35028
35029 static section *
35030 rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
35031 unsigned HOST_WIDE_INT align)
35032 {
35033 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
35034 return toc_section;
35035 else
35036 return default_elf_select_rtx_section (mode, x, align);
35037 }
35038 \f
35039 /* For a SYMBOL_REF, set generic flags and then perform some
35040 target-specific processing.
35041
35042 When the AIX ABI is requested on a non-AIX system, replace the
35043 function name with the real name (with a leading .) rather than the
35044 function descriptor name. This saves a lot of overriding code to
35045 read the prefixes. */
35046
35047 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
35048 static void
35049 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
35050 {
35051 default_encode_section_info (decl, rtl, first);
35052
35053 if (first
35054 && TREE_CODE (decl) == FUNCTION_DECL
35055 && !TARGET_AIX
35056 && DEFAULT_ABI == ABI_AIX)
35057 {
35058 rtx sym_ref = XEXP (rtl, 0);
35059 size_t len = strlen (XSTR (sym_ref, 0));
35060 char *str = XALLOCAVEC (char, len + 2);
35061 str[0] = '.';
35062 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
35063 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
35064 }
35065 }
35066
35067 static inline bool
35068 compare_section_name (const char *section, const char *templ)
35069 {
35070 int len;
35071
35072 len = strlen (templ);
35073 return (strncmp (section, templ, len) == 0
35074 && (section[len] == 0 || section[len] == '.'));
35075 }
35076
35077 bool
35078 rs6000_elf_in_small_data_p (const_tree decl)
35079 {
35080 if (rs6000_sdata == SDATA_NONE)
35081 return false;
35082
35083 /* We want to merge strings, so we never consider them small data. */
35084 if (TREE_CODE (decl) == STRING_CST)
35085 return false;
35086
35087 /* Functions are never in the small data area. */
35088 if (TREE_CODE (decl) == FUNCTION_DECL)
35089 return false;
35090
35091 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
35092 {
35093 const char *section = DECL_SECTION_NAME (decl);
35094 if (compare_section_name (section, ".sdata")
35095 || compare_section_name (section, ".sdata2")
35096 || compare_section_name (section, ".gnu.linkonce.s")
35097 || compare_section_name (section, ".sbss")
35098 || compare_section_name (section, ".sbss2")
35099 || compare_section_name (section, ".gnu.linkonce.sb")
35100 || strcmp (section, ".PPC.EMB.sdata0") == 0
35101 || strcmp (section, ".PPC.EMB.sbss0") == 0)
35102 return true;
35103 }
35104 else
35105 {
35106 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
35107
35108 if (size > 0
35109 && size <= g_switch_value
35110 /* If it's not public, and we're not going to reference it via the
35111 small data area anyway, there's no need to put it there. */
35112 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
35113 return true;
35114 }
35115
35116 return false;
35117 }
35118
35119 #endif /* USING_ELFOS_H */
35120 \f
35121 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
35122
35123 static bool
35124 rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
35125 {
35126 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
35127 }
35128
35129 /* Do not place thread-local symbol refs in the object blocks. */
35130
35131 static bool
35132 rs6000_use_blocks_for_decl_p (const_tree decl)
35133 {
35134 return !DECL_THREAD_LOCAL_P (decl);
35135 }
35136 \f
35137 /* Return a REG that occurs in ADDR with coefficient 1.
35138 ADDR can be effectively incremented by incrementing REG.
35139
35140 r0 is special and we must not select it as an address
35141 register by this routine since our caller will try to
35142 increment the returned register via an "la" instruction. */
35143
35144 rtx
35145 find_addr_reg (rtx addr)
35146 {
35147 while (GET_CODE (addr) == PLUS)
35148 {
35149 if (GET_CODE (XEXP (addr, 0)) == REG
35150 && REGNO (XEXP (addr, 0)) != 0)
35151 addr = XEXP (addr, 0);
35152 else if (GET_CODE (XEXP (addr, 1)) == REG
35153 && REGNO (XEXP (addr, 1)) != 0)
35154 addr = XEXP (addr, 1);
35155 else if (CONSTANT_P (XEXP (addr, 0)))
35156 addr = XEXP (addr, 1);
35157 else if (CONSTANT_P (XEXP (addr, 1)))
35158 addr = XEXP (addr, 0);
35159 else
35160 gcc_unreachable ();
35161 }
35162 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
35163 return addr;
35164 }
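/* For example, given ADDR = (plus (reg r9) (const_int 8)) this
   returns r9, which the caller can then bump with an "la"
   instruction to step ADDR; r0 is skipped because "la" treats it
   as the constant zero rather than a base register. */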
35165
35166 void
35167 rs6000_fatal_bad_address (rtx op)
35168 {
35169 fatal_insn ("bad address", op);
35170 }
35171
35172 #if TARGET_MACHO
35173
35174 typedef struct branch_island_d {
35175 tree function_name;
35176 tree label_name;
35177 int line_number;
35178 } branch_island;
35179
35180
35181 static vec<branch_island, va_gc> *branch_islands;
35182
35183 /* Remember to generate a branch island for far calls to the given
35184 function. */
35185
35186 static void
35187 add_compiler_branch_island (tree label_name, tree function_name,
35188 int line_number)
35189 {
35190 branch_island bi = {function_name, label_name, line_number};
35191 vec_safe_push (branch_islands, bi);
35192 }
35193
35194 /* Generate far-jump branch islands for everything recorded in
35195 branch_islands. Invoked immediately after the last instruction of
35196 the epilogue has been emitted; the branch islands must be appended
35197 to, and contiguous with, the function body. Mach-O stubs are
35198 generated in machopic_output_stub(). */
35199
35200 static void
35201 macho_branch_islands (void)
35202 {
35203 char tmp_buf[512];
35204
35205 while (!vec_safe_is_empty (branch_islands))
35206 {
35207 branch_island *bi = &branch_islands->last ();
35208 const char *label = IDENTIFIER_POINTER (bi->label_name);
35209 const char *name = IDENTIFIER_POINTER (bi->function_name);
35210 char name_buf[512];
35211 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
35212 if (name[0] == '*' || name[0] == '&')
35213 strcpy (name_buf, name+1);
35214 else
35215 {
35216 name_buf[0] = '_';
35217 strcpy (name_buf+1, name);
35218 }
35219 strcpy (tmp_buf, "\n");
35220 strcat (tmp_buf, label);
35221 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
35222 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
35223 dbxout_stabd (N_SLINE, bi->line_number);
35224 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
35225 if (flag_pic)
35226 {
35227 if (TARGET_LINK_STACK)
35228 {
35229 char name[32];
35230 get_ppc476_thunk_name (name);
35231 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
35232 strcat (tmp_buf, name);
35233 strcat (tmp_buf, "\n");
35234 strcat (tmp_buf, label);
35235 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
35236 }
35237 else
35238 {
35239 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
35240 strcat (tmp_buf, label);
35241 strcat (tmp_buf, "_pic\n");
35242 strcat (tmp_buf, label);
35243 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
35244 }
35245
35246 strcat (tmp_buf, "\taddis r11,r11,ha16(");
35247 strcat (tmp_buf, name_buf);
35248 strcat (tmp_buf, " - ");
35249 strcat (tmp_buf, label);
35250 strcat (tmp_buf, "_pic)\n");
35251
35252 strcat (tmp_buf, "\tmtlr r0\n");
35253
35254 strcat (tmp_buf, "\taddi r12,r11,lo16(");
35255 strcat (tmp_buf, name_buf);
35256 strcat (tmp_buf, " - ");
35257 strcat (tmp_buf, label);
35258 strcat (tmp_buf, "_pic)\n");
35259
35260 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
35261 }
35262 else
35263 {
35264 strcat (tmp_buf, ":\nlis r12,hi16(");
35265 strcat (tmp_buf, name_buf);
35266 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
35267 strcat (tmp_buf, name_buf);
35268 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
35269 }
35270 output_asm_insn (tmp_buf, 0);
35271 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
35272 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
35273 dbxout_stabd (N_SLINE, bi->line_number);
35274 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
35275 branch_islands->pop ();
35276 }
35277 }
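/* For illustration, the non-PIC island emitted above for a call to
   "foo" recorded under label "L42" comes out roughly as:

     L42:
          lis r12,hi16(_foo)
          ori r12,r12,lo16(_foo)
          mtctr r12
          bctr
*/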
35278
35279 /* NO_PREVIOUS_DEF checks whether the function name already has a
35280 branch island recorded for it. */
35281
35282 static int
35283 no_previous_def (tree function_name)
35284 {
35285 branch_island *bi;
35286 unsigned ix;
35287
35288 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
35289 if (function_name == bi->function_name)
35290 return 0;
35291 return 1;
35292 }
35293
35294 /* GET_PREV_LABEL gets the label name from the previous definition of
35295 the function. */
35296
35297 static tree
35298 get_prev_label (tree function_name)
35299 {
35300 branch_island *bi;
35301 unsigned ix;
35302
35303 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
35304 if (function_name == bi->function_name)
35305 return bi->label_name;
35306 return NULL_TREE;
35307 }
35308
35309 /* INSN is a function call. OPERANDS[DEST_OPERAND_NUMBER] is the
35310 routine we are calling; OPERANDS[COOKIE_OPERAND_NUMBER] carries the
35311 call cookie, whose CALL_LONG bit requests a long call through a
35312 branch island. */
35313
35314 char *
35315 output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
35316 int cookie_operand_number)
35317 {
35318 static char buf[256];
35319 if (darwin_emit_branch_islands
35320 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
35321 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
35322 {
35323 tree labelname;
35324 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
35325
35326 if (no_previous_def (funname))
35327 {
35328 rtx label_rtx = gen_label_rtx ();
35329 char *label_buf, temp_buf[256];
35330 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
35331 CODE_LABEL_NUMBER (label_rtx));
35332 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
35333 labelname = get_identifier (label_buf);
35334 add_compiler_branch_island (labelname, funname, insn_line (insn));
35335 }
35336 else
35337 labelname = get_prev_label (funname);
35338
35339 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
35340 instruction will reach 'foo', otherwise link as 'bl L42'".
35341 "L42" should be a 'branch island', that will do a far jump to
35342 'foo'. Branch islands are generated in
35343 macho_branch_islands(). */
35344 sprintf (buf, "jbsr %%z%d,%.246s",
35345 dest_operand_number, IDENTIFIER_POINTER (labelname));
35346 }
35347 else
35348 sprintf (buf, "bl %%z%d", dest_operand_number);
35349 return buf;
35350 }
35351
35352 /* Generate PIC and indirect symbol stubs. */
35353
35354 void
35355 machopic_output_stub (FILE *file, const char *symb, const char *stub)
35356 {
35357 unsigned int length;
35358 char *symbol_name, *lazy_ptr_name;
35359 char *local_label_0;
35360 static int label = 0;
35361
35362 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
35363 symb = (*targetm.strip_name_encoding) (symb);
35364
35365
35366 length = strlen (symb);
35367 symbol_name = XALLOCAVEC (char, length + 32);
35368 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
35369
35370 lazy_ptr_name = XALLOCAVEC (char, length + 32);
35371 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
35372
35373 if (flag_pic == 2)
35374 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
35375 else
35376 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
35377
35378 if (flag_pic == 2)
35379 {
35380 fprintf (file, "\t.align 5\n");
35381
35382 fprintf (file, "%s:\n", stub);
35383 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
35384
35385 label++;
35386 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
35387 sprintf (local_label_0, "\"L%011d$spb\"", label);
35388
35389 fprintf (file, "\tmflr r0\n");
35390 if (TARGET_LINK_STACK)
35391 {
35392 char name[32];
35393 get_ppc476_thunk_name (name);
35394 fprintf (file, "\tbl %s\n", name);
35395 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
35396 }
35397 else
35398 {
35399 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
35400 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
35401 }
35402 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
35403 lazy_ptr_name, local_label_0);
35404 fprintf (file, "\tmtlr r0\n");
35405 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
35406 (TARGET_64BIT ? "ldu" : "lwzu"),
35407 lazy_ptr_name, local_label_0);
35408 fprintf (file, "\tmtctr r12\n");
35409 fprintf (file, "\tbctr\n");
35410 }
35411 else
35412 {
35413 fprintf (file, "\t.align 4\n");
35414
35415 fprintf (file, "%s:\n", stub);
35416 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
35417
35418 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
35419 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
35420 (TARGET_64BIT ? "ldu" : "lwzu"),
35421 lazy_ptr_name);
35422 fprintf (file, "\tmtctr r12\n");
35423 fprintf (file, "\tbctr\n");
35424 }
35425
35426 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
35427 fprintf (file, "%s:\n", lazy_ptr_name);
35428 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
35429 fprintf (file, "%sdyld_stub_binding_helper\n",
35430 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
35431 }
35432
35433 /* Legitimize PIC addresses. If the address is already
35434 position-independent, we return ORIG. Newly generated
35435 position-independent addresses go into a reg. This is REG if
35436 nonzero; otherwise we allocate register(s) as necessary. */
35437
35438 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
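/* SMALL_INT is true when X fits in a signed 16-bit immediate:
   (UINTVAL (X) + 0x8000) < 0x10000 tests -0x8000 <= X < 0x8000
   without risking signed overflow. */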
35439
35440 rtx
35441 rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
35442 rtx reg)
35443 {
35444 rtx base, offset;
35445
35446 if (reg == NULL && ! reload_in_progress && ! reload_completed)
35447 reg = gen_reg_rtx (Pmode);
35448
35449 if (GET_CODE (orig) == CONST)
35450 {
35451 rtx reg_temp;
35452
35453 if (GET_CODE (XEXP (orig, 0)) == PLUS
35454 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
35455 return orig;
35456
35457 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
35458
35459 /* Use a different reg for the intermediate value, as
35460 it will be marked UNCHANGING. */
35461 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
35462 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
35463 Pmode, reg_temp);
35464 offset =
35465 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
35466 Pmode, reg);
35467
35468 if (GET_CODE (offset) == CONST_INT)
35469 {
35470 if (SMALL_INT (offset))
35471 return plus_constant (Pmode, base, INTVAL (offset));
35472 else if (! reload_in_progress && ! reload_completed)
35473 offset = force_reg (Pmode, offset);
35474 else
35475 {
35476 rtx mem = force_const_mem (Pmode, orig);
35477 return machopic_legitimize_pic_address (mem, Pmode, reg);
35478 }
35479 }
35480 return gen_rtx_PLUS (Pmode, base, offset);
35481 }
35482
35483 /* Fall back on generic machopic code. */
35484 return machopic_legitimize_pic_address (orig, mode, reg);
35485 }
35486
35487 /* Output a .machine directive for the Darwin assembler, and call
35488 the generic start_file routine. */
35489
35490 static void
35491 rs6000_darwin_file_start (void)
35492 {
35493 static const struct
35494 {
35495 const char *arg;
35496 const char *name;
35497 HOST_WIDE_INT if_set;
35498 } mapping[] = {
35499 { "ppc64", "ppc64", MASK_64BIT },
35500 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
35501 { "power4", "ppc970", 0 },
35502 { "G5", "ppc970", 0 },
35503 { "7450", "ppc7450", 0 },
35504 { "7400", "ppc7400", MASK_ALTIVEC },
35505 { "G4", "ppc7400", 0 },
35506 { "750", "ppc750", 0 },
35507 { "740", "ppc750", 0 },
35508 { "G3", "ppc750", 0 },
35509 { "604e", "ppc604e", 0 },
35510 { "604", "ppc604", 0 },
35511 { "603e", "ppc603", 0 },
35512 { "603", "ppc603", 0 },
35513 { "601", "ppc601", 0 },
35514 { NULL, "ppc", 0 } };
35515 const char *cpu_id = "";
35516 size_t i;
35517
35518 rs6000_file_start ();
35519 darwin_file_start ();
35520
35521 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
35522
35523 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
35524 cpu_id = rs6000_default_cpu;
35525
35526 if (global_options_set.x_rs6000_cpu_index)
35527 cpu_id = processor_target_table[rs6000_cpu_index].name;
35528
35529 /* Look through the mapping array. Pick the first name that either
35530 matches the argument, has a bit set in IF_SET that is also set
35531 in the target flags, or has a NULL name. */
35532
35533 i = 0;
35534 while (mapping[i].arg != NULL
35535 && strcmp (mapping[i].arg, cpu_id) != 0
35536 && (mapping[i].if_set & rs6000_isa_flags) == 0)
35537 i++;
35538
35539 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
35540 }
35541
35542 #endif /* TARGET_MACHO */
35543
35544 #if TARGET_ELF
35545 static int
35546 rs6000_elf_reloc_rw_mask (void)
35547 {
35548 if (flag_pic)
35549 return 3;
35550 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
35551 return 2;
35552 else
35553 return 0;
35554 }
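/* Per the TARGET_ASM_RELOC_RW_MASK convention, bit 0 covers local
   relocations and bit 1 global ones; a set bit forces data needing
   such relocations into writable sections. Hence 3 under -fpic
   (everything writable) and 2 for the AIX-style ABIs (only data with
   globally visible relocations). */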
35555
35556 /* Record an element in the table of global constructors. SYMBOL is
35557 a SYMBOL_REF of the function to be called; PRIORITY is a number
35558 between 0 and MAX_INIT_PRIORITY.
35559
35560 This differs from default_named_section_asm_out_constructor in
35561 that we have special handling for -mrelocatable. */
35562
35563 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
35564 static void
35565 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
35566 {
35567 const char *section = ".ctors";
35568 char buf[18];
35569
35570 if (priority != DEFAULT_INIT_PRIORITY)
35571 {
35572 sprintf (buf, ".ctors.%.5u",
35573 /* Invert the numbering so the linker puts us in the proper
35574 order; constructors are run from right to left, and the
35575 linker sorts in increasing order. */
35576 MAX_INIT_PRIORITY - priority);
35577 section = buf;
35578 }
35579
35580 switch_to_section (get_section (section, SECTION_WRITE, NULL));
35581 assemble_align (POINTER_SIZE);
35582
35583 if (DEFAULT_ABI == ABI_V4
35584 && (TARGET_RELOCATABLE || flag_pic > 1))
35585 {
35586 fputs ("\t.long (", asm_out_file);
35587 output_addr_const (asm_out_file, symbol);
35588 fputs (")@fixup\n", asm_out_file);
35589 }
35590 else
35591 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
35592 }
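/* Worked example of the inversion above, assuming MAX_INIT_PRIORITY
   is 65535: a constructor with priority 101 lands in ".ctors.65434",
   which the linker sorts after ".ctors.00000" (priority 65535);
   since the list runs right to left, the lower-numbered priority
   executes first, as init_priority requires. */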
35593
35594 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
35595 static void
35596 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
35597 {
35598 const char *section = ".dtors";
35599 char buf[18];
35600
35601 if (priority != DEFAULT_INIT_PRIORITY)
35602 {
35603 sprintf (buf, ".dtors.%.5u",
35604 /* Invert the numbering so the linker puts us in the proper
35605 order; destructors must run in the reverse of construction
35606 order, and the linker sorts in increasing order. */
35607 MAX_INIT_PRIORITY - priority);
35608 section = buf;
35609 }
35610
35611 switch_to_section (get_section (section, SECTION_WRITE, NULL));
35612 assemble_align (POINTER_SIZE);
35613
35614 if (DEFAULT_ABI == ABI_V4
35615 && (TARGET_RELOCATABLE || flag_pic > 1))
35616 {
35617 fputs ("\t.long (", asm_out_file);
35618 output_addr_const (asm_out_file, symbol);
35619 fputs (")@fixup\n", asm_out_file);
35620 }
35621 else
35622 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
35623 }
35624
35625 void
35626 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
35627 {
35628 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
35629 {
35630 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
35631 ASM_OUTPUT_LABEL (file, name);
35632 fputs (DOUBLE_INT_ASM_OP, file);
35633 rs6000_output_function_entry (file, name);
35634 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
35635 if (DOT_SYMBOLS)
35636 {
35637 fputs ("\t.size\t", file);
35638 assemble_name (file, name);
35639 fputs (",24\n\t.type\t.", file);
35640 assemble_name (file, name);
35641 fputs (",@function\n", file);
35642 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
35643 {
35644 fputs ("\t.globl\t.", file);
35645 assemble_name (file, name);
35646 putc ('\n', file);
35647 }
35648 }
35649 else
35650 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
35651 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
35652 rs6000_output_function_entry (file, name);
35653 fputs (":\n", file);
35654 return;
35655 }
35656
35657 if (DEFAULT_ABI == ABI_V4
35658 && (TARGET_RELOCATABLE || flag_pic > 1)
35659 && !TARGET_SECURE_PLT
35660 && (!constant_pool_empty_p () || crtl->profile)
35661 && uses_TOC ())
35662 {
35663 char buf[256];
35664
35665 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
35666
35667 fprintf (file, "\t.long ");
35668 assemble_name (file, toc_label_name);
35669 need_toc_init = 1;
35670 putc ('-', file);
35671 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
35672 assemble_name (file, buf);
35673 putc ('\n', file);
35674 }
35675
35676 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
35677 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
35678
35679 if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
35680 {
35681 char buf[256];
35682
35683 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
35684
35685 fprintf (file, "\t.quad .TOC.-");
35686 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
35687 assemble_name (file, buf);
35688 putc ('\n', file);
35689 }
35690
35691 if (DEFAULT_ABI == ABI_AIX)
35692 {
35693 const char *desc_name, *orig_name;
35694
35695 orig_name = (*targetm.strip_name_encoding) (name);
35696 desc_name = orig_name;
35697 while (*desc_name == '.')
35698 desc_name++;
35699
35700 if (TREE_PUBLIC (decl))
35701 fprintf (file, "\t.globl %s\n", desc_name);
35702
35703 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
35704 fprintf (file, "%s:\n", desc_name);
35705 fprintf (file, "\t.long %s\n", orig_name);
35706 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
35707 fputs ("\t.long 0\n", file);
35708 fprintf (file, "\t.previous\n");
35709 }
35710 ASM_OUTPUT_LABEL (file, name);
35711 }
35712
35713 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
35714 static void
35715 rs6000_elf_file_end (void)
35716 {
35717 #ifdef HAVE_AS_GNU_ATTRIBUTE
35718 /* ??? The value emitted depends on options active at file end.
35719 Assume anyone using #pragma or attributes that might change
35720 options knows what they are doing. */
35721 if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
35722 && rs6000_passes_float)
35723 {
35724 int fp;
35725
35726 if (TARGET_DF_FPR | TARGET_DF_SPE)
35727 fp = 1;
35728 else if (TARGET_SF_FPR | TARGET_SF_SPE)
35729 fp = 3;
35730 else
35731 fp = 2;
35732 if (rs6000_passes_long_double)
35733 {
35734 if (!TARGET_LONG_DOUBLE_128)
35735 fp |= 2 * 4;
35736 else if (TARGET_IEEEQUAD)
35737 fp |= 3 * 4;
35738 else
35739 fp |= 1 * 4;
35740 }
35741 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
35742 }
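/* Decoding the FP attribute value built above: bits 0-1 give the
   scalar float ABI (1 = hard double, 2 = soft float, 3 = hard
   single) and bits 2-3 the long double format (1 = 128-bit IBM
   extended, 2 = 64-bit, 3 = IEEE 128-bit). */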
35743 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
35744 {
35745 if (rs6000_passes_vector)
35746 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
35747 (TARGET_ALTIVEC_ABI ? 2
35748 : TARGET_SPE_ABI ? 3
35749 : 1));
35750 if (rs6000_returns_struct)
35751 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
35752 aix_struct_return ? 2 : 1);
35753 }
35754 #endif
35755 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
35756 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
35757 file_end_indicate_exec_stack ();
35758 #endif
35759
35760 if (flag_split_stack)
35761 file_end_indicate_split_stack ();
35762
35763 if (cpu_builtin_p)
35764 {
35765 /* We have expanded a CPU builtin, so we need to emit a reference to
35766 the special symbol that libc uses to declare that it supports the
35767 AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB. */
35768 switch_to_section (data_section);
35769 fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
35770 fprintf (asm_out_file, "\t%s %s\n",
35771 TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
35772 }
35773 }
35774 #endif
35775
35776 #if TARGET_XCOFF
35777
35778 #ifndef HAVE_XCOFF_DWARF_EXTRAS
35779 #define HAVE_XCOFF_DWARF_EXTRAS 0
35780 #endif
35781
35782 static enum unwind_info_type
35783 rs6000_xcoff_debug_unwind_info (void)
35784 {
35785 return UI_NONE;
35786 }
35787
35788 static void
35789 rs6000_xcoff_asm_output_anchor (rtx symbol)
35790 {
35791 char buffer[100];
35792
35793 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
35794 SYMBOL_REF_BLOCK_OFFSET (symbol));
35795 fprintf (asm_out_file, "%s", SET_ASM_OP);
35796 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
35797 fprintf (asm_out_file, ",");
35798 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
35799 fprintf (asm_out_file, "\n");
35800 }
35801
35802 static void
35803 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
35804 {
35805 fputs (GLOBAL_ASM_OP, stream);
35806 RS6000_OUTPUT_BASENAME (stream, name);
35807 putc ('\n', stream);
35808 }
35809
35810 /* A get_unnamed_section callback, used for read-only sections.
35811 DIRECTIVE points to the section string variable. */
35812
35813 static void
35814 rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
35815 {
35816 fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
35817 *(const char *const *) directive,
35818 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
35819 }
35820
35821 /* Likewise for read-write sections. */
35822
35823 static void
35824 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
35825 {
35826 fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
35827 *(const char *const *) directive,
35828 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
35829 }
35830
35831 static void
35832 rs6000_xcoff_output_tls_section_asm_op (const void *directive)
35833 {
35834 fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
35835 *(const char *const *) directive,
35836 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
35837 }
35838
35839 /* A get_unnamed_section callback, used for switching to toc_section. */
35840
35841 static void
35842 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
35843 {
35844 if (TARGET_MINIMAL_TOC)
35845 {
35846 /* toc_section is always selected at least once from
35847 rs6000_xcoff_file_start, so this is guaranteed to
35848 always be defined once and only once in each file. */
35849 if (!toc_initialized)
35850 {
35851 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
35852 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
35853 toc_initialized = 1;
35854 }
35855 fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
35856 (TARGET_32BIT ? "" : ",3"));
35857 }
35858 else
35859 fputs ("\t.toc\n", asm_out_file);
35860 }
35861
35862 /* Implement TARGET_ASM_INIT_SECTIONS. */
35863
35864 static void
35865 rs6000_xcoff_asm_init_sections (void)
35866 {
35867 read_only_data_section
35868 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
35869 &xcoff_read_only_section_name);
35870
35871 private_data_section
35872 = get_unnamed_section (SECTION_WRITE,
35873 rs6000_xcoff_output_readwrite_section_asm_op,
35874 &xcoff_private_data_section_name);
35875
35876 tls_data_section
35877 = get_unnamed_section (SECTION_TLS,
35878 rs6000_xcoff_output_tls_section_asm_op,
35879 &xcoff_tls_data_section_name);
35880
35881 tls_private_data_section
35882 = get_unnamed_section (SECTION_TLS,
35883 rs6000_xcoff_output_tls_section_asm_op,
35884 &xcoff_private_data_section_name);
35885
35886 read_only_private_data_section
35887 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
35888 &xcoff_private_data_section_name);
35889
35890 toc_section
35891 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);
35892
35893 readonly_data_section = read_only_data_section;
35894 }
35895
35896 static int
35897 rs6000_xcoff_reloc_rw_mask (void)
35898 {
35899 return 3;
35900 }
35901
35902 static void
35903 rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
35904 tree decl ATTRIBUTE_UNUSED)
35905 {
35906 int smclass;
35907 static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };
35908
35909 if (flags & SECTION_EXCLUDE)
35910 smclass = 4;
35911 else if (flags & SECTION_DEBUG)
35912 {
35913 fprintf (asm_out_file, "\t.dwsect %s\n", name);
35914 return;
35915 }
35916 else if (flags & SECTION_CODE)
35917 smclass = 0;
35918 else if (flags & SECTION_TLS)
35919 smclass = 3;
35920 else if (flags & SECTION_WRITE)
35921 smclass = 2;
35922 else
35923 smclass = 1;
35924
35925 fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
35926 (flags & SECTION_CODE) ? "." : "",
35927 name, suffix[smclass], flags & SECTION_ENTSIZE);
35928 }
35929
35930 #define IN_NAMED_SECTION(DECL) \
35931 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
35932 && DECL_SECTION_NAME (DECL) != NULL)
35933
35934 static section *
35935 rs6000_xcoff_select_section (tree decl, int reloc,
35936 unsigned HOST_WIDE_INT align)
35937 {
35938 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
35939 a named section. */
35940 if (align > BIGGEST_ALIGNMENT)
35941 {
35942 resolve_unique_section (decl, reloc, true);
35943 if (IN_NAMED_SECTION (decl))
35944 return get_named_section (decl, NULL, reloc);
35945 }
35946
35947 if (decl_readonly_section (decl, reloc))
35948 {
35949 if (TREE_PUBLIC (decl))
35950 return read_only_data_section;
35951 else
35952 return read_only_private_data_section;
35953 }
35954 else
35955 {
35956 #if HAVE_AS_TLS
35957 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
35958 {
35959 if (TREE_PUBLIC (decl))
35960 return tls_data_section;
35961 else if (bss_initializer_p (decl))
35962 {
35963 /* Convert to COMMON to emit in BSS. */
35964 DECL_COMMON (decl) = 1;
35965 return tls_comm_section;
35966 }
35967 else
35968 return tls_private_data_section;
35969 }
35970 else
35971 #endif
35972 if (TREE_PUBLIC (decl))
35973 return data_section;
35974 else
35975 return private_data_section;
35976 }
35977 }
35978
35979 static void
35980 rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
35981 {
35982 const char *name;
35983
35984 /* Use select_section for private data and uninitialized data with
35985 alignment <= BIGGEST_ALIGNMENT. */
35986 if (!TREE_PUBLIC (decl)
35987 || DECL_COMMON (decl)
35988 || (DECL_INITIAL (decl) == NULL_TREE
35989 && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
35990 || DECL_INITIAL (decl) == error_mark_node
35991 || (flag_zero_initialized_in_bss
35992 && initializer_zerop (DECL_INITIAL (decl))))
35993 return;
35994
35995 name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
35996 name = (*targetm.strip_name_encoding) (name);
35997 set_decl_section_name (decl, name);
35998 }
35999
36000 /* Select section for constant in constant pool.
36001
36002 On RS/6000, all constants are in the private read-only data area.
36003 However, if this is being placed in the TOC it must be output as a
36004 toc entry. */
36005
36006 static section *
36007 rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
36008 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
36009 {
36010 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
36011 return toc_section;
36012 else
36013 return read_only_private_data_section;
36014 }
36015
36016 /* Remove any trailing [DS] or the like from the symbol name. */
36017
36018 static const char *
36019 rs6000_xcoff_strip_name_encoding (const char *name)
36020 {
36021 size_t len;
36022 if (*name == '*')
36023 name++;
36024 len = strlen (name);
36025 if (name[len - 1] == ']')
36026 return ggc_alloc_string (name, len - 4);
36027 else
36028 return name;
36029 }
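/* E.g. "foo[DS]" and "*foo[DS]" both strip to "foo". The fixed
   "len - 4" assumes every mapping-class suffix is two characters in
   brackets, which holds for the classes used here ([DS], [RO],
   [RW], [TL], [UA], ...). */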
36030
36031 /* Section attributes. AIX is always PIC. */
36032
36033 static unsigned int
36034 rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
36035 {
36036 unsigned int align;
36037 unsigned int flags = default_section_type_flags (decl, name, reloc);
36038
36039 /* Align to at least UNIT size. */
36040 if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
36041 align = MIN_UNITS_PER_WORD;
36042 else
36043 /* Increase alignment of large objects if not already stricter. */
36044 align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
36045 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
36046 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);
36047
36048 return flags | (exact_log2 (align) & SECTION_ENTSIZE);
36049 }
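/* Note the log2 alignment is carried in the SECTION_ENTSIZE bits of
   the flags word; rs6000_xcoff_asm_named_section above prints it
   back out as the .csect alignment operand. */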
36050
36051 /* Output at beginning of assembler file.
36052
36053 Initialize the section names for the RS/6000 at this point.
36054
36055 Specify filename, including full path, to assembler.
36056
36057 We want to go into the TOC section so at least one .toc will be emitted.
36058 Also, in order to output proper .bs/.es pairs, we need at least one static
36059 [RW] section emitted.
36060
36061 Finally, declare mcount when profiling to make the assembler happy. */
36062
36063 static void
36064 rs6000_xcoff_file_start (void)
36065 {
36066 rs6000_gen_section_name (&xcoff_bss_section_name,
36067 main_input_filename, ".bss_");
36068 rs6000_gen_section_name (&xcoff_private_data_section_name,
36069 main_input_filename, ".rw_");
36070 rs6000_gen_section_name (&xcoff_read_only_section_name,
36071 main_input_filename, ".ro_");
36072 rs6000_gen_section_name (&xcoff_tls_data_section_name,
36073 main_input_filename, ".tls_");
36074 rs6000_gen_section_name (&xcoff_tbss_section_name,
36075 main_input_filename, ".tbss_[UL]");
36076
36077 fputs ("\t.file\t", asm_out_file);
36078 output_quoted_string (asm_out_file, main_input_filename);
36079 fputc ('\n', asm_out_file);
36080 if (write_symbols != NO_DEBUG)
36081 switch_to_section (private_data_section);
36082 switch_to_section (toc_section);
36083 switch_to_section (text_section);
36084 if (profile_flag)
36085 fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
36086 rs6000_file_start ();
36087 }
36088
36089 /* Output at end of assembler file.
36090 On the RS/6000, referencing data should automatically pull in text. */
36091
36092 static void
36093 rs6000_xcoff_file_end (void)
36094 {
36095 switch_to_section (text_section);
36096 fputs ("_section_.text:\n", asm_out_file);
36097 switch_to_section (data_section);
36098 fputs (TARGET_32BIT
36099 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
36100 asm_out_file);
36101 }
36102
36103 struct declare_alias_data
36104 {
36105 FILE *file;
36106 bool function_descriptor;
36107 };
36108
36109 /* Declare alias N. A helper callback for call_for_symbol_and_aliases. */
36110
36111 static bool
36112 rs6000_declare_alias (struct symtab_node *n, void *d)
36113 {
36114 struct declare_alias_data *data = (struct declare_alias_data *)d;
36115 /* The main symbol is output specially, because the varasm machinery does
36116 part of the job for us; we do not need to emit .globl/.lglobl and such. */
36117 if (!n->alias || n->weakref)
36118 return false;
36119
36120 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
36121 return false;
36122
36123 /* Prevent assemble_alias from trying to use the .set pseudo-op,
36124 which does not behave as the middle end expects. */
36125 TREE_ASM_WRITTEN (n->decl) = true;
36126
36127 const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
36128 char *buffer = (char *) alloca (strlen (name) + 2);
36129 char *p;
36130 int dollar_inside = 0;
36131
36132 strcpy (buffer, name);
36133 p = strchr (buffer, '$');
36134 while (p) {
36135 *p = '_';
36136 dollar_inside++;
36137 p = strchr (p + 1, '$');
36138 }
36139 if (TREE_PUBLIC (n->decl))
36140 {
36141 if (!RS6000_WEAK || !DECL_WEAK (n->decl))
36142 {
36143 if (dollar_inside) {
36144 if (data->function_descriptor)
36145 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
36146 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
36147 }
36148 if (data->function_descriptor)
36149 {
36150 fputs ("\t.globl .", data->file);
36151 RS6000_OUTPUT_BASENAME (data->file, buffer);
36152 putc ('\n', data->file);
36153 }
36154 fputs ("\t.globl ", data->file);
36155 RS6000_OUTPUT_BASENAME (data->file, buffer);
36156 putc ('\n', data->file);
36157 }
36158 #ifdef ASM_WEAKEN_DECL
36159 else if (DECL_WEAK (n->decl) && !data->function_descriptor)
36160 ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
36161 #endif
36162 }
36163 else
36164 {
36165 if (dollar_inside)
36166 {
36167 if (data->function_descriptor)
36168 fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
36169 fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
36170 }
36171 if (data->function_descriptor)
36172 {
36173 fputs ("\t.lglobl .", data->file);
36174 RS6000_OUTPUT_BASENAME (data->file, buffer);
36175 putc ('\n', data->file);
36176 }
36177 fputs ("\t.lglobl ", data->file);
36178 RS6000_OUTPUT_BASENAME (data->file, buffer);
36179 putc ('\n', data->file);
36180 }
36181 if (data->function_descriptor)
36182 fputs (".", data->file);
36183 RS6000_OUTPUT_BASENAME (data->file, buffer);
36184 fputs (":\n", data->file);
36185 return false;
36186 }
36187
36188
36189 #ifdef HAVE_GAS_HIDDEN
36190 /* Helper function to calculate visibility of a DECL
36191 and return the value as a const string. */
36192
36193 static const char *
36194 rs6000_xcoff_visibility (tree decl)
36195 {
36196 static const char * const visibility_types[] = {
36197 "", ",protected", ",hidden", ",internal"
36198 };
36199
36200 enum symbol_visibility vis = DECL_VISIBILITY (decl);
36201
36202 if (TREE_CODE (decl) == FUNCTION_DECL
36203 && cgraph_node::get (decl)
36204 && cgraph_node::get (decl)->instrumentation_clone
36205 && cgraph_node::get (decl)->instrumented_version)
36206 vis = DECL_VISIBILITY (cgraph_node::get (decl)->instrumented_version->decl);
36207
36208 return visibility_types[vis];
36209 }
36210 #endif
36211
36212
36213 /* This macro produces the initial definition of a function name.
36214 On the RS/6000, we need to place an extra '.' in the function name and
36215 output the function descriptor.
36216 Dollar signs are converted to underscores.
36217
36218 The csect for the function will have already been created when
36219 text_section was selected. We do have to go back to that csect, however.
36220
36221 The third and fourth parameters to the .function pseudo-op (16 and 044)
36222 are placeholders which no longer have any use.
36223
36224 Because AIX assembler's .set command has unexpected semantics, we output
36225 all aliases as alternative labels in front of the definition. */
36226
36227 void
36228 rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
36229 {
36230 char *buffer = (char *) alloca (strlen (name) + 1);
36231 char *p;
36232 int dollar_inside = 0;
36233 struct declare_alias_data data = {file, false};
36234
36235 strcpy (buffer, name);
36236 p = strchr (buffer, '$');
36237 while (p) {
36238 *p = '_';
36239 dollar_inside++;
36240 p = strchr (p + 1, '$');
36241 }
36242 if (TREE_PUBLIC (decl))
36243 {
36244 if (!RS6000_WEAK || !DECL_WEAK (decl))
36245 {
36246 if (dollar_inside) {
36247 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
36248 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
36249 }
36250 fputs ("\t.globl .", file);
36251 RS6000_OUTPUT_BASENAME (file, buffer);
36252 #ifdef HAVE_GAS_HIDDEN
36253 fputs (rs6000_xcoff_visibility (decl), file);
36254 #endif
36255 putc ('\n', file);
36256 }
36257 }
36258 else
36259 {
36260 if (dollar_inside) {
36261 fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
36262 fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
36263 }
36264 fputs ("\t.lglobl .", file);
36265 RS6000_OUTPUT_BASENAME (file, buffer);
36266 putc ('\n', file);
36267 }
36268 fputs ("\t.csect ", file);
36269 RS6000_OUTPUT_BASENAME (file, buffer);
36270 fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
36271 RS6000_OUTPUT_BASENAME (file, buffer);
36272 fputs (":\n", file);
36273 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
36274 &data, true);
36275 fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
36276 RS6000_OUTPUT_BASENAME (file, buffer);
36277 fputs (", TOC[tc0], 0\n", file);
36278 in_section = NULL;
36279 switch_to_section (function_section (decl));
36280 putc ('.', file);
36281 RS6000_OUTPUT_BASENAME (file, buffer);
36282 fputs (":\n", file);
36283 data.function_descriptor = true;
36284 symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
36285 &data, true);
36286 if (!DECL_IGNORED_P (decl))
36287 {
36288 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
36289 xcoffout_declare_function (file, decl, buffer);
36290 else if (write_symbols == DWARF2_DEBUG)
36291 {
36292 name = (*targetm.strip_name_encoding) (name);
36293 fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
36294 }
36295 }
36296 return;
36297 }
36298
36299
36300 /* Output assembly language to globalize a symbol from a DECL,
36301 possibly with visibility. */
36302
36303 void
36304 rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
36305 {
36306 const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
36307 fputs (GLOBAL_ASM_OP, stream);
36308 RS6000_OUTPUT_BASENAME (stream, name);
36309 #ifdef HAVE_GAS_HIDDEN
36310 fputs (rs6000_xcoff_visibility (decl), stream);
36311 #endif
36312 putc ('\n', stream);
36313 }
36314
36315 /* Output assembly language to define a symbol as COMMON from a DECL,
36316 possibly with visibility. */
36317
36318 void
36319 rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
36320 tree decl ATTRIBUTE_UNUSED,
36321 const char *name,
36322 unsigned HOST_WIDE_INT size,
36323 unsigned HOST_WIDE_INT align)
36324 {
36325 unsigned HOST_WIDE_INT align2 = 2;
36326
36327 if (align > 32)
36328 align2 = floor_log2 (align / BITS_PER_UNIT);
36329 else if (size > 4)
36330 align2 = 3;
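/* align2 is the log2 byte alignment passed to .comm: 2 (4-byte) by
   default, 3 (8-byte) for objects larger than 4 bytes, or derived
   from the requested alignment when that exceeds 32 bits. */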
36331
36332 fputs (COMMON_ASM_OP, stream);
36333 RS6000_OUTPUT_BASENAME (stream, name);
36334
36335 fprintf (stream,
36336 "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
36337 size, align2);
36338
36339 #ifdef HAVE_GAS_HIDDEN
36340 fputs (rs6000_xcoff_visibility (decl), stream);
36341 #endif
36342 putc ('\n', stream);
36343 }
36344
36345 /* This macro produces the initial definition of an object (variable) name.
36346 Because AIX assembler's .set command has unexpected semantics, we output
36347 all aliases as alternative labels in front of the definition. */
36348
36349 void
36350 rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
36351 {
36352 struct declare_alias_data data = {file, false};
36353 RS6000_OUTPUT_BASENAME (file, name);
36354 fputs (":\n", file);
36355 symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
36356 &data, true);
36357 }
36358
36359 /* Override the default 'SYMBOL-.' syntax with the AIX-compatible 'SYMBOL-$'. */
36360
36361 void
36362 rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
36363 {
36364 fputs (integer_asm_op (size, FALSE), file);
36365 assemble_name (file, label);
36366 fputs ("-$", file);
36367 }
36368
36369 /* Output a symbol offset relative to the dbase for the current object.
36370 We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
36371 signed offsets.
36372
36373 __gcc_unwind_dbase is embedded in all executables/libraries through
36374 libgcc/config/rs6000/crtdbase.S. */
36375
36376 void
36377 rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
36378 {
36379 fputs (integer_asm_op (size, FALSE), file);
36380 assemble_name (file, label);
36381 fputs("-__gcc_unwind_dbase", file);
36382 }
36383
36384 #ifdef HAVE_AS_TLS
36385 static void
36386 rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
36387 {
36388 rtx symbol;
36389 int flags;
36390 const char *symname;
36391
36392 default_encode_section_info (decl, rtl, first);
36393
36394 /* Careful not to prod global register variables. */
36395 if (!MEM_P (rtl))
36396 return;
36397 symbol = XEXP (rtl, 0);
36398 if (GET_CODE (symbol) != SYMBOL_REF)
36399 return;
36400
36401 flags = SYMBOL_REF_FLAGS (symbol);
36402
36403 if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
36404 flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;
36405
36406 SYMBOL_REF_FLAGS (symbol) = flags;
36407
36408 /* Append mapping class to extern decls. */
36409 symname = XSTR (symbol, 0);
36410 if (decl /* sync condition with assemble_external () */
36411 && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
36412 && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
36413 || TREE_CODE (decl) == FUNCTION_DECL)
36414 && symname[strlen (symname) - 1] != ']')
36415 {
36416 char *newname = (char *) alloca (strlen (symname) + 5);
36417 strcpy (newname, symname);
36418 strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
36419 ? "[DS]" : "[UA]"));
36420 XSTR (symbol, 0) = ggc_strdup (newname);
36421 }
36422 }
36423 #endif /* HAVE_AS_TLS */
36424 #endif /* TARGET_XCOFF */
36425
36426 void
36427 rs6000_asm_weaken_decl (FILE *stream, tree decl,
36428 const char *name, const char *val)
36429 {
36430 fputs ("\t.weak\t", stream);
36431 RS6000_OUTPUT_BASENAME (stream, name);
36432 if (decl && TREE_CODE (decl) == FUNCTION_DECL
36433 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
36434 {
36435 if (TARGET_XCOFF)
36436 fputs ("[DS]", stream);
36437 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
36438 if (TARGET_XCOFF)
36439 fputs (rs6000_xcoff_visibility (decl), stream);
36440 #endif
36441 fputs ("\n\t.weak\t.", stream);
36442 RS6000_OUTPUT_BASENAME (stream, name);
36443 }
36444 #if TARGET_XCOFF && HAVE_GAS_HIDDEN
36445 if (TARGET_XCOFF)
36446 fputs (rs6000_xcoff_visibility (decl), stream);
36447 #endif
36448 fputc ('\n', stream);
36449 if (val)
36450 {
36451 #ifdef ASM_OUTPUT_DEF
36452 ASM_OUTPUT_DEF (stream, name, val);
36453 #endif
36454 if (decl && TREE_CODE (decl) == FUNCTION_DECL
36455 && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
36456 {
36457 fputs ("\t.set\t.", stream);
36458 RS6000_OUTPUT_BASENAME (stream, name);
36459 fputs (",.", stream);
36460 RS6000_OUTPUT_BASENAME (stream, val);
36461 fputc ('\n', stream);
36462 }
36463 }
36464 }
36465
36466
36467 /* Return true if INSN should not be copied. */
36468
36469 static bool
36470 rs6000_cannot_copy_insn_p (rtx_insn *insn)
36471 {
36472 return recog_memoized (insn) >= 0
36473 && get_attr_cannot_copy (insn);
36474 }
36475
36476 /* Compute a (partial) cost for rtx X. Return true if the complete
36477 cost has been computed, and false if subexpressions should be
36478 scanned. In either case, *TOTAL contains the cost result. */
36479
36480 static bool
36481 rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
36482 int opno ATTRIBUTE_UNUSED, int *total, bool speed)
36483 {
36484 int code = GET_CODE (x);
36485
36486 switch (code)
36487 {
36488 /* On the RS/6000, if it is valid in the insn, it is free. */
36489 case CONST_INT:
36490 if (((outer_code == SET
36491 || outer_code == PLUS
36492 || outer_code == MINUS)
36493 && (satisfies_constraint_I (x)
36494 || satisfies_constraint_L (x)))
36495 || (outer_code == AND
36496 && (satisfies_constraint_K (x)
36497 || (mode == SImode
36498 ? satisfies_constraint_L (x)
36499 : satisfies_constraint_J (x))))
36500 || ((outer_code == IOR || outer_code == XOR)
36501 && (satisfies_constraint_K (x)
36502 || (mode == SImode
36503 ? satisfies_constraint_L (x)
36504 : satisfies_constraint_J (x))))
36505 || outer_code == ASHIFT
36506 || outer_code == ASHIFTRT
36507 || outer_code == LSHIFTRT
36508 || outer_code == ROTATE
36509 || outer_code == ROTATERT
36510 || outer_code == ZERO_EXTRACT
36511 || (outer_code == MULT
36512 && satisfies_constraint_I (x))
36513 || ((outer_code == DIV || outer_code == UDIV
36514 || outer_code == MOD || outer_code == UMOD)
36515 && exact_log2 (INTVAL (x)) >= 0)
36516 || (outer_code == COMPARE
36517 && (satisfies_constraint_I (x)
36518 || satisfies_constraint_K (x)))
36519 || ((outer_code == EQ || outer_code == NE)
36520 && (satisfies_constraint_I (x)
36521 || satisfies_constraint_K (x)
36522 || (mode == SImode
36523 ? satisfies_constraint_L (x)
36524 : satisfies_constraint_J (x))))
36525 || (outer_code == GTU
36526 && satisfies_constraint_I (x))
36527 || (outer_code == LTU
36528 && satisfies_constraint_P (x)))
36529 {
36530 *total = 0;
36531 return true;
36532 }
36533 else if ((outer_code == PLUS
36534 && reg_or_add_cint_operand (x, VOIDmode))
36535 || (outer_code == MINUS
36536 && reg_or_sub_cint_operand (x, VOIDmode))
36537 || ((outer_code == SET
36538 || outer_code == IOR
36539 || outer_code == XOR)
36540 && (INTVAL (x)
36541 & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
36542 {
36543 *total = COSTS_N_INSNS (1);
36544 return true;
36545 }
36546 /* FALLTHRU */
36547
36548 case CONST_DOUBLE:
36549 case CONST_WIDE_INT:
36550 case CONST:
36551 case HIGH:
36552 case SYMBOL_REF:
36553 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
36554 return true;
36555
36556 case MEM:
36557 /* When optimizing for size, a MEM should be slightly more expensive
36558 than generating its address, e.g., (plus (reg) (const)).
36559 L1 cache latency is about two instructions. */
36560 *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
36561 if (SLOW_UNALIGNED_ACCESS (mode, MEM_ALIGN (x)))
36562 *total += COSTS_N_INSNS (100);
36563 return true;
36564
36565 case LABEL_REF:
36566 *total = 0;
36567 return true;
36568
36569 case PLUS:
36570 case MINUS:
36571 if (FLOAT_MODE_P (mode))
36572 *total = rs6000_cost->fp;
36573 else
36574 *total = COSTS_N_INSNS (1);
36575 return false;
36576
36577 case MULT:
36578 if (GET_CODE (XEXP (x, 1)) == CONST_INT
36579 && satisfies_constraint_I (XEXP (x, 1)))
36580 {
36581 if (INTVAL (XEXP (x, 1)) >= -256
36582 && INTVAL (XEXP (x, 1)) <= 255)
36583 *total = rs6000_cost->mulsi_const9;
36584 else
36585 *total = rs6000_cost->mulsi_const;
36586 }
36587 else if (mode == SFmode)
36588 *total = rs6000_cost->fp;
36589 else if (FLOAT_MODE_P (mode))
36590 *total = rs6000_cost->dmul;
36591 else if (mode == DImode)
36592 *total = rs6000_cost->muldi;
36593 else
36594 *total = rs6000_cost->mulsi;
36595 return false;
36596
36597 case FMA:
36598 if (mode == SFmode)
36599 *total = rs6000_cost->fp;
36600 else
36601 *total = rs6000_cost->dmul;
36602 break;
36603
36604 case DIV:
36605 case MOD:
36606 if (FLOAT_MODE_P (mode))
36607 {
36608 *total = mode == DFmode ? rs6000_cost->ddiv
36609 : rs6000_cost->sdiv;
36610 return false;
36611 }
36612 /* FALLTHRU */
36613
36614 case UDIV:
36615 case UMOD:
36616 if (GET_CODE (XEXP (x, 1)) == CONST_INT
36617 && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
36618 {
36619 if (code == DIV || code == MOD)
36620 /* Shift, addze */
36621 *total = COSTS_N_INSNS (2);
36622 else
36623 /* Shift */
36624 *total = COSTS_N_INSNS (1);
36625 }
36626 else
36627 {
36628 if (GET_MODE (XEXP (x, 1)) == DImode)
36629 *total = rs6000_cost->divdi;
36630 else
36631 *total = rs6000_cost->divsi;
36632 }
36633 /* Add in shift and subtract for MOD unless we have a mod instruction. */
36634 if (!TARGET_MODULO && (code == MOD || code == UMOD))
36635 *total += COSTS_N_INSNS (2);
36636 return false;
36637
36638 case CTZ:
36639 *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
36640 return false;
36641
36642 case FFS:
36643 *total = COSTS_N_INSNS (4);
36644 return false;
36645
36646 case POPCOUNT:
36647 *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
36648 return false;
36649
36650 case PARITY:
36651 *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
36652 return false;
36653
36654 case NOT:
36655 if (outer_code == AND || outer_code == IOR || outer_code == XOR)
36656 *total = 0;
36657 else
36658 *total = COSTS_N_INSNS (1);
36659 return false;
36660
36661 case AND:
36662 if (CONST_INT_P (XEXP (x, 1)))
36663 {
36664 rtx left = XEXP (x, 0);
36665 rtx_code left_code = GET_CODE (left);
36666
36667 /* rotate-and-mask: 1 insn. */
36668 if ((left_code == ROTATE
36669 || left_code == ASHIFT
36670 || left_code == LSHIFTRT)
36671 && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
36672 {
36673 *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
36674 if (!CONST_INT_P (XEXP (left, 1)))
36675 *total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
36676 *total += COSTS_N_INSNS (1);
36677 return true;
36678 }
36679
36680 /* rotate-and-mask (no rotate), andi., andis.: 1 insn. */
36681 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
36682 if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
36683 || (val & 0xffff) == val
36684 || (val & 0xffff0000) == val
36685 || ((val & 0xffff) == 0 && mode == SImode))
36686 {
36687 *total = rtx_cost (left, mode, AND, 0, speed);
36688 *total += COSTS_N_INSNS (1);
36689 return true;
36690 }
36691
36692 /* 2 insns. */
36693 if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
36694 {
36695 *total = rtx_cost (left, mode, AND, 0, speed);
36696 *total += COSTS_N_INSNS (2);
36697 return true;
36698 }
36699 }
36700
36701 *total = COSTS_N_INSNS (1);
36702 return false;
36703
36704 case IOR:
36705 /* FIXME */
36706 *total = COSTS_N_INSNS (1);
36707 return true;
36708
36709 case CLZ:
36710 case XOR:
36711 case ZERO_EXTRACT:
36712 *total = COSTS_N_INSNS (1);
36713 return false;
36714
36715 case ASHIFT:
36716 /* The EXTSWSLI instruction combines a sign extend with a shift. Don't
36717 count the sign extend and the shift separately within the insn. */
36718 if (TARGET_EXTSWSLI && mode == DImode
36719 && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
36720 && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
36721 {
36722 *total = 0;
36723 return false;
36724 }
36725 /* fall through */
36726
36727 case ASHIFTRT:
36728 case LSHIFTRT:
36729 case ROTATE:
36730 case ROTATERT:
36731 /* Handle mul_highpart. */
36732 if (outer_code == TRUNCATE
36733 && GET_CODE (XEXP (x, 0)) == MULT)
36734 {
36735 if (mode == DImode)
36736 *total = rs6000_cost->muldi;
36737 else
36738 *total = rs6000_cost->mulsi;
36739 return true;
36740 }
36741 else if (outer_code == AND)
36742 *total = 0;
36743 else
36744 *total = COSTS_N_INSNS (1);
36745 return false;
36746
36747 case SIGN_EXTEND:
36748 case ZERO_EXTEND:
36749 if (GET_CODE (XEXP (x, 0)) == MEM)
36750 *total = 0;
36751 else
36752 *total = COSTS_N_INSNS (1);
36753 return false;
36754
36755 case COMPARE:
36756 case NEG:
36757 case ABS:
36758 if (!FLOAT_MODE_P (mode))
36759 {
36760 *total = COSTS_N_INSNS (1);
36761 return false;
36762 }
36763 /* FALLTHRU */
36764
36765 case FLOAT:
36766 case UNSIGNED_FLOAT:
36767 case FIX:
36768 case UNSIGNED_FIX:
36769 case FLOAT_TRUNCATE:
36770 *total = rs6000_cost->fp;
36771 return false;
36772
36773 case FLOAT_EXTEND:
36774 if (mode == DFmode)
36775 *total = rs6000_cost->sfdf_convert;
36776 else
36777 *total = rs6000_cost->fp;
36778 return false;
36779
36780 case UNSPEC:
36781 switch (XINT (x, 1))
36782 {
36783 case UNSPEC_FRSP:
36784 *total = rs6000_cost->fp;
36785 return true;
36786
36787 default:
36788 break;
36789 }
36790 break;
36791
36792 case CALL:
36793 case IF_THEN_ELSE:
36794 if (!speed)
36795 {
36796 *total = COSTS_N_INSNS (1);
36797 return true;
36798 }
36799 else if (FLOAT_MODE_P (mode)
36800 && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
36801 {
36802 *total = rs6000_cost->fp;
36803 return false;
36804 }
36805 break;
36806
36807 case NE:
36808 case EQ:
36809 case GTU:
36810 case LTU:
36811 /* Carry bit requires mode == Pmode.
36812 NEG or PLUS already counted so only add one. */
36813 if (mode == Pmode
36814 && (outer_code == NEG || outer_code == PLUS))
36815 {
36816 *total = COSTS_N_INSNS (1);
36817 return true;
36818 }
36819 if (outer_code == SET)
36820 {
36821 if (XEXP (x, 1) == const0_rtx)
36822 {
36823 if (TARGET_ISEL && !TARGET_MFCRF)
36824 *total = COSTS_N_INSNS (8);
36825 else
36826 *total = COSTS_N_INSNS (2);
36827 return true;
36828 }
36829 else
36830 {
36831 *total = COSTS_N_INSNS (3);
36832 return false;
36833 }
36834 }
36835 /* FALLTHRU */
36836
36837 case GT:
36838 case LT:
36839 case UNORDERED:
36840 if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
36841 {
36842 if (TARGET_ISEL && !TARGET_MFCRF)
36843 *total = COSTS_N_INSNS (8);
36844 else
36845 *total = COSTS_N_INSNS (2);
36846 return true;
36847 }
36848 /* CC COMPARE. */
36849 if (outer_code == COMPARE)
36850 {
36851 *total = 0;
36852 return true;
36853 }
36854 break;
36855
36856 default:
36857 break;
36858 }
36859
36860 return false;
36861 }
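/* A worked example of the cases above: (plus (reg) (const_int 100))
   costs 0 for the CONST_INT, since an immediate the add instruction can
   encode directly (constraint I) is free, while
   (set (reg) (const_int 0x12345)) falls into the 32-bit-constant branch
   and is charged COSTS_N_INSNS (1). */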
36862
36863 /* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost. */
36864
36865 static bool
36866 rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
36867 int opno, int *total, bool speed)
36868 {
36869 bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);
36870
36871 fprintf (stderr,
36872 "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
36873 "opno = %d, total = %d, speed = %s, x:\n",
36874 ret ? "complete" : "scan inner",
36875 GET_MODE_NAME (mode),
36876 GET_RTX_NAME (outer_code),
36877 opno,
36878 *total,
36879 speed ? "true" : "false");
36880
36881 debug_rtx (x);
36882
36883 return ret;
36884 }
36885
36886 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
36887
36888 static int
36889 rs6000_debug_address_cost (rtx x, machine_mode mode,
36890 addr_space_t as, bool speed)
36891 {
36892 int ret = TARGET_ADDRESS_COST (x, mode, as, speed);
36893
36894 fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
36895 ret, speed ? "true" : "false");
36896 debug_rtx (x);
36897
36898 return ret;
36899 }
36900
36901
36902 /* A C expression returning the cost of moving data from a register of class
36903 CLASS1 to one of CLASS2. */
36904
36905 static int
36906 rs6000_register_move_cost (machine_mode mode,
36907 reg_class_t from, reg_class_t to)
36908 {
36909 int ret;
36910
36911 if (TARGET_DEBUG_COST)
36912 dbg_cost_ctrl++;
36913
36914 /* Moves from/to GENERAL_REGS. */
36915 if (reg_classes_intersect_p (to, GENERAL_REGS)
36916 || reg_classes_intersect_p (from, GENERAL_REGS))
36917 {
36918 reg_class_t rclass = from;
36919
36920 if (! reg_classes_intersect_p (to, GENERAL_REGS))
36921 rclass = to;
36922
36923 if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
36924 ret = (rs6000_memory_move_cost (mode, rclass, false)
36925 + rs6000_memory_move_cost (mode, GENERAL_REGS, false));
36926
36927 /* It's more expensive to move CR_REGS than CR0_REGS because of the
36928 shift. */
36929 else if (rclass == CR_REGS)
36930 ret = 4;
36931
36932 /* For those processors that have slow LR/CTR moves, make them more
36933 expensive than memory in order to bias spills to memory. */
36934 else if ((rs6000_cpu == PROCESSOR_POWER6
36935 || rs6000_cpu == PROCESSOR_POWER7
36936 || rs6000_cpu == PROCESSOR_POWER8
36937 || rs6000_cpu == PROCESSOR_POWER9)
36938 && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
36939 ret = 6 * hard_regno_nregs[0][mode];
36940
36941 else
36942 /* A move will cost one instruction per GPR moved. */
36943 ret = 2 * hard_regno_nregs[0][mode];
36944 }
36945
36946 /* If we have VSX, we can easily move between FPR or Altivec registers. */
36947 else if (VECTOR_MEM_VSX_P (mode)
36948 && reg_classes_intersect_p (to, VSX_REGS)
36949 && reg_classes_intersect_p (from, VSX_REGS))
36950 ret = 2 * hard_regno_nregs[FIRST_FPR_REGNO][mode];
36951
36952 /* Moving between two similar registers is just one instruction. */
36953 else if (reg_classes_intersect_p (to, from))
36954 ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;
36955
36956 /* Everything else has to go through GENERAL_REGS. */
36957 else
36958 ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
36959 + rs6000_register_move_cost (mode, from, GENERAL_REGS));
36960
36961 if (TARGET_DEBUG_COST)
36962 {
36963 if (dbg_cost_ctrl == 1)
36964 fprintf (stderr,
36965 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
36966 ret, GET_MODE_NAME (mode), reg_class_names[from],
36967 reg_class_names[to]);
36968 dbg_cost_ctrl--;
36969 }
36970
36971 return ret;
36972 }
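/* The resulting costs follow directly from the code above: an SImode
   GPR-to-GPR move costs 2 * hard_regno_nregs[0][SImode] = 2, a DImode
   move on a 32-bit target costs 4 (two GPRs), a CR-to-GPR move costs 4,
   and on POWER6 through POWER9 an LR/CTR move is priced at 6 per
   register so that spills prefer memory. */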
36973
36974 /* A C expression returning the cost of moving data of MODE from a register to
36975 or from memory. */
36976
36977 static int
36978 rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
36979 bool in ATTRIBUTE_UNUSED)
36980 {
36981 int ret;
36982
36983 if (TARGET_DEBUG_COST)
36984 dbg_cost_ctrl++;
36985
36986 if (reg_classes_intersect_p (rclass, GENERAL_REGS))
36987 ret = 4 * hard_regno_nregs[0][mode];
36988 else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
36989 || reg_classes_intersect_p (rclass, VSX_REGS)))
36990 ret = 4 * hard_regno_nregs[32][mode];
36991 else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
36992 ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
36993 else
36994 ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
36995
36996 if (TARGET_DEBUG_COST)
36997 {
36998 if (dbg_cost_ctrl == 1)
36999 fprintf (stderr,
37000 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
37001 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
37002 dbg_cost_ctrl--;
37003 }
37004
37005 return ret;
37006 }
37007
37008 /* Returns a code for a target-specific builtin that implements
37009 reciprocal of the function, or NULL_TREE if not available. */
37010
37011 static tree
37012 rs6000_builtin_reciprocal (tree fndecl)
37013 {
37014 switch (DECL_FUNCTION_CODE (fndecl))
37015 {
37016 case VSX_BUILTIN_XVSQRTDP:
37017 if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
37018 return NULL_TREE;
37019
37020 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
37021
37022 case VSX_BUILTIN_XVSQRTSP:
37023 if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
37024 return NULL_TREE;
37025
37026 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];
37027
37028 default:
37029 return NULL_TREE;
37030 }
37031 }
37032
37033 /* Load up a constant. If the mode is a vector mode, splat the value across
37034 all of the vector elements. */
37035
37036 static rtx
37037 rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
37038 {
37039 rtx reg;
37040
37041 if (mode == SFmode || mode == DFmode)
37042 {
37043 rtx d = const_double_from_real_value (dconst, mode);
37044 reg = force_reg (mode, d);
37045 }
37046 else if (mode == V4SFmode)
37047 {
37048 rtx d = const_double_from_real_value (dconst, SFmode);
37049 rtvec v = gen_rtvec (4, d, d, d, d);
37050 reg = gen_reg_rtx (mode);
37051 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
37052 }
37053 else if (mode == V2DFmode)
37054 {
37055 rtx d = const_double_from_real_value (dconst, DFmode);
37056 rtvec v = gen_rtvec (2, d, d);
37057 reg = gen_reg_rtx (mode);
37058 rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
37059 }
37060 else
37061 gcc_unreachable ();
37062
37063 return reg;
37064 }
37065
37066 /* Generate an FMA instruction. */
37067
37068 static void
37069 rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
37070 {
37071 machine_mode mode = GET_MODE (target);
37072 rtx dst;
37073
37074 dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
37075 gcc_assert (dst != NULL);
37076
37077 if (dst != target)
37078 emit_move_insn (target, dst);
37079 }
37080
37081 /* Generate an FNMSUB instruction: dst = -fma(m1, m2, -a). */
37082
37083 static void
37084 rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
37085 {
37086 machine_mode mode = GET_MODE (dst);
37087 rtx r;
37088
37089 /* This is a tad more complicated, since the fnma_optab is for
37090 a different expression: fma(-m1, m2, a), which is the same
37091 thing except in the case of signed zeros.
37092
37093 Fortunately we know that if FMA is supported, FNMSUB is
37094 also supported in the ISA. Just expand it directly. */
37095
37096 gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);
37097
37098 r = gen_rtx_NEG (mode, a);
37099 r = gen_rtx_FMA (mode, m1, m2, r);
37100 r = gen_rtx_NEG (mode, r);
37101 emit_insn (gen_rtx_SET (dst, r));
37102 }
37103
37104 /* Newton-Raphson approximation of floating point divide DST = N/D. If NOTE_P,
37105 add a reg_note saying that this was a division. Support both scalar and
37106 vector divide. Assumes no trapping math and finite arguments. */
37107
37108 void
37109 rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
37110 {
37111 machine_mode mode = GET_MODE (dst);
37112 rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
37113 int i;
37114
37115 /* Low precision estimates guarantee 5 bits of accuracy. High
37116 precision estimates guarantee 14 bits of accuracy. SFmode
37117 requires 23 bits of accuracy. DFmode requires 52 bits of
37118 accuracy. Each pass at least doubles the accuracy, leading
37119 to the following. */
37120 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
37121 if (mode == DFmode || mode == V2DFmode)
37122 passes++;
37123
37124 enum insn_code code = optab_handler (smul_optab, mode);
37125 insn_gen_fn gen_mul = GEN_FCN (code);
37126
37127 gcc_assert (code != CODE_FOR_nothing);
37128
37129 one = rs6000_load_constant_and_splat (mode, dconst1);
37130
37131 /* x0 = 1./d estimate */
37132 x0 = gen_reg_rtx (mode);
37133 emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
37134 UNSPEC_FRES)));
37135
37136 /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i). */
37137 if (passes > 1) {
37138
37139 /* e0 = 1. - d * x0 */
37140 e0 = gen_reg_rtx (mode);
37141 rs6000_emit_nmsub (e0, d, x0, one);
37142
37143 /* x1 = x0 + e0 * x0 */
37144 x1 = gen_reg_rtx (mode);
37145 rs6000_emit_madd (x1, e0, x0, x0);
37146
37147 for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
37148 ++i, xprev = xnext, eprev = enext) {
37149
37150 /* enext = eprev * eprev */
37151 enext = gen_reg_rtx (mode);
37152 emit_insn (gen_mul (enext, eprev, eprev));
37153
37154 /* xnext = xprev + enext * xprev */
37155 xnext = gen_reg_rtx (mode);
37156 rs6000_emit_madd (xnext, enext, xprev, xprev);
37157 }
37158
37159 } else
37160 xprev = x0;
37161
37162 /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i). */
37163
37164 /* u = n * xprev */
37165 u = gen_reg_rtx (mode);
37166 emit_insn (gen_mul (u, n, xprev));
37167
37168 /* v = n - (d * u) */
37169 v = gen_reg_rtx (mode);
37170 rs6000_emit_nmsub (v, d, u, n);
37171
37172 /* dst = (v * xprev) + u */
37173 rs6000_emit_madd (dst, v, xprev, u);
37174
37175 if (note_p)
37176 add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
37177 }
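/* Checking the pass count above: with TARGET_RECIP_PRECISION the
   14-bit estimate doubles to 28 >= 23 bits in one pass for SFmode and
   to 56 >= 52 bits in two passes for DFmode.  Without it the 5-bit
   estimate needs 5 -> 10 -> 20 -> 40 bits (three passes) for SFmode,
   plus one more pass for DFmode. */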
37178
37179 /* Goldschmidt's Algorithm for single/double-precision floating point
37180 sqrt and rsqrt. Assumes no trapping math and finite arguments. */
37181
37182 void
37183 rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
37184 {
37185 machine_mode mode = GET_MODE (src);
37186 rtx e = gen_reg_rtx (mode);
37187 rtx g = gen_reg_rtx (mode);
37188 rtx h = gen_reg_rtx (mode);
37189
37190 /* Low precision estimates guarantee 5 bits of accuracy. High
37191 precision estimates guarantee 14 bits of accuracy. SFmode
37192 requires 23 bits of accuracy. DFmode requires 52 bits of
37193 accuracy. Each pass at least doubles the accuracy, leading
37194 to the following. */
37195 int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
37196 if (mode == DFmode || mode == V2DFmode)
37197 passes++;
37198
37199 int i;
37200 rtx mhalf;
37201 enum insn_code code = optab_handler (smul_optab, mode);
37202 insn_gen_fn gen_mul = GEN_FCN (code);
37203
37204 gcc_assert (code != CODE_FOR_nothing);
37205
37206 mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);
37207
37208 /* e = rsqrt estimate */
37209 emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
37210 UNSPEC_RSQRT)));
37211
37212 /* If src == 0.0, filter the infinite estimate to prevent a NaN for sqrt(0.0). */
37213 if (!recip)
37214 {
37215 rtx zero = force_reg (mode, CONST0_RTX (mode));
37216
37217 if (mode == SFmode)
37218 {
37219 rtx target = emit_conditional_move (e, GT, src, zero, mode,
37220 e, zero, mode, 0);
37221 if (target != e)
37222 emit_move_insn (e, target);
37223 }
37224 else
37225 {
37226 rtx cond = gen_rtx_GT (VOIDmode, e, zero);
37227 rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
37228 }
37229 }
37230
37231 /* g = sqrt estimate. */
37232 emit_insn (gen_mul (g, e, src));
37233 /* h = 1/(2*sqrt) estimate. */
37234 emit_insn (gen_mul (h, e, mhalf));
37235
37236 if (recip)
37237 {
37238 if (passes == 1)
37239 {
37240 rtx t = gen_reg_rtx (mode);
37241 rs6000_emit_nmsub (t, g, h, mhalf);
37242 /* Apply correction directly to 1/rsqrt estimate. */
37243 rs6000_emit_madd (dst, e, t, e);
37244 }
37245 else
37246 {
37247 for (i = 0; i < passes; i++)
37248 {
37249 rtx t1 = gen_reg_rtx (mode);
37250 rtx g1 = gen_reg_rtx (mode);
37251 rtx h1 = gen_reg_rtx (mode);
37252
37253 rs6000_emit_nmsub (t1, g, h, mhalf);
37254 rs6000_emit_madd (g1, g, t1, g);
37255 rs6000_emit_madd (h1, h, t1, h);
37256
37257 g = g1;
37258 h = h1;
37259 }
37260 /* Multiply by 2 for 1/rsqrt. */
37261 emit_insn (gen_add3_insn (dst, h, h));
37262 }
37263 }
37264 else
37265 {
37266 rtx t = gen_reg_rtx (mode);
37267 rs6000_emit_nmsub (t, g, h, mhalf);
37268 rs6000_emit_madd (dst, g, t, g);
37269 }
37270
37271 return;
37272 }
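/* Invariants of the Goldschmidt iteration above: G approximates
   sqrt(SRC) and H approximates 1/(2*sqrt(SRC)), so at convergence
   g*h == 0.5.  Each pass forms the residual t = 0.5 - g*h with
   rs6000_emit_nmsub, then refines g1 = g + g*t and h1 = h + h*t,
   driving t toward zero; the rsqrt result is 2*h, hence the final
   gen_add3_insn (dst, h, h). */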
37273
37274 /* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
37275 (Power7) targets. DST is the target, and SRC is the argument operand. */
37276
37277 void
37278 rs6000_emit_popcount (rtx dst, rtx src)
37279 {
37280 machine_mode mode = GET_MODE (dst);
37281 rtx tmp1, tmp2;
37282
37283 /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can. */
37284 if (TARGET_POPCNTD)
37285 {
37286 if (mode == SImode)
37287 emit_insn (gen_popcntdsi2 (dst, src));
37288 else
37289 emit_insn (gen_popcntddi2 (dst, src));
37290 return;
37291 }
37292
37293 tmp1 = gen_reg_rtx (mode);
37294
37295 if (mode == SImode)
37296 {
37297 emit_insn (gen_popcntbsi2 (tmp1, src));
37298 tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
37299 NULL_RTX, 0);
37300 tmp2 = force_reg (SImode, tmp2);
37301 emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
37302 }
37303 else
37304 {
37305 emit_insn (gen_popcntbdi2 (tmp1, src));
37306 tmp2 = expand_mult (DImode, tmp1,
37307 GEN_INT ((HOST_WIDE_INT)
37308 0x01010101 << 32 | 0x01010101),
37309 NULL_RTX, 0);
37310 tmp2 = force_reg (DImode, tmp2);
37311 emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
37312 }
37313 }
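/* The multiply trick above: popcntb leaves per-byte population counts
   b3,b2,b1,b0 in TMP1, each at most 8.  Multiplying by 0x01010101 sums
   the bytes upward without carry-out (the total is at most 32), so the
   product's high byte is b3+b2+b1+b0; the shift by 24 (56 for DImode)
   extracts it. */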
37314
37315
37316 /* Emit parity intrinsic on TARGET_POPCNTB targets. DST is the
37317 target, and SRC is the argument operand. */
37318
37319 void
37320 rs6000_emit_parity (rtx dst, rtx src)
37321 {
37322 machine_mode mode = GET_MODE (dst);
37323 rtx tmp;
37324
37325 tmp = gen_reg_rtx (mode);
37326
37327 /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can. */
37328 if (TARGET_CMPB)
37329 {
37330 if (mode == SImode)
37331 {
37332 emit_insn (gen_popcntbsi2 (tmp, src));
37333 emit_insn (gen_paritysi2_cmpb (dst, tmp));
37334 }
37335 else
37336 {
37337 emit_insn (gen_popcntbdi2 (tmp, src));
37338 emit_insn (gen_paritydi2_cmpb (dst, tmp));
37339 }
37340 return;
37341 }
37342
37343 if (mode == SImode)
37344 {
37345 /* Is mult+shift >= shift+xor+shift+xor? */
37346 if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
37347 {
37348 rtx tmp1, tmp2, tmp3, tmp4;
37349
37350 tmp1 = gen_reg_rtx (SImode);
37351 emit_insn (gen_popcntbsi2 (tmp1, src));
37352
37353 tmp2 = gen_reg_rtx (SImode);
37354 emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
37355 tmp3 = gen_reg_rtx (SImode);
37356 emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));
37357
37358 tmp4 = gen_reg_rtx (SImode);
37359 emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
37360 emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
37361 }
37362 else
37363 rs6000_emit_popcount (tmp, src);
37364 emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
37365 }
37366 else
37367 {
37368 /* Is mult+shift >= shift+xor+shift+xor+shift+xor? */
37369 if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
37370 {
37371 rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;
37372
37373 tmp1 = gen_reg_rtx (DImode);
37374 emit_insn (gen_popcntbdi2 (tmp1, src));
37375
37376 tmp2 = gen_reg_rtx (DImode);
37377 emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
37378 tmp3 = gen_reg_rtx (DImode);
37379 emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));
37380
37381 tmp4 = gen_reg_rtx (DImode);
37382 emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
37383 tmp5 = gen_reg_rtx (DImode);
37384 emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));
37385
37386 tmp6 = gen_reg_rtx (DImode);
37387 emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
37388 emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
37389 }
37390 else
37391 rs6000_emit_popcount (tmp, src);
37392 emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
37393 }
37394 }
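/* Rationale for the xor folding above: the parity of SRC is the sum of
   the per-byte popcntb counts mod 2, and bit 0 of an xor is the xor of
   the operands' bit 0s.  Folding the high half onto the low half with
   shift-and-xor therefore leaves b0^b1^b2^b3 in the low byte, whose
   bit 0 is the desired parity; the final AND with const1_rtx extracts
   it.  DImode merely needs one extra fold, starting at 32 bits. */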
37395
37396 /* Expand an Altivec constant permutation for little endian mode.
37397 There are two issues: First, the two input operands must be
37398 swapped so that together they form a double-wide array in LE
37399 order. Second, the vperm instruction has surprising behavior
37400 in LE mode: it interprets the elements of the source vectors
37401 in BE mode ("left to right") and interprets the elements of
37402 the destination vector in LE mode ("right to left"). To
37403 correct for this, we must subtract each element of the permute
37404 control vector from 31.
37405
37406 For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
37407 with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
37408 We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
37409 serve as the permute control vector. Then, in BE mode,
37410
37411 vperm 9,10,11,12
37412
37413 places the desired result in vr9. However, in LE mode the
37414 vector contents will be
37415
37416 vr10 = 00000003 00000002 00000001 00000000
37417 vr11 = 00000007 00000006 00000005 00000004
37418
37419 The result of the vperm using the same permute control vector is
37420
37421 vr9 = 05000000 07000000 01000000 03000000
37422
37423 That is, the leftmost 4 bytes of vr10 are interpreted as the
37424 source for the rightmost 4 bytes of vr9, and so on.
37425
37426 If we change the permute control vector to
37427
37428 vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}
37429
37430 and issue
37431
37432 vperm 9,11,10,12
37433
37434 we get the desired
37435
37436 vr9 = 00000006 00000004 00000002 00000000. */
37437
37438 void
37439 altivec_expand_vec_perm_const_le (rtx operands[4])
37440 {
37441 unsigned int i;
37442 rtx perm[16];
37443 rtx constv, unspec;
37444 rtx target = operands[0];
37445 rtx op0 = operands[1];
37446 rtx op1 = operands[2];
37447 rtx sel = operands[3];
37448
37449 /* Unpack and adjust the constant selector. */
37450 for (i = 0; i < 16; ++i)
37451 {
37452 rtx e = XVECEXP (sel, 0, i);
37453 unsigned int elt = 31 - (INTVAL (e) & 31);
37454 perm[i] = GEN_INT (elt);
37455 }
37456
37457 /* Expand to a permute, swapping the inputs and using the
37458 adjusted selector. */
37459 if (!REG_P (op0))
37460 op0 = force_reg (V16QImode, op0);
37461 if (!REG_P (op1))
37462 op1 = force_reg (V16QImode, op1);
37463
37464 constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
37465 constv = force_reg (V16QImode, constv);
37466 unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
37467 UNSPEC_VPERM);
37468 if (!REG_P (target))
37469 {
37470 rtx tmp = gen_reg_rtx (V16QImode);
37471 emit_move_insn (tmp, unspec);
37472 unspec = tmp;
37473 }
37474
37475 emit_move_insn (target, unspec);
37476 }
37477
37478 /* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
37479 permute control vector. But here it's not a constant, so we must
37480 generate a vector NAND or NOR to do the adjustment. */
37481
37482 void
37483 altivec_expand_vec_perm_le (rtx operands[4])
37484 {
37485 rtx notx, iorx, unspec;
37486 rtx target = operands[0];
37487 rtx op0 = operands[1];
37488 rtx op1 = operands[2];
37489 rtx sel = operands[3];
37490 rtx tmp = target;
37491 rtx norreg = gen_reg_rtx (V16QImode);
37492 machine_mode mode = GET_MODE (target);
37493
37494 /* Get everything in regs so the pattern matches. */
37495 if (!REG_P (op0))
37496 op0 = force_reg (mode, op0);
37497 if (!REG_P (op1))
37498 op1 = force_reg (mode, op1);
37499 if (!REG_P (sel))
37500 sel = force_reg (V16QImode, sel);
37501 if (!REG_P (target))
37502 tmp = gen_reg_rtx (mode);
37503
37504 if (TARGET_P9_VECTOR)
37505 {
37506 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op0, op1, sel),
37507 UNSPEC_VPERMR);
37508 }
37509 else
37510 {
37511 /* Invert the selector with a VNAND if available, else a VNOR.
37512 The VNAND is preferred for future fusion opportunities. */
37513 notx = gen_rtx_NOT (V16QImode, sel);
37514 iorx = (TARGET_P8_VECTOR
37515 ? gen_rtx_IOR (V16QImode, notx, notx)
37516 : gen_rtx_AND (V16QImode, notx, notx));
37517 emit_insn (gen_rtx_SET (norreg, iorx));
37518
37519 /* Permute with operands reversed and adjusted selector. */
37520 unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
37521 UNSPEC_VPERM);
37522 }
37523
37524 /* Copy into target, possibly by way of a register. */
37525 if (!REG_P (target))
37526 {
37527 emit_move_insn (tmp, unspec);
37528 unspec = tmp;
37529 }
37530
37531 emit_move_insn (target, unspec);
37532 }
37533
37534 /* Expand an Altivec constant permutation. Return true if we match
37535 an efficient implementation; false to fall back to VPERM. */
37536
37537 bool
37538 altivec_expand_vec_perm_const (rtx operands[4])
37539 {
37540 struct altivec_perm_insn {
37541 HOST_WIDE_INT mask;
37542 enum insn_code impl;
37543 unsigned char perm[16];
37544 };
37545 static const struct altivec_perm_insn patterns[] = {
37546 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
37547 { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
37548 { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
37549 { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
37550 { OPTION_MASK_ALTIVEC,
37551 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
37552 : CODE_FOR_altivec_vmrglb_direct),
37553 { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
37554 { OPTION_MASK_ALTIVEC,
37555 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
37556 : CODE_FOR_altivec_vmrglh_direct),
37557 { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
37558 { OPTION_MASK_ALTIVEC,
37559 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
37560 : CODE_FOR_altivec_vmrglw_direct),
37561 { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
37562 { OPTION_MASK_ALTIVEC,
37563 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
37564 : CODE_FOR_altivec_vmrghb_direct),
37565 { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
37566 { OPTION_MASK_ALTIVEC,
37567 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
37568 : CODE_FOR_altivec_vmrghh_direct),
37569 { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
37570 { OPTION_MASK_ALTIVEC,
37571 (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
37572 : CODE_FOR_altivec_vmrghw_direct),
37573 { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
37574 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew,
37575 { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
37576 { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgow,
37577 { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
37578 };
37579
37580 unsigned int i, j, elt, which;
37581 unsigned char perm[16];
37582 rtx target, op0, op1, sel, x;
37583 bool one_vec;
37584
37585 target = operands[0];
37586 op0 = operands[1];
37587 op1 = operands[2];
37588 sel = operands[3];
37589
37590 /* Unpack the constant selector. */
37591 for (i = which = 0; i < 16; ++i)
37592 {
37593 rtx e = XVECEXP (sel, 0, i);
37594 elt = INTVAL (e) & 31;
37595 which |= (elt < 16 ? 1 : 2);
37596 perm[i] = elt;
37597 }
37598
37599 /* Simplify the constant selector based on operands. */
37600 switch (which)
37601 {
37602 default:
37603 gcc_unreachable ();
37604
37605 case 3:
37606 one_vec = false;
37607 if (!rtx_equal_p (op0, op1))
37608 break;
37609 /* FALLTHRU */
37610
37611 case 2:
37612 for (i = 0; i < 16; ++i)
37613 perm[i] &= 15;
37614 op0 = op1;
37615 one_vec = true;
37616 break;
37617
37618 case 1:
37619 op1 = op0;
37620 one_vec = true;
37621 break;
37622 }
37623
37624 /* Look for splat patterns. */
37625 if (one_vec)
37626 {
37627 elt = perm[0];
37628
37629 for (i = 0; i < 16; ++i)
37630 if (perm[i] != elt)
37631 break;
37632 if (i == 16)
37633 {
37634 if (!BYTES_BIG_ENDIAN)
37635 elt = 15 - elt;
37636 emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
37637 return true;
37638 }
37639
37640 if (elt % 2 == 0)
37641 {
37642 for (i = 0; i < 16; i += 2)
37643 if (perm[i] != elt || perm[i + 1] != elt + 1)
37644 break;
37645 if (i == 16)
37646 {
37647 int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
37648 x = gen_reg_rtx (V8HImode);
37649 emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
37650 GEN_INT (field)));
37651 emit_move_insn (target, gen_lowpart (V16QImode, x));
37652 return true;
37653 }
37654 }
37655
37656 if (elt % 4 == 0)
37657 {
37658 for (i = 0; i < 16; i += 4)
37659 if (perm[i] != elt
37660 || perm[i + 1] != elt + 1
37661 || perm[i + 2] != elt + 2
37662 || perm[i + 3] != elt + 3)
37663 break;
37664 if (i == 16)
37665 {
37666 int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
37667 x = gen_reg_rtx (V4SImode);
37668 emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
37669 GEN_INT (field)));
37670 emit_move_insn (target, gen_lowpart (V16QImode, x));
37671 return true;
37672 }
37673 }
37674 }
37675
37676 /* Look for merge and pack patterns. */
37677 for (j = 0; j < ARRAY_SIZE (patterns); ++j)
37678 {
37679 bool swapped;
37680
37681 if ((patterns[j].mask & rs6000_isa_flags) == 0)
37682 continue;
37683
37684 elt = patterns[j].perm[0];
37685 if (perm[0] == elt)
37686 swapped = false;
37687 else if (perm[0] == elt + 16)
37688 swapped = true;
37689 else
37690 continue;
37691 for (i = 1; i < 16; ++i)
37692 {
37693 elt = patterns[j].perm[i];
37694 if (swapped)
37695 elt = (elt >= 16 ? elt - 16 : elt + 16);
37696 else if (one_vec && elt >= 16)
37697 elt -= 16;
37698 if (perm[i] != elt)
37699 break;
37700 }
37701 if (i == 16)
37702 {
37703 enum insn_code icode = patterns[j].impl;
37704 machine_mode omode = insn_data[icode].operand[0].mode;
37705 machine_mode imode = insn_data[icode].operand[1].mode;
37706
37707 /* For little-endian, don't use vpkuwum and vpkuhum if the
37708 underlying vector type is not V4SI and V8HI, respectively.
37709 For example, using vpkuwum with a V8HI picks up the even
37710 halfwords (BE numbering) when the even halfwords (LE
37711 numbering) are what we need. */
37712 if (!BYTES_BIG_ENDIAN
37713 && icode == CODE_FOR_altivec_vpkuwum_direct
37714 && ((GET_CODE (op0) == REG
37715 && GET_MODE (op0) != V4SImode)
37716 || (GET_CODE (op0) == SUBREG
37717 && GET_MODE (XEXP (op0, 0)) != V4SImode)))
37718 continue;
37719 if (!BYTES_BIG_ENDIAN
37720 && icode == CODE_FOR_altivec_vpkuhum_direct
37721 && ((GET_CODE (op0) == REG
37722 && GET_MODE (op0) != V8HImode)
37723 || (GET_CODE (op0) == SUBREG
37724 && GET_MODE (XEXP (op0, 0)) != V8HImode)))
37725 continue;
37726
37727 /* For little-endian, the two input operands must be swapped
37728 (or swapped back) to ensure proper right-to-left numbering
37729 from 0 to 2N-1. */
37730 if (swapped ^ !BYTES_BIG_ENDIAN)
37731 std::swap (op0, op1);
37732 if (imode != V16QImode)
37733 {
37734 op0 = gen_lowpart (imode, op0);
37735 op1 = gen_lowpart (imode, op1);
37736 }
37737 if (omode == V16QImode)
37738 x = target;
37739 else
37740 x = gen_reg_rtx (omode);
37741 emit_insn (GEN_FCN (icode) (x, op0, op1));
37742 if (omode != V16QImode)
37743 emit_move_insn (target, gen_lowpart (V16QImode, x));
37744 return true;
37745 }
37746 }
37747
37748 if (!BYTES_BIG_ENDIAN)
37749 {
37750 altivec_expand_vec_perm_const_le (operands);
37751 return true;
37752 }
37753
37754 return false;
37755 }
37756
37757 /* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
37758 Return true if we match an efficient implementation. */
37759
37760 static bool
37761 rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
37762 unsigned char perm0, unsigned char perm1)
37763 {
37764 rtx x;
37765
37766 /* If both selectors come from the same operand, fold to single op. */
37767 if ((perm0 & 2) == (perm1 & 2))
37768 {
37769 if (perm0 & 2)
37770 op0 = op1;
37771 else
37772 op1 = op0;
37773 }
37774 /* If both operands are equal, fold to simpler permutation. */
37775 if (rtx_equal_p (op0, op1))
37776 {
37777 perm0 = perm0 & 1;
37778 perm1 = (perm1 & 1) + 2;
37779 }
37780 /* If the first selector comes from the second operand, swap. */
37781 else if (perm0 & 2)
37782 {
37783 if (perm1 & 2)
37784 return false;
37785 perm0 -= 2;
37786 perm1 += 2;
37787 std::swap (op0, op1);
37788 }
37789 /* If the second selector does not come from the second operand, fail. */
37790 else if ((perm1 & 2) == 0)
37791 return false;
37792
37793 /* Success! */
37794 if (target != NULL)
37795 {
37796 machine_mode vmode, dmode;
37797 rtvec v;
37798
37799 vmode = GET_MODE (target);
37800 gcc_assert (GET_MODE_NUNITS (vmode) == 2);
37801 dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
37802 x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
37803 v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
37804 x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
37805 emit_insn (gen_rtx_SET (target, x));
37806 }
37807 return true;
37808 }
37809
37810 bool
37811 rs6000_expand_vec_perm_const (rtx operands[4])
37812 {
37813 rtx target, op0, op1, sel;
37814 unsigned char perm0, perm1;
37815
37816 target = operands[0];
37817 op0 = operands[1];
37818 op1 = operands[2];
37819 sel = operands[3];
37820
37821 /* Unpack the constant selector. */
37822 perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
37823 perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;
37824
37825 return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
37826 }
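/* Example of the two-element case: given distinct register operands and
   the selector {0, 3}, we get perm0 = 0 and perm1 = 3, and
   rs6000_expand_vec_perm_const_1 emits
   (vec_select:V2DF (vec_concat:V4DF op0 op1) (parallel [0 3])) for
   V2DFmode, i.e. element 0 of OP0 paired with element 1 of OP1. */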
37827
37828 /* Test whether a constant permutation is supported. */
37829
37830 static bool
37831 rs6000_vectorize_vec_perm_const_ok (machine_mode vmode,
37832 const unsigned char *sel)
37833 {
37834 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
37835 if (TARGET_ALTIVEC)
37836 return true;
37837
37838 /* Check for ps_merge* or evmerge* insns. */
37839 if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
37840 || (TARGET_SPE && vmode == V2SImode))
37841 {
37842 rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
37843 rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
37844 return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
37845 }
37846
37847 return false;
37848 }
37849
37850 /* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave. */
37851
37852 static void
37853 rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
37854 machine_mode vmode, unsigned nelt, rtx perm[])
37855 {
37856 machine_mode imode;
37857 rtx x;
37858
37859 imode = vmode;
37860 if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
37861 {
37862 imode = mode_for_size (GET_MODE_UNIT_BITSIZE (vmode), MODE_INT, 0);
37863 imode = mode_for_vector (imode, nelt);
37864 }
37865
37866 x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
37867 x = expand_vec_perm (vmode, op0, op1, x, target);
37868 if (x != target)
37869 emit_move_insn (target, x);
37870 }
37871
37872 /* Expand an extract even operation. */
37873
37874 void
37875 rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
37876 {
37877 machine_mode vmode = GET_MODE (target);
37878 unsigned i, nelt = GET_MODE_NUNITS (vmode);
37879 rtx perm[16];
37880
37881 for (i = 0; i < nelt; i++)
37882 perm[i] = GEN_INT (i * 2);
37883
37884 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
37885 }
37886
37887 /* Expand a vector interleave operation. */
37888
37889 void
37890 rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
37891 {
37892 machine_mode vmode = GET_MODE (target);
37893 unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
37894 rtx perm[16];
37895
37896 high = (highp ? 0 : nelt / 2);
37897 for (i = 0; i < nelt / 2; i++)
37898 {
37899 perm[i * 2] = GEN_INT (i + high);
37900 perm[i * 2 + 1] = GEN_INT (i + nelt + high);
37901 }
37902
37903 rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
37904 }
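/* For instance, with a V4SI target (nelt == 4) and HIGHP true this
   builds the selector {0, 4, 1, 5}, interleaving the first halves of
   OP0 and OP1; with HIGHP false it builds {2, 6, 3, 7} for the second
   halves. */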
37905
37906 /* Scale a V2DF vector SRC by 2**SCALE and place the result in TGT. */
37907 void
37908 rs6000_scale_v2df (rtx tgt, rtx src, int scale)
37909 {
37910 HOST_WIDE_INT hwi_scale (scale);
37911 REAL_VALUE_TYPE r_pow;
37912 rtvec v = rtvec_alloc (2);
37913 rtx elt;
37914 rtx scale_vec = gen_reg_rtx (V2DFmode);
37915 (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
37916 elt = const_double_from_real_value (r_pow, DFmode);
37917 RTVEC_ELT (v, 0) = elt;
37918 RTVEC_ELT (v, 1) = elt;
37919 rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
37920 emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
37921 }
37922
37923 /* Return an RTX representing where to find the function value of a
37924 function returning MODE. */
37925 static rtx
37926 rs6000_complex_function_value (machine_mode mode)
37927 {
37928 unsigned int regno;
37929 rtx r1, r2;
37930 machine_mode inner = GET_MODE_INNER (mode);
37931 unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);
37932
37933 if (TARGET_FLOAT128_TYPE
37934 && (mode == KCmode
37935 || (mode == TCmode && TARGET_IEEEQUAD)))
37936 regno = ALTIVEC_ARG_RETURN;
37937
37938 else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
37939 regno = FP_ARG_RETURN;
37940
37941 else
37942 {
37943 regno = GP_ARG_RETURN;
37944
37945 /* 32-bit is OK since it'll go in r3/r4. */
37946 if (TARGET_32BIT && inner_bytes >= 4)
37947 return gen_rtx_REG (mode, regno);
37948 }
37949
37950 if (inner_bytes >= 8)
37951 return gen_rtx_REG (mode, regno);
37952
37953 r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
37954 const0_rtx);
37955 r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
37956 GEN_INT (inner_bytes));
37957 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
37958 }
37959
37960 /* Return an rtx describing a return value of MODE as a PARALLEL
37961 in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
37962 stride REG_STRIDE. */
37963
37964 static rtx
37965 rs6000_parallel_return (machine_mode mode,
37966 int n_elts, machine_mode elt_mode,
37967 unsigned int regno, unsigned int reg_stride)
37968 {
37969 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
37970
37971 int i;
37972 for (i = 0; i < n_elts; i++)
37973 {
37974 rtx r = gen_rtx_REG (elt_mode, regno);
37975 rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
37976 XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
37977 regno += reg_stride;
37978 }
37979
37980 return par;
37981 }
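/* E.g. the DImode libcall case below calls
   rs6000_parallel_return (DImode, 2, SImode, GP_ARG_RETURN, 1), giving
   (parallel [(expr_list (reg:SI 3) (const_int 0))
	      (expr_list (reg:SI 4) (const_int 4))])
   so the 64-bit value is split across r3/r4 at byte offsets 0 and 4. */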
37982
37983 /* Target hook for TARGET_FUNCTION_VALUE.
37984
37985 On the SPE, both FPs and vectors are returned in r3.
37986
37987 On RS/6000 an integer value is in r3 and a floating-point value is in
37988 fp1, unless -msoft-float. */
37989
37990 static rtx
37991 rs6000_function_value (const_tree valtype,
37992 const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
37993 bool outgoing ATTRIBUTE_UNUSED)
37994 {
37995 machine_mode mode;
37996 unsigned int regno;
37997 machine_mode elt_mode;
37998 int n_elts;
37999
38000 /* Special handling for structs in darwin64. */
38001 if (TARGET_MACHO
38002 && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
38003 {
38004 CUMULATIVE_ARGS valcum;
38005 rtx valret;
38006
38007 valcum.words = 0;
38008 valcum.fregno = FP_ARG_MIN_REG;
38009 valcum.vregno = ALTIVEC_ARG_MIN_REG;
38010 /* Do a trial code generation as if this were going to be passed as
38011 an argument; if any part goes in memory, we return NULL. */
38012 valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
38013 if (valret)
38014 return valret;
38015 /* Otherwise fall through to standard ABI rules. */
38016 }
38017
38018 mode = TYPE_MODE (valtype);
38019
38020 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers. */
38021 if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
38022 {
38023 int first_reg, n_regs;
38024
38025 if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
38026 {
38027 /* _Decimal128 must use even/odd register pairs. */
38028 first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
38029 n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
38030 }
38031 else
38032 {
38033 first_reg = ALTIVEC_ARG_RETURN;
38034 n_regs = 1;
38035 }
38036
38037 return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
38038 }
38039
38040 /* Some return value types need to be split in the -mpowerpc64, 32-bit ABI. */
38041 if (TARGET_32BIT && TARGET_POWERPC64)
38042 switch (mode)
38043 {
38044 default:
38045 break;
38046 case DImode:
38047 case SCmode:
38048 case DCmode:
38049 case TCmode:
38050 int count = GET_MODE_SIZE (mode) / 4;
38051 return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
38052 }
38053
38054 if ((INTEGRAL_TYPE_P (valtype)
38055 && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
38056 || POINTER_TYPE_P (valtype))
38057 mode = TARGET_32BIT ? SImode : DImode;
38058
38059 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
38060 /* _Decimal128 must use an even/odd register pair. */
38061 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
38062 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
38063 && !FLOAT128_VECTOR_P (mode)
38064 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
38065 regno = FP_ARG_RETURN;
38066 else if (TREE_CODE (valtype) == COMPLEX_TYPE
38067 && targetm.calls.split_complex_arg)
38068 return rs6000_complex_function_value (mode);
38069 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
38070 return register is used in both cases, and we won't see V2DImode/V2DFmode
38071 for pure altivec, combine the two cases. */
38072 else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
38073 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
38074 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
38075 regno = ALTIVEC_ARG_RETURN;
38076 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
38077 && (mode == DFmode || mode == DCmode
38078 || FLOAT128_IBM_P (mode) || mode == TCmode))
38079 return spe_build_register_parallel (mode, GP_ARG_RETURN);
38080 else
38081 regno = GP_ARG_RETURN;
38082
38083 return gen_rtx_REG (mode, regno);
38084 }
38085
38086 /* Define how to find the value returned by a library function
38087 assuming the value has mode MODE. */
38088 rtx
38089 rs6000_libcall_value (machine_mode mode)
38090 {
38091 unsigned int regno;
38092
38093 /* A long long return value needs to be split in the -mpowerpc64, 32-bit ABI. */
38094 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
38095 return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);
38096
38097 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
38098 /* _Decimal128 must use an even/odd register pair. */
38099 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
38100 else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode)
38101 && TARGET_HARD_FLOAT && TARGET_FPRS
38102 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
38103 regno = FP_ARG_RETURN;
38104 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
38105 return register is used in both cases, and we won't see V2DImode/V2DFmode
38106 for pure altivec, combine the two cases. */
38107 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
38108 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
38109 regno = ALTIVEC_ARG_RETURN;
38110 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
38111 return rs6000_complex_function_value (mode);
38112 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
38113 && (mode == DFmode || mode == DCmode
38114 || FLOAT128_IBM_P (mode) || mode == TCmode))
38115 return spe_build_register_parallel (mode, GP_ARG_RETURN);
38116 else
38117 regno = GP_ARG_RETURN;
38118
38119 return gen_rtx_REG (mode, regno);
38120 }
38121
38122
38123 /* Return true if we use LRA instead of the reload pass. */
38124 static bool
38125 rs6000_lra_p (void)
38126 {
38127 return TARGET_LRA;
38128 }
38129
38130 /* Compute register pressure classes. We implement the target hook to avoid
38131 IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
38132 lead to incorrect estimates of the number of available registers and therefore
38133 increased register pressure/spilling. */
38134 static int
38135 rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
38136 {
38137 int n;
38138
38139 n = 0;
38140 pressure_classes[n++] = GENERAL_REGS;
38141 if (TARGET_VSX)
38142 pressure_classes[n++] = VSX_REGS;
38143 else
38144 {
38145 if (TARGET_ALTIVEC)
38146 pressure_classes[n++] = ALTIVEC_REGS;
38147 if (TARGET_HARD_FLOAT && TARGET_FPRS)
38148 pressure_classes[n++] = FLOAT_REGS;
38149 }
38150 pressure_classes[n++] = CR_REGS;
38151 pressure_classes[n++] = SPECIAL_REGS;
38152
38153 return n;
38154 }
38155
38156 /* Given FROM and TO register numbers, say whether this elimination is allowed.
38157 Frame pointer elimination is automatically handled.
38158
38159 For the RS/6000, if frame pointer elimination is being done, we would like
38160 to convert ap into fp, not sp.
38161
38162 We need r30 if -mminimal-toc was specified and there are constant pool
38163 references. */
38164
38165 static bool
38166 rs6000_can_eliminate (const int from, const int to)
38167 {
38168 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
38169 ? ! frame_pointer_needed
38170 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
38171 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
38172 || constant_pool_empty_p ()
38173 : true);
38174 }
38175
38176 /* Define the offset between two registers, FROM to be eliminated and its
38177 replacement TO, at the start of a routine. */
38178 HOST_WIDE_INT
38179 rs6000_initial_elimination_offset (int from, int to)
38180 {
38181 rs6000_stack_t *info = rs6000_stack_info ();
38182 HOST_WIDE_INT offset;
38183
38184 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
38185 offset = info->push_p ? 0 : -info->total_size;
38186 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
38187 {
38188 offset = info->push_p ? 0 : -info->total_size;
38189 if (FRAME_GROWS_DOWNWARD)
38190 offset += info->fixed_size + info->vars_size + info->parm_size;
38191 }
38192 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
38193 offset = FRAME_GROWS_DOWNWARD
38194 ? info->fixed_size + info->vars_size + info->parm_size
38195 : 0;
38196 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
38197 offset = info->total_size;
38198 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
38199 offset = info->push_p ? info->total_size : 0;
38200 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
38201 offset = 0;
38202 else
38203 gcc_unreachable ();
38204
38205 return offset;
38206 }
38207
38208 static rtx
38209 rs6000_dwarf_register_span (rtx reg)
38210 {
38211 rtx parts[8];
38212 int i, words;
38213 unsigned regno = REGNO (reg);
38214 machine_mode mode = GET_MODE (reg);
38215
38216 if (TARGET_SPE
38217 && regno < 32
38218 && (SPE_VECTOR_MODE (GET_MODE (reg))
38219 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
38220 && mode != SFmode && mode != SDmode && mode != SCmode)))
38221 ;
38222 else
38223 return NULL_RTX;
38224
38225 regno = REGNO (reg);
38226
38227 /* The duality of the SPE register size wreaks all kinds of havoc.
38228 This is a way of distinguishing r0 in 32-bits from r0 in
38229 64-bits. */
38230 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
38231 gcc_assert (words <= 4);
38232 for (i = 0; i < words; i++, regno++)
38233 {
38234 if (BYTES_BIG_ENDIAN)
38235 {
38236 parts[2 * i] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
38237 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
38238 }
38239 else
38240 {
38241 parts[2 * i] = gen_rtx_REG (SImode, regno);
38242 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
38243 }
38244 }
38245
38246 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
38247 }
38248
38249 /* Fill in sizes for SPE register high parts in table used by unwinder. */
38250
38251 static void
38252 rs6000_init_dwarf_reg_sizes_extra (tree address)
38253 {
38254 if (TARGET_SPE)
38255 {
38256 int i;
38257 machine_mode mode = TYPE_MODE (char_type_node);
38258 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
38259 rtx mem = gen_rtx_MEM (BLKmode, addr);
38260 rtx value = gen_int_mode (4, mode);
38261
38262 for (i = FIRST_SPE_HIGH_REGNO; i < LAST_SPE_HIGH_REGNO+1; i++)
38263 {
38264 int column = DWARF_REG_TO_UNWIND_COLUMN
38265 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
38266 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
38267
38268 emit_move_insn (adjust_address (mem, mode, offset), value);
38269 }
38270 }
38271
38272 if (TARGET_MACHO && ! TARGET_ALTIVEC)
38273 {
38274 int i;
38275 machine_mode mode = TYPE_MODE (char_type_node);
38276 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
38277 rtx mem = gen_rtx_MEM (BLKmode, addr);
38278 rtx value = gen_int_mode (16, mode);
38279
38280 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
38281 The unwinder still needs to know the size of Altivec registers. */
38282
38283 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
38284 {
38285 int column = DWARF_REG_TO_UNWIND_COLUMN
38286 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
38287 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
38288
38289 emit_move_insn (adjust_address (mem, mode, offset), value);
38290 }
38291 }
38292 }
38293
38294 /* Map internal gcc register numbers to debug format register numbers.
38295 FORMAT specifies the type of debug register number to use:
38296 0 -- debug information, except for frame-related sections
38297 1 -- DWARF .debug_frame section
38298 2 -- DWARF .eh_frame section */
38299
38300 unsigned int
38301 rs6000_dbx_register_number (unsigned int regno, unsigned int format)
38302 {
38303 /* We never use the GCC internal number for SPE high registers.
38304 Those are mapped to the 1200..1231 range for all debug formats. */
38305 if (SPE_HIGH_REGNO_P (regno))
38306 return regno - FIRST_SPE_HIGH_REGNO + 1200;
38307
38308 /* Except for the above, we use the internal number for non-DWARF
38309 debug information, and also for .eh_frame. */
38310 if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
38311 return regno;
38312
38313 /* On some platforms, we use the standard DWARF register
38314 numbering for .debug_info and .debug_frame. */
38315 #ifdef RS6000_USE_DWARF_NUMBERING
38316 if (regno <= 63)
38317 return regno;
38318 if (regno == LR_REGNO)
38319 return 108;
38320 if (regno == CTR_REGNO)
38321 return 109;
38322 /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
38323 translated any combination of CR2, CR3, CR4 saves to a save of CR2.
38324 The actual code emitted saves the whole of CR, so we map CR2_REGNO
38325 to the DWARF reg for CR. */
38326 if (format == 1 && regno == CR2_REGNO)
38327 return 64;
38328 if (CR_REGNO_P (regno))
38329 return regno - CR0_REGNO + 86;
38330 if (regno == CA_REGNO)
38331 return 101; /* XER */
38332 if (ALTIVEC_REGNO_P (regno))
38333 return regno - FIRST_ALTIVEC_REGNO + 1124;
38334 if (regno == VRSAVE_REGNO)
38335 return 356;
38336 if (regno == VSCR_REGNO)
38337 return 67;
38338 if (regno == SPE_ACC_REGNO)
38339 return 99;
38340 if (regno == SPEFSCR_REGNO)
38341 return 612;
38342 #endif
38343 return regno;
38344 }
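
/* For illustration, assuming RS6000_USE_DWARF_NUMBERING is defined:

     rs6000_dbx_register_number (5, 1)         => 5    (GPR5)
     rs6000_dbx_register_number (LR_REGNO, 1)  => 108  (link register)
     rs6000_dbx_register_number (CR2_REGNO, 1) => 64   (whole CR)

   whereas format 2 (.eh_frame) returns the internal number unchanged
   for everything except the SPE high registers.  */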
38345
38346 /* target hook eh_return_filter_mode */
38347 static machine_mode
38348 rs6000_eh_return_filter_mode (void)
38349 {
38350 return TARGET_32BIT ? SImode : word_mode;
38351 }
38352
38353 /* Target hook for scalar_mode_supported_p. */
38354 static bool
38355 rs6000_scalar_mode_supported_p (machine_mode mode)
38356 {
38357 /* -m32 does not support TImode. This is the default, from
38358 default_scalar_mode_supported_p. For -m32 -mpowerpc64 we want the
38359 same ABI as for -m32. But default_scalar_mode_supported_p allows
38360 integer modes of precision 2 * BITS_PER_WORD, which matches TImode
38361 for -mpowerpc64. */
38362 if (TARGET_32BIT && mode == TImode)
38363 return false;
38364
38365 if (DECIMAL_FLOAT_MODE_P (mode))
38366 return default_decimal_float_supported_p ();
38367 else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
38368 return true;
38369 else
38370 return default_scalar_mode_supported_p (mode);
38371 }
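
/* For illustration (usage sketch only): a declaration such as

     __int128 x;

   is accepted under -m64 but rejected under -m32, and the explicit
   TImode check above keeps it rejected even for -m32 -mpowerpc64,
   where the default hook would otherwise allow it.  */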
38372
38373 /* Target hook for vector_mode_supported_p. */
38374 static bool
38375 rs6000_vector_mode_supported_p (machine_mode mode)
38376 {
38378 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
38379 return true;
38380
38381 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
38382 return true;
38383
38384 /* There is no vector form for IEEE 128-bit. If we return true for IEEE
38385 128-bit, the compiler might try to widen IEEE 128-bit to IBM
38386 double-double. */
38387 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
38388 return true;
38389
38390 else
38391 return false;
38392 }
38393
38394 /* Target hook for floatn_mode. */
38395 static machine_mode
38396 rs6000_floatn_mode (int n, bool extended)
38397 {
38398 if (extended)
38399 {
38400 switch (n)
38401 {
38402 case 32:
38403 return DFmode;
38404
38405 case 64:
38406 if (TARGET_FLOAT128_KEYWORD)
38407 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
38408 else
38409 return VOIDmode;
38410
38411 case 128:
38412 return VOIDmode;
38413
38414 default:
38415 /* Those are the only valid _FloatNx types. */
38416 gcc_unreachable ();
38417 }
38418 }
38419 else
38420 {
38421 switch (n)
38422 {
38423 case 32:
38424 return SFmode;
38425
38426 case 64:
38427 return DFmode;
38428
38429 case 128:
38430 if (TARGET_FLOAT128_KEYWORD)
38431 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
38432 else
38433 return VOIDmode;
38434
38435 default:
38436 return VOIDmode;
38437 }
38438 }
38440 }
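
/* For illustration: on a target where long double is IBM double-double
   and -mfloat128 is enabled, _Float128 maps to KFmode (IEEE 128-bit)
   because FLOAT128_IEEE_P (TFmode) is false, _Float32x maps to DFmode,
   and _Float128x is rejected with VOIDmode.  This is an example of the
   mapping above, not a statement about any particular default.  */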
38441
38442 /* Target hook for c_mode_for_suffix. */
38443 static machine_mode
38444 rs6000_c_mode_for_suffix (char suffix)
38445 {
38446 if (TARGET_FLOAT128_TYPE)
38447 {
38448 if (suffix == 'q' || suffix == 'Q')
38449 return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
38450
38451 /* At the moment, we are not defining a suffix for IBM extended double.
38452 If/when the default for -mabi=ieeelongdouble is changed, and we want
38453 to support __ibm128 constants in legacy library code, we may need to
38454    re-evaluate this decision.  Currently, c-lex.c only supports 'w' and
38455 'q' as machine dependent suffixes. The x86_64 port uses 'w' for
38456 __float80 constants. */
38457 }
38458
38459 return VOIDmode;
38460 }
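
/* For illustration (usage sketch, not code from this file): with
   -mfloat128 in effect, a constant such as

     __float128 x = 1.5q;

   gets its mode from this hook; 'q' yields KFmode, or TFmode when long
   double is already IEEE 128-bit.  */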
38461
38462 /* Target hook for invalid_arg_for_unprototyped_fn. */
38463 static const char *
38464 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
38465 {
38466 return (!rs6000_darwin64_abi
38467 && typelist == 0
38468 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
38469 && (funcdecl == NULL_TREE
38470 || (TREE_CODE (funcdecl) == FUNCTION_DECL
38471 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
38472 ? N_("AltiVec argument passed to unprototyped function")
38473 : NULL;
38474 }
38475
38476 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
38477 setup by using __stack_chk_fail_local hidden function instead of
38478 calling __stack_chk_fail directly. Otherwise it is better to call
38479 __stack_chk_fail directly. */
38480
38481 static tree ATTRIBUTE_UNUSED
38482 rs6000_stack_protect_fail (void)
38483 {
38484 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
38485 ? default_hidden_stack_protect_fail ()
38486 : default_external_stack_protect_fail ();
38487 }
38488
38489 void
38490 rs6000_final_prescan_insn (rtx_insn *insn, rtx *operand ATTRIBUTE_UNUSED,
38491 int num_operands ATTRIBUTE_UNUSED)
38492 {
38493 if (rs6000_warn_cell_microcode)
38494 {
38495 const char *temp;
38496 int insn_code_number = recog_memoized (insn);
38497 location_t location = INSN_LOCATION (insn);
38498
38499 /* Punt on insns we cannot recognize. */
38500 if (insn_code_number < 0)
38501 return;
38502
38503 temp = get_insn_template (insn_code_number, insn);
38504
38505 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
38506 warning_at (location, OPT_mwarn_cell_microcode,
38507 "emitting microcode insn %s\t[%s] #%d",
38508 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
38509 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
38510 warning_at (location, OPT_mwarn_cell_microcode,
38511 "emitting conditional microcode insn %s\t[%s] #%d",
38512 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
38513 }
38514 }
38515
38516 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
38517
38518 #if TARGET_ELF
38519 static unsigned HOST_WIDE_INT
38520 rs6000_asan_shadow_offset (void)
38521 {
38522 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
38523 }
38524 #endif
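
/* For illustration: AddressSanitizer checks an access at ADDR via the
   shadow byte at (ADDR >> 3) + offset, so with the 64-bit offset above
   an access at 0x10000000 is checked through shadow address
   (0x10000000 >> 3) + (1ULL << 41) = 0x20002000000.  This arithmetic
   is a sketch of the usual ASan mapping, not code from this file.  */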
38525 \f
38526 /* Mask options that we want to support inside attribute((target)) and
38527    #pragma GCC target operations.  Note that we do not include things like
38528 64/32-bit, endianness, hard/soft floating point, etc. that would have
38529 different calling sequences. */
38530
38531 struct rs6000_opt_mask {
38532 const char *name; /* option name */
38533 HOST_WIDE_INT mask; /* mask to set */
38534 bool invert; /* invert sense of mask */
38535 bool valid_target; /* option is a target option */
38536 };
38537
38538 static struct rs6000_opt_mask const rs6000_opt_masks[] =
38539 {
38540 { "altivec", OPTION_MASK_ALTIVEC, false, true },
38541 { "cmpb", OPTION_MASK_CMPB, false, true },
38542 { "crypto", OPTION_MASK_CRYPTO, false, true },
38543 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
38544 { "dlmzb", OPTION_MASK_DLMZB, false, true },
38545 { "efficient-unaligned-vsx", OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
38546 false, true },
38547 { "float128", OPTION_MASK_FLOAT128_KEYWORD, false, false },
38548 { "float128-type", OPTION_MASK_FLOAT128_TYPE, false, false },
38549 { "float128-hardware", OPTION_MASK_FLOAT128_HW, false, false },
38550 { "fprnd", OPTION_MASK_FPRND, false, true },
38551 { "hard-dfp", OPTION_MASK_DFP, false, true },
38552 { "htm", OPTION_MASK_HTM, false, true },
38553 { "isel", OPTION_MASK_ISEL, false, true },
38554 { "mfcrf", OPTION_MASK_MFCRF, false, true },
38555 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
38556 { "modulo", OPTION_MASK_MODULO, false, true },
38557 { "mulhw", OPTION_MASK_MULHW, false, true },
38558 { "multiple", OPTION_MASK_MULTIPLE, false, true },
38559 { "popcntb", OPTION_MASK_POPCNTB, false, true },
38560 { "popcntd", OPTION_MASK_POPCNTD, false, true },
38561 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
38562 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
38563 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
38564 { "power9-dform-scalar", OPTION_MASK_P9_DFORM_SCALAR, false, true },
38565 { "power9-dform-vector", OPTION_MASK_P9_DFORM_VECTOR, false, true },
38566 { "power9-fusion", OPTION_MASK_P9_FUSION, false, true },
38567 { "power9-minmax", OPTION_MASK_P9_MINMAX, false, true },
38568 { "power9-misc", OPTION_MASK_P9_MISC, false, true },
38569 { "power9-vector", OPTION_MASK_P9_VECTOR, false, true },
38570 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
38571 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
38572 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
38573 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
38574 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
38575 { "save-toc-indirect", OPTION_MASK_SAVE_TOC_INDIRECT, false, true },
38576 { "string", OPTION_MASK_STRING, false, true },
38577 { "toc-fusion", OPTION_MASK_TOC_FUSION, false, true },
38578   { "update",			OPTION_MASK_NO_UPDATE,		true, true },
38579 { "upper-regs-di", OPTION_MASK_UPPER_REGS_DI, false, true },
38580 { "upper-regs-df", OPTION_MASK_UPPER_REGS_DF, false, true },
38581 { "upper-regs-sf", OPTION_MASK_UPPER_REGS_SF, false, true },
38582 { "vsx", OPTION_MASK_VSX, false, true },
38583 { "vsx-small-integer", OPTION_MASK_VSX_SMALL_INTEGER, false, true },
38584 { "vsx-timode", OPTION_MASK_VSX_TIMODE, false, true },
38585 #ifdef OPTION_MASK_64BIT
38586 #if TARGET_AIX_OS
38587 { "aix64", OPTION_MASK_64BIT, false, false },
38588 { "aix32", OPTION_MASK_64BIT, true, false },
38589 #else
38590 { "64", OPTION_MASK_64BIT, false, false },
38591 { "32", OPTION_MASK_64BIT, true, false },
38592 #endif
38593 #endif
38594 #ifdef OPTION_MASK_EABI
38595 { "eabi", OPTION_MASK_EABI, false, false },
38596 #endif
38597 #ifdef OPTION_MASK_LITTLE_ENDIAN
38598 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
38599 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
38600 #endif
38601 #ifdef OPTION_MASK_RELOCATABLE
38602 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
38603 #endif
38604 #ifdef OPTION_MASK_STRICT_ALIGN
38605 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
38606 #endif
38607 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
38609 };
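
/* For illustration (usage sketch only): the names above are what users
   write inside attribute((target("..."))) and #pragma GCC target, e.g.

     #pragma GCC target ("vsx,no-crypto")
     void f (void);

   "vsx" sets OPTION_MASK_VSX (and, per the parsing code below, also
   implies altivec), while the "no-" prefix inverts the mask.  */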
38610
38611 /* Builtin mask mapping for printing the flags. */
38612 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
38613 {
38614 { "altivec", RS6000_BTM_ALTIVEC, false, false },
38615 { "vsx", RS6000_BTM_VSX, false, false },
38616 { "spe", RS6000_BTM_SPE, false, false },
38617 { "paired", RS6000_BTM_PAIRED, false, false },
38618 { "fre", RS6000_BTM_FRE, false, false },
38619 { "fres", RS6000_BTM_FRES, false, false },
38620 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
38621 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
38622 { "popcntd", RS6000_BTM_POPCNTD, false, false },
38623 { "cell", RS6000_BTM_CELL, false, false },
38624 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
38625 { "power9-vector", RS6000_BTM_P9_VECTOR, false, false },
38626 { "power9-misc", RS6000_BTM_P9_MISC, false, false },
38627 { "crypto", RS6000_BTM_CRYPTO, false, false },
38628 { "htm", RS6000_BTM_HTM, false, false },
38629 { "hard-dfp", RS6000_BTM_DFP, false, false },
38630 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
38631 { "long-double-128", RS6000_BTM_LDBL128, false, false },
38632 { "float128", RS6000_BTM_FLOAT128, false, false },
38633 };
38634
38635 /* Option variables that we want to support inside attribute((target)) and
38636 #pragma GCC target operations. */
38637
38638 struct rs6000_opt_var {
38639 const char *name; /* option name */
38640 size_t global_offset; /* offset of the option in global_options. */
38641 size_t target_offset; /* offset of the option in target options. */
38642 };
38643
38644 static struct rs6000_opt_var const rs6000_opt_vars[] =
38645 {
38646 { "friz",
38647 offsetof (struct gcc_options, x_TARGET_FRIZ),
38648 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
38649 { "avoid-indexed-addresses",
38650 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
38651 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
38652 { "paired",
38653 offsetof (struct gcc_options, x_rs6000_paired_float),
38654 offsetof (struct cl_target_option, x_rs6000_paired_float), },
38655 { "longcall",
38656 offsetof (struct gcc_options, x_rs6000_default_long_calls),
38657 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
38658 { "optimize-swaps",
38659 offsetof (struct gcc_options, x_rs6000_optimize_swaps),
38660 offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
38661 { "allow-movmisalign",
38662 offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
38663 offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
38664 { "allow-df-permute",
38665 offsetof (struct gcc_options, x_TARGET_ALLOW_DF_PERMUTE),
38666 offsetof (struct cl_target_option, x_TARGET_ALLOW_DF_PERMUTE), },
38667 { "sched-groups",
38668 offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
38669 offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
38670 { "always-hint",
38671 offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
38672 offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
38673 { "align-branch-targets",
38674 offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
38675 offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
38676 { "vectorize-builtins",
38677 offsetof (struct gcc_options, x_TARGET_VECTORIZE_BUILTINS),
38678 offsetof (struct cl_target_option, x_TARGET_VECTORIZE_BUILTINS), },
38679 { "tls-markers",
38680 offsetof (struct gcc_options, x_tls_markers),
38681 offsetof (struct cl_target_option, x_tls_markers), },
38682 { "sched-prolog",
38683 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
38684 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
38685 { "sched-epilog",
38686 offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
38687 offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
38688 { "gen-cell-microcode",
38689 offsetof (struct gcc_options, x_rs6000_gen_cell_microcode),
38690 offsetof (struct cl_target_option, x_rs6000_gen_cell_microcode), },
38691 { "warn-cell-microcode",
38692 offsetof (struct gcc_options, x_rs6000_warn_cell_microcode),
38693 offsetof (struct cl_target_option, x_rs6000_warn_cell_microcode), },
38694 };
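
/* For illustration: unlike the mask table, these entries toggle whole
   option variables, so attribute((target("no-friz"))) clears
   x_TARGET_FRIZ in global_options through the global_offset recorded
   above; see the *((int *) ...) store in rs6000_inner_target_options
   below.  The attribute string is a usage sketch only.  */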
38695
38696 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
38697 parsing. Return true if there were no errors. */
38698
38699 static bool
38700 rs6000_inner_target_options (tree args, bool attr_p)
38701 {
38702 bool ret = true;
38703
38704 if (args == NULL_TREE)
38705 ;
38706
38707 else if (TREE_CODE (args) == STRING_CST)
38708 {
38709 char *p = ASTRDUP (TREE_STRING_POINTER (args));
38710 char *q;
38711
38712 while ((q = strtok (p, ",")) != NULL)
38713 {
38714 bool error_p = false;
38715 bool not_valid_p = false;
38716 const char *cpu_opt = NULL;
38717
38718 p = NULL;
38719 if (strncmp (q, "cpu=", 4) == 0)
38720 {
38721 int cpu_index = rs6000_cpu_name_lookup (q+4);
38722 if (cpu_index >= 0)
38723 rs6000_cpu_index = cpu_index;
38724 else
38725 {
38726 error_p = true;
38727 cpu_opt = q+4;
38728 }
38729 }
38730 else if (strncmp (q, "tune=", 5) == 0)
38731 {
38732 int tune_index = rs6000_cpu_name_lookup (q+5);
38733 if (tune_index >= 0)
38734 rs6000_tune_index = tune_index;
38735 else
38736 {
38737 error_p = true;
38738 cpu_opt = q+5;
38739 }
38740 }
38741 else
38742 {
38743 size_t i;
38744 bool invert = false;
38745 char *r = q;
38746
38747 error_p = true;
38748 if (strncmp (r, "no-", 3) == 0)
38749 {
38750 invert = true;
38751 r += 3;
38752 }
38753
38754 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
38755 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
38756 {
38757 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
38758
38759 if (!rs6000_opt_masks[i].valid_target)
38760 not_valid_p = true;
38761 else
38762 {
38763 error_p = false;
38764 rs6000_isa_flags_explicit |= mask;
38765
38766 /* VSX needs altivec, so -mvsx automagically sets
38767 altivec and disables -mavoid-indexed-addresses. */
38768 if (!invert)
38769 {
38770 if (mask == OPTION_MASK_VSX)
38771 {
38772 mask |= OPTION_MASK_ALTIVEC;
38773 TARGET_AVOID_XFORM = 0;
38774 }
38775 }
38776
38777 if (rs6000_opt_masks[i].invert)
38778 invert = !invert;
38779
38780 if (invert)
38781 rs6000_isa_flags &= ~mask;
38782 else
38783 rs6000_isa_flags |= mask;
38784 }
38785 break;
38786 }
38787
38788 if (error_p && !not_valid_p)
38789 {
38790 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
38791 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
38792 {
38793 size_t j = rs6000_opt_vars[i].global_offset;
38794 *((int *) ((char *)&global_options + j)) = !invert;
38795 error_p = false;
38796 not_valid_p = false;
38797 break;
38798 }
38799 }
38800 }
38801
38802 if (error_p)
38803 {
38804 const char *eprefix, *esuffix;
38805
38806 ret = false;
38807 if (attr_p)
38808 {
38809 eprefix = "__attribute__((__target__(";
38810 esuffix = ")))";
38811 }
38812 else
38813 {
38814 eprefix = "#pragma GCC target ";
38815 esuffix = "";
38816 }
38817
38818 if (cpu_opt)
38819 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
38820 q, esuffix);
38821 else if (not_valid_p)
38822 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
38823 else
38824 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
38825 }
38826 }
38827 }
38828
38829 else if (TREE_CODE (args) == TREE_LIST)
38830 {
38831 do
38832 {
38833 tree value = TREE_VALUE (args);
38834 if (value)
38835 {
38836 bool ret2 = rs6000_inner_target_options (value, attr_p);
38837 if (!ret2)
38838 ret = false;
38839 }
38840 args = TREE_CHAIN (args);
38841 }
38842 while (args != NULL_TREE);
38843 }
38844
38845 else
38846 gcc_unreachable ();
38847
38848 return ret;
38849 }
38850
38851 /* Print out the target options as a list for -mdebug=target. */
38852
38853 static void
38854 rs6000_debug_target_options (tree args, const char *prefix)
38855 {
38856 if (args == NULL_TREE)
38857 fprintf (stderr, "%s<NULL>", prefix);
38858
38859 else if (TREE_CODE (args) == STRING_CST)
38860 {
38861 char *p = ASTRDUP (TREE_STRING_POINTER (args));
38862 char *q;
38863
38864 while ((q = strtok (p, ",")) != NULL)
38865 {
38866 p = NULL;
38867 fprintf (stderr, "%s\"%s\"", prefix, q);
38868 prefix = ", ";
38869 }
38870 }
38871
38872 else if (TREE_CODE (args) == TREE_LIST)
38873 {
38874 do
38875 {
38876 tree value = TREE_VALUE (args);
38877 if (value)
38878 {
38879 rs6000_debug_target_options (value, prefix);
38880 prefix = ", ";
38881 }
38882 args = TREE_CHAIN (args);
38883 }
38884 while (args != NULL_TREE);
38885 }
38886
38887 else
38888 gcc_unreachable ();
38889
38890 return;
38891 }
38892
38893 \f
38894 /* Hook to validate attribute((target("..."))). */
38895
38896 static bool
38897 rs6000_valid_attribute_p (tree fndecl,
38898 tree ARG_UNUSED (name),
38899 tree args,
38900 int flags)
38901 {
38902 struct cl_target_option cur_target;
38903 bool ret;
38904 tree old_optimize = build_optimization_node (&global_options);
38905 tree new_target, new_optimize;
38906 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
38907
38908 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
38909
38910 if (TARGET_DEBUG_TARGET)
38911 {
38912 tree tname = DECL_NAME (fndecl);
38913 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
38914 if (tname)
38915 fprintf (stderr, "function: %.*s\n",
38916 (int) IDENTIFIER_LENGTH (tname),
38917 IDENTIFIER_POINTER (tname));
38918 else
38919 fprintf (stderr, "function: unknown\n");
38920
38921 fprintf (stderr, "args:");
38922 rs6000_debug_target_options (args, " ");
38923 fprintf (stderr, "\n");
38924
38925 if (flags)
38926 fprintf (stderr, "flags: 0x%x\n", flags);
38927
38928 fprintf (stderr, "--------------------\n");
38929 }
38930
38933
38934 /* If the function changed the optimization levels as well as setting target
38935 options, start with the optimizations specified. */
38936 if (func_optimize && func_optimize != old_optimize)
38937 cl_optimization_restore (&global_options,
38938 TREE_OPTIMIZATION (func_optimize));
38939
38940 /* The target attributes may also change some optimization flags, so update
38941 the optimization options if necessary. */
38942 cl_target_option_save (&cur_target, &global_options);
38943 rs6000_cpu_index = rs6000_tune_index = -1;
38944 ret = rs6000_inner_target_options (args, true);
38945
38946 /* Set up any additional state. */
38947 if (ret)
38948 {
38949 ret = rs6000_option_override_internal (false);
38950 new_target = build_target_option_node (&global_options);
38951 }
38952 else
38953 new_target = NULL;
38954
38955 new_optimize = build_optimization_node (&global_options);
38956
38957 if (!new_target)
38958 ret = false;
38959
38960 else if (fndecl)
38961 {
38962 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
38963
38964 if (old_optimize != new_optimize)
38965 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
38966 }
38967
38968 cl_target_option_restore (&global_options, &cur_target);
38969
38970 if (old_optimize != new_optimize)
38971 cl_optimization_restore (&global_options,
38972 TREE_OPTIMIZATION (old_optimize));
38973
38974 return ret;
38975 }
38976
38977 \f
38978 /* Hook to validate the current #pragma GCC target and set the state, and
38979 update the macros based on what was changed. If ARGS is NULL, then
38980 POP_TARGET is used to reset the options. */
38981
38982 bool
38983 rs6000_pragma_target_parse (tree args, tree pop_target)
38984 {
38985 tree prev_tree = build_target_option_node (&global_options);
38986 tree cur_tree;
38987 struct cl_target_option *prev_opt, *cur_opt;
38988 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
38989 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
38990
38991 if (TARGET_DEBUG_TARGET)
38992 {
38993 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
38994 fprintf (stderr, "args:");
38995 rs6000_debug_target_options (args, " ");
38996 fprintf (stderr, "\n");
38997
38998 if (pop_target)
38999 {
39000 fprintf (stderr, "pop_target:\n");
39001 debug_tree (pop_target);
39002 }
39003 else
39004 fprintf (stderr, "pop_target: <NULL>\n");
39005
39006 fprintf (stderr, "--------------------\n");
39007 }
39008
39009 if (! args)
39010 {
39011 cur_tree = ((pop_target)
39012 ? pop_target
39013 : target_option_default_node);
39014 cl_target_option_restore (&global_options,
39015 TREE_TARGET_OPTION (cur_tree));
39016 }
39017 else
39018 {
39019 rs6000_cpu_index = rs6000_tune_index = -1;
39020 if (!rs6000_inner_target_options (args, false)
39021 || !rs6000_option_override_internal (false)
39022 || (cur_tree = build_target_option_node (&global_options))
39023 == NULL_TREE)
39024 {
39025 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
39026 fprintf (stderr, "invalid pragma\n");
39027
39028 return false;
39029 }
39030 }
39031
39032 target_option_current_node = cur_tree;
39033
39034 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
39035 change the macros that are defined. */
39036 if (rs6000_target_modify_macros_ptr)
39037 {
39038 prev_opt = TREE_TARGET_OPTION (prev_tree);
39039 prev_bumask = prev_opt->x_rs6000_builtin_mask;
39040 prev_flags = prev_opt->x_rs6000_isa_flags;
39041
39042 cur_opt = TREE_TARGET_OPTION (cur_tree);
39043 cur_flags = cur_opt->x_rs6000_isa_flags;
39044 cur_bumask = cur_opt->x_rs6000_builtin_mask;
39045
39046 diff_bumask = (prev_bumask ^ cur_bumask);
39047 diff_flags = (prev_flags ^ cur_flags);
39048
39049 if ((diff_flags != 0) || (diff_bumask != 0))
39050 {
39051 /* Delete old macros. */
39052 rs6000_target_modify_macros_ptr (false,
39053 prev_flags & diff_flags,
39054 prev_bumask & diff_bumask);
39055
39056 /* Define new macros. */
39057 rs6000_target_modify_macros_ptr (true,
39058 cur_flags & diff_flags,
39059 cur_bumask & diff_bumask);
39060 }
39061 }
39062
39063 return true;
39064 }
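
/* For illustration (usage sketch only): a sequence such as

     #pragma GCC push_options
     #pragma GCC target ("vsx")
     ...
     #pragma GCC pop_options

   flows through this hook; the XOR of the previous and current ISA
   flag sets determines which predefined macros (e.g. __VSX__) are
   deleted and which are defined via rs6000_target_modify_macros_ptr.  */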
39065
39066 \f
39067 /* Remember the last target of rs6000_set_current_function. */
39068 static GTY(()) tree rs6000_previous_fndecl;
39069
39070 /* Establish appropriate back-end context for processing the function
39071 FNDECL. The argument might be NULL to indicate processing at top
39072 level, outside of any function scope. */
39073 static void
39074 rs6000_set_current_function (tree fndecl)
39075 {
39076 tree old_tree = (rs6000_previous_fndecl
39077 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
39078 : NULL_TREE);
39079
39080 tree new_tree = (fndecl
39081 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
39082 : NULL_TREE);
39083
39084 if (TARGET_DEBUG_TARGET)
39085 {
39086 bool print_final = false;
39087 fprintf (stderr, "\n==================== rs6000_set_current_function");
39088
39089 if (fndecl)
39090 fprintf (stderr, ", fndecl %s (%p)",
39091 (DECL_NAME (fndecl)
39092 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
39093 : "<unknown>"), (void *)fndecl);
39094
39095 if (rs6000_previous_fndecl)
39096 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
39097
39098 fprintf (stderr, "\n");
39099 if (new_tree)
39100 {
39101 fprintf (stderr, "\nnew fndecl target specific options:\n");
39102 debug_tree (new_tree);
39103 print_final = true;
39104 }
39105
39106 if (old_tree)
39107 {
39108 fprintf (stderr, "\nold fndecl target specific options:\n");
39109 debug_tree (old_tree);
39110 print_final = true;
39111 }
39112
39113 if (print_final)
39114 fprintf (stderr, "--------------------\n");
39115 }
39116
39117 /* Only change the context if the function changes. This hook is called
39118 several times in the course of compiling a function, and we don't want to
39119 slow things down too much or call target_reinit when it isn't safe. */
39120 if (fndecl && fndecl != rs6000_previous_fndecl)
39121 {
39122 rs6000_previous_fndecl = fndecl;
39123 if (old_tree == new_tree)
39124 ;
39125
39126 else if (new_tree && new_tree != target_option_default_node)
39127 {
39128 cl_target_option_restore (&global_options,
39129 TREE_TARGET_OPTION (new_tree));
39130 if (TREE_TARGET_GLOBALS (new_tree))
39131 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
39132 else
39133 TREE_TARGET_GLOBALS (new_tree)
39134 = save_target_globals_default_opts ();
39135 }
39136
39137 else if (old_tree && old_tree != target_option_default_node)
39138 {
39139 new_tree = target_option_current_node;
39140 cl_target_option_restore (&global_options,
39141 TREE_TARGET_OPTION (new_tree));
39142 if (TREE_TARGET_GLOBALS (new_tree))
39143 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
39144 else if (new_tree == target_option_default_node)
39145 restore_target_globals (&default_target_globals);
39146 else
39147 TREE_TARGET_GLOBALS (new_tree)
39148 = save_target_globals_default_opts ();
39149 }
39150 }
39151 }
39152
39153 \f
39154 /* Save the current options */
39155
39156 static void
39157 rs6000_function_specific_save (struct cl_target_option *ptr,
39158 struct gcc_options *opts)
39159 {
39160 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
39161 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
39162 }
39163
39164 /* Restore the current options */
39165
39166 static void
39167 rs6000_function_specific_restore (struct gcc_options *opts,
39168 struct cl_target_option *ptr)
39170 {
39171 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
39172 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
39173 (void) rs6000_option_override_internal (false);
39174 }
39175
39176 /* Print the current options */
39177
39178 static void
39179 rs6000_function_specific_print (FILE *file, int indent,
39180 struct cl_target_option *ptr)
39181 {
39182 rs6000_print_isa_options (file, indent, "Isa options set",
39183 ptr->x_rs6000_isa_flags);
39184
39185 rs6000_print_isa_options (file, indent, "Isa options explicit",
39186 ptr->x_rs6000_isa_flags_explicit);
39187 }
39188
39189 /* Helper function to print the current isa or misc options on a line. */
39190
39191 static void
39192 rs6000_print_options_internal (FILE *file,
39193 int indent,
39194 const char *string,
39195 HOST_WIDE_INT flags,
39196 const char *prefix,
39197 const struct rs6000_opt_mask *opts,
39198 size_t num_elements)
39199 {
39200 size_t i;
39201 size_t start_column = 0;
39202 size_t cur_column;
39203 size_t max_column = 120;
39204 size_t prefix_len = strlen (prefix);
39205 size_t comma_len = 0;
39206 const char *comma = "";
39207
39208 if (indent)
39209 start_column += fprintf (file, "%*s", indent, "");
39210
39211 if (!flags)
39212 {
39213       fprintf (file, DEBUG_FMT_S, string, "<none>");
39214 return;
39215 }
39216
39217   start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
39218
39219 /* Print the various mask options. */
39220 cur_column = start_column;
39221 for (i = 0; i < num_elements; i++)
39222 {
39223 bool invert = opts[i].invert;
39224 const char *name = opts[i].name;
39225 const char *no_str = "";
39226 HOST_WIDE_INT mask = opts[i].mask;
39227 size_t len = comma_len + prefix_len + strlen (name);
39228
39229 if (!invert)
39230 {
39231 if ((flags & mask) == 0)
39232 {
39233 no_str = "no-";
39234 len += sizeof ("no-") - 1;
39235 }
39236
39237 flags &= ~mask;
39238 }
39239
39240 else
39241 {
39242 if ((flags & mask) != 0)
39243 {
39244 no_str = "no-";
39245 len += sizeof ("no-") - 1;
39246 }
39247
39248 flags |= mask;
39249 }
39250
39251 cur_column += len;
39252 if (cur_column > max_column)
39253 {
39254 	  fprintf (file, ", \\\n%*s", (int)start_column, "");
39255 cur_column = start_column + len;
39256 comma = "";
39257 }
39258
39259 fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
39260 comma = ", ";
39261 comma_len = sizeof (", ") - 1;
39262 }
39263
39264 fputs ("\n", file);
39265 }
39266
39267 /* Helper function to print the current isa options on a line. */
39268
39269 static void
39270 rs6000_print_isa_options (FILE *file, int indent, const char *string,
39271 HOST_WIDE_INT flags)
39272 {
39273 rs6000_print_options_internal (file, indent, string, flags, "-m",
39274 &rs6000_opt_masks[0],
39275 ARRAY_SIZE (rs6000_opt_masks));
39276 }
39277
39278 static void
39279 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
39280 HOST_WIDE_INT flags)
39281 {
39282 rs6000_print_options_internal (file, indent, string, flags, "",
39283 &rs6000_builtin_mask_names[0],
39284 ARRAY_SIZE (rs6000_builtin_mask_names));
39285 }
39286
39287 \f
39288 /* Hook to determine if one function can safely inline another. */
39289
39290 static bool
39291 rs6000_can_inline_p (tree caller, tree callee)
39292 {
39293 bool ret = false;
39294 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
39295 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
39296
39297 /* If callee has no option attributes, then it is ok to inline. */
39298 if (!callee_tree)
39299 ret = true;
39300
39301   /* If the caller has no option attributes but the callee does, then it is
39302      not ok to inline.  */
39303 else if (!caller_tree)
39304 ret = false;
39305
39306 else
39307 {
39308 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
39309 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
39310
39311       /* Callee's options should be a subset of the caller's, i.e. a vsx function
39312 can inline an altivec function but a non-vsx function can't inline a
39313 vsx function. */
39314 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
39315 == callee_opts->x_rs6000_isa_flags)
39316 ret = true;
39317 }
39318
39319 if (TARGET_DEBUG_TARGET)
39320 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
39321 (DECL_NAME (caller)
39322 ? IDENTIFIER_POINTER (DECL_NAME (caller))
39323 : "<unknown>"),
39324 (DECL_NAME (callee)
39325 ? IDENTIFIER_POINTER (DECL_NAME (callee))
39326 : "<unknown>"),
39327 (ret ? "can" : "cannot"));
39328
39329 return ret;
39330 }
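
/* For illustration, mirroring the comment above: a caller compiled
   with target("vsx") can inline a callee marked target("altivec"),
   because the callee's ISA flags are a subset of the caller's (VSX
   implies AltiVec), while the reverse combination fails the subset
   test.  */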
39331 \f
39332 /* Allocate a stack temp and fixup the address so it meets the particular
39333    memory requirements (either offsettable or REG+REG addressing).  */
39334
39335 rtx
39336 rs6000_allocate_stack_temp (machine_mode mode,
39337 bool offsettable_p,
39338 bool reg_reg_p)
39339 {
39340 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
39341 rtx addr = XEXP (stack, 0);
39342 int strict_p = (reload_in_progress || reload_completed);
39343
39344 if (!legitimate_indirect_address_p (addr, strict_p))
39345 {
39346 if (offsettable_p
39347 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
39348 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
39349
39350 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
39351 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
39352 }
39353
39354 return stack;
39355 }
39356
39357 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
39358 to such a form to deal with memory reference instructions like STFIWX that
39359 only take reg+reg addressing. */
39360
39361 rtx
39362 rs6000_address_for_fpconvert (rtx x)
39363 {
39364 int strict_p = (reload_in_progress || reload_completed);
39365 rtx addr;
39366
39367 gcc_assert (MEM_P (x));
39368 addr = XEXP (x, 0);
39369 if (! legitimate_indirect_address_p (addr, strict_p)
39370 && ! legitimate_indexed_address_p (addr, strict_p))
39371 {
39372 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
39373 {
39374 rtx reg = XEXP (addr, 0);
39375 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
39376 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
39377 gcc_assert (REG_P (reg));
39378 emit_insn (gen_add3_insn (reg, reg, size_rtx));
39379 addr = reg;
39380 }
39381 else if (GET_CODE (addr) == PRE_MODIFY)
39382 {
39383 rtx reg = XEXP (addr, 0);
39384 rtx expr = XEXP (addr, 1);
39385 gcc_assert (REG_P (reg));
39386 gcc_assert (GET_CODE (expr) == PLUS);
39387 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
39388 addr = reg;
39389 }
39390
39391 x = replace_equiv_address (x, copy_addr_to_reg (addr));
39392 }
39393
39394 return x;
39395 }
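
/* For illustration (RTL sketch only): given a memory operand whose
   address auto-increments, e.g.

     (mem:SF (pre_inc:SI (reg:SI 9)))

   the code above first emits "r9 = r9 + 4" and then rewrites the
   memory to a plain register address, a form that instructions like
   STFIWX can accept.  */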
39396
39397 /* Given a memory reference, if it is not in the form for altivec memory
39398 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
39399 convert to the altivec format. */
39400
39401 rtx
39402 rs6000_address_for_altivec (rtx x)
39403 {
39404 gcc_assert (MEM_P (x));
39405 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
39406 {
39407 rtx addr = XEXP (x, 0);
39408 int strict_p = (reload_in_progress || reload_completed);
39409
39410 if (!legitimate_indexed_address_p (addr, strict_p)
39411 && !legitimate_indirect_address_p (addr, strict_p))
39412 addr = copy_to_mode_reg (Pmode, addr);
39413
39414 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
39415 x = change_address (x, GET_MODE (x), addr);
39416 }
39417
39418 return x;
39419 }
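
/* For illustration: the AND with -16 clears the low four address bits,
   matching what the lvx/stvx hardware does, so e.g. an address of
   0x1003 becomes 0x1000.  Any address is thereby forced into the
   16-byte-aligned form the AltiVec memory patterns expect.  */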
39420
39421 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
39422
39423 On the RS/6000, all integer constants are acceptable, most won't be valid
39424 for particular insns, though. Only easy FP constants are acceptable. */
39425
39426 static bool
39427 rs6000_legitimate_constant_p (machine_mode mode, rtx x)
39428 {
39429 if (TARGET_ELF && tls_referenced_p (x))
39430 return false;
39431
39432 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
39433 || GET_MODE (x) == VOIDmode
39434 || (TARGET_POWERPC64 && mode == DImode)
39435 || easy_fp_constant (x, mode)
39436 || easy_vector_constant (x, mode));
39437 }
39438
39439 \f
39440 /* Return TRUE iff the sequence ending in LAST sets the static chain. */
39441
39442 static bool
39443 chain_already_loaded (rtx_insn *last)
39444 {
39445 for (; last != NULL; last = PREV_INSN (last))
39446 {
39447 if (NONJUMP_INSN_P (last))
39448 {
39449 rtx patt = PATTERN (last);
39450
39451 if (GET_CODE (patt) == SET)
39452 {
39453 rtx lhs = XEXP (patt, 0);
39454
39455 if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
39456 return true;
39457 }
39458 }
39459 }
39460 return false;
39461 }
39462
39463 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
39464
39465 void
39466 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
39467 {
39468 const bool direct_call_p
39469 = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
39470 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
39471 rtx toc_load = NULL_RTX;
39472 rtx toc_restore = NULL_RTX;
39473 rtx func_addr;
39474 rtx abi_reg = NULL_RTX;
39475 rtx call[4];
39476 int n_call;
39477 rtx insn;
39478
39479 /* Handle longcall attributes. */
39480 if (INTVAL (cookie) & CALL_LONG)
39481 func_desc = rs6000_longcall_ref (func_desc);
39482
39483 /* Handle indirect calls. */
39484 if (GET_CODE (func_desc) != SYMBOL_REF
39485 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
39486 {
39487 /* Save the TOC into its reserved slot before the call,
39488 and prepare to restore it after the call. */
39489 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
39490 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
39491 rtx stack_toc_mem = gen_frame_mem (Pmode,
39492 gen_rtx_PLUS (Pmode, stack_ptr,
39493 stack_toc_offset));
39494 rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
39495 gen_rtvec (1, stack_toc_offset),
39496 UNSPEC_TOCSLOT);
39497 toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);
39498
39499 /* Can we optimize saving the TOC in the prologue or
39500 do we need to do it at every call? */
39501 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
39502 cfun->machine->save_toc_in_prologue = true;
39503 else
39504 {
39505 MEM_VOLATILE_P (stack_toc_mem) = 1;
39506 emit_move_insn (stack_toc_mem, toc_reg);
39507 }
39508
39509 if (DEFAULT_ABI == ABI_ELFv2)
39510 {
39511 /* A function pointer in the ELFv2 ABI is just a plain address, but
39512 the ABI requires it to be loaded into r12 before the call. */
39513 func_addr = gen_rtx_REG (Pmode, 12);
39514 emit_move_insn (func_addr, func_desc);
39515 abi_reg = func_addr;
39516 }
39517 else
39518 {
39519 /* A function pointer under AIX is a pointer to a data area whose
39520 first word contains the actual address of the function, whose
39521 second word contains a pointer to its TOC, and whose third word
39522 contains a value to place in the static chain register (r11).
39523 Note that if we load the static chain, our "trampoline" need
39524 not have any executable code. */
39525
39526 /* Load up address of the actual function. */
39527 func_desc = force_reg (Pmode, func_desc);
39528 func_addr = gen_reg_rtx (Pmode);
39529 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
39530
39531 /* Prepare to load the TOC of the called function. Note that the
39532 TOC load must happen immediately before the actual call so
39533 that unwinding the TOC registers works correctly. See the
39534 comment in frob_update_context. */
39535 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
39536 rtx func_toc_mem = gen_rtx_MEM (Pmode,
39537 gen_rtx_PLUS (Pmode, func_desc,
39538 func_toc_offset));
39539 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
39540
39541 /* If we have a static chain, load it up. But, if the call was
39542 originally direct, the 3rd word has not been written since no
39543 trampoline has been built, so we ought not to load it, lest we
39544 	     overwrite a static chain value.  */
39545 if (!direct_call_p
39546 && TARGET_POINTERS_TO_NESTED_FUNCTIONS
39547 && !chain_already_loaded (get_current_sequence ()->next->last))
39548 {
39549 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
39550 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
39551 rtx func_sc_mem = gen_rtx_MEM (Pmode,
39552 gen_rtx_PLUS (Pmode, func_desc,
39553 func_sc_offset));
39554 emit_move_insn (sc_reg, func_sc_mem);
39555 abi_reg = sc_reg;
39556 }
39557 }
39558 }
39559 else
39560 {
39561 /* Direct calls use the TOC: for local calls, the callee will
39562 assume the TOC register is set; for non-local calls, the
39563 PLT stub needs the TOC register. */
39564 abi_reg = toc_reg;
39565 func_addr = func_desc;
39566 }
39567
39568 /* Create the call. */
39569 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
39570 if (value != NULL_RTX)
39571 call[0] = gen_rtx_SET (value, call[0]);
39572 n_call = 1;
39573
39574 if (toc_load)
39575 call[n_call++] = toc_load;
39576 if (toc_restore)
39577 call[n_call++] = toc_restore;
39578
39579 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
39580
39581 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
39582 insn = emit_call_insn (insn);
39583
39584 /* Mention all registers defined by the ABI to hold information
39585 as uses in CALL_INSN_FUNCTION_USAGE. */
39586 if (abi_reg)
39587 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
39588 }
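
/* For illustration (explanatory sketch, not a type used by GCC): the
   AIX function descriptor described above has the shape

     struct aix_func_desc {
       void *entry;         -- word 0: actual code address
       void *toc;           -- word 1: callee's TOC pointer
       void *static_chain;  -- word 2: value loaded into r11
     };  */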
39589
39590 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
39591
39592 void
39593 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
39594 {
39595 rtx call[2];
39596 rtx insn;
39597
39598 gcc_assert (INTVAL (cookie) == 0);
39599
39600 /* Create the call. */
39601 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
39602 if (value != NULL_RTX)
39603 call[0] = gen_rtx_SET (value, call[0]);
39604
39605 call[1] = simple_return_rtx;
39606
39607 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
39608 insn = emit_call_insn (insn);
39609
39610 /* Note use of the TOC register. */
39611 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
39612 }
39613
39614 /* Return whether we need to always update the saved TOC pointer when we update
39615 the stack pointer. */
39616
39617 static bool
39618 rs6000_save_toc_in_prologue_p (void)
39619 {
39620 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
39621 }
39622
39623 #ifdef HAVE_GAS_HIDDEN
39624 # define USE_HIDDEN_LINKONCE 1
39625 #else
39626 # define USE_HIDDEN_LINKONCE 0
39627 #endif
39628
39629 /* Fills in the label name that should be used for a 476 link stack thunk. */
39630
39631 void
39632 get_ppc476_thunk_name (char name[32])
39633 {
39634 gcc_assert (TARGET_LINK_STACK);
39635
39636 if (USE_HIDDEN_LINKONCE)
39637 sprintf (name, "__ppc476.get_thunk");
39638 else
39639 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
39640 }
39641
39642 /* This function emits the simple thunk routine that is used to preserve
39643 the link stack on the 476 cpu. */
39644
39645 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
39646 static void
39647 rs6000_code_end (void)
39648 {
39649 char name[32];
39650 tree decl;
39651
39652 if (!TARGET_LINK_STACK)
39653 return;
39654
39655 get_ppc476_thunk_name (name);
39656
39657 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
39658 build_function_type_list (void_type_node, NULL_TREE));
39659 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
39660 NULL_TREE, void_type_node);
39661 TREE_PUBLIC (decl) = 1;
39662 TREE_STATIC (decl) = 1;
39663
39664 #if RS6000_WEAK
39665 if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
39666 {
39667 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
39668 targetm.asm_out.unique_section (decl, 0);
39669 switch_to_section (get_named_section (decl, NULL, 0));
39670 DECL_WEAK (decl) = 1;
39671 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
39672 targetm.asm_out.globalize_label (asm_out_file, name);
39673 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
39674 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
39675 }
39676 else
39677 #endif
39678 {
39679 switch_to_section (text_section);
39680 ASM_OUTPUT_LABEL (asm_out_file, name);
39681 }
39682
39683 DECL_INITIAL (decl) = make_node (BLOCK);
39684 current_function_decl = decl;
39685 allocate_struct_function (decl, false);
39686 init_function_start (decl);
39687 first_function_block_is_cold = false;
39688 /* Make sure unwind info is emitted for the thunk if needed. */
39689 final_start_function (emit_barrier (), asm_out_file, 1);
39690
39691 fputs ("\tblr\n", asm_out_file);
39692
39693 final_end_function ();
39694 init_insn_lengths ();
39695 free_after_compilation (cfun);
39696 set_cfun (NULL);
39697 current_function_decl = NULL;
39698 }
39699
39700 /* Add r30 to hard reg set if the prologue sets it up and it is not
39701 pic_offset_table_rtx. */
39702
39703 static void
39704 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
39705 {
39706 if (!TARGET_SINGLE_PIC_BASE
39707 && TARGET_TOC
39708 && TARGET_MINIMAL_TOC
39709 && !constant_pool_empty_p ())
39710 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
39711 if (cfun->machine->split_stack_argp_used)
39712 add_to_hard_reg_set (&set->set, Pmode, 12);
39713 }
39714
39715 \f
39716 /* Helper function for rs6000_split_logical to emit a logical instruction after
39717    splitting the operation into individual GPR registers.
39718
39719 DEST is the destination register.
39720 OP1 and OP2 are the input source registers.
39721 CODE is the base operation (AND, IOR, XOR, NOT).
39722 MODE is the machine mode.
39723 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
39724 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
39725 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
39726
39727 static void
39728 rs6000_split_logical_inner (rtx dest,
39729 rtx op1,
39730 rtx op2,
39731 enum rtx_code code,
39732 machine_mode mode,
39733 bool complement_final_p,
39734 bool complement_op1_p,
39735 bool complement_op2_p)
39736 {
39737 rtx bool_rtx;
39738
39739 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
39740 if (op2 && GET_CODE (op2) == CONST_INT
39741 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
39742 && !complement_final_p && !complement_op1_p && !complement_op2_p)
39743 {
39744 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
39745 HOST_WIDE_INT value = INTVAL (op2) & mask;
39746
39747 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
39748 if (code == AND)
39749 {
39750 if (value == 0)
39751 {
39752 emit_insn (gen_rtx_SET (dest, const0_rtx));
39753 return;
39754 }
39755
39756 else if (value == mask)
39757 {
39758 if (!rtx_equal_p (dest, op1))
39759 emit_insn (gen_rtx_SET (dest, op1));
39760 return;
39761 }
39762 }
39763
39764 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
39765      into separate ORI/ORIS or XORI/XORIS instructions.  */
39766 else if (code == IOR || code == XOR)
39767 {
39768 if (value == 0)
39769 {
39770 if (!rtx_equal_p (dest, op1))
39771 emit_insn (gen_rtx_SET (dest, op1));
39772 return;
39773 }
39774 }
39775 }
39776
39777 if (code == AND && mode == SImode
39778 && !complement_final_p && !complement_op1_p && !complement_op2_p)
39779 {
39780 emit_insn (gen_andsi3 (dest, op1, op2));
39781 return;
39782 }
39783
39784 if (complement_op1_p)
39785 op1 = gen_rtx_NOT (mode, op1);
39786
39787 if (complement_op2_p)
39788 op2 = gen_rtx_NOT (mode, op2);
39789
39790 /* For canonical RTL, if only one arm is inverted it is the first. */
39791 if (!complement_op1_p && complement_op2_p)
39792 std::swap (op1, op2);
39793
39794 bool_rtx = ((code == NOT)
39795 ? gen_rtx_NOT (mode, op1)
39796 : gen_rtx_fmt_ee (code, mode, op1, op2));
39797
39798 if (complement_final_p)
39799 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
39800
39801 emit_insn (gen_rtx_SET (dest, bool_rtx));
39802 }
39803
39804 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
39805 operations are split immediately during RTL generation to allow for more
39806 optimizations of the AND/IOR/XOR.
39807
39808 OPERANDS is an array containing the destination and two input operands.
39809 CODE is the base operation (AND, IOR, XOR, NOT).
39810    If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
39811    If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
39812    If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.  */
39816
39817 static void
39818 rs6000_split_logical_di (rtx operands[3],
39819 enum rtx_code code,
39820 bool complement_final_p,
39821 bool complement_op1_p,
39822 bool complement_op2_p)
39823 {
39824 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
39825 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
39826 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
39827 enum hi_lo { hi = 0, lo = 1 };
39828 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
39829 size_t i;
39830
39831 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
39832 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
39833 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
39834 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
39835
39836 if (code == NOT)
39837 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
39838 else
39839 {
39840 if (GET_CODE (operands[2]) != CONST_INT)
39841 {
39842 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
39843 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
39844 }
39845 else
39846 {
39847 HOST_WIDE_INT value = INTVAL (operands[2]);
39848 HOST_WIDE_INT value_hi_lo[2];
39849
39850 gcc_assert (!complement_final_p);
39851 gcc_assert (!complement_op1_p);
39852 gcc_assert (!complement_op2_p);
39853
39854 value_hi_lo[hi] = value >> 32;
39855 value_hi_lo[lo] = value & lower_32bits;
39856
39857 for (i = 0; i < 2; i++)
39858 {
39859 HOST_WIDE_INT sub_value = value_hi_lo[i];
39860
39861 if (sub_value & sign_bit)
39862 sub_value |= upper_32bits;
39863
39864 op2_hi_lo[i] = GEN_INT (sub_value);
39865
39866 /* If this is an AND instruction, check to see if we need to load
39867 the value in a register. */
39868 if (code == AND && sub_value != -1 && sub_value != 0
39869 && !and_operand (op2_hi_lo[i], SImode))
39870 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
39871 }
39872 }
39873 }
39874
39875 for (i = 0; i < 2; i++)
39876 {
39877 /* Split large IOR/XOR operations. */
39878 if ((code == IOR || code == XOR)
39879 && GET_CODE (op2_hi_lo[i]) == CONST_INT
39880 && !complement_final_p
39881 && !complement_op1_p
39882 && !complement_op2_p
39883 && !logical_const_operand (op2_hi_lo[i], SImode))
39884 {
39885 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
39886 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
39887 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
39888 rtx tmp = gen_reg_rtx (SImode);
39889
39890 /* Make sure the constant is sign extended. */
39891 if ((hi_16bits & sign_bit) != 0)
39892 hi_16bits |= upper_32bits;
39893
39894 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
39895 code, SImode, false, false, false);
39896
39897 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
39898 code, SImode, false, false, false);
39899 }
39900 else
39901 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
39902 code, SImode, complement_final_p,
39903 complement_op1_p, complement_op2_p);
39904 }
39905
39906 return;
39907 }
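
/* For illustration (register numbers are a sketch): on a 32-bit
   target, a DImode operation such as

     (set (reg:DI 3) (and:DI (reg:DI 3) (const_int 0xff)))

   is split by the code above into two SImode halves, roughly
   "r3 = r3 & 0" for the high word (simplified to a clear by
   rs6000_split_logical_inner) and "r4 = r4 & 0xff" for the low
   word.  */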
39908
39909 /* Split the insns that make up boolean operations operating on multiple GPR
39910 registers. The boolean MD patterns ensure that the inputs either are
39911 exactly the same as the output registers, or there is no overlap.
39912
39913 OPERANDS is an array containing the destination and two input operands.
39914 CODE is the base operation (AND, IOR, XOR, NOT).
39915 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
39916 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
39917 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
39918
39919 void
39920 rs6000_split_logical (rtx operands[3],
39921 enum rtx_code code,
39922 bool complement_final_p,
39923 bool complement_op1_p,
39924 bool complement_op2_p)
39925 {
39926 machine_mode mode = GET_MODE (operands[0]);
39927 machine_mode sub_mode;
39928 rtx op0, op1, op2;
39929 int sub_size, regno0, regno1, nregs, i;
39930
39931 /* If this is DImode, use the specialized version that can run before
39932 register allocation. */
39933 if (mode == DImode && !TARGET_POWERPC64)
39934 {
39935 rs6000_split_logical_di (operands, code, complement_final_p,
39936 complement_op1_p, complement_op2_p);
39937 return;
39938 }
39939
39940 op0 = operands[0];
39941 op1 = operands[1];
39942 op2 = (code == NOT) ? NULL_RTX : operands[2];
39943 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
39944 sub_size = GET_MODE_SIZE (sub_mode);
39945 regno0 = REGNO (op0);
39946 regno1 = REGNO (op1);
39947
39948 gcc_assert (reload_completed);
39949 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
39950 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
39951
39952 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
39953 gcc_assert (nregs > 1);
39954
39955 if (op2 && REG_P (op2))
39956 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
39957
39958 for (i = 0; i < nregs; i++)
39959 {
39960 int offset = i * sub_size;
39961 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
39962 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
39963 rtx sub_op2 = ((code == NOT)
39964 ? NULL_RTX
39965 : simplify_subreg (sub_mode, op2, mode, offset));
39966
39967 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
39968 complement_final_p, complement_op1_p,
39969 complement_op2_p);
39970 }
39971
39972 return;
39973 }
39974
39975 \f
39976 /* Return true if the peephole2 can combine an addis instruction and a
39977    D-form load with an offset, so that the pair can be fused together on
39978    a power8.  */
39979
39980 bool
39981 fusion_gpr_load_p (rtx addis_reg, /* register set via addis. */
39982 rtx addis_value, /* addis value. */
39983 rtx target, /* target register that is loaded. */
39984 rtx mem) /* bottom part of the memory addr. */
39985 {
39986 rtx addr;
39987 rtx base_reg;
39988
39989 /* Validate arguments. */
39990 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
39991 return false;
39992
39993 if (!base_reg_operand (target, GET_MODE (target)))
39994 return false;
39995
39996 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
39997 return false;
39998
39999 /* Allow sign/zero extension. */
40000 if (GET_CODE (mem) == ZERO_EXTEND
40001 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
40002 mem = XEXP (mem, 0);
40003
40004 if (!MEM_P (mem))
40005 return false;
40006
40007 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
40008 return false;
40009
40010 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
40011 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
40012 return false;
40013
40014 /* Validate that the register used to load the high value is either the
40015 register being loaded, or one whose use we can safely replace.
40016
40017 This function is only called from the peephole2 pass, and we assume the
40018 peephole contains 2 instructions (addis and load). If the target register
40019 differs from the addis register, check that the target was not used in the
40020 memory address and that the addis result is dead after the peephole. */
40021 if (REGNO (addis_reg) != REGNO (target))
40022 {
40023 if (reg_mentioned_p (target, mem))
40024 return false;
40025
40026 if (!peep2_reg_dead_p (2, addis_reg))
40027 return false;
40028
40029 /* If the target register being loaded is the stack pointer, we must
40030 avoid loading any other value into it, even temporarily. */
40031 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
40032 return false;
40033 }
40034
40035 base_reg = XEXP (addr, 0);
40036 return REGNO (addis_reg) == REGNO (base_reg);
40037 }
40038
40039 /* During the peephole2 pass, adjust and expand the insns for a load fusion
40040 sequence. We adjust the addis register to use the target register. If the
40041 load sign extends, we change the code to use a zero-extending load followed
40042 by an explicit sign extension, since the fusion only covers zero-extending
40043 loads.
40044
40045 The operands are:
40046 operands[0] register set with addis (to be replaced with target)
40047 operands[1] value set via addis
40048 operands[2] target register being loaded
40049 operands[3] D-form memory reference using operands[0]. */
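/* A hedged sketch of the intended rewrite (register numbers and the symbol
   .LC0 are hypothetical, not taken from the sources): the unfused pair

	addis 10,2,.LC0@toc@ha		# operands[0] = r10
	lwz 9,.LC0@toc@l(10)		# operands[2] = r9

   is adjusted so that both instructions use the target register, letting
   the hardware fuse them:

	addis 9,2,.LC0@toc@ha
	lwz 9,.LC0@toc@l(9)  */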
40050
40051 void
40052 expand_fusion_gpr_load (rtx *operands)
40053 {
40054 rtx addis_value = operands[1];
40055 rtx target = operands[2];
40056 rtx orig_mem = operands[3];
40057 rtx new_addr, new_mem, orig_addr, offset;
40058 enum rtx_code plus_or_lo_sum;
40059 machine_mode target_mode = GET_MODE (target);
40060 machine_mode extend_mode = target_mode;
40061 machine_mode ptr_mode = Pmode;
40062 enum rtx_code extend = UNKNOWN;
40063
40064 if (GET_CODE (orig_mem) == ZERO_EXTEND
40065 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
40066 {
40067 extend = GET_CODE (orig_mem);
40068 orig_mem = XEXP (orig_mem, 0);
40069 target_mode = GET_MODE (orig_mem);
40070 }
40071
40072 gcc_assert (MEM_P (orig_mem));
40073
40074 orig_addr = XEXP (orig_mem, 0);
40075 plus_or_lo_sum = GET_CODE (orig_addr);
40076 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
40077
40078 offset = XEXP (orig_addr, 1);
40079 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
40080 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
40081
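/* Deliberately use ZERO_EXTEND even for a sign-extending load; the fusion
   only covers zero-extending loads, and an explicit sign extension is
   emitted below when needed.  */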
40082 if (extend != UNKNOWN)
40083 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
40084
40085 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
40086 UNSPEC_FUSION_GPR);
40087 emit_insn (gen_rtx_SET (target, new_mem));
40088
40089 if (extend == SIGN_EXTEND)
40090 {
40091 int sub_off = ((BYTES_BIG_ENDIAN)
40092 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
40093 : 0);
40094 rtx sign_reg
40095 = simplify_subreg (target_mode, target, extend_mode, sub_off);
40096
40097 emit_insn (gen_rtx_SET (target,
40098 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
40099 }
40100
40101 return;
40102 }
40103
40104 /* Emit the addis instruction that will be part of a fused instruction
40105 sequence. */
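/* For example (an illustrative sketch), an ADDIS_VALUE of
   (high (unspec [(sym)] UNSPEC_TOCREL)) selects the ELF template
   "addis %0,%2,%1@toc@ha" below, producing output along the lines of

	addis 9,2,.LC0@toc@ha		# gpr load fusion, type long

   where the register numbers and the symbol are hypothetical.  */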
40106
40107 void
40108 emit_fusion_addis (rtx target, rtx addis_value, const char *comment,
40109 const char *mode_name)
40110 {
40111 rtx fuse_ops[10];
40112 char insn_template[80];
40113 const char *addis_str = NULL;
40114 const char *comment_str = ASM_COMMENT_START;
40115
40116 if (*comment_str == ' ')
40117 comment_str++;
40118
40119 /* Emit the addis instruction. */
40120 fuse_ops[0] = target;
40121 if (satisfies_constraint_L (addis_value))
40122 {
40123 fuse_ops[1] = addis_value;
40124 addis_str = "lis %0,%v1";
40125 }
40126
40127 else if (GET_CODE (addis_value) == PLUS)
40128 {
40129 rtx op0 = XEXP (addis_value, 0);
40130 rtx op1 = XEXP (addis_value, 1);
40131
40132 if (REG_P (op0) && CONST_INT_P (op1)
40133 && satisfies_constraint_L (op1))
40134 {
40135 fuse_ops[1] = op0;
40136 fuse_ops[2] = op1;
40137 addis_str = "addis %0,%1,%v2";
40138 }
40139 }
40140
40141 else if (GET_CODE (addis_value) == HIGH)
40142 {
40143 rtx value = XEXP (addis_value, 0);
40144 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
40145 {
40146 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
40147 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
40148 if (TARGET_ELF)
40149 addis_str = "addis %0,%2,%1@toc@ha";
40150
40151 else if (TARGET_XCOFF)
40152 addis_str = "addis %0,%1@u(%2)";
40153
40154 else
40155 gcc_unreachable ();
40156 }
40157
40158 else if (GET_CODE (value) == PLUS)
40159 {
40160 rtx op0 = XEXP (value, 0);
40161 rtx op1 = XEXP (value, 1);
40162
40163 if (GET_CODE (op0) == UNSPEC
40164 && XINT (op0, 1) == UNSPEC_TOCREL
40165 && CONST_INT_P (op1))
40166 {
40167 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
40168 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
40169 fuse_ops[3] = op1;
40170 if (TARGET_ELF)
40171 addis_str = "addis %0,%2,%1+%3@toc@ha";
40172
40173 else if (TARGET_XCOFF)
40174 addis_str = "addis %0,%1+%3@u(%2)";
40175
40176 else
40177 gcc_unreachable ();
40178 }
40179 }
40180
40181 else if (satisfies_constraint_L (value))
40182 {
40183 fuse_ops[1] = value;
40184 addis_str = "lis %0,%v1";
40185 }
40186
40187 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
40188 {
40189 fuse_ops[1] = value;
40190 addis_str = "lis %0,%1@ha";
40191 }
40192 }
40193
40194 if (!addis_str)
40195 fatal_insn ("Could not generate addis value for fusion", addis_value);
40196
40197 sprintf (insn_template, "%s\t\t%s %s, type %s", addis_str, comment_str,
40198 comment, mode_name);
40199 output_asm_insn (insn_template, fuse_ops);
40200 }
40201
40202 /* Emit a D-form load or store instruction that is the second instruction
40203 of a fusion sequence. */
40204
40205 void
40206 emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
40207 const char *insn_str)
40208 {
40209 rtx fuse_ops[10];
40210 char insn_template[80];
40211
40212 fuse_ops[0] = load_store_reg;
40213 fuse_ops[1] = addis_reg;
40214
40215 if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
40216 {
40217 sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
40218 fuse_ops[2] = offset;
40219 output_asm_insn (insn_template, fuse_ops);
40220 }
40221
40222 else if (GET_CODE (offset) == UNSPEC
40223 && XINT (offset, 1) == UNSPEC_TOCREL)
40224 {
40225 if (TARGET_ELF)
40226 sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);
40227
40228 else if (TARGET_XCOFF)
40229 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
40230
40231 else
40232 gcc_unreachable ();
40233
40234 fuse_ops[2] = XVECEXP (offset, 0, 0);
40235 output_asm_insn (insn_template, fuse_ops);
40236 }
40237
40238 else if (GET_CODE (offset) == PLUS
40239 && GET_CODE (XEXP (offset, 0)) == UNSPEC
40240 && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
40241 && CONST_INT_P (XEXP (offset, 1)))
40242 {
40243 rtx tocrel_unspec = XEXP (offset, 0);
40244 if (TARGET_ELF)
40245 sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);
40246
40247 else if (TARGET_XCOFF)
40248 sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);
40249
40250 else
40251 gcc_unreachable ();
40252
40253 fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
40254 fuse_ops[3] = XEXP (offset, 1);
40255 output_asm_insn (insn_template, fuse_ops);
40256 }
40257
40258 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
40259 {
40260 sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);
40261
40262 fuse_ops[2] = offset;
40263 output_asm_insn (insn_template, fuse_ops);
40264 }
40265
40266 else
40267 fatal_insn ("Unable to generate load/store offset for fusion", offset);
40268
40269 return;
40270 }
40271
40272 /* Wrap a TOC address that can be fused to indicate that special fusion
40273 processing is needed. */
40274
40275 rtx
40276 fusion_wrap_memory_address (rtx old_mem)
40277 {
40278 rtx old_addr = XEXP (old_mem, 0);
40279 rtvec v = gen_rtvec (1, old_addr);
40280 rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
40281 return replace_equiv_address_nv (old_mem, new_addr, false);
40282 }
40283
40284 /* Given an address, convert it into the addis and load offset parts. Addresses
40285 created during the peephole2 process look like:
40286 (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
40287 (unspec [(...)] UNSPEC_TOCREL))
40288
40289 Addresses created via toc fusion look like:
40290 (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS) */
40291
40292 static void
40293 fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
40294 {
40295 rtx hi, lo;
40296
40297 if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
40298 {
40299 lo = XVECEXP (addr, 0, 0);
40300 hi = gen_rtx_HIGH (Pmode, lo);
40301 }
40302 else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
40303 {
40304 hi = XEXP (addr, 0);
40305 lo = XEXP (addr, 1);
40306 }
40307 else
40308 gcc_unreachable ();
40309
40310 *p_hi = hi;
40311 *p_lo = lo;
40312 }
40313
40314 /* Return a string to fuse an addis instruction with a GPR load into the
40315 same register that the addis instruction set. The address used is the
40316 logical address that was formed during peephole2:
40317 (lo_sum (high) (low-part))
40318
40319 Or the address is the TOC address that is wrapped before register allocation:
40320 (unspec [(addr)] UNSPEC_FUSION_ADDIS)
40321
40322 The code is complicated, so we call output_asm_insn directly, and just
40323 return "". */
40324
40325 const char *
40326 emit_fusion_gpr_load (rtx target, rtx mem)
40327 {
40328 rtx addis_value;
40329 rtx addr;
40330 rtx load_offset;
40331 const char *load_str = NULL;
40332 const char *mode_name = NULL;
40333 machine_mode mode;
40334
40335 if (GET_CODE (mem) == ZERO_EXTEND)
40336 mem = XEXP (mem, 0);
40337
40338 gcc_assert (REG_P (target) && MEM_P (mem));
40339
40340 addr = XEXP (mem, 0);
40341 fusion_split_address (addr, &addis_value, &load_offset);
40342
40343 /* Now emit the load instruction to the same register. */
40344 mode = GET_MODE (mem);
40345 switch (mode)
40346 {
40347 case QImode:
40348 mode_name = "char";
40349 load_str = "lbz";
40350 break;
40351
40352 case HImode:
40353 mode_name = "short";
40354 load_str = "lhz";
40355 break;
40356
40357 case SImode:
40358 case SFmode:
40359 mode_name = (mode == SFmode) ? "float" : "int";
40360 load_str = "lwz";
40361 break;
40362
40363 case DImode:
40364 case DFmode:
40365 gcc_assert (TARGET_POWERPC64);
40366 mode_name = (mode == DFmode) ? "double" : "long";
40367 load_str = "ld";
40368 break;
40369
40370 default:
40371 fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
40372 }
40373
40374 /* Emit the addis instruction. */
40375 emit_fusion_addis (target, addis_value, "gpr load fusion", mode_name);
40376
40377 /* Emit the D-form load instruction. */
40378 emit_fusion_load_store (target, target, load_offset, load_str);
40379
40380 return "";
40381 }
40382 \f
40383
40384 /* Return true if the peephole2 can fuse an addis instruction with a
40385 load/store memory operation. This form of fusion was added with the
40386 ISA 3.0 (power9) hardware. */
40387
40388 bool
40389 fusion_p9_p (rtx addis_reg, /* register set via addis. */
40390 rtx addis_value, /* addis value. */
40391 rtx dest, /* destination (memory or register). */
40392 rtx src) /* source (register or memory). */
40393 {
40394 rtx addr, mem, offset;
40395 machine_mode mode = GET_MODE (src);
40396
40397 /* Validate arguments. */
40398 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
40399 return false;
40400
40401 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
40402 return false;
40403
40404 /* Ignore extend operations that are part of the load. */
40405 if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
40406 src = XEXP (src, 0);
40407
40408 /* Test for memory<-register or register<-memory. */
40409 if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
40410 {
40411 if (!MEM_P (dest))
40412 return false;
40413
40414 mem = dest;
40415 }
40416
40417 else if (MEM_P (src))
40418 {
40419 if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
40420 return false;
40421
40422 mem = src;
40423 }
40424
40425 else
40426 return false;
40427
40428 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
40429 if (GET_CODE (addr) == PLUS)
40430 {
40431 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
40432 return false;
40433
40434 return satisfies_constraint_I (XEXP (addr, 1));
40435 }
40436
40437 else if (GET_CODE (addr) == LO_SUM)
40438 {
40439 if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
40440 return false;
40441
40442 offset = XEXP (addr, 1);
40443 if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
40444 return small_toc_ref (offset, GET_MODE (offset));
40445
40446 else if (TARGET_ELF && !TARGET_POWERPC64)
40447 return CONSTANT_P (offset);
40448 }
40449
40450 return false;
40451 }
40452
40453 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
40454 load sequence.
40455
40456 The operands are:
40457 operands[0] register set with addis
40458 operands[1] value set via addis
40459 operands[2] target register being loaded
40460 operands[3] D-form memory reference using operands[0].
40461
40462 This is similar to the fusion introduced with power8, except it applies to
40463 both loads and stores and does not require the result register to be the
40464 same as the base register. At the moment, we only do this if the register
40465 set with addis is dead. */
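/* The emitted insn has the following shape (a sketch with illustrative
   modes and register numbers):

	(parallel [(set (reg:DF 33)
			(unspec:DF [(mem:DF (lo_sum (high ...) (...)))]
				   UNSPEC_FUSION_P9))
		   (clobber (reg:DI 9))])

   The clobber records that the temporary addis register is overwritten
   by the fused sequence.  */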
40466
40467 void
40468 expand_fusion_p9_load (rtx *operands)
40469 {
40470 rtx tmp_reg = operands[0];
40471 rtx addis_value = operands[1];
40472 rtx target = operands[2];
40473 rtx orig_mem = operands[3];
40474 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
40475 enum rtx_code plus_or_lo_sum;
40476 machine_mode target_mode = GET_MODE (target);
40477 machine_mode extend_mode = target_mode;
40478 machine_mode ptr_mode = Pmode;
40479 enum rtx_code extend = UNKNOWN;
40480
40481 if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
40482 {
40483 extend = GET_CODE (orig_mem);
40484 orig_mem = XEXP (orig_mem, 0);
40485 target_mode = GET_MODE (orig_mem);
40486 }
40487
40488 gcc_assert (MEM_P (orig_mem));
40489
40490 orig_addr = XEXP (orig_mem, 0);
40491 plus_or_lo_sum = GET_CODE (orig_addr);
40492 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
40493
40494 offset = XEXP (orig_addr, 1);
40495 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
40496 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
40497
40498 if (extend != UNKNOWN)
40499 new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);
40500
40501 new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
40502 UNSPEC_FUSION_P9);
40503
40504 set = gen_rtx_SET (target, new_mem);
40505 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
40506 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
40507 emit_insn (insn);
40508
40509 return;
40510 }
40511
40512 /* During the peephole2 pass, adjust and expand the insns for an extended fusion
40513 store sequence.
40514
40515 The operands are:
40516 operands[0] register set with addis
40517 operands[1] value set via addis
40518 operands[2] target D-form memory being stored to
40519 operands[3] register being stored
40520
40521 This is similar to the fusion introduced with power8, except it applies to
40522 both loads and stores and does not require the result register to be the
40523 same as the base register. At the moment, we only do this if the register
40524 set with addis is dead. */
40525
40526 void
40527 expand_fusion_p9_store (rtx *operands)
40528 {
40529 rtx tmp_reg = operands[0];
40530 rtx addis_value = operands[1];
40531 rtx orig_mem = operands[2];
40532 rtx src = operands[3];
40533 rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
40534 enum rtx_code plus_or_lo_sum;
40535 machine_mode target_mode = GET_MODE (orig_mem);
40536 machine_mode ptr_mode = Pmode;
40537
40538 gcc_assert (MEM_P (orig_mem));
40539
40540 orig_addr = XEXP (orig_mem, 0);
40541 plus_or_lo_sum = GET_CODE (orig_addr);
40542 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
40543
40544 offset = XEXP (orig_addr, 1);
40545 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
40546 new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);
40547
40548 new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
40549 UNSPEC_FUSION_P9);
40550
40551 set = gen_rtx_SET (new_mem, new_src);
40552 clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
40553 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
40554 emit_insn (insn);
40555
40556 return;
40557 }
40558
40559 /* Return a string to fuse an addis instruction with a load using extended
40560 fusion. The address that is used is the logical address that was formed
40561 during peephole2: (lo_sum (high) (low-part))
40562
40563 The code is complicated, so we call output_asm_insn directly, and just
40564 return "". */
40565
40566 const char *
40567 emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
40568 {
40569 machine_mode mode = GET_MODE (reg);
40570 rtx hi;
40571 rtx lo;
40572 rtx addr;
40573 const char *load_string;
40574 int r;
40575
40576 if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
40577 {
40578 mem = XEXP (mem, 0);
40579 mode = GET_MODE (mem);
40580 }
40581
40582 if (GET_CODE (reg) == SUBREG)
40583 {
40584 gcc_assert (SUBREG_BYTE (reg) == 0);
40585 reg = SUBREG_REG (reg);
40586 }
40587
40588 if (!REG_P (reg))
40589 fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);
40590
40591 r = REGNO (reg);
40592 if (FP_REGNO_P (r))
40593 {
40594 if (mode == SFmode)
40595 load_string = "lfs";
40596 else if (mode == DFmode || mode == DImode)
40597 load_string = "lfd";
40598 else
40599 gcc_unreachable ();
40600 }
40601 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_DFORM_SCALAR)
40602 {
40603 if (mode == SFmode)
40604 load_string = "lxssp";
40605 else if (mode == DFmode || mode == DImode)
40606 load_string = "lxsd";
40607 else
40608 gcc_unreachable ();
40609 }
40610 else if (INT_REGNO_P (r))
40611 {
40612 switch (mode)
40613 {
40614 case QImode:
40615 load_string = "lbz";
40616 break;
40617 case HImode:
40618 load_string = "lhz";
40619 break;
40620 case SImode:
40621 case SFmode:
40622 load_string = "lwz";
40623 break;
40624 case DImode:
40625 case DFmode:
40626 if (!TARGET_POWERPC64)
40627 gcc_unreachable ();
40628 load_string = "ld";
40629 break;
40630 default:
40631 gcc_unreachable ();
40632 }
40633 }
40634 else
40635 fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);
40636
40637 if (!MEM_P (mem))
40638 fatal_insn ("emit_fusion_p9_load not MEM", mem);
40639
40640 addr = XEXP (mem, 0);
40641 fusion_split_address (addr, &hi, &lo);
40642
40643 /* Emit the addis instruction. */
40644 emit_fusion_addis (tmp_reg, hi, "power9 load fusion", GET_MODE_NAME (mode));
40645
40646 /* Emit the D-form load instruction. */
40647 emit_fusion_load_store (reg, tmp_reg, lo, load_string);
40648
40649 return "";
40650 }
40651
40652 /* Return a string to fuse an addis instruction with a store using extended
40653 fusion. The address that is used is the logical address that was formed
40654 during peephole2: (lo_sum (high) (low-part))
40655
40656 The code is complicated, so we call output_asm_insn directly, and just
40657 return "". */
40658
40659 const char *
40660 emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
40661 {
40662 machine_mode mode = GET_MODE (reg);
40663 rtx hi;
40664 rtx lo;
40665 rtx addr;
40666 const char *store_string;
40667 int r;
40668
40669 if (GET_CODE (reg) == SUBREG)
40670 {
40671 gcc_assert (SUBREG_BYTE (reg) == 0);
40672 reg = SUBREG_REG (reg);
40673 }
40674
40675 if (!REG_P (reg))
40676 fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);
40677
40678 r = REGNO (reg);
40679 if (FP_REGNO_P (r))
40680 {
40681 if (mode == SFmode)
40682 store_string = "stfs";
40683 else if (mode == DFmode)
40684 store_string = "stfd";
40685 else
40686 gcc_unreachable ();
40687 }
40688 else if (ALTIVEC_REGNO_P (r) && TARGET_P9_DFORM_SCALAR)
40689 {
40690 if (mode == SFmode)
40691 store_string = "stxssp";
40692 else if (mode == DFmode || mode == DImode)
40693 store_string = "stxsd";
40694 else
40695 gcc_unreachable ();
40696 }
40697 else if (INT_REGNO_P (r))
40698 {
40699 switch (mode)
40700 {
40701 case QImode:
40702 store_string = "stb";
40703 break;
40704 case HImode:
40705 store_string = "sth";
40706 break;
40707 case SImode:
40708 case SFmode:
40709 store_string = "stw";
40710 break;
40711 case DImode:
40712 case DFmode:
40713 if (!TARGET_POWERPC64)
40714 gcc_unreachable ();
40715 store_string = "std";
40716 break;
40717 default:
40718 gcc_unreachable ();
40719 }
40720 }
40721 else
40722 fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);
40723
40724 if (!MEM_P (mem))
40725 fatal_insn ("emit_fusion_p9_store not MEM", mem);
40726
40727 addr = XEXP (mem, 0);
40728 fusion_split_address (addr, &hi, &lo);
40729
40730 /* Emit the addis instruction. */
40731 emit_fusion_addis (tmp_reg, hi, "power9 store fusion", GET_MODE_NAME (mode));
40732
40733 /* Emit the D-form store instruction. */
40734 emit_fusion_load_store (reg, tmp_reg, lo, store_string);
40735
40736 return "";
40737 }
40738
40739 \f
40740 /* Analyze vector computations and remove unnecessary doubleword
40741 swaps (xxswapdi instructions). This pass is performed only
40742 for little-endian VSX code generation.
40743
40744 For this specific case, loads and stores of 4x32 and 2x64 vectors
40745 are inefficient. These are implemented using the lxvd2x and
40746 stxvd2x instructions, which invert the order of doublewords in
40747 a vector register. Thus the code generation inserts an xxswapdi
40748 after each such load, and prior to each such store. (For spill
40749 code after register assignment, an additional xxswapdi is inserted
40750 following each store in order to return a hard register to its
40751 unpermuted value.)
40752
40753 The extra xxswapdi instructions reduce performance. This can be
40754 particularly bad for vectorized code. The purpose of this pass
40755 is to reduce the number of xxswapdi instructions required for
40756 correctness.
40757
40758 The primary insight is that much code that operates on vectors
40759 does not care about the relative order of elements in a register,
40760 so long as the correct memory order is preserved. If we have
40761 a computation where all input values are provided by lxvd2x/xxswapdi
40762 sequences, all outputs are stored using xxswapdi/stxvd2x sequences,
40763 and all intermediate computations are pure SIMD (independent of
40764 element order), then all the xxswapdi's associated with the loads
40765 and stores may be removed.
40766
40767 This pass uses some of the infrastructure and logical ideas from
40768 the "web" pass in web.c. We create maximal webs of computations
40769 fitting the description above using union-find. Each such web is
40770 then optimized by removing its unnecessary xxswapdi instructions.
40771
40772 The pass is placed prior to global optimization so that we can
40773 perform the optimization in the safest and simplest way possible;
40774 that is, by replacing each xxswapdi insn with a register copy insn.
40775 Subsequent forward propagation will remove copies where possible.
40776
40777 There are some operations sensitive to element order for which we
40778 can still allow the operation, provided we modify those operations.
40779 These include CONST_VECTORs, for which we must swap the first and
40780 second halves of the constant vector; and SUBREGs, for which we
40781 must adjust the byte offset to account for the swapped doublewords.
40782 A remaining opportunity would be non-immediate-form splats, for
40783 which we should adjust the selected lane of the input. We should
40784 also make code generation adjustments for sum-across operations,
40785 since this is a common vectorizer reduction.
40786
40787 Because we run prior to the first split, we can see loads and stores
40788 here that match *vsx_le_perm_{load,store}_<mode>. These are vanilla
40789 vector loads and stores that have not yet been split into a permuting
40790 load/store and a swap. (One way this can happen is with a builtin
40791 call to vec_vsx_{ld,st}.) We can handle these as well, but rather
40792 than deleting a swap, we convert the load/store into a permuting
40793 load/store (which effectively removes the swap). */
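/* A hedged example of the intended effect (not from the original
   commentary), with illustrative register numbers:

	lxvd2x 0,0,8		# load (doublewords inverted)
	xxswapdi 0,0		# swap inserted after the load
	xvadddp 0,0,0		# element-order-independent SIMD op
	xxswapdi 0,0		# swap inserted before the store
	stxvd2x 0,0,9		# store (doublewords inverted)

   Because xvadddp is lane-independent, both xxswapdi instructions can be
   removed, and the web operates entirely on swapped doublewords.  */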
40794
40795 /* Notes on Permutes
40796
40797 We do not currently handle computations that contain permutes. There
40798 is a general transformation that can be performed correctly, but it
40799 may introduce more expensive code than it replaces. To handle these
40800 would require a cost model to determine when to perform the optimization.
40801 This commentary records how this could be done if desired.
40802
40803 The most general permute is something like this (example for V16QI):
40804
40805 (vec_select:V16QI (vec_concat:V32QI (op1:V16QI) (op2:V16QI))
40806 (parallel [(const_int a0) (const_int a1)
40807 ...
40808 (const_int a14) (const_int a15)]))
40809
40810 where a0,...,a15 are in [0,31] and select which elements of op1 and op2
40811 appear in the result.
40812
40813 Regardless of mode, we can convert the PARALLEL to a mask of 16
40814 byte-element selectors. Let's call this M, with M[i] representing
40815 the ith byte-element selector value. Then if we swap doublewords
40816 throughout the computation, we can get correct behavior by replacing
40817 M with M' as follows:
40818
40819 M'[i] = { (M[i]+8)%16 : M[i] in [0,15]
40820 { ((M[i]+8)%16)+16 : M[i] in [16,31]
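   For example (an illustrative check of the formula): M[i] = 5 gives
   M'[i] = (5+8)%16 = 13, and M[i] = 20 gives M'[i] = ((20+8)%16)+16 = 28.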
40821
40822 This seems promising at first, since we are just replacing one mask
40823 with another. But certain masks are preferable to others. If M
40824 is a mask that matches a vmrghh pattern, for example, M' certainly
40825 will not. Instead of a single vmrghh, we would generate a load of
40826 M' and a vperm. So we would need to know how many xxswapdi's we can
40827 remove as a result of this transformation to determine if it's
40828 profitable; and preferably the logic would need to be aware of all
40829 the special preferable masks.
40830
40831 Another form of permute is an UNSPEC_VPERM, in which the mask is
40832 already in a register. In some cases, this mask may be a constant
40833 that we can discover with ud-chains, in which case the above
40834 transformation is ok. However, the common usage here is for the
40835 mask to be produced by an UNSPEC_LVSL, in which case the mask
40836 cannot be known at compile time. In such a case we would have to
40837 generate several instructions to compute M' as above at run time,
40838 and a cost model is needed again.
40839
40840 However, when the mask M for an UNSPEC_VPERM is loaded from the
40841 constant pool, we can replace M with M' as above at no cost
40842 beyond adding a constant pool entry. */
40843
40844 /* This is based on the union-find logic in web.c. web_entry_base is
40845 defined in df.h. */
40846 class swap_web_entry : public web_entry_base
40847 {
40848 public:
40849 /* Pointer to the insn. */
40850 rtx_insn *insn;
40851 /* Set if insn contains a mention of a vector register. All other
40852 fields are undefined if this field is unset. */
40853 unsigned int is_relevant : 1;
40854 /* Set if insn is a load. */
40855 unsigned int is_load : 1;
40856 /* Set if insn is a store. */
40857 unsigned int is_store : 1;
40858 /* Set if insn is a doubleword swap. This can either be a register swap
40859 or a permuting load or store (test is_load and is_store for this). */
40860 unsigned int is_swap : 1;
40861 /* Set if the insn has a live-in use of a parameter register. */
40862 unsigned int is_live_in : 1;
40863 /* Set if the insn has a live-out def of a return register. */
40864 unsigned int is_live_out : 1;
40865 /* Set if the insn contains a subreg reference of a vector register. */
40866 unsigned int contains_subreg : 1;
40867 /* Set if the insn contains a 128-bit integer operand. */
40868 unsigned int is_128_int : 1;
40869 /* Set if this is a call-insn. */
40870 unsigned int is_call : 1;
40871 /* Set if this insn does not perform a vector operation for which
40872 element order matters, or if we know how to fix it up if it does.
40873 Undefined if is_swap is set. */
40874 unsigned int is_swappable : 1;
40875 /* A nonzero value indicates what kind of special handling for this
40876 insn is required if doublewords are swapped. Undefined if
40877 is_swappable is not set. */
40878 unsigned int special_handling : 4;
40879 /* Set if the web represented by this entry cannot be optimized. */
40880 unsigned int web_not_optimizable : 1;
40881 /* Set if this insn should be deleted. */
40882 unsigned int will_delete : 1;
40883 };
40884
40885 enum special_handling_values {
40886 SH_NONE = 0,
40887 SH_CONST_VECTOR,
40888 SH_SUBREG,
40889 SH_NOSWAP_LD,
40890 SH_NOSWAP_ST,
40891 SH_EXTRACT,
40892 SH_SPLAT,
40893 SH_XXPERMDI,
40894 SH_CONCAT,
40895 SH_VPERM
40896 };
40897
40898 /* Union INSN with all insns containing definitions that reach USE.
40899 Detect whether USE is live-in to the current function. */
40900 static void
40901 union_defs (swap_web_entry *insn_entry, rtx insn, df_ref use)
40902 {
40903 struct df_link *link = DF_REF_CHAIN (use);
40904
40905 if (!link)
40906 insn_entry[INSN_UID (insn)].is_live_in = 1;
40907
40908 while (link)
40909 {
40910 if (DF_REF_IS_ARTIFICIAL (link->ref))
40911 insn_entry[INSN_UID (insn)].is_live_in = 1;
40912
40913 if (DF_REF_INSN_INFO (link->ref))
40914 {
40915 rtx def_insn = DF_REF_INSN (link->ref);
40916 (void)unionfind_union (insn_entry + INSN_UID (insn),
40917 insn_entry + INSN_UID (def_insn));
40918 }
40919
40920 link = link->next;
40921 }
40922 }
40923
40924 /* Union INSN with all insns containing uses reached from DEF.
40925 Detect whether DEF is live-out from the current function. */
40926 static void
40927 union_uses (swap_web_entry *insn_entry, rtx insn, df_ref def)
40928 {
40929 struct df_link *link = DF_REF_CHAIN (def);
40930
40931 if (!link)
40932 insn_entry[INSN_UID (insn)].is_live_out = 1;
40933
40934 while (link)
40935 {
40936 /* This could be an eh use or some other artificial use;
40937 we treat these all the same (killing the optimization). */
40938 if (DF_REF_IS_ARTIFICIAL (link->ref))
40939 insn_entry[INSN_UID (insn)].is_live_out = 1;
40940
40941 if (DF_REF_INSN_INFO (link->ref))
40942 {
40943 rtx use_insn = DF_REF_INSN (link->ref);
40944 (void)unionfind_union (insn_entry + INSN_UID (insn),
40945 insn_entry + INSN_UID (use_insn));
40946 }
40947
40948 link = link->next;
40949 }
40950 }
40951
40952 /* Return 1 iff INSN is a load insn, including permuting loads that
40953 represent an lxvd2x instruction; else return 0. */
40954 static unsigned int
40955 insn_is_load_p (rtx insn)
40956 {
40957 rtx body = PATTERN (insn);
40958
40959 if (GET_CODE (body) == SET)
40960 {
40961 if (GET_CODE (SET_SRC (body)) == MEM)
40962 return 1;
40963
40964 if (GET_CODE (SET_SRC (body)) == VEC_SELECT
40965 && GET_CODE (XEXP (SET_SRC (body), 0)) == MEM)
40966 return 1;
40967
40968 return 0;
40969 }
40970
40971 if (GET_CODE (body) != PARALLEL)
40972 return 0;
40973
40974 rtx set = XVECEXP (body, 0, 0);
40975
40976 if (GET_CODE (set) == SET && GET_CODE (SET_SRC (set)) == MEM)
40977 return 1;
40978
40979 return 0;
40980 }
40981
40982 /* Return 1 iff INSN is a store insn, including permuting stores that
40983 represent an stxvd2x instruction; else return 0. */
40984 static unsigned int
40985 insn_is_store_p (rtx insn)
40986 {
40987 rtx body = PATTERN (insn);
40988 if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == MEM)
40989 return 1;
40990 if (GET_CODE (body) != PARALLEL)
40991 return 0;
40992 rtx set = XVECEXP (body, 0, 0);
40993 if (GET_CODE (set) == SET && GET_CODE (SET_DEST (set)) == MEM)
40994 return 1;
40995 return 0;
40996 }
40997
40998 /* Return 1 iff INSN swaps doublewords. This may be a reg-reg swap,
40999 a permuting load, or a permuting store. */
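/* For example (an illustrative sketch with hypothetical register numbers),
   a V4SI doubleword swap selects the elements in the order [2 3 0 1]:

	(set (reg:V4SI 64)
	     (vec_select:V4SI (reg:V4SI 65)
			      (parallel [(const_int 2) (const_int 3)
					 (const_int 0) (const_int 1)])))  */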
41000 static unsigned int
41001 insn_is_swap_p (rtx insn)
41002 {
41003 rtx body = PATTERN (insn);
41004 if (GET_CODE (body) != SET)
41005 return 0;
41006 rtx rhs = SET_SRC (body);
41007 if (GET_CODE (rhs) != VEC_SELECT)
41008 return 0;
41009 rtx parallel = XEXP (rhs, 1);
41010 if (GET_CODE (parallel) != PARALLEL)
41011 return 0;
41012 unsigned int len = XVECLEN (parallel, 0);
41013 if (len != 2 && len != 4 && len != 8 && len != 16)
41014 return 0;
41015 for (unsigned int i = 0; i < len / 2; ++i)
41016 {
41017 rtx op = XVECEXP (parallel, 0, i);
41018 if (GET_CODE (op) != CONST_INT || INTVAL (op) != len / 2 + i)
41019 return 0;
41020 }
41021 for (unsigned int i = len / 2; i < len; ++i)
41022 {
41023 rtx op = XVECEXP (parallel, 0, i);
41024 if (GET_CODE (op) != CONST_INT || INTVAL (op) != i - len / 2)
41025 return 0;
41026 }
41027 return 1;
41028 }
41029
41030 /* Return TRUE if INSN is a swap fed by a load from the constant pool. */
41031 static bool
41032 const_load_sequence_p (swap_web_entry *insn_entry, rtx insn)
41033 {
41034 unsigned uid = INSN_UID (insn);
41035 if (!insn_entry[uid].is_swap || insn_entry[uid].is_load)
41036 return false;
41037
41038 /* Find the unique use in the swap and locate its def. If the def
41039 isn't unique, punt. */
41040 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
41041 df_ref use;
41042 FOR_EACH_INSN_INFO_USE (use, insn_info)
41043 {
41044 struct df_link *def_link = DF_REF_CHAIN (use);
41045 if (!def_link || def_link->next)
41046 return false;
41047
41048 rtx def_insn = DF_REF_INSN (def_link->ref);
41049 unsigned uid2 = INSN_UID (def_insn);
41050 if (!insn_entry[uid2].is_load || !insn_entry[uid2].is_swap)
41051 return false;
41052
41053 rtx body = PATTERN (def_insn);
41054 if (GET_CODE (body) != SET
41055 || GET_CODE (SET_SRC (body)) != VEC_SELECT
41056 || GET_CODE (XEXP (SET_SRC (body), 0)) != MEM)
41057 return false;
41058
41059 rtx mem = XEXP (SET_SRC (body), 0);
41060 rtx base_reg = XEXP (mem, 0);
41061
41062 df_ref base_use;
41063 insn_info = DF_INSN_INFO_GET (def_insn);
41064 FOR_EACH_INSN_INFO_USE (base_use, insn_info)
41065 {
41066 if (!rtx_equal_p (DF_REF_REG (base_use), base_reg))
41067 continue;
41068
41069 struct df_link *base_def_link = DF_REF_CHAIN (base_use);
41070 if (!base_def_link || base_def_link->next)
41071 return false;
41072
41073 rtx tocrel_insn = DF_REF_INSN (base_def_link->ref);
41074 rtx tocrel_body = PATTERN (tocrel_insn);
41075 rtx base, offset;
41076 if (GET_CODE (tocrel_body) != SET)
41077 return false;
41078 /* There is an extra level of indirection for small/large
41079 code models. */
41080 rtx tocrel_expr = SET_SRC (tocrel_body);
41081 if (GET_CODE (tocrel_expr) == MEM)
41082 tocrel_expr = XEXP (tocrel_expr, 0);
41083 if (!toc_relative_expr_p (tocrel_expr, false))
41084 return false;
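/* toc_relative_expr_p sets the file-scope variable tocrel_base as a
   side effect (see also the comment in adjust_vperm below).  */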
41085 split_const (XVECEXP (tocrel_base, 0, 0), &base, &offset);
41086 if (GET_CODE (base) != SYMBOL_REF || !CONSTANT_POOL_ADDRESS_P (base))
41087 return false;
41088 }
41089 }
41090 return true;
41091 }
41092
41093 /* Return TRUE iff OP matches a V2DF reduction pattern. See the
41094 definition of vsx_reduc_<VEC_reduc_name>_v2df in vsx.md. */
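/* Sketch of the shape checked below (abbreviated; x is an illustrative
   register, and SMIN/SMAX may appear in place of PLUS):

	(plus:V2DF
	  (vec_concat:V2DF
	    (vec_select:DF (reg:V2DF x) (parallel [(const_int 1)]))
	    (vec_select:DF (reg:V2DF x) (parallel [(const_int 0)])))
	  (...))  */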
41095 static bool
41096 v2df_reduction_p (rtx op)
41097 {
41098 if (GET_MODE (op) != V2DFmode)
41099 return false;
41100
41101 enum rtx_code code = GET_CODE (op);
41102 if (code != PLUS && code != SMIN && code != SMAX)
41103 return false;
41104
41105 rtx concat = XEXP (op, 0);
41106 if (GET_CODE (concat) != VEC_CONCAT)
41107 return false;
41108
41109 rtx select0 = XEXP (concat, 0);
41110 rtx select1 = XEXP (concat, 1);
41111 if (GET_CODE (select0) != VEC_SELECT || GET_CODE (select1) != VEC_SELECT)
41112 return false;
41113
41114 rtx reg0 = XEXP (select0, 0);
41115 rtx reg1 = XEXP (select1, 0);
41116 if (!rtx_equal_p (reg0, reg1) || !REG_P (reg0))
41117 return false;
41118
41119 rtx parallel0 = XEXP (select0, 1);
41120 rtx parallel1 = XEXP (select1, 1);
41121 if (GET_CODE (parallel0) != PARALLEL || GET_CODE (parallel1) != PARALLEL)
41122 return false;
41123
41124 if (!rtx_equal_p (XVECEXP (parallel0, 0, 0), const1_rtx)
41125 || !rtx_equal_p (XVECEXP (parallel1, 0, 0), const0_rtx))
41126 return false;
41127
41128 return true;
41129 }
41130
41131 /* Return 1 iff OP is an operand unaffected by having vector doublewords
41132 swapped in memory, or one that can be fixed up (*SPECIAL records how). */
41133 static unsigned int
41134 rtx_is_swappable_p (rtx op, unsigned int *special)
41135 {
41136 enum rtx_code code = GET_CODE (op);
41137 int i, j;
41138 rtx parallel;
41139
41140 switch (code)
41141 {
41142 case LABEL_REF:
41143 case SYMBOL_REF:
41144 case CLOBBER:
41145 case REG:
41146 return 1;
41147
41148 case VEC_CONCAT:
41149 case ASM_INPUT:
41150 case ASM_OPERANDS:
41151 return 0;
41152
41153 case CONST_VECTOR:
41154 {
41155 *special = SH_CONST_VECTOR;
41156 return 1;
41157 }
41158
41159 case VEC_DUPLICATE:
41160 /* Opportunity: If XEXP (op, 0) has the same mode as the result,
41161 and XEXP (op, 1) is a PARALLEL with a single QImode const int,
41162 it represents a vector splat for which we can do special
41163 handling. */
41164 if (GET_CODE (XEXP (op, 0)) == CONST_INT)
41165 return 1;
41166 else if (REG_P (XEXP (op, 0))
41167 && GET_MODE_INNER (GET_MODE (op)) == GET_MODE (XEXP (op, 0)))
41168 /* This catches V2DF and V2DI splat, at a minimum. */
41169 return 1;
41170 else if (GET_CODE (XEXP (op, 0)) == TRUNCATE
41171 && REG_P (XEXP (XEXP (op, 0), 0))
41172 && GET_MODE_INNER (GET_MODE (op)) == GET_MODE (XEXP (op, 0)))
41173 /* This catches splat of a truncated value. */
41174 return 1;
41175 else if (GET_CODE (XEXP (op, 0)) == VEC_SELECT)
41176 /* If the duplicated item is from a select, defer to the select
41177 processing to see if we can change the lane for the splat. */
41178 return rtx_is_swappable_p (XEXP (op, 0), special);
41179 else
41180 return 0;
41181
41182 case VEC_SELECT:
41183 /* A vec_extract operation is ok if we change the lane. */
41184 if (GET_CODE (XEXP (op, 0)) == REG
41185 && GET_MODE_INNER (GET_MODE (XEXP (op, 0))) == GET_MODE (op)
41186 && GET_CODE ((parallel = XEXP (op, 1))) == PARALLEL
41187 && XVECLEN (parallel, 0) == 1
41188 && GET_CODE (XVECEXP (parallel, 0, 0)) == CONST_INT)
41189 {
41190 *special = SH_EXTRACT;
41191 return 1;
41192 }
41193 /* An XXPERMDI is ok if we adjust the lanes. Note that if the
41194 XXPERMDI is a swap operation, it will be identified by
41195 insn_is_swap_p and therefore we won't get here. */
41196 else if (GET_CODE (XEXP (op, 0)) == VEC_CONCAT
41197 && (GET_MODE (XEXP (op, 0)) == V4DFmode
41198 || GET_MODE (XEXP (op, 0)) == V4DImode)
41199 && GET_CODE ((parallel = XEXP (op, 1))) == PARALLEL
41200 && XVECLEN (parallel, 0) == 2
41201 && GET_CODE (XVECEXP (parallel, 0, 0)) == CONST_INT
41202 && GET_CODE (XVECEXP (parallel, 0, 1)) == CONST_INT)
41203 {
41204 *special = SH_XXPERMDI;
41205 return 1;
41206 }
41207 else if (v2df_reduction_p (op))
41208 return 1;
41209 else
41210 return 0;
41211
41212 case UNSPEC:
41213 {
41214 /* Various operations are unsafe for this optimization, at least
41215 without significant additional work. Permutes are obviously
41216 problematic, as both the permute control vector and the ordering
41217 of the target values are invalidated by doubleword swapping.
41218 Vector pack and unpack modify the number of vector lanes.
41219 Merge-high/low will not operate correctly on swapped operands.
41220 Vector shifts across element boundaries are clearly uncool,
41221 as are vector select and concatenate operations. Vector
41222 sum-across instructions define one operand with a specific
41223 order-dependent element, so additional fixup code would be
41224 needed to make those work. Vector set and non-immediate-form
41225 vector splat are element-order sensitive. A few of these
41226 cases might be workable with special handling if required.
41227 Adding cost modeling would be appropriate in some cases. */
41228 int val = XINT (op, 1);
41229 switch (val)
41230 {
41231 default:
41232 break;
41233 case UNSPEC_VMRGH_DIRECT:
41234 case UNSPEC_VMRGL_DIRECT:
41235 case UNSPEC_VPACK_SIGN_SIGN_SAT:
41236 case UNSPEC_VPACK_SIGN_UNS_SAT:
41237 case UNSPEC_VPACK_UNS_UNS_MOD:
41238 case UNSPEC_VPACK_UNS_UNS_MOD_DIRECT:
41239 case UNSPEC_VPACK_UNS_UNS_SAT:
41240 case UNSPEC_VPERM:
41241 case UNSPEC_VPERM_UNS:
41242 case UNSPEC_VPERMHI:
41243 case UNSPEC_VPERMSI:
41244 case UNSPEC_VPKPX:
41245 case UNSPEC_VSLDOI:
41246 case UNSPEC_VSLO:
41247 case UNSPEC_VSRO:
41248 case UNSPEC_VSUM2SWS:
41249 case UNSPEC_VSUM4S:
41250 case UNSPEC_VSUM4UBS:
41251 case UNSPEC_VSUMSWS:
41252 case UNSPEC_VSUMSWS_DIRECT:
41253 case UNSPEC_VSX_CONCAT:
41254 case UNSPEC_VSX_SET:
41255 case UNSPEC_VSX_SLDWI:
41256 case UNSPEC_VUNPACK_HI_SIGN:
41257 case UNSPEC_VUNPACK_HI_SIGN_DIRECT:
41258 case UNSPEC_VUNPACK_LO_SIGN:
41259 case UNSPEC_VUNPACK_LO_SIGN_DIRECT:
41260 case UNSPEC_VUPKHPX:
41261 case UNSPEC_VUPKHS_V4SF:
41262 case UNSPEC_VUPKHU_V4SF:
41263 case UNSPEC_VUPKLPX:
41264 case UNSPEC_VUPKLS_V4SF:
41265 case UNSPEC_VUPKLU_V4SF:
41266 case UNSPEC_VSX_CVDPSPN:
41267 case UNSPEC_VSX_CVSPDP:
41268 case UNSPEC_VSX_CVSPDPN:
41269 case UNSPEC_VSX_EXTRACT:
41270 case UNSPEC_VSX_VSLO:
41271 case UNSPEC_VSX_VEC_INIT:
41272 return 0;
41273 case UNSPEC_VSPLT_DIRECT:
41274 *special = SH_SPLAT;
41275 return 1;
41276 case UNSPEC_REDUC_PLUS:
41277 case UNSPEC_REDUC:
41278 return 1;
41279 }
41280 }
41281
41282 default:
41283 break;
41284 }
41285
41286 const char *fmt = GET_RTX_FORMAT (code);
41287 int ok = 1;
41288
41289 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
41290 if (fmt[i] == 'e' || fmt[i] == 'u')
41291 {
41292 unsigned int special_op = SH_NONE;
41293 ok &= rtx_is_swappable_p (XEXP (op, i), &special_op);
41294 if (special_op == SH_NONE)
41295 continue;
41296 /* Ensure we never have two kinds of special handling
41297 for the same insn. */
41298 if (*special != SH_NONE && *special != special_op)
41299 return 0;
41300 *special = special_op;
41301 }
41302 else if (fmt[i] == 'E')
41303 for (j = 0; j < XVECLEN (op, i); ++j)
41304 {
41305 unsigned int special_op = SH_NONE;
41306 ok &= rtx_is_swappable_p (XVECEXP (op, i, j), &special_op);
41307 if (special_op == SH_NONE)
41308 continue;
41309 /* Ensure we never have two kinds of special handling
41310 for the same insn. */
41311 if (*special != SH_NONE && *special != special_op)
41312 return 0;
41313 *special = special_op;
41314 }
41315
41316 return ok;
41317 }
41318
41319 /* Return 1 iff INSN is an insn that will not be affected by
41320 having vector doublewords swapped in memory (in which case
41321 *SPECIAL is unchanged), or that can be modified to be correct
41322 if vector doublewords are swapped in memory (in which case
41323 *SPECIAL is changed to a value indicating how). */
41324 static unsigned int
41325 insn_is_swappable_p (swap_web_entry *insn_entry, rtx insn,
41326 unsigned int *special)
41327 {
41328 /* Calls are always bad. */
41329 if (GET_CODE (insn) == CALL_INSN)
41330 return 0;
41331
41332 /* Loads and stores seen here are not permuting, but we can still
41333 fix them up by converting them to permuting ones. Exceptions:
41334 UNSPEC_LVE, UNSPEC_LVX, and UNSPEC_STVX, which have a PARALLEL
41335 body instead of a SET; and UNSPEC_STVE, which has an UNSPEC
41336 for the SET source. Also we must now make an exception for lvx
41337 and stvx when they are not in the UNSPEC_LVX/STVX form (with the
41338 explicit "& -16") since this leads to unrecognizable insns. */
41339 rtx body = PATTERN (insn);
41340 int i = INSN_UID (insn);
41341
41342 if (insn_entry[i].is_load)
41343 {
41344 if (GET_CODE (body) == SET)
41345 {
41346 rtx rhs = SET_SRC (body);
41347 gcc_assert (GET_CODE (rhs) == MEM);
41348 if (GET_CODE (XEXP (rhs, 0)) == AND)
41349 return 0;
41350
41351 *special = SH_NOSWAP_LD;
41352 return 1;
41353 }
41354 else
41355 return 0;
41356 }
41357
41358 if (insn_entry[i].is_store)
41359 {
41360 if (GET_CODE (body) == SET
41361 && GET_CODE (SET_SRC (body)) != UNSPEC)
41362 {
41363 rtx lhs = SET_DEST (body);
41364 gcc_assert (GET_CODE (lhs) == MEM);
41365 if (GET_CODE (XEXP (lhs, 0)) == AND)
41366 return 0;
41367
41368 *special = SH_NOSWAP_ST;
41369 return 1;
41370 }
41371 else
41372 return 0;
41373 }
41374
41375 /* A convert to single precision can be left as is provided that
41376 all of its uses are in xxspltw instructions that splat BE element
41377 zero. */
41378 if (GET_CODE (body) == SET
41379 && GET_CODE (SET_SRC (body)) == UNSPEC
41380 && XINT (SET_SRC (body), 1) == UNSPEC_VSX_CVDPSPN)
41381 {
41382 df_ref def;
41383 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
41384
41385 FOR_EACH_INSN_INFO_DEF (def, insn_info)
41386 {
41387 struct df_link *link = DF_REF_CHAIN (def);
41388 if (!link)
41389 return 0;
41390
41391 for (; link; link = link->next)
41392 {
41393 rtx use_body = PATTERN (DF_REF_INSN (link->ref));
41394 if (GET_CODE (use_body) != SET
41395 || GET_CODE (SET_SRC (use_body)) != UNSPEC
41396 || XINT (SET_SRC (use_body), 1) != UNSPEC_VSX_XXSPLTW
41397 || XVECEXP (SET_SRC (use_body), 0, 1) != const0_rtx)
41398 return 0;
41399 }
41400 }
41401
41402 return 1;
41403 }
41404
41405 /* A concatenation of two doublewords is ok if we reverse the
41406 order of the inputs. */
41407 if (GET_CODE (body) == SET
41408 && GET_CODE (SET_SRC (body)) == VEC_CONCAT
41409 && (GET_MODE (SET_SRC (body)) == V2DFmode
41410 || GET_MODE (SET_SRC (body)) == V2DImode))
41411 {
41412 *special = SH_CONCAT;
41413 return 1;
41414 }
41415
41416 /* V2DF reductions are always swappable. */
41417 if (GET_CODE (body) == PARALLEL)
41418 {
41419 rtx expr = XVECEXP (body, 0, 0);
41420 if (GET_CODE (expr) == SET
41421 && v2df_reduction_p (SET_SRC (expr)))
41422 return 1;
41423 }
41424
41425 /* An UNSPEC_VPERM is ok if the mask operand is loaded from the
41426 constant pool. */
41427 if (GET_CODE (body) == SET
41428 && GET_CODE (SET_SRC (body)) == UNSPEC
41429 && XINT (SET_SRC (body), 1) == UNSPEC_VPERM
41430 && XVECLEN (SET_SRC (body), 0) == 3
41431 && GET_CODE (XVECEXP (SET_SRC (body), 0, 2)) == REG)
41432 {
41433 rtx mask_reg = XVECEXP (SET_SRC (body), 0, 2);
41434 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
41435 df_ref use;
41436 FOR_EACH_INSN_INFO_USE (use, insn_info)
41437 if (rtx_equal_p (DF_REF_REG (use), mask_reg))
41438 {
41439 struct df_link *def_link = DF_REF_CHAIN (use);
41440 /* Punt if multiple definitions for this reg. */
41441 if (def_link && !def_link->next
41442 && const_load_sequence_p (insn_entry,
41443 DF_REF_INSN (def_link->ref)))
41444 {
41445 *special = SH_VPERM;
41446 return 1;
41447 }
41448 }
41449 }
41450
41451 /* Otherwise check the operands for vector lane violations. */
41452 return rtx_is_swappable_p (body, special);
41453 }
41454
41455 enum chain_purpose { FOR_LOADS, FOR_STORES };
41456
41457 /* Return true if the UD or DU chain headed by LINK is non-empty,
41458 and every entry on the chain references an insn that is a
41459 register swap. Furthermore, if PURPOSE is FOR_LOADS, each such
41460 register swap must have only permuting loads as reaching defs.
41461 If PURPOSE is FOR_STORES, each such register swap must have only
41462 register swaps or permuting stores as reached uses. */
41463 static bool
41464 chain_contains_only_swaps (swap_web_entry *insn_entry, struct df_link *link,
41465 enum chain_purpose purpose)
41466 {
41467 if (!link)
41468 return false;
41469
41470 for (; link; link = link->next)
41471 {
41472 if (!ALTIVEC_OR_VSX_VECTOR_MODE (GET_MODE (DF_REF_REG (link->ref))))
41473 continue;
41474
41475 if (DF_REF_IS_ARTIFICIAL (link->ref))
41476 return false;
41477
41478 rtx reached_insn = DF_REF_INSN (link->ref);
41479 unsigned uid = INSN_UID (reached_insn);
41480 struct df_insn_info *insn_info = DF_INSN_INFO_GET (reached_insn);
41481
41482 if (!insn_entry[uid].is_swap || insn_entry[uid].is_load
41483 || insn_entry[uid].is_store)
41484 return false;
41485
41486 if (purpose == FOR_LOADS)
41487 {
41488 df_ref use;
41489 FOR_EACH_INSN_INFO_USE (use, insn_info)
41490 {
41491 struct df_link *swap_link = DF_REF_CHAIN (use);
41492
41493 while (swap_link)
41494 {
41495 if (DF_REF_IS_ARTIFICIAL (swap_link->ref))
41496 return false;
41497
41498 rtx swap_def_insn = DF_REF_INSN (swap_link->ref);
41499 unsigned uid2 = INSN_UID (swap_def_insn);
41500
41501 /* Only permuting loads are allowed. */
41502 if (!insn_entry[uid2].is_swap || !insn_entry[uid2].is_load)
41503 return false;
41504
41505 swap_link = swap_link->next;
41506 }
41507 }
41508 }
41509 else if (purpose == FOR_STORES)
41510 {
41511 df_ref def;
41512 FOR_EACH_INSN_INFO_DEF (def, insn_info)
41513 {
41514 struct df_link *swap_link = DF_REF_CHAIN (def);
41515
41516 while (swap_link)
41517 {
41518 if (DF_REF_IS_ARTIFICIAL (swap_link->ref))
41519 return false;
41520
41521 rtx swap_use_insn = DF_REF_INSN (swap_link->ref);
41522 unsigned uid2 = INSN_UID (swap_use_insn);
41523
41524 /* Permuting stores or register swaps are allowed. */
41525 if (!insn_entry[uid2].is_swap || insn_entry[uid2].is_load)
41526 return false;
41527
41528 swap_link = swap_link->next;
41529 }
41530 }
41531 }
41532 }
41533
41534 return true;
41535 }
41536
41537 /* Mark the xxswapdi instructions associated with permuting loads and
41538 stores for removal. Note that we only flag them for deletion here,
41539 as there is a possibility of a swap being reached from multiple
41540 loads, etc. */
41541 static void
41542 mark_swaps_for_removal (swap_web_entry *insn_entry, unsigned int i)
41543 {
41544 rtx insn = insn_entry[i].insn;
41545 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
41546
41547 if (insn_entry[i].is_load)
41548 {
41549 df_ref def;
41550 FOR_EACH_INSN_INFO_DEF (def, insn_info)
41551 {
41552 struct df_link *link = DF_REF_CHAIN (def);
41553
41554 /* We know by now that these are swaps, so we can delete
41555 them confidently. */
41556 while (link)
41557 {
41558 rtx use_insn = DF_REF_INSN (link->ref);
41559 insn_entry[INSN_UID (use_insn)].will_delete = 1;
41560 link = link->next;
41561 }
41562 }
41563 }
41564 else if (insn_entry[i].is_store)
41565 {
41566 df_ref use;
41567 FOR_EACH_INSN_INFO_USE (use, insn_info)
41568 {
41569 /* Ignore uses for addressability. */
41570 machine_mode mode = GET_MODE (DF_REF_REG (use));
41571 if (!ALTIVEC_OR_VSX_VECTOR_MODE (mode))
41572 continue;
41573
41574 struct df_link *link = DF_REF_CHAIN (use);
41575
41576 /* We know by now that these are swaps, so we can delete
41577 them confidently. */
41578 while (link)
41579 {
41580 rtx def_insn = DF_REF_INSN (link->ref);
41581 insn_entry[INSN_UID (def_insn)].will_delete = 1;
41582 link = link->next;
41583 }
41584 }
41585 }
41586 }
41587
41588 /* OP is either a CONST_VECTOR or an expression containing one.
41589 Swap the first half of the vector with the second in the first
41590 case. Recurse to find it in the second. */
41591 static void
41592 swap_const_vector_halves (rtx op)
41593 {
41594 int i;
41595 enum rtx_code code = GET_CODE (op);
41596 if (GET_CODE (op) == CONST_VECTOR)
41597 {
41598 int half_units = GET_MODE_NUNITS (GET_MODE (op)) / 2;
41599 for (i = 0; i < half_units; ++i)
41600 {
41601 rtx temp = CONST_VECTOR_ELT (op, i);
41602 CONST_VECTOR_ELT (op, i) = CONST_VECTOR_ELT (op, i + half_units);
41603 CONST_VECTOR_ELT (op, i + half_units) = temp;
41604 }
41605 }
41606 else
41607 {
41608 int j;
41609 const char *fmt = GET_RTX_FORMAT (code);
41610 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
41611 if (fmt[i] == 'e' || fmt[i] == 'u')
41612 swap_const_vector_halves (XEXP (op, i));
41613 else if (fmt[i] == 'E')
41614 for (j = 0; j < XVECLEN (op, i); ++j)
41615 swap_const_vector_halves (XVECEXP (op, i, j));
41616 }
41617 }
41618
41619 /* Find all subregs of a vector expression that perform a narrowing,
41620 and adjust the subreg index to account for doubleword swapping. */
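/* For instance (illustrative), (subreg:DI (reg:V2DI v) 0) refers to the
   other doubleword once the register holds swapped data, so its byte
   offset is changed to 8 below, and a byte offset of 8 becomes 0.  */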
41621 static void
41622 adjust_subreg_index (rtx op)
41623 {
41624 enum rtx_code code = GET_CODE (op);
41625 if (code == SUBREG
41626 && (GET_MODE_SIZE (GET_MODE (op))
41627 < GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))))
41628 {
41629 unsigned int index = SUBREG_BYTE (op);
41630 if (index < 8)
41631 index += 8;
41632 else
41633 index -= 8;
41634 SUBREG_BYTE (op) = index;
41635 }
41636
41637 const char *fmt = GET_RTX_FORMAT (code);
41638 int i,j;
41639 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
41640 if (fmt[i] == 'e' || fmt[i] == 'u')
41641 adjust_subreg_index (XEXP (op, i));
41642 else if (fmt[i] == 'E')
41643 for (j = 0; j < XVECLEN (op, i); ++j)
41644 adjust_subreg_index (XVECEXP (op, i, j));
41645 }
41646
41647 /* Convert the non-permuting load INSN to a permuting one. */
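/* E.g., for a V4SI load the selector built below is
   (parallel [(const_int 2) (const_int 3) (const_int 0) (const_int 1)]),
   turning the plain load into a doubleword-swapping one.  */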
41648 static void
41649 permute_load (rtx_insn *insn)
41650 {
41651 rtx body = PATTERN (insn);
41652 rtx mem_op = SET_SRC (body);
41653 rtx tgt_reg = SET_DEST (body);
41654 machine_mode mode = GET_MODE (tgt_reg);
41655 int n_elts = GET_MODE_NUNITS (mode);
41656 int half_elts = n_elts / 2;
41657 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
41658 int i, j;
41659 for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
41660 XVECEXP (par, 0, i) = GEN_INT (j);
41661 for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
41662 XVECEXP (par, 0, i) = GEN_INT (j);
41663 rtx sel = gen_rtx_VEC_SELECT (mode, mem_op, par);
41664 SET_SRC (body) = sel;
41665 INSN_CODE (insn) = -1; /* Force re-recognition. */
41666 df_insn_rescan (insn);
41667
41668 if (dump_file)
41669 fprintf (dump_file, "Replacing load %d with permuted load\n",
41670 INSN_UID (insn));
41671 }
41672
41673 /* Convert the non-permuting store INSN to a permuting one. */
41674 static void
41675 permute_store (rtx_insn *insn)
41676 {
41677 rtx body = PATTERN (insn);
41678 rtx src_reg = SET_SRC (body);
41679 machine_mode mode = GET_MODE (src_reg);
41680 int n_elts = GET_MODE_NUNITS (mode);
41681 int half_elts = n_elts / 2;
41682 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
41683 int i, j;
41684 for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
41685 XVECEXP (par, 0, i) = GEN_INT (j);
41686 for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
41687 XVECEXP (par, 0, i) = GEN_INT (j);
41688 rtx sel = gen_rtx_VEC_SELECT (mode, src_reg, par);
41689 SET_SRC (body) = sel;
41690 INSN_CODE (insn) = -1; /* Force re-recognition. */
41691 df_insn_rescan (insn);
41692
41693 if (dump_file)
41694 fprintf (dump_file, "Replacing store %d with permuted store\n",
41695 INSN_UID (insn));
41696 }
41697
41698 /* Given INSN that contains a vector extract operation, adjust the index
41699 of the extracted lane to account for the doubleword swap. */
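/* For example, with V4SI (half_elts == 2) an extract of lane 1 becomes an
   extract of lane 3, and lane 3 becomes lane 1.  */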
41700 static void
41701 adjust_extract (rtx_insn *insn)
41702 {
41703 rtx pattern = PATTERN (insn);
41704 if (GET_CODE (pattern) == PARALLEL)
41705 pattern = XVECEXP (pattern, 0, 0);
41706 rtx src = SET_SRC (pattern);
41707 /* The vec_select may be wrapped in a vec_duplicate for a splat, so
41708 account for that. */
41709 rtx sel = GET_CODE (src) == VEC_DUPLICATE ? XEXP (src, 0) : src;
41710 rtx par = XEXP (sel, 1);
41711 int half_elts = GET_MODE_NUNITS (GET_MODE (XEXP (sel, 0))) >> 1;
41712 int lane = INTVAL (XVECEXP (par, 0, 0));
41713 lane = lane >= half_elts ? lane - half_elts : lane + half_elts;
41714 XVECEXP (par, 0, 0) = GEN_INT (lane);
41715 INSN_CODE (insn) = -1; /* Force re-recognition. */
41716 df_insn_rescan (insn);
41717
41718 if (dump_file)
41719 fprintf (dump_file, "Changing lane for extract %d\n", INSN_UID (insn));
41720 }
41721
41722 /* Given INSN that contains a vector direct-splat operation, adjust the index
41723 of the source lane to account for the doubleword swap. */
41724 static void
41725 adjust_splat (rtx_insn *insn)
41726 {
41727 rtx body = PATTERN (insn);
41728 rtx unspec = XEXP (body, 1);
41729 int half_elts = GET_MODE_NUNITS (GET_MODE (unspec)) >> 1;
41730 int lane = INTVAL (XVECEXP (unspec, 0, 1));
41731 lane = lane >= half_elts ? lane - half_elts : lane + half_elts;
41732 XVECEXP (unspec, 0, 1) = GEN_INT (lane);
41733 INSN_CODE (insn) = -1; /* Force re-recognition. */
41734 df_insn_rescan (insn);
41735
41736 if (dump_file)
41737 fprintf (dump_file, "Changing lane for splat %d\n", INSN_UID (insn));
41738 }
41739
41740 /* Given INSN that contains an XXPERMDI operation (that is not a doubleword
41741 swap), reverse the order of the source operands and adjust the indices
41742 of the source lanes to account for doubleword reversal. */
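/* For example, lanes <0,2> selecting from operands (A,B) become lanes
   <1,3> selecting from (B,A): new_lane0 = 3 - 2 = 1, new_lane1 = 3 - 0 = 3.  */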
41743 static void
41744 adjust_xxpermdi (rtx_insn *insn)
41745 {
41746 rtx set = PATTERN (insn);
41747 rtx select = XEXP (set, 1);
41748 rtx concat = XEXP (select, 0);
41749 rtx src0 = XEXP (concat, 0);
41750 XEXP (concat, 0) = XEXP (concat, 1);
41751 XEXP (concat, 1) = src0;
41752 rtx parallel = XEXP (select, 1);
41753 int lane0 = INTVAL (XVECEXP (parallel, 0, 0));
41754 int lane1 = INTVAL (XVECEXP (parallel, 0, 1));
41755 int new_lane0 = 3 - lane1;
41756 int new_lane1 = 3 - lane0;
41757 XVECEXP (parallel, 0, 0) = GEN_INT (new_lane0);
41758 XVECEXP (parallel, 0, 1) = GEN_INT (new_lane1);
41759 INSN_CODE (insn) = -1; /* Force re-recognition. */
41760 df_insn_rescan (insn);
41761
41762 if (dump_file)
41763 fprintf (dump_file, "Changing lanes for xxpermdi %d\n", INSN_UID (insn));
41764 }
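
/* Sketch: an xxpermdi selecting lanes (0, 2) from (vec_concat A B)
   is rewritten to select lanes (3 - 2, 3 - 0) = (1, 3) from
   (vec_concat B A).  With both inputs holding doubleword-swapped
   values, the rewritten form produces the swapped image of the result
   the original would have produced.  */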
41765
41766 /* Given INSN that contains a VEC_CONCAT operation of two doublewords,
41767    reverse the order of those inputs. */
41768 static void
41769 adjust_concat (rtx_insn *insn)
41770 {
41771 rtx set = PATTERN (insn);
41772 rtx concat = XEXP (set, 1);
41773 rtx src0 = XEXP (concat, 0);
41774 XEXP (concat, 0) = XEXP (concat, 1);
41775 XEXP (concat, 1) = src0;
41776 INSN_CODE (insn) = -1; /* Force re-recognition. */
41777 df_insn_rescan (insn);
41778
41779 if (dump_file)
41780 fprintf (dump_file, "Reversing inputs for concat %d\n", INSN_UID (insn));
41781 }
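
/* Sketch: (vec_concat A B) becomes (vec_concat B A), so that the
   destination register ends up holding the doubleword-swapped image
   of the intended vector, consistent with the rest of the web.  */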
41782
41783 /* Given an UNSPEC_VPERM insn, modify the mask loaded from the
41784 constant pool to reflect swapped doublewords. */
41785 static void
41786 adjust_vperm (rtx_insn *insn)
41787 {
41788 /* We previously determined that the UNSPEC_VPERM was fed by a
41789 swap of a swapping load of a TOC-relative constant pool symbol.
41790 Find the MEM in the swapping load and replace it with a MEM for
41791 the adjusted mask constant. */
41792 rtx set = PATTERN (insn);
41793 rtx mask_reg = XVECEXP (SET_SRC (set), 0, 2);
41794
41795 /* Find the swap. */
41796 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
41797 df_ref use;
41798 rtx_insn *swap_insn = 0;
41799 FOR_EACH_INSN_INFO_USE (use, insn_info)
41800 if (rtx_equal_p (DF_REF_REG (use), mask_reg))
41801 {
41802 struct df_link *def_link = DF_REF_CHAIN (use);
41803 gcc_assert (def_link && !def_link->next);
41804 swap_insn = DF_REF_INSN (def_link->ref);
41805 break;
41806 }
41807 gcc_assert (swap_insn);
41808
41809 /* Find the load. */
41810 insn_info = DF_INSN_INFO_GET (swap_insn);
41811 rtx_insn *load_insn = 0;
41812 FOR_EACH_INSN_INFO_USE (use, insn_info)
41813 {
41814 struct df_link *def_link = DF_REF_CHAIN (use);
41815 gcc_assert (def_link && !def_link->next);
41816 load_insn = DF_REF_INSN (def_link->ref);
41817 break;
41818 }
41819 gcc_assert (load_insn);
41820
41821 /* Find the TOC-relative symbol access. */
41822 insn_info = DF_INSN_INFO_GET (load_insn);
41823 rtx_insn *tocrel_insn = 0;
41824 FOR_EACH_INSN_INFO_USE (use, insn_info)
41825 {
41826 struct df_link *def_link = DF_REF_CHAIN (use);
41827 gcc_assert (def_link && !def_link->next);
41828 tocrel_insn = DF_REF_INSN (def_link->ref);
41829 break;
41830 }
41831 gcc_assert (tocrel_insn);
41832
41833 /* Find the embedded CONST_VECTOR. We must call toc_relative_expr_p for
41834    its side effect of setting tocrel_base; the call would otherwise be
41835    unnecessary, as we have already established it will return true. */
41836 rtx base, offset;
41837 rtx tocrel_expr = SET_SRC (PATTERN (tocrel_insn));
41838 /* There is an extra level of indirection for small/large code models. */
41839 if (GET_CODE (tocrel_expr) == MEM)
41840 tocrel_expr = XEXP (tocrel_expr, 0);
41841 if (!toc_relative_expr_p (tocrel_expr, false))
41842 gcc_unreachable ();
41843 split_const (XVECEXP (tocrel_base, 0, 0), &base, &offset);
41844 rtx const_vector = get_pool_constant (base);
41845 /* With the extra indirection, get_pool_constant will produce the
41846 real constant from the reg_equal expression, so get the real
41847 constant. */
41848 if (GET_CODE (const_vector) == SYMBOL_REF)
41849 const_vector = get_pool_constant (const_vector);
41850 gcc_assert (GET_CODE (const_vector) == CONST_VECTOR);
41851
41852 /* Create an adjusted mask from the initial mask. */
41853 unsigned int new_mask[16], i, val;
41854 for (i = 0; i < 16; ++i) {
41855 val = INTVAL (XVECEXP (const_vector, 0, i));
41856 if (val < 16)
41857 new_mask[i] = (val + 8) % 16;
41858 else
41859 new_mask[i] = ((val + 8) % 16) + 16;
41860 }
41861
41862 /* Create a new CONST_VECTOR and a MEM that references it. */
41863 rtx vals = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
41864 for (i = 0; i < 16; ++i)
41865 XVECEXP (vals, 0, i) = GEN_INT (new_mask[i]);
41866 rtx new_const_vector = gen_rtx_CONST_VECTOR (V16QImode, XVEC (vals, 0));
41867 rtx new_mem = force_const_mem (V16QImode, new_const_vector);
41868 /* This gives us a MEM whose base operand is a SYMBOL_REF, which we
41869 can't recognize. Force the SYMBOL_REF into a register. */
41870 if (!REG_P (XEXP (new_mem, 0))) {
41871 rtx base_reg = force_reg (Pmode, XEXP (new_mem, 0));
41872 XEXP (new_mem, 0) = base_reg;
41873 /* Move the newly created insn ahead of the load insn. */
41874 rtx_insn *force_insn = get_last_insn ();
41875 remove_insn (force_insn);
41876 rtx_insn *before_load_insn = PREV_INSN (load_insn);
41877 add_insn_after (force_insn, before_load_insn, BLOCK_FOR_INSN (load_insn));
41878 df_insn_rescan (before_load_insn);
41879 df_insn_rescan (force_insn);
41880 }
41881
41882 /* Replace the MEM in the load instruction and rescan it. */
41883 XEXP (SET_SRC (PATTERN (load_insn)), 0) = new_mem;
41884 INSN_CODE (load_insn) = -1; /* Force re-recognition. */
41885 df_insn_rescan (load_insn);
41886
41887 if (dump_file)
41888 fprintf (dump_file, "Adjusting mask for vperm %d\n", INSN_UID (insn));
41889 }
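
/* Sketch of the mask arithmetic above: vperm mask bytes 0-15 select
   from the first input and 16-31 from the second; adding 8 modulo 16
   swaps the doublewords within each input, so a mask byte of 0 maps
   to 8, 15 maps to 7, and 16 maps to 24.  */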
41890
41891 /* The insn described by INSN_ENTRY[I] can be swapped, but only
41892 with special handling. Take care of that here. */
41893 static void
41894 handle_special_swappables (swap_web_entry *insn_entry, unsigned i)
41895 {
41896 rtx_insn *insn = insn_entry[i].insn;
41897 rtx body = PATTERN (insn);
41898
41899 switch (insn_entry[i].special_handling)
41900 {
41901 default:
41902 gcc_unreachable ();
41903 case SH_CONST_VECTOR:
41904 {
41905 /* A CONST_VECTOR will only show up somewhere in the RHS of a SET. */
41906 gcc_assert (GET_CODE (body) == SET);
41907 rtx rhs = SET_SRC (body);
41908 swap_const_vector_halves (rhs);
41909 if (dump_file)
41910 fprintf (dump_file, "Swapping constant halves in insn %d\n", i);
41911 break;
41912 }
41913 case SH_SUBREG:
41914 /* A subreg of the same size is already safe. For subregs that
41915 select a smaller portion of a reg, adjust the index for
41916 swapped doublewords. */
41917 adjust_subreg_index (body);
41918 if (dump_file)
41919 fprintf (dump_file, "Adjusting subreg in insn %d\n", i);
41920 break;
41921 case SH_NOSWAP_LD:
41922 /* Convert a non-permuting load to a permuting one. */
41923 permute_load (insn);
41924 break;
41925 case SH_NOSWAP_ST:
41926 /* Convert a non-permuting store to a permuting one. */
41927 permute_store (insn);
41928 break;
41929 case SH_EXTRACT:
41930 /* Change the lane on an extract operation. */
41931 adjust_extract (insn);
41932 break;
41933 case SH_SPLAT:
41934 /* Change the lane on a direct-splat operation. */
41935 adjust_splat (insn);
41936 break;
41937 case SH_XXPERMDI:
41938 /* Change the lanes on an XXPERMDI operation. */
41939 adjust_xxpermdi (insn);
41940 break;
41941 case SH_CONCAT:
41942 /* Reverse the order of a concatenation operation. */
41943 adjust_concat (insn);
41944 break;
41945 case SH_VPERM:
41946 /* Change the mask loaded from the constant pool for a VPERM. */
41947 adjust_vperm (insn);
41948 break;
41949 }
41950 }
41951
41952 /* Find the insn from the Ith table entry, which is known to be a
41953 register swap Y = SWAP(X). Replace it with a copy Y = X. */
41954 static void
41955 replace_swap_with_copy (swap_web_entry *insn_entry, unsigned i)
41956 {
41957 rtx_insn *insn = insn_entry[i].insn;
41958 rtx body = PATTERN (insn);
41959 rtx src_reg = XEXP (SET_SRC (body), 0);
41960 rtx copy = gen_rtx_SET (SET_DEST (body), src_reg);
41961 rtx_insn *new_insn = emit_insn_before (copy, insn);
41962 set_block_for_insn (new_insn, BLOCK_FOR_INSN (insn));
41963 df_insn_rescan (new_insn);
41964
41965 if (dump_file)
41966 {
41967 unsigned int new_uid = INSN_UID (new_insn);
41968 fprintf (dump_file, "Replacing swap %d with copy %d\n", i, new_uid);
41969 }
41970
41971 df_insn_delete (insn);
41972 remove_insn (insn);
41973 insn->set_deleted ();
41974 }
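
/* Sketch: a register swap such as

     (set (reg:V4SI y)
          (vec_select:V4SI (reg:V4SI x)
                           (parallel [(const_int 2) (const_int 3)
                                      (const_int 0) (const_int 1)])))

   whose effect is known to cancel within its web is replaced by the
   plain copy (set (reg:V4SI y) (reg:V4SI x)).  */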
41975
41976 /* Dump the swap table to DUMP_FILE. */
41977 static void
41978 dump_swap_insn_table (swap_web_entry *insn_entry)
41979 {
41980 int e = get_max_uid ();
41981 fprintf (dump_file, "\nRelevant insns with their flag settings\n\n");
41982
41983 for (int i = 0; i < e; ++i)
41984 if (insn_entry[i].is_relevant)
41985 {
41986 swap_web_entry *pred_entry = (swap_web_entry *)insn_entry[i].pred ();
41987 fprintf (dump_file, "%6d %6d ", i,
41988 pred_entry && pred_entry->insn
41989 ? INSN_UID (pred_entry->insn) : 0);
41990 if (insn_entry[i].is_load)
41991 fputs ("load ", dump_file);
41992 if (insn_entry[i].is_store)
41993 fputs ("store ", dump_file);
41994 if (insn_entry[i].is_swap)
41995 fputs ("swap ", dump_file);
41996 if (insn_entry[i].is_live_in)
41997 fputs ("live-in ", dump_file);
41998 if (insn_entry[i].is_live_out)
41999 fputs ("live-out ", dump_file);
42000 if (insn_entry[i].contains_subreg)
42001 fputs ("subreg ", dump_file);
42002 if (insn_entry[i].is_128_int)
42003 fputs ("int128 ", dump_file);
42004 if (insn_entry[i].is_call)
42005 fputs ("call ", dump_file);
42006 if (insn_entry[i].is_swappable)
42007 {
42008 fputs ("swappable ", dump_file);
42009 if (insn_entry[i].special_handling == SH_CONST_VECTOR)
42010 fputs ("special:constvec ", dump_file);
42011 else if (insn_entry[i].special_handling == SH_SUBREG)
42012 fputs ("special:subreg ", dump_file);
42013 else if (insn_entry[i].special_handling == SH_NOSWAP_LD)
42014 fputs ("special:load ", dump_file);
42015 else if (insn_entry[i].special_handling == SH_NOSWAP_ST)
42016 fputs ("special:store ", dump_file);
42017 else if (insn_entry[i].special_handling == SH_EXTRACT)
42018 fputs ("special:extract ", dump_file);
42019 else if (insn_entry[i].special_handling == SH_SPLAT)
42020 fputs ("special:splat ", dump_file);
42021 else if (insn_entry[i].special_handling == SH_XXPERMDI)
42022 fputs ("special:xxpermdi ", dump_file);
42023 else if (insn_entry[i].special_handling == SH_CONCAT)
42024 fputs ("special:concat ", dump_file);
42025 else if (insn_entry[i].special_handling == SH_VPERM)
42026 fputs ("special:vperm ", dump_file);
42027 }
42028 if (insn_entry[i].web_not_optimizable)
42029 fputs ("unoptimizable ", dump_file);
42030 if (insn_entry[i].will_delete)
42031 fputs ("delete ", dump_file);
42032 fputs ("\n", dump_file);
42033 }
42034 fputs ("\n", dump_file);
42035 }
42036
42037 /* Given ALIGN, an rtx of the form (and ADDR (const_int -16)), return
42038    a copy with ADDR canonicalized to (reg) or (plus reg reg). Always
42039    return a new copy to avoid problems with combine. */
42040 static rtx
42041 alignment_with_canonical_addr (rtx align)
42042 {
42043 rtx canon;
42044 rtx addr = XEXP (align, 0);
42045
42046 if (REG_P (addr))
42047 canon = addr;
42048
42049 else if (GET_CODE (addr) == PLUS)
42050 {
42051 rtx addrop0 = XEXP (addr, 0);
42052 rtx addrop1 = XEXP (addr, 1);
42053
42054 if (!REG_P (addrop0))
42055 addrop0 = force_reg (GET_MODE (addrop0), addrop0);
42056
42057 if (!REG_P (addrop1))
42058 addrop1 = force_reg (GET_MODE (addrop1), addrop1);
42059
42060 canon = gen_rtx_PLUS (GET_MODE (addr), addrop0, addrop1);
42061 }
42062
42063 else
42064 canon = force_reg (GET_MODE (addr), addr);
42065
42066 return gen_rtx_AND (GET_MODE (align), canon, GEN_INT (-16));
42067 }
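
/* Sketch: (and (plus (reg r3) (const_int 16)) (const_int -16)) is
   returned as (and (plus (reg r3) (reg rN)) (const_int -16)), where
   rN is a fresh register holding 16, so that the address has the reg
   or reg+reg shape the lvx/stvx patterns expect.  */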
42068
42069 /* Check whether an rtx is an alignment mask, and if so, return
42070 a fully-expanded rtx for the masking operation. */
42071 static rtx
42072 alignment_mask (rtx_insn *insn)
42073 {
42074 rtx body = PATTERN (insn);
42075
42076 if (GET_CODE (body) != SET
42077 || GET_CODE (SET_SRC (body)) != AND
42078 || !REG_P (XEXP (SET_SRC (body), 0)))
42079 return 0;
42080
42081 rtx mask = XEXP (SET_SRC (body), 1);
42082
42083 if (GET_CODE (mask) == CONST_INT)
42084 {
42085 if (INTVAL (mask) == -16)
42086 return alignment_with_canonical_addr (SET_SRC (body));
42087 else
42088 return 0;
42089 }
42090
42091 if (!REG_P (mask))
42092 return 0;
42093
42094 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
42095 df_ref use;
42096 rtx real_mask = 0;
42097
42098 FOR_EACH_INSN_INFO_USE (use, insn_info)
42099 {
42100 if (!rtx_equal_p (DF_REF_REG (use), mask))
42101 continue;
42102
42103 struct df_link *def_link = DF_REF_CHAIN (use);
42104 if (!def_link || def_link->next)
42105 return 0;
42106
42107 rtx_insn *const_insn = DF_REF_INSN (def_link->ref);
42108 rtx const_body = PATTERN (const_insn);
42109 if (GET_CODE (const_body) != SET)
42110 return 0;
42111
42112 real_mask = SET_SRC (const_body);
42113
42114 if (GET_CODE (real_mask) != CONST_INT
42115 || INTVAL (real_mask) != -16)
42116 return 0;
42117 }
42118
42119 if (real_mask == 0)
42120 return 0;
42121
42122 return alignment_with_canonical_addr (SET_SRC (body));
42123 }
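
/* Both shapes below are recognized as a 16-byte alignment mask of P
   (a sketch, with modes and uids elided):

     (set (reg a) (and (reg p) (const_int -16)))

   and the two-insn form

     (set (reg m) (const_int -16))
     (set (reg a) (and (reg p) (reg m)))  */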
42124
42125 /* Given INSN that's a load or store based at BASE_REG, look for a
42126 feeding computation that aligns its address on a 16-byte boundary. */
42127 static rtx
42128 find_alignment_op (rtx_insn *insn, rtx base_reg)
42129 {
42130 df_ref base_use;
42131 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
42132 rtx and_operation = 0;
42133
42134 FOR_EACH_INSN_INFO_USE (base_use, insn_info)
42135 {
42136 if (!rtx_equal_p (DF_REF_REG (base_use), base_reg))
42137 continue;
42138
42139 struct df_link *base_def_link = DF_REF_CHAIN (base_use);
42140 if (!base_def_link || base_def_link->next)
42141 break;
42142
42143 /* With stack-protector code enabled, and possibly in other
42144 circumstances, there may not be an associated insn for
42145 the def. */
42146 if (DF_REF_IS_ARTIFICIAL (base_def_link->ref))
42147 break;
42148
42149 rtx_insn *and_insn = DF_REF_INSN (base_def_link->ref);
42150 and_operation = alignment_mask (and_insn);
42151 if (and_operation != 0)
42152 break;
42153 }
42154
42155 return and_operation;
42156 }
42157
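/* Information about a swap insn that the lvx/stvx recombination below
   has decided to replace with a copy; processing is deferred until
   after the insn walk.  */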
42158 struct del_info { bool replace; rtx_insn *replace_insn; };
42159
42160 /* If INSN is the load for an lvx pattern, put it in canonical form. */
42161 static void
42162 recombine_lvx_pattern (rtx_insn *insn, del_info *to_delete)
42163 {
42164 rtx body = PATTERN (insn);
42165 gcc_assert (GET_CODE (body) == SET
42166 && GET_CODE (SET_SRC (body)) == VEC_SELECT
42167 && GET_CODE (XEXP (SET_SRC (body), 0)) == MEM);
42168
42169 rtx mem = XEXP (SET_SRC (body), 0);
42170 rtx base_reg = XEXP (mem, 0);
42171
42172 rtx and_operation = find_alignment_op (insn, base_reg);
42173
42174 if (and_operation != 0)
42175 {
42176 df_ref def;
42177 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
42178 FOR_EACH_INSN_INFO_DEF (def, insn_info)
42179 {
42180 struct df_link *link = DF_REF_CHAIN (def);
42181 if (!link || link->next)
42182 break;
42183
42184 rtx_insn *swap_insn = DF_REF_INSN (link->ref);
42185 if (!insn_is_swap_p (swap_insn)
42186 || insn_is_load_p (swap_insn)
42187 || insn_is_store_p (swap_insn))
42188 break;
42189
42190 /* Expected lvx pattern found. Change the swap to
42191 a copy, and propagate the AND operation into the
42192 load. */
42193 to_delete[INSN_UID (swap_insn)].replace = true;
42194 to_delete[INSN_UID (swap_insn)].replace_insn = swap_insn;
42195
42196 XEXP (mem, 0) = and_operation;
42197 SET_SRC (body) = mem;
42198 INSN_CODE (insn) = -1; /* Force re-recognition. */
42199 df_insn_rescan (insn);
42200
42201 if (dump_file)
42202 fprintf (dump_file, "lvx opportunity found at %d\n",
42203 INSN_UID (insn));
42204 }
42205 }
42206 }
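
/* Sketch of the recombination: the recognized sequence

     (set (reg p) (and (reg q) (const_int -16)))
     (set (reg v) (vec_select (mem (reg p)) ...))   ; permuting load
     (set (reg w) (vec_select (reg v) ...))         ; doubleword swap

   becomes

     (set (reg v) (mem (and (reg q) (const_int -16))))  ; lvx form
     (set (reg w) (reg v))                              ; copy

   with the swap-to-copy rewrite performed later from TO_DELETE.  */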
42207
42208 /* If INSN is the store for an stvx pattern, put it in canonical form. */
42209 static void
42210 recombine_stvx_pattern (rtx_insn *insn, del_info *to_delete)
42211 {
42212 rtx body = PATTERN (insn);
42213 gcc_assert (GET_CODE (body) == SET
42214 && GET_CODE (SET_DEST (body)) == MEM
42215 && GET_CODE (SET_SRC (body)) == VEC_SELECT);
42216 rtx mem = SET_DEST (body);
42217 rtx base_reg = XEXP (mem, 0);
42218
42219 rtx and_operation = find_alignment_op (insn, base_reg);
42220
42221 if (and_operation != 0)
42222 {
42223 rtx src_reg = XEXP (SET_SRC (body), 0);
42224 df_ref src_use;
42225 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
42226 FOR_EACH_INSN_INFO_USE (src_use, insn_info)
42227 {
42228 if (!rtx_equal_p (DF_REF_REG (src_use), src_reg))
42229 continue;
42230
42231 struct df_link *link = DF_REF_CHAIN (src_use);
42232 if (!link || link->next)
42233 break;
42234
42235 rtx_insn *swap_insn = DF_REF_INSN (link->ref);
42236 if (!insn_is_swap_p (swap_insn)
42237 || insn_is_load_p (swap_insn)
42238 || insn_is_store_p (swap_insn))
42239 break;
42240
42241 /* Expected stvx pattern found. Change the swap to
42242 a copy, and propagate the AND operation into the
42243 store. */
42244 to_delete[INSN_UID (swap_insn)].replace = true;
42245 to_delete[INSN_UID (swap_insn)].replace_insn = swap_insn;
42246
42247 XEXP (mem, 0) = and_operation;
42248 SET_SRC (body) = src_reg;
42249 INSN_CODE (insn) = -1; /* Force re-recognition. */
42250 df_insn_rescan (insn);
42251
42252 if (dump_file)
42253 fprintf (dump_file, "stvx opportunity found at %d\n",
42254 INSN_UID (insn));
42255 }
42256 }
42257 }
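
/* The store case is the mirror image: a swap feeding a permuting
   store whose base address is masked with -16 becomes a plain store
   to (mem (and ...)), i.e. the canonical stvx form, with the swap
   again queued in TO_DELETE to become a copy.  */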
42258
42259 /* Look for patterns created from builtin lvx and stvx calls, and
42260 canonicalize them to be properly recognized as such. */
42261 static void
42262 recombine_lvx_stvx_patterns (function *fun)
42263 {
42264 int i;
42265 basic_block bb;
42266 rtx_insn *insn;
42267
42268 int num_insns = get_max_uid ();
42269 del_info *to_delete = XCNEWVEC (del_info, num_insns);
42270
42271 FOR_ALL_BB_FN (bb, fun)
42272 FOR_BB_INSNS (bb, insn)
42273 {
42274 if (!NONDEBUG_INSN_P (insn))
42275 continue;
42276
42277 if (insn_is_load_p (insn) && insn_is_swap_p (insn))
42278 recombine_lvx_pattern (insn, to_delete);
42279 else if (insn_is_store_p (insn) && insn_is_swap_p (insn))
42280 recombine_stvx_pattern (insn, to_delete);
42281 }
42282
42283 /* Turning swaps into copies is delayed until now, to avoid problems
42284 with deleting instructions during the insn walk. */
42285 for (i = 0; i < num_insns; i++)
42286 if (to_delete[i].replace)
42287 {
42288 rtx swap_body = PATTERN (to_delete[i].replace_insn);
42289 rtx src_reg = XEXP (SET_SRC (swap_body), 0);
42290 rtx copy = gen_rtx_SET (SET_DEST (swap_body), src_reg);
42291 rtx_insn *new_insn = emit_insn_before (copy,
42292 to_delete[i].replace_insn);
42293 set_block_for_insn (new_insn,
42294 BLOCK_FOR_INSN (to_delete[i].replace_insn));
42295 df_insn_rescan (new_insn);
42296 df_insn_delete (to_delete[i].replace_insn);
42297 remove_insn (to_delete[i].replace_insn);
42298 to_delete[i].replace_insn->set_deleted ();
42299 }
42300
42301 free (to_delete);
42302 }
42303
42304 /* Main entry point for this pass. */
42305 unsigned int
42306 rs6000_analyze_swaps (function *fun)
42307 {
42308 swap_web_entry *insn_entry;
42309 basic_block bb;
42310 rtx_insn *insn, *curr_insn = 0;
42311
42312 /* Dataflow analysis for use-def chains. */
42313 df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
42314 df_chain_add_problem (DF_DU_CHAIN | DF_UD_CHAIN);
42315 df_analyze ();
42316 df_set_flags (DF_DEFER_INSN_RESCAN);
42317
42318 /* Pre-pass to recombine lvx and stvx patterns so we don't lose info. */
42319 recombine_lvx_stvx_patterns (fun);
42320
42321 /* Allocate structure to represent webs of insns. */
42322 insn_entry = XCNEWVEC (swap_web_entry, get_max_uid ());
42323
42324 /* Walk the insns to gather basic data. */
42325 FOR_ALL_BB_FN (bb, fun)
42326 FOR_BB_INSNS_SAFE (bb, insn, curr_insn)
42327 {
42328 unsigned int uid = INSN_UID (insn);
42329 if (NONDEBUG_INSN_P (insn))
42330 {
42331 insn_entry[uid].insn = insn;
42332
42333 if (GET_CODE (insn) == CALL_INSN)
42334 insn_entry[uid].is_call = 1;
42335
42336 /* Walk the uses and defs to see if we mention vector regs.
42337 Record any constraints on optimization of such mentions. */
42338 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
42339 df_ref mention;
42340 FOR_EACH_INSN_INFO_USE (mention, insn_info)
42341 {
42342 /* We use DF_REF_REAL_REG here to get inside any subregs. */
42343 machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));
42344
42345 /* If a use gets its value from a call insn, it will be
42346 a hard register and will look like (reg:V4SI 3 3).
42347 The df analysis creates two mentions for GPR3 and GPR4,
42348 both DImode. We must recognize this and treat it as a
42349 vector mention to ensure the call is unioned with this
42350 use. */
42351 if (mode == DImode && DF_REF_INSN_INFO (mention))
42352 {
42353 rtx feeder = DF_REF_INSN (mention);
42354 /* FIXME: It is pretty hard to get from the df mention
42355 to the mode of the use in the insn. We arbitrarily
42356 pick a vector mode here, even though the use might
42357 be a real DImode. We can be too conservative
42358 (create a web larger than necessary) because of
42359 this, so consider eventually fixing this. */
42360 if (GET_CODE (feeder) == CALL_INSN)
42361 mode = V4SImode;
42362 }
42363
42364 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode) || mode == TImode)
42365 {
42366 insn_entry[uid].is_relevant = 1;
42367 if (mode == TImode || mode == V1TImode
42368 || FLOAT128_VECTOR_P (mode))
42369 insn_entry[uid].is_128_int = 1;
42370 if (DF_REF_INSN_INFO (mention))
42371 insn_entry[uid].contains_subreg
42372 = !rtx_equal_p (DF_REF_REG (mention),
42373 DF_REF_REAL_REG (mention));
42374 union_defs (insn_entry, insn, mention);
42375 }
42376 }
42377 FOR_EACH_INSN_INFO_DEF (mention, insn_info)
42378 {
42379 /* We use DF_REF_REAL_REG here to get inside any subregs. */
42380 machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));
42381
42382 /* If we're loading up a hard vector register for a call,
42383 it looks like (set (reg:V4SI 9 9) (...)). The df
42384 analysis creates two mentions for GPR9 and GPR10, both
42385 DImode. So relying on the mode from the mentions
42386 isn't sufficient to ensure we union the call into the
42387 web with the parameter setup code. */
42388 if (mode == DImode && GET_CODE (PATTERN (insn)) == SET
42389 && ALTIVEC_OR_VSX_VECTOR_MODE (GET_MODE (SET_DEST (PATTERN (insn)))))
42390 mode = GET_MODE (SET_DEST (PATTERN (insn)));
42391
42392 if (ALTIVEC_OR_VSX_VECTOR_MODE (mode) || mode == TImode)
42393 {
42394 insn_entry[uid].is_relevant = 1;
42395 if (mode == TImode || mode == V1TImode
42396 || FLOAT128_VECTOR_P (mode))
42397 insn_entry[uid].is_128_int = 1;
42398 if (DF_REF_INSN_INFO (mention))
42399 insn_entry[uid].contains_subreg
42400 = !rtx_equal_p (DF_REF_REG (mention),
42401 DF_REF_REAL_REG (mention));
42402 /* REG_FUNCTION_VALUE_P is not valid for subregs. */
42403 else if (REG_FUNCTION_VALUE_P (DF_REF_REG (mention)))
42404 insn_entry[uid].is_live_out = 1;
42405 union_uses (insn_entry, insn, mention);
42406 }
42407 }
42408
42409 if (insn_entry[uid].is_relevant)
42410 {
42411 /* Determine if this is a load or store. */
42412 insn_entry[uid].is_load = insn_is_load_p (insn);
42413 insn_entry[uid].is_store = insn_is_store_p (insn);
42414
42415 /* Determine if this is a doubleword swap. If not,
42416 determine whether it can legally be swapped. */
42417 if (insn_is_swap_p (insn))
42418 insn_entry[uid].is_swap = 1;
42419 else
42420 {
42421 unsigned int special = SH_NONE;
42422 insn_entry[uid].is_swappable
42423 = insn_is_swappable_p (insn_entry, insn, &special);
42424 if (special != SH_NONE && insn_entry[uid].contains_subreg)
42425 insn_entry[uid].is_swappable = 0;
42426 else if (special != SH_NONE)
42427 insn_entry[uid].special_handling = special;
42428 else if (insn_entry[uid].contains_subreg)
42429 insn_entry[uid].special_handling = SH_SUBREG;
42430 }
42431 }
42432 }
42433 }
42434
42435 if (dump_file)
42436 {
42437 fprintf (dump_file, "\nSwap insn entry table when first built\n");
42438 dump_swap_insn_table (insn_entry);
42439 }
42440
42441 /* Record unoptimizable webs. */
42442 unsigned e = get_max_uid (), i;
42443 for (i = 0; i < e; ++i)
42444 {
42445 if (!insn_entry[i].is_relevant)
42446 continue;
42447
42448 swap_web_entry *root
42449 = (swap_web_entry*)(&insn_entry[i])->unionfind_root ();
42450
42451 if (insn_entry[i].is_live_in || insn_entry[i].is_live_out
42452 || (insn_entry[i].contains_subreg
42453 && insn_entry[i].special_handling != SH_SUBREG)
42454 || insn_entry[i].is_128_int || insn_entry[i].is_call
42455 || !(insn_entry[i].is_swappable || insn_entry[i].is_swap))
42456 root->web_not_optimizable = 1;
42457
42458 /* If we have loads or stores that aren't permuting then the
42459 optimization isn't appropriate. */
42460 else if ((insn_entry[i].is_load || insn_entry[i].is_store)
42461 && !insn_entry[i].is_swap && !insn_entry[i].is_swappable)
42462 root->web_not_optimizable = 1;
42463
42464 /* If we have permuting loads or stores that are not accompanied
42465 by a register swap, the optimization isn't appropriate. */
42466 else if (insn_entry[i].is_load && insn_entry[i].is_swap)
42467 {
42468 rtx insn = insn_entry[i].insn;
42469 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
42470 df_ref def;
42471
42472 FOR_EACH_INSN_INFO_DEF (def, insn_info)
42473 {
42474 struct df_link *link = DF_REF_CHAIN (def);
42475
42476 if (!chain_contains_only_swaps (insn_entry, link, FOR_LOADS))
42477 {
42478 root->web_not_optimizable = 1;
42479 break;
42480 }
42481 }
42482 }
42483 else if (insn_entry[i].is_store && insn_entry[i].is_swap)
42484 {
42485 rtx insn = insn_entry[i].insn;
42486 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
42487 df_ref use;
42488
42489 FOR_EACH_INSN_INFO_USE (use, insn_info)
42490 {
42491 struct df_link *link = DF_REF_CHAIN (use);
42492
42493 if (!chain_contains_only_swaps (insn_entry, link, FOR_STORES))
42494 {
42495 root->web_not_optimizable = 1;
42496 break;
42497 }
42498 }
42499 }
42500 }
42501
42502 if (dump_file)
42503 {
42504 fprintf (dump_file, "\nSwap insn entry table after web analysis\n");
42505 dump_swap_insn_table (insn_entry);
42506 }
42507
42508 /* For each load and store in an optimizable web (which implies
42509 the loads and stores are permuting), find the associated
42510 register swaps and mark them for removal. Due to various
42511 optimizations we may mark the same swap more than once. Also
42512 perform special handling for swappable insns that require it. */
42513 for (i = 0; i < e; ++i)
42514 if ((insn_entry[i].is_load || insn_entry[i].is_store)
42515 && insn_entry[i].is_swap)
42516 {
42517 swap_web_entry* root_entry
42518 = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
42519 if (!root_entry->web_not_optimizable)
42520 mark_swaps_for_removal (insn_entry, i);
42521 }
42522 else if (insn_entry[i].is_swappable && insn_entry[i].special_handling)
42523 {
42524 swap_web_entry* root_entry
42525 = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
42526 if (!root_entry->web_not_optimizable)
42527 handle_special_swappables (insn_entry, i);
42528 }
42529
42530 /* Now delete the swaps marked for removal. */
42531 for (i = 0; i < e; ++i)
42532 if (insn_entry[i].will_delete)
42533 replace_swap_with_copy (insn_entry, i);
42534
42535 /* Clean up. */
42536 free (insn_entry);
42537 return 0;
42538 }
42539
42540 const pass_data pass_data_analyze_swaps =
42541 {
42542 RTL_PASS, /* type */
42543 "swaps", /* name */
42544 OPTGROUP_NONE, /* optinfo_flags */
42545 TV_NONE, /* tv_id */
42546 0, /* properties_required */
42547 0, /* properties_provided */
42548 0, /* properties_destroyed */
42549 0, /* todo_flags_start */
42550 TODO_df_finish, /* todo_flags_finish */
42551 };
42552
42553 class pass_analyze_swaps : public rtl_opt_pass
42554 {
42555 public:
42556 pass_analyze_swaps (gcc::context *ctxt)
42557 : rtl_opt_pass (pass_data_analyze_swaps, ctxt)
42558 {}
42559
42560 /* opt_pass methods: */
42561 virtual bool gate (function *)
42562 {
42563 return (optimize > 0 && !BYTES_BIG_ENDIAN && TARGET_VSX
42564 && !TARGET_P9_VECTOR && rs6000_optimize_swaps);
42565 }
42566
42567 virtual unsigned int execute (function *fun)
42568 {
42569 return rs6000_analyze_swaps (fun);
42570 }
42571
42572 opt_pass *clone ()
42573 {
42574 return new pass_analyze_swaps (m_ctxt);
42575 }
42576
42577 }; // class pass_analyze_swaps
42578
42579 rtl_opt_pass *
42580 make_pass_analyze_swaps (gcc::context *ctxt)
42581 {
42582 return new pass_analyze_swaps (ctxt);
42583 }
42584
42585 #ifdef RS6000_GLIBC_ATOMIC_FENV
42586 /* Function declarations for rs6000_atomic_assign_expand_fenv. */
42587 static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
42588 #endif
42589
42590 /* Implement the TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook. */
42591
42592 static void
42593 rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
42594 {
42595 if (!TARGET_HARD_FLOAT || !TARGET_FPRS)
42596 {
42597 #ifdef RS6000_GLIBC_ATOMIC_FENV
42598 if (atomic_hold_decl == NULL_TREE)
42599 {
42600 atomic_hold_decl
42601 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
42602 get_identifier ("__atomic_feholdexcept"),
42603 build_function_type_list (void_type_node,
42604 double_ptr_type_node,
42605 NULL_TREE));
42606 TREE_PUBLIC (atomic_hold_decl) = 1;
42607 DECL_EXTERNAL (atomic_hold_decl) = 1;
42608 }
42609
42610 if (atomic_clear_decl == NULL_TREE)
42611 {
42612 atomic_clear_decl
42613 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
42614 get_identifier ("__atomic_feclearexcept"),
42615 build_function_type_list (void_type_node,
42616 NULL_TREE));
42617 TREE_PUBLIC (atomic_clear_decl) = 1;
42618 DECL_EXTERNAL (atomic_clear_decl) = 1;
42619 }
42620
42621 tree const_double = build_qualified_type (double_type_node,
42622 TYPE_QUAL_CONST);
42623 tree const_double_ptr = build_pointer_type (const_double);
42624 if (atomic_update_decl == NULL_TREE)
42625 {
42626 atomic_update_decl
42627 = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
42628 get_identifier ("__atomic_feupdateenv"),
42629 build_function_type_list (void_type_node,
42630 const_double_ptr,
42631 NULL_TREE));
42632 TREE_PUBLIC (atomic_update_decl) = 1;
42633 DECL_EXTERNAL (atomic_update_decl) = 1;
42634 }
42635
42636 tree fenv_var = create_tmp_var_raw (double_type_node);
42637 TREE_ADDRESSABLE (fenv_var) = 1;
42638 tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);
42639
42640 *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
42641 *clear = build_call_expr (atomic_clear_decl, 0);
42642 *update = build_call_expr (atomic_update_decl, 1,
42643 fold_convert (const_double_ptr, fenv_addr));
42644 #endif
42645 return;
42646 }
42647
42648 tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
42649 tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
42650 tree call_mffs = build_call_expr (mffs, 0);
42651
42652 /* Generates the equivalent of feholdexcept (&fenv_var)
42653
42654 *fenv_var = __builtin_mffs ();
42655 double fenv_hold;
42656 *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
42657 __builtin_mtfsf (0xff, fenv_hold); */
42658
42659 /* Mask to clear everything except for the rounding modes and non-IEEE
42660 arithmetic flag. */
42661 const unsigned HOST_WIDE_INT hold_exception_mask =
42662 HOST_WIDE_INT_C (0xffffffff00000007);
42663
42664 tree fenv_var = create_tmp_var_raw (double_type_node);
42665
42666 tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);
42667
42668 tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
42669 tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
42670 build_int_cst (uint64_type_node,
42671 hold_exception_mask));
42672
42673 tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
42674 fenv_llu_and);
42675
42676 tree hold_mtfsf = build_call_expr (mtfsf, 2,
42677 build_int_cst (unsigned_type_node, 0xff),
42678 fenv_hold_mtfsf);
42679
42680 *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);
42681
42682 /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):
42683
42684 double fenv_clear = __builtin_mffs ();
42685 *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
42686 __builtin_mtfsf (0xff, fenv_clear); */
42687
42688 /* Mask to clear the entire lower word, which holds the FPSCR image;
42689    this zeroes all exception bits. */
42690 const unsigned HOST_WIDE_INT clear_exception_mask =
42691 HOST_WIDE_INT_C (0xffffffff00000000);
42692
42693 tree fenv_clear = create_tmp_var_raw (double_type_node);
42694
42695 tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear, call_mffs);
42696
42697 tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_clear);
42698 tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
42699 fenv_clean_llu,
42700 build_int_cst (uint64_type_node,
42701 clear_exception_mask));
42702
42703 tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
42704 fenv_clear_llu_and);
42705
42706 tree clear_mtfsf = build_call_expr (mtfsf, 2,
42707 build_int_cst (unsigned_type_node, 0xff),
42708 fenv_clear_mtfsf);
42709
42710 *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);
42711
42712 /* Generates the equivalent of feupdateenv (&fenv_var)
42713
42714 double old_fenv = __builtin_mffs ();
42715 double fenv_update;
42716 *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL) |
42717 (*(uint64_t*)fenv_var & 0x1ff80fff);
42718 __builtin_mtfsf (0xff, fenv_update); */
42719
42720 const unsigned HOST_WIDE_INT update_exception_mask =
42721 HOST_WIDE_INT_C (0xffffffff1fffff00);
42722 const unsigned HOST_WIDE_INT new_exception_mask =
42723 HOST_WIDE_INT_C (0x1ff80fff);
42724
42725 tree old_fenv = create_tmp_var_raw (double_type_node);
42726 tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv, call_mffs);
42727
42728 tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
42729 tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
42730 build_int_cst (uint64_type_node,
42731 update_exception_mask));
42732
42733 tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
42734 build_int_cst (uint64_type_node,
42735 new_exception_mask));
42736
42737 tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
42738 old_llu_and, new_llu_and);
42739
42740 tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
42741 new_llu_mask);
42742
42743 tree update_mtfsf = build_call_expr (mtfsf, 2,
42744 build_int_cst (unsigned_type_node, 0xff),
42745 fenv_update_mtfsf);
42746
42747 *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
42748 }
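
/* A hedged sketch (names invented) of how the three sequences are used
   when the middle end expands a C11 atomic compound assignment on a
   floating-point operand:

     hold;                                  // save env, mask exceptions
     oldval = *mem;
     while (1)
       {
         newval = oldval OP rhs;            // the FP operation proper
         if (atomic_compare_exchange (mem, &oldval, newval))
           break;
         clear;                             // drop speculatively raised flags
       }
     update;                                // restore env, raise recorded flags  */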
42749
42750 /* Implement the TARGET_OPTAB_SUPPORTED_P hook. */
42751
42752 static bool
42753 rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
42754 optimization_type opt_type)
42755 {
42756 switch (op)
42757 {
42758 case rsqrt_optab:
42759 return (opt_type == OPTIMIZE_FOR_SPEED
42760 && RS6000_RECIP_AUTO_RSQRTE_P (mode1));
42761
42762 default:
42763 return true;
42764 }
42765 }
42766 \f
42767 struct gcc_target targetm = TARGET_INITIALIZER;
42768
42769 #include "gt-rs6000.h"