/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2018 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#define IN_TARGET_CODE 1

#include "coretypes.h"
#include "stringpool.h"
#include "diagnostic-core.h"
#include "insn-attr.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "print-tree.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "sched-int.h"
#include "gimple-fold.h"
#include "gimple-iterator.h"
#include "gimple-ssa.h"
#include "gimple-walk.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "tree-vector-builder.h"
#include "tree-pass.h"
#if TARGET_XCOFF
#include "xcoffout.h"		/* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"		/* for N_SLINE */
#endif
#include "case-cfn-macros.h"
#include "tree-ssa-propagate.h"

/* This file should be included last.  */
#include "target-def.h"
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif
/* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
   systems will also set long double to be IEEE 128-bit.  AIX and Darwin
   explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
   those systems will not pick up this default.  This needs to be after all
   of the include files, so that POWERPC_LINUX and POWERPC_FREEBSD are
   properly defined.  */
#ifndef TARGET_IEEEQUAD_DEFAULT
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
#define TARGET_IEEEQUAD_DEFAULT 1
#else
#define TARGET_IEEEQUAD_DEFAULT 0
#endif
#endif
static pad_direction rs6000_function_arg_padding (machine_mode, const_tree);
/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in fixed area */
  int vrsave_size;		/* size to hold VRSAVE */
  int altivec_padding_size;	/* size of altivec alignment padding */
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
} rs6000_stack_t;
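/* Editor's note (an illustrative sketch, not normative): rs6000_stack_info
   computes these fields so that, roughly,

     total_size = fixed_size + vars_size + parm_size + save_size
		  (rounded up to the ABI's stack alignment)

   with the *_save_offset fields giving the position of each save area
   relative to the initial stack pointer.  */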
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Alternative internal arg pointer for -fsplit-stack.  */
  rtx split_stack_arg_pointer;
  bool split_stack_argp_used;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
  /* The number of components we use for separate shrink-wrapping.  */
  int n_components;
  /* The components already handled by separate shrink-wrapping, which should
     not be considered by the prologue and epilogue.  */
  bool gpr_is_wrapped_separately[32];
  bool fpr_is_wrapped_separately[32];
  bool lr_is_wrapped_separately;
  bool toc_is_wrapped_separately;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section.  */
static int rs6000_pic_labelno;

/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  */
scalar_int_mode rs6000_pmode;
/* Note whether IEEE 128-bit floating point was passed or returned, either as
   the __float128/_Float128 explicit type, or when long double is IEEE 128-bit
   floating point.  We changed the default C++ mangling for these types and we
   may want to generate a weak alias of the old mangling (U10__float128) to the
   new mangling (u9__ieee128).  */
static bool rs6000_passes_ieee128;

/* Generate the mangled name (i.e. U10__float128) used in GCC 8.1, and not the
   name used in current releases (i.e. u9__ieee128).  */
static bool ieee128_mangling_gcc_8_1;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;
#ifdef HAVE_AS_GNU_ATTRIBUTE
# ifndef HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE
# define HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE 0
# endif

/* Flag whether floating point values have been passed/returned.
   Note that this doesn't say whether fprs are used, since the
   Tag_GNU_Power_ABI_FP .gnu.attributes value this flag controls
   should be set for soft-float values passed in gprs and ieee128
   values passed in vsx registers.  */
static bool rs6000_passes_float;
static bool rs6000_passes_long_double;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
static bool rs6000_hard_regno_mode_ok_p
  [NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;
/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized, need_toc_init;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;
static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];
/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];
/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	 RECIP_ALL },
  { "none",	 RECIP_NONE },
  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		  | RECIP_V2DF_DIV) },
  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		  | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
/* Used by __builtin_cpu_is(), mapping from PLATFORM names to values.  */
static const struct
{
  const char *cpu;
  unsigned int cpuid;
} cpu_is_info[] = {
  { "power9",	   PPC_PLATFORM_POWER9 },
  { "power8",	   PPC_PLATFORM_POWER8 },
  { "power7",	   PPC_PLATFORM_POWER7 },
  { "power6x",	   PPC_PLATFORM_POWER6X },
  { "power6",	   PPC_PLATFORM_POWER6 },
  { "power5+",	   PPC_PLATFORM_POWER5_PLUS },
  { "power5",	   PPC_PLATFORM_POWER5 },
  { "ppc970",	   PPC_PLATFORM_PPC970 },
  { "power4",	   PPC_PLATFORM_POWER4 },
  { "ppca2",	   PPC_PLATFORM_PPCA2 },
  { "ppc476",	   PPC_PLATFORM_PPC476 },
  { "ppc464",	   PPC_PLATFORM_PPC464 },
  { "ppc440",	   PPC_PLATFORM_PPC440 },
  { "ppc405",	   PPC_PLATFORM_PPC405 },
  { "ppc-cell-be", PPC_PLATFORM_CELL_BE }
};
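/* Example use from user code (a documented GCC builtin; the callee here is
   just a placeholder).  The string is matched against the platform names in
   the table above:

     if (__builtin_cpu_is ("power9"))
       do_power9_specific_work ();
*/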
/* Used by __builtin_cpu_supports(), mapping from HWCAP names to masks.  */
static const struct
{
  const char *hwcap;
  int mask;
  unsigned int id;		/* 0 = AT_HWCAP, 1 = AT_HWCAP2.  */
} cpu_supports_info[] = {
  /* AT_HWCAP masks.  */
  { "4xxmac",		PPC_FEATURE_HAS_4xxMAC,		0 },
  { "altivec",		PPC_FEATURE_HAS_ALTIVEC,	0 },
  { "arch_2_05",	PPC_FEATURE_ARCH_2_05,		0 },
  { "arch_2_06",	PPC_FEATURE_ARCH_2_06,		0 },
  { "archpmu",		PPC_FEATURE_PERFMON_COMPAT,	0 },
  { "booke",		PPC_FEATURE_BOOKE,		0 },
  { "cellbe",		PPC_FEATURE_CELL_BE,		0 },
  { "dfp",		PPC_FEATURE_HAS_DFP,		0 },
  { "efpdouble",	PPC_FEATURE_HAS_EFP_DOUBLE,	0 },
  { "efpsingle",	PPC_FEATURE_HAS_EFP_SINGLE,	0 },
  { "fpu",		PPC_FEATURE_HAS_FPU,		0 },
  { "ic_snoop",		PPC_FEATURE_ICACHE_SNOOP,	0 },
  { "mmu",		PPC_FEATURE_HAS_MMU,		0 },
  { "notb",		PPC_FEATURE_NO_TB,		0 },
  { "pa6t",		PPC_FEATURE_PA6T,		0 },
  { "power4",		PPC_FEATURE_POWER4,		0 },
  { "power5",		PPC_FEATURE_POWER5,		0 },
  { "power5+",		PPC_FEATURE_POWER5_PLUS,	0 },
  { "power6x",		PPC_FEATURE_POWER6_EXT,		0 },
  { "ppc32",		PPC_FEATURE_32,			0 },
  { "ppc601",		PPC_FEATURE_601_INSTR,		0 },
  { "ppc64",		PPC_FEATURE_64,			0 },
  { "ppcle",		PPC_FEATURE_PPC_LE,		0 },
  { "smt",		PPC_FEATURE_SMT,		0 },
  { "spe",		PPC_FEATURE_HAS_SPE,		0 },
  { "true_le",		PPC_FEATURE_TRUE_LE,		0 },
  { "ucache",		PPC_FEATURE_UNIFIED_CACHE,	0 },
  { "vsx",		PPC_FEATURE_HAS_VSX,		0 },

  /* AT_HWCAP2 masks.  */
  { "arch_2_07",	PPC_FEATURE2_ARCH_2_07,		1 },
  { "dscr",		PPC_FEATURE2_HAS_DSCR,		1 },
  { "ebb",		PPC_FEATURE2_HAS_EBB,		1 },
  { "htm",		PPC_FEATURE2_HAS_HTM,		1 },
  { "htm-nosc",		PPC_FEATURE2_HTM_NOSC,		1 },
  { "htm-no-suspend",	PPC_FEATURE2_HTM_NO_SUSPEND,	1 },
  { "isel",		PPC_FEATURE2_HAS_ISEL,		1 },
  { "tar",		PPC_FEATURE2_HAS_TAR,		1 },
  { "vcrypto",		PPC_FEATURE2_HAS_VEC_CRYPTO,	1 },
  { "arch_3_00",	PPC_FEATURE2_ARCH_3_00,		1 },
  { "ieee128",		PPC_FEATURE2_HAS_IEEE128,	1 },
  { "darn",		PPC_FEATURE2_DARN,		1 },
  { "scv",		PPC_FEATURE2_SCV,		1 }
};
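/* Example use from user code (a documented GCC builtin; the callee is a
   placeholder).  The id field above selects whether the mask is tested
   against the AT_HWCAP (0) or AT_HWCAP2 (1) word:

     if (__builtin_cpu_supports ("vsx"))
       run_vsx_kernel ();
*/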
/* On PowerPC, we have a limited number of target clones that we care about
   which means we can use an array to hold the options, rather than having more
   elaborate data structures to identify each possible variation.  Order the
   clones from the default to the highest ISA.  */
enum {
  CLONE_DEFAULT		= 0,		/* default clone.  */
  CLONE_ISA_2_05,			/* ISA 2.05 (power6).  */
  CLONE_ISA_2_06,			/* ISA 2.06 (power7).  */
  CLONE_ISA_2_07,			/* ISA 2.07 (power8).  */
  CLONE_ISA_3_00,			/* ISA 3.00 (power9).  */
  CLONE_MAX
};
/* Map compiler ISA bits into HWCAP names.  */
struct clone_map {
  HOST_WIDE_INT isa_mask;	/* rs6000_isa mask */
  const char *name;		/* name to use in __builtin_cpu_supports.  */
};

static const struct clone_map rs6000_clone_map[CLONE_MAX] = {
  { 0,				"" },		/* Default options.  */
  { OPTION_MASK_CMPB,		"arch_2_05" },	/* ISA 2.05 (power6).  */
  { OPTION_MASK_POPCNTD,	"arch_2_06" },	/* ISA 2.06 (power7).  */
  { OPTION_MASK_P8_VECTOR,	"arch_2_07" },	/* ISA 2.07 (power8).  */
  { OPTION_MASK_P9_VECTOR,	"arch_3_00" },	/* ISA 3.00 (power9).  */
};
/* Newer LIBCs explicitly export this symbol to declare that they provide
   the AT_PLATFORM and AT_HWCAP/AT_HWCAP2 values in the TCB.  We emit a
   reference to this symbol whenever we expand a CPU builtin, so that
   we never link against an old LIBC.  */
const char *tcb_verification_symbol = "__parse_hwcap_and_convert_at_platform";
/* True if we have expanded a CPU builtin.  */
static bool cpu_builtin_p;
/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */
enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];
/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
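/* For example, with the enum ordering above, IS_STD_REG_TYPE accepts
   GPR_REG_TYPE, VSX_REG_TYPE, ALTIVEC_REG_TYPE, and FPR_REG_TYPE, while
   IS_FP_VECT_REG_TYPE rejects GPR_REG_TYPE:

     IS_STD_REG_TYPE (ALTIVEC_REG_TYPE)	  evaluates to true
     IS_FP_VECT_REG_TYPE (GPR_REG_TYPE)	  evaluates to false
*/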
/* Register classes we care about in secondary reload or go if legitimate
   address.  We only need to worry about GPR, FPR, and Altivec registers here,
   along with an ANY field that is the OR of the 3 register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,		/* General purpose registers.  */
  RELOAD_REG_FPR,		/* Traditional floating point regs.  */
  RELOAD_REG_VMX,		/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,		/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};
/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   3 other classes.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX
/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;		/* Register class name.  */
  int reg;			/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
#define RELOAD_REG_AND_M16	0x40	/* AND -16 addressing.  */
#define RELOAD_REG_QUAD_OFFSET	0x80	/* quad offset is limited.  */
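/* For example, a mode that is valid in a register class and supports both
   reg+reg and reg+offset addressing there would carry an addr_mask of

     RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET   (= 0x0d)

   (editor's illustration; the real masks are filled in per mode when the
   addressing tables are initialized).  */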
/* Register type masks, indexed per mode: the valid addressing modes for each
   reload register type, plus the reload and fusion insns for the mode.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;	  /* INSN to reload for loading.  */
  enum insn_code reload_store;	  /* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;  /* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;  /* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;  /* INSN to move from VSX to GPR.  */
  enum insn_code fusion_gpr_ld;	  /* INSN for fusing gpr ADDIS/loads.  */
				  /* INSNs for fusing addi with loads
				     or stores for each reg. class.  */
  enum insn_code fusion_addi_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addi_st[(int)N_RELOAD_REG];
				  /* INSNs for fusing addis with loads
				     or stores for each reg. class.  */
  enum insn_code fusion_addis_ld[(int)N_RELOAD_REG];
  enum insn_code fusion_addis_st[(int)N_RELOAD_REG];
  addr_mask_type addr_mask[(int)N_RELOAD_REG];	/* Valid address masks.  */
  bool scalar_in_vmx_p;		  /* Scalar value can go in VMX.  */
  bool fused_toc;		  /* Mode supports TOC fusion.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}

/* Return true if we have D-form addressing in altivec registers.  */
static inline bool
mode_supports_vmx_dform (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_OFFSET) != 0);
}

/* Return true if we have D-form addressing in VSX registers.  This addressing
   is more limited than normal d-form addressing in that the offset must be
   aligned on a 16-byte boundary.  */
static inline bool
mode_supports_dq_form (machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_QUAD_OFFSET)
	  != 0);
}
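/* Editor's illustration (a hypothetical fragment, not a GCC function) of how
   the helpers above gate which address forms are accepted when an address is
   validated:

     if (GET_CODE (addr) == PRE_MODIFY
	 && !mode_supports_pre_modify_p (mode))
       return false;

   i.e. an address form is only legitimate for MODE if the corresponding
   RELOAD_REG_* bit is set in reg_addr[mode].addr_mask.  */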
/* Given that there exists at least one variable that is set (produced)
   by OUT_INSN and read (consumed) by IN_INSN, return true iff
   IN_INSN represents one or more memory store operations and none of
   the variables set by OUT_INSN is used by IN_INSN as the address of a
   store operation.  If either IN_INSN or OUT_INSN does not represent
   a "single" RTL SET expression (as loosely defined by the
   implementation of the single_set function) or a PARALLEL with only
   SETs, CLOBBERs, and USEs inside, this function returns false.

   This rs6000-specific version of store_data_bypass_p checks for
   certain conditions that result in assertion failures (and internal
   compiler errors) in the generic store_data_bypass_p function and
   returns false rather than calling store_data_bypass_p if one of the
   problematic conditions is detected.  */

int
rs6000_store_data_bypass_p (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;
  rtx out_pat, in_pat;
  rtx out_exp, in_exp;
  int i, j;

  in_set = single_set (in_insn);
  if (in_set)
    {
      if (MEM_P (SET_DEST (in_set)))
	{
	  out_set = single_set (out_insn);
	  if (!out_set)
	    {
	      out_pat = PATTERN (out_insn);
	      if (GET_CODE (out_pat) == PARALLEL)
		{
		  for (i = 0; i < XVECLEN (out_pat, 0); i++)
		    {
		      out_exp = XVECEXP (out_pat, 0, i);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }
  else
    {
      in_pat = PATTERN (in_insn);
      if (GET_CODE (in_pat) != PARALLEL)
	return false;

      for (i = 0; i < XVECLEN (in_pat, 0); i++)
	{
	  in_exp = XVECEXP (in_pat, 0, i);
	  if ((GET_CODE (in_exp) == CLOBBER) || (GET_CODE (in_exp) == USE))
	    continue;
	  else if (GET_CODE (in_exp) != SET)
	    return false;

	  if (MEM_P (SET_DEST (in_exp)))
	    {
	      out_set = single_set (out_insn);
	      if (!out_set)
		{
		  out_pat = PATTERN (out_insn);
		  if (GET_CODE (out_pat) != PARALLEL)
		    return false;

		  for (j = 0; j < XVECLEN (out_pat, 0); j++)
		    {
		      out_exp = XVECEXP (out_pat, 0, j);
		      if ((GET_CODE (out_exp) == CLOBBER)
			  || (GET_CODE (out_exp) == USE))
			continue;
		      else if (GET_CODE (out_exp) != SET)
			return false;
		    }
		}
	    }
	}
    }

  return store_data_bypass_p (out_insn, in_insn);
}
/* Processor costs (relative to an add) */
const struct processor_costs *rs6000_cost;
/* Instruction size costs on 32bit processors.  */
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction size costs on 64bit processors.  */
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),	/* mulsi */
  COSTS_N_INSNS (1),	/* mulsi_const */
  COSTS_N_INSNS (1),	/* mulsi_const9 */
  COSTS_N_INSNS (1),	/* muldi */
  COSTS_N_INSNS (1),	/* divsi */
  COSTS_N_INSNS (1),	/* divdi */
  COSTS_N_INSNS (1),	/* fp */
  COSTS_N_INSNS (1),	/* dmul */
  COSTS_N_INSNS (1),	/* sdiv */
  COSTS_N_INSNS (1),	/* ddiv */
  128,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on RS64A processors.  */
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),	/* mulsi */
  COSTS_N_INSNS (12),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (34),	/* muldi */
  COSTS_N_INSNS (65),	/* divsi */
  COSTS_N_INSNS (67),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (31),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  128,			/* cache line size */
  0,			/* SF->DF convert */
};
/* Instruction costs on MPCCORE processors.  */
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (6),	/* divsi */
  COSTS_N_INSNS (6),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (10),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC403 processors.  */
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (33),	/* divsi */
  COSTS_N_INSNS (33),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC405 processors.  */
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (35),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (11),	/* fp */
  COSTS_N_INSNS (11),	/* dmul */
  COSTS_N_INSNS (11),	/* sdiv */
  COSTS_N_INSNS (11),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC440 processors.  */
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (34),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC476 processors.  */
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (11),	/* divsi */
  COSTS_N_INSNS (11),	/* divdi */
  COSTS_N_INSNS (6),	/* fp */
  COSTS_N_INSNS (6),	/* dmul */
  COSTS_N_INSNS (19),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* l1 cache line size */
  0,			/* SF->DF convert */
};
/* Instruction costs on PPC601 processors.  */
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (36),	/* divsi */
  COSTS_N_INSNS (36),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC603 processors.  */
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (37),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604 processors.  */
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC604e processors.  */
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (20),	/* divsi */
  COSTS_N_INSNS (20),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC620 processors.  */
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (32),	/* ddiv */
  128,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC630 processors.  */
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (7),	/* muldi */
  COSTS_N_INSNS (21),	/* divsi */
  COSTS_N_INSNS (37),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (21),	/* ddiv */
  128,			/* cache line size */
  0,			/* SF->DF convert */
};
/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,	/* mulsi */
  COSTS_N_INSNS (6/2),		/* mulsi_const */
  COSTS_N_INSNS (6/2),		/* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,	/* muldi */
  COSTS_N_INSNS (38/2),		/* divsi */
  COSTS_N_INSNS (70/2),		/* divdi */
  COSTS_N_INSNS (10/2),		/* fp */
  COSTS_N_INSNS (10/2),		/* dmul */
  COSTS_N_INSNS (74/2),		/* sdiv */
  COSTS_N_INSNS (74/2),		/* ddiv */
  128,				/* cache line size */
  0,				/* SF->DF convert */
};

/* Instruction costs on PPC750 and PPC7400 processors.  */
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (17),	/* divsi */
  COSTS_N_INSNS (17),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (31),	/* ddiv */
  32,			/* cache line size */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC7450 processors.  */
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (23),	/* divsi */
  COSTS_N_INSNS (23),	/* divdi */
  COSTS_N_INSNS (5),	/* fp */
  COSTS_N_INSNS (5),	/* dmul */
  COSTS_N_INSNS (21),	/* sdiv */
  COSTS_N_INSNS (35),	/* ddiv */
  32,			/* cache line size */
  1024,			/* l2 cache */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPC8540 processors.  */
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (29),	/* sdiv */
  COSTS_N_INSNS (29),	/* ddiv */
  32,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on E300C2 and E300C3 cores.  */
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (19),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (4),	/* dmul */
  COSTS_N_INSNS (18),	/* sdiv */
  COSTS_N_INSNS (33),	/* ddiv */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC processors.  */
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (8),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE500MC64 processors.  */
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),	/* mulsi */
  COSTS_N_INSNS (4),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (4),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE5500 processors.  */
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on PPCE6500 processors.  */
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (4),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (14),	/* divsi */
  COSTS_N_INSNS (14),	/* divdi */
  COSTS_N_INSNS (7),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (36),	/* sdiv */
  COSTS_N_INSNS (66),	/* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on AppliedMicro Titan processors.  */
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),	/* mulsi */
  COSTS_N_INSNS (5),	/* mulsi_const */
  COSTS_N_INSNS (5),	/* mulsi_const9 */
  COSTS_N_INSNS (5),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (18),	/* divdi */
  COSTS_N_INSNS (10),	/* fp */
  COSTS_N_INSNS (10),	/* dmul */
  COSTS_N_INSNS (46),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  32,			/* cache line size */
  1,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Instruction costs on POWER4 and POWER5 processors.  */
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (4),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (17),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  1024,			/* l2 cache */
  8,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER6 processors.  */
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),	/* mulsi */
  COSTS_N_INSNS (8),	/* mulsi_const */
  COSTS_N_INSNS (8),	/* mulsi_const9 */
  COSTS_N_INSNS (8),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};

/* Instruction costs on POWER7 processors.  */
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER8 processors.  */
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  12,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER9 processors.  */
struct processor_costs power9_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (8),	/* divsi */
  COSTS_N_INSNS (12),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (18),	/* ddiv */
  128,			/* cache line size */
  8,			/* prefetch streams */
  COSTS_N_INSNS (3),	/* SF->DF convert */
};

/* Instruction costs on POWER A2 processors.  */
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),	/* mulsi */
  COSTS_N_INSNS (16),	/* mulsi_const */
  COSTS_N_INSNS (16),	/* mulsi_const9 */
  COSTS_N_INSNS (16),	/* muldi */
  COSTS_N_INSNS (22),	/* divsi */
  COSTS_N_INSNS (28),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (59),	/* sdiv */
  COSTS_N_INSNS (72),	/* ddiv */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
  0,			/* SF->DF convert */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },
struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};
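/* Editor's note: each RS6000_BUILTIN_n macro above expands one line of
   rs6000-builtin.def into an initializer for this table.  A hypothetical
   entry such as

     RS6000_BUILTIN_2 (FOO, "foo", MASK, ATTR, CODE_FOR_foo)

   would contribute { "foo", CODE_FOR_foo, MASK, ATTR }, matching the field
   order of rs6000_builtin_info_type.  */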
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (combined_fn, tree, tree);
static bool rs6000_debug_legitimate_address_p (machine_mode, rtx, bool);
static struct machine_function *rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (combined_fn, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, machine_mode, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, int, rtx_insn *, int,
				     unsigned int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (machine_mode, machine_mode,
				   machine_mode, machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void htm_init_builtins (void);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static bool rs6000_offsettable_memref_p (rtx, machine_mode, bool);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							   machine_mode,
							   rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							   enum reg_class);
static bool rs6000_debug_secondary_memory_needed (machine_mode,
						  reg_class_t,
						  reg_class_t);
static bool rs6000_debug_can_change_mode_class (machine_mode,
						machine_mode,
						reg_class_t);
static bool rs6000_save_toc_in_prologue_p (void);
static rtx rs6000_internal_arg_pointer (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     machine_mode,
						     rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static HOST_WIDE_INT rs6000_disable_incompatible_switches (void);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context *);
static bool rs6000_keep_leaf_when_profiled () __attribute__ ((unused));
static tree rs6000_fold_builtin (tree, int, tree *, bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_ptr_hash<toc_hash_struct>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;
/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;
  machine_mode mode[4];		/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_ptr_hash<builtin_hash_struct>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "ca",
  /* AltiVec registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
1498 static const char alt_reg_names
[][8] =
1500 "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
1501 "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
1502 "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
1503 "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
1504 "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
1505 "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
1506 "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
1507 "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
1508 "mq", "lr", "ctr", "ap",
1509 "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
1511 /* AltiVec registers. */
1512 "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
1513 "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
1514 "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
1515 "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
1517 /* Soft frame pointer. */
1519 /* HTM SPR registers. */
1520 "tfhar", "tfiar", "texasr"
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req,
       affects_type_identity, handler, exclude } */
  { "altivec",	 1, 1, false, true,  false, false,
    rs6000_handle_altivec_attribute, NULL },
  { "longcall",	 0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "shortcall", 0, 0, false, true,  true,  false,
    rs6000_handle_longcall_attribute, NULL },
  { "ms_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
  { "gcc_struct", 0, 0, false, false, false, false,
    rs6000_handle_struct_attribute, NULL },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL, 0, 0, false, false, false, false, NULL, NULL }
};
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
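/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 1) is 0x40000000, so %v0 maps to
   the most significant bit of the VRSAVE mask and %v31 to the least.  */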
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif
/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif
#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS
#define TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS rs6000_get_separate_components
#undef TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB
#define TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB rs6000_components_for_bb
#undef TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS
#define TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS rs6000_disqualify_components
#undef TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS rs6000_emit_prologue_components
#undef TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS
#define TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS rs6000_emit_epilogue_components
#undef TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS
#define TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS rs6000_set_handled_components

#undef TARGET_EXTRA_LIVE_ON_ENTRY
#define TARGET_EXTRA_LIVE_ON_ENTRY rs6000_live_on_entry

#undef TARGET_INTERNAL_ARG_POINTER
#define TARGET_INTERNAL_ARG_POINTER rs6000_internal_arg_pointer

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_LEGITIMATE_COMBINED_INSN
#define TARGET_LEGITIMATE_COMBINED_INSN rs6000_legitimate_combined_insn

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_SCHED_CAN_SPECULATE_INSN
#define TARGET_SCHED_CAN_SPECULATE_INSN rs6000_sched_can_speculate_insn

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN rs6000_fold_builtin
#undef TARGET_GIMPLE_FOLD_BUILTIN
#define TARGET_GIMPLE_FOLD_BUILTIN rs6000_gimple_fold_builtin

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1724 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1727 #undef TARGET_BINDS_LOCAL_P
1728 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_CANNOT_COPY_INSN_P
#define TARGET_CANNOT_COPY_INSN_P rs6000_cannot_copy_insn_p
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
#undef TARGET_INSN_COST
#define TARGET_INSN_COST rs6000_insn_cost

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE rs6000_promote_function_mode

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB rs6000_return_in_msb

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_PADDING
#define TARGET_FUNCTION_ARG_PADDING rs6000_function_arg_padding
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_TRANSLATE_MODE_ATTRIBUTE
#define TARGET_TRANSLATE_MODE_ATTRIBUTE rs6000_translate_mode_attribute

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_FLOATN_MODE
#define TARGET_FLOATN_MODE rs6000_floatn_mode

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_MD_ASM_ADJUST
#define TARGET_MD_ASM_ADJUST rs6000_md_asm_adjust

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#undef TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION \
  rs6000_builtin_md_vectorized_function

#undef TARGET_STACK_PROTECT_GUARD
#define TARGET_STACK_PROTECT_GUARD rs6000_init_stack_protect_guard
#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail

#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif
/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
#undef TARGET_USE_BLOCKS_FOR_DECL_P
#define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
#undef TARGET_SECONDARY_MEMORY_NEEDED
#define TARGET_SECONDARY_MEMORY_NEEDED rs6000_secondary_memory_needed
#undef TARGET_SECONDARY_MEMORY_NEEDED_MODE
#define TARGET_SECONDARY_MEMORY_NEEDED_MODE rs6000_secondary_memory_needed_mode

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p

#undef TARGET_COMPUTE_PRESSURE_CLASSES
#define TARGET_COMPUTE_PRESSURE_CLASSES rs6000_compute_pressure_classes

#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage

#undef TARGET_SCHED_REASSOCIATION_WIDTH
#define TARGET_SCHED_REASSOCIATION_WIDTH rs6000_reassociation_width

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE rs6000_function_value

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE rs6000_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE rs6000_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT rs6000_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P rs6000_can_inline_p

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p

#undef TARGET_VECTORIZE_VEC_PERM_CONST
#define TARGET_VECTORIZE_VEC_PERM_CONST rs6000_vectorize_vec_perm_const

#undef TARGET_CAN_USE_DOLOOP_P
#define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost

#undef TARGET_ATOMIC_ASSIGN_EXPAND_FENV
#define TARGET_ATOMIC_ASSIGN_EXPAND_FENV rs6000_atomic_assign_expand_fenv

#undef TARGET_LIBGCC_CMP_RETURN_MODE
#define TARGET_LIBGCC_CMP_RETURN_MODE rs6000_abi_word_mode
#undef TARGET_LIBGCC_SHIFT_COUNT_MODE
#define TARGET_LIBGCC_SHIFT_COUNT_MODE rs6000_abi_word_mode
#undef TARGET_UNWIND_WORD_MODE
#define TARGET_UNWIND_WORD_MODE rs6000_abi_word_mode

#undef TARGET_OFFLOAD_OPTIONS
#define TARGET_OFFLOAD_OPTIONS rs6000_offload_options

#undef TARGET_C_MODE_FOR_SUFFIX
#define TARGET_C_MODE_FOR_SUFFIX rs6000_c_mode_for_suffix

#undef TARGET_INVALID_BINARY_OP
#define TARGET_INVALID_BINARY_OP rs6000_invalid_binary_op

#undef TARGET_OPTAB_SUPPORTED_P
#define TARGET_OPTAB_SUPPORTED_P rs6000_optab_supported_p

#undef TARGET_CUSTOM_FUNCTION_DESCRIPTORS
#define TARGET_CUSTOM_FUNCTION_DESCRIPTORS 1

#undef TARGET_COMPARE_VERSION_PRIORITY
#define TARGET_COMPARE_VERSION_PRIORITY rs6000_compare_version_priority

#undef TARGET_GENERATE_VERSION_DISPATCHER_BODY
#define TARGET_GENERATE_VERSION_DISPATCHER_BODY \
  rs6000_generate_version_dispatcher_body

#undef TARGET_GET_FUNCTION_VERSIONS_DISPATCHER
#define TARGET_GET_FUNCTION_VERSIONS_DISPATCHER \
  rs6000_get_function_versions_dispatcher

#undef TARGET_OPTION_FUNCTION_VERSIONS
#define TARGET_OPTION_FUNCTION_VERSIONS common_function_versions

#undef TARGET_HARD_REGNO_NREGS
#define TARGET_HARD_REGNO_NREGS rs6000_hard_regno_nregs_hook
#undef TARGET_HARD_REGNO_MODE_OK
#define TARGET_HARD_REGNO_MODE_OK rs6000_hard_regno_mode_ok

#undef TARGET_MODES_TIEABLE_P
#define TARGET_MODES_TIEABLE_P rs6000_modes_tieable_p

#undef TARGET_HARD_REGNO_CALL_PART_CLOBBERED
#define TARGET_HARD_REGNO_CALL_PART_CLOBBERED \
  rs6000_hard_regno_call_part_clobbered

#undef TARGET_SLOW_UNALIGNED_ACCESS
#define TARGET_SLOW_UNALIGNED_ACCESS rs6000_slow_unaligned_access

#undef TARGET_CAN_CHANGE_MODE_CLASS
#define TARGET_CAN_CHANGE_MODE_CLASS rs6000_can_change_mode_class

#undef TARGET_CONSTANT_ALIGNMENT
#define TARGET_CONSTANT_ALIGNMENT rs6000_constant_alignment

#undef TARGET_STARTING_FRAME_OFFSET
#define TARGET_STARTING_FRAME_OFFSET rs6000_starting_frame_offset
#if TARGET_ELF && RS6000_WEAK
#undef TARGET_ASM_GLOBALIZE_DECL_NAME
#define TARGET_ASM_GLOBALIZE_DECL_NAME rs6000_globalize_decl_name
#endif
/* Processor table.  */
struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const HOST_WIDE_INT target_enable;	/* Target flags to enable.  */
};

static struct rs6000_ptt
  const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};
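
/* Illustrative note (not part of the original source): each RS6000_CPU line
   in rs6000-cpus.def expands to one initializer in the table above.  For
   example, a hypothetical entry

       RS6000_CPU ("power8", PROCESSOR_POWER8, MASK_POWERPC64 | ...)

   would expand to

       { "power8", PROCESSOR_POWER8, MASK_POWERPC64 | ... },

   so adding a processor only requires editing the .def file.  */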
/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */

static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (! strcmp (name, processor_target_table[i].name))
	  return (int)i;
    }

  return -1;
}
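
/* Usage sketch (added for illustration, not in the original): option
   handling can validate a -mcpu= argument with something like

       int index = rs6000_cpu_name_lookup ("power9");
       if (index >= 0)
	 flags |= processor_target_table[index].target_enable;

   where a negative return means the string named no known processor.  */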
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */

static int
rs6000_hard_regno_nregs_internal (int regno, machine_mode mode)
{
  unsigned HOST_WIDE_INT reg_size;

  /* 128-bit floating point usually takes 2 registers, unless it is IEEE
     128-bit floating point that can go in vector registers, which has VSX
     memory addressing.  */
  if (FP_REGNO_P (regno))
    reg_size = (VECTOR_MEM_VSX_P (mode) || FLOAT128_VECTOR_P (mode)
		? UNITS_PER_VSX_WORD
		: UNITS_PER_FP_WORD);

  else if (ALTIVEC_REGNO_P (regno))
    reg_size = UNITS_PER_ALTIVEC_WORD;

  else
    reg_size = UNITS_PER_WORD;

  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
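
/* Worked example (illustrative, not in the original): IBM extended double
   (TFmode under -mabi=ibmlongdouble) is 16 bytes.  In an FPR, where
   UNITS_PER_FP_WORD is 8, it needs (16 + 8 - 1) / 8 = 2 consecutive
   registers.  A 16-byte V4SImode value in an AltiVec register, where
   UNITS_PER_ALTIVEC_WORD is 16, needs (16 + 16 - 1) / 16 = 1 register.  */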
/* Value is 1 if hard register REGNO can hold a value of machine-mode
   MODE.  */

static int
rs6000_hard_regno_mode_ok_uncached (int regno, machine_mode mode)
{
  int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;

  if (COMPLEX_MODE_P (mode))
    mode = GET_MODE_INNER (mode);

  /* PTImode can only go in GPRs.  Quad word memory operations require even/odd
     register combinations, and use PTImode where we need to deal with quad
     word memory operations.  Don't allow quad words in the argument or frame
     pointer registers, just registers 0..31.  */
  if (mode == PTImode)
    return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
	    && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
	    && ((regno & 1) == 0));

  /* VSX registers that overlap the FPR registers are larger than for non-VSX
     implementations.  Don't allow an item to be split between a FP register
     and an Altivec register.  Allow TImode in all VSX registers if the user
     asked for it.  */
  if (TARGET_VSX && VSX_REGNO_P (regno)
      && (VECTOR_MEM_VSX_P (mode)
	  || FLOAT128_VECTOR_P (mode)
	  || reg_addr[mode].scalar_in_vmx_p
	  || mode == TImode
	  || (TARGET_VADDUQM && mode == V1TImode)))
    {
      if (FP_REGNO_P (regno))
	return FP_REGNO_P (last_regno);

      if (ALTIVEC_REGNO_P (regno))
	{
	  if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
	    return 0;

	  return ALTIVEC_REGNO_P (last_regno);
	}
    }

  /* The GPRs can hold any mode, but values bigger than one register
     cannot go past R31.  */
  if (INT_REGNO_P (regno))
    return INT_REGNO_P (last_regno);

  /* The float registers (except for VSX vector modes) can only hold floating
     modes and DImode.  */
  if (FP_REGNO_P (regno))
    {
      if (FLOAT128_VECTOR_P (mode))
	return false;

      if (SCALAR_FLOAT_MODE_P (mode)
	  && (mode != TDmode || (regno % 2) == 0)
	  && FP_REGNO_P (last_regno))
	return 1;

      if (GET_MODE_CLASS (mode) == MODE_INT)
	{
	  if (GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
	    return 1;

	  if (TARGET_P8_VECTOR && (mode == SImode))
	    return 1;

	  if (TARGET_P9_VECTOR && (mode == QImode || mode == HImode))
	    return 1;
	}

      return 0;
    }

  /* The CR register can only hold CC modes.  */
  if (CR_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;

  if (CA_REGNO_P (regno))
    return mode == Pmode || mode == SImode;

  /* AltiVec only in AltiVec registers.  */
  if (ALTIVEC_REGNO_P (regno))
    return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
	    || mode == V1TImode);

  /* We cannot put non-VSX TImode or PTImode anywhere except general register
     and it must be able to fit within the register set.  */

  return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
}
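
/* Illustrative examples (not in the original): TDmode (128-bit decimal
   float) must start on an even FPR, so regno 32 (the first FPR) is accepted
   but regno 33 fails the (regno % 2) == 0 test.  Likewise PTImode is only
   accepted in an even/odd GPR pair, e.g. starting at r4 but never at r5.  */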
/* Implement TARGET_HARD_REGNO_NREGS.  */

static unsigned int
rs6000_hard_regno_nregs_hook (unsigned int regno, machine_mode mode)
{
  return rs6000_hard_regno_nregs[mode][regno];
}

/* Implement TARGET_HARD_REGNO_MODE_OK.  */

static bool
rs6000_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  return rs6000_hard_regno_mode_ok_p[mode][regno];
}
/* Implement TARGET_MODES_TIEABLE_P.

   PTImode cannot tie with other modes because PTImode is restricted to even
   GPR registers, and TImode can go in any GPR as well as VSX registers (PR

   Altivec/VSX vector tests were moved ahead of scalar float mode, so that IEEE
   128-bit floating point on VSX systems ties with other vectors.  */

static bool
rs6000_modes_tieable_p (machine_mode mode1, machine_mode mode2)
{
  if (mode1 == PTImode)
    return mode2 == PTImode;
  if (mode2 == PTImode)
    return false;

  if (ALTIVEC_OR_VSX_VECTOR_MODE (mode1))
    return ALTIVEC_OR_VSX_VECTOR_MODE (mode2);
  if (ALTIVEC_OR_VSX_VECTOR_MODE (mode2))
    return false;

  if (SCALAR_FLOAT_MODE_P (mode1))
    return SCALAR_FLOAT_MODE_P (mode2);
  if (SCALAR_FLOAT_MODE_P (mode2))
    return false;

  if (GET_MODE_CLASS (mode1) == MODE_CC)
    return GET_MODE_CLASS (mode2) == MODE_CC;
  if (GET_MODE_CLASS (mode2) == MODE_CC)
    return false;

  return true;
}
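
/* Illustrative results (not in the original): SFmode ties with DFmode (both
   scalar float), and V4SImode ties with V2DFmode (both AltiVec/VSX vector
   modes), but DImode does not tie with DFmode because one is integer and
   the other scalar float.  */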
/* Implement TARGET_HARD_REGNO_CALL_PART_CLOBBERED.  */

static bool
rs6000_hard_regno_call_part_clobbered (unsigned int regno, machine_mode mode)
{
  if (TARGET_32BIT
      && TARGET_POWERPC64
      && GET_MODE_SIZE (mode) > 4
      && INT_REGNO_P (regno))
    return true;

  if (TARGET_VSX
      && FP_REGNO_P (regno)
      && GET_MODE_SIZE (mode) > 8
      && !FLOAT128_2REG_P (mode))
    return true;

  return false;
}
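
/* Illustrative example (not in the original): on a VSX target, a 16-byte
   vector living in a VSX register that overlaps f14-f31 is only partially
   preserved across calls, because the ABI saves just the low 8 bytes of
   those FPRs; the second test above reports such registers as
   part-clobbered.  */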
/* Print interesting facts about registers.  */
static void
rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
{
  int r, m;

  for (r = first_regno; r <= last_regno; ++r)
    {
      const char *comma = "";
      int len;

      if (first_regno == last_regno)
	fprintf (stderr, "%s:\t", reg_name);
      else
	fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);

      len = 8;
      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
	  {
	    if (len > 70)
	      {
		fprintf (stderr, ",\n\t");
		len = 8;
		comma = "";
	      }

	    if (rs6000_hard_regno_nregs[m][r] > 1)
	      len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
			      rs6000_hard_regno_nregs[m][r]);
	    else
	      len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));

	    comma = ", ";
	  }

      if (call_used_regs[r])
	{
	  if (len > 70)
	    {
	      fprintf (stderr, ",\n\t");
	      len = 8;
	      comma = "";
	    }

	  len += fprintf (stderr, "%s%s", comma, "call-used");
	  comma = ", ";
	}

      if (fixed_regs[r])
	{
	  if (len > 70)
	    {
	      fprintf (stderr, ",\n\t");
	      len = 8;
	      comma = "";
	    }

	  len += fprintf (stderr, "%s%s", comma, "fixed");
	  comma = ", ";
	}

      if (len > 70)
	{
	  fprintf (stderr, ",\n\t");
	  comma = "";
	}

      len += fprintf (stderr, "%sreg-class = %s", comma,
		      reg_class_names[(int)rs6000_regno_regclass[r]]);
      comma = ", ";

      if (len > 70)
	{
	  fprintf (stderr, ",\n\t");
	  comma = "";
	}

      fprintf (stderr, "%sregno = %d\n", comma, r);
    }
}
static const char *
rs6000_debug_vector_unit (enum rs6000_vector v)
{
  const char *ret;

  switch (v)
    {
    case VECTOR_NONE:	   ret = "none";      break;
    case VECTOR_ALTIVEC:   ret = "altivec";   break;
    case VECTOR_VSX:	   ret = "vsx";       break;
    case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
    case VECTOR_OTHER:	   ret = "other";     break;
    default:		   ret = "unknown";   break;
    }

  return ret;
}
/* Inner function printing just the address mask for a particular reload
   register class.  */
DEBUG_FUNCTION char *
rs6000_debug_addr_mask (addr_mask_type mask, bool keep_spaces)
{
  static char ret[8];
  char *p = ret;

  if ((mask & RELOAD_REG_VALID) != 0)
    *p++ = 'v';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_MULTIPLE) != 0)
    *p++ = 'm';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_INDEXED) != 0)
    *p++ = 'i';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_QUAD_OFFSET) != 0)
    *p++ = 'O';
  else if ((mask & RELOAD_REG_OFFSET) != 0)
    *p++ = 'o';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_PRE_INCDEC) != 0)
    *p++ = '+';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_PRE_MODIFY) != 0)
    *p++ = '+';
  else if (keep_spaces)
    *p++ = ' ';

  if ((mask & RELOAD_REG_AND_M16) != 0)
    *p++ = '&';
  else if (keep_spaces)
    *p++ = ' ';

  *p = '\0';

  return ret;
}
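
/* Illustrative note (not in the original): each capability gets one fixed
   column, so a mask with the valid, indexed, and offset bits set comes back
   as a string along the lines of "v io   " when keep_spaces is true, or the
   compact "vio" when it is false.  */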
/* Print the address masks in a human readable fashion.  */
DEBUG_FUNCTION void
rs6000_debug_print_mode (ssize_t m)
{
  ssize_t rc;
  int spaces = 0;
  bool fuse_extra_p;

  fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
  for (rc = 0; rc < N_RELOAD_REG; rc++)
    fprintf (stderr, " %s: %s", reload_reg_map[rc].name,
	     rs6000_debug_addr_mask (reg_addr[m].addr_mask[rc], true));

  if ((reg_addr[m].reload_store != CODE_FOR_nothing)
      || (reg_addr[m].reload_load != CODE_FOR_nothing))
    fprintf (stderr, " Reload=%c%c",
	     (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
	     (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');
  else
    spaces += sizeof (" Reload=sl") - 1;

  if (reg_addr[m].scalar_in_vmx_p)
    {
      fprintf (stderr, "%*s Upper=y", spaces, "");
      spaces = 0;
    }
  else
    spaces += sizeof (" Upper=y") - 1;

  fuse_extra_p = ((reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
		  || reg_addr[m].fused_toc);

  if (!fuse_extra_p)
    {
      for (rc = 0; rc < N_RELOAD_REG; rc++)
	{
	  if (rc != RELOAD_REG_ANY)
	    {
	      if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing
		  || reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing
		  || reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing
		  || reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
		{
		  fuse_extra_p = true;
		  break;
		}
	    }
	}
    }

  if (fuse_extra_p)
    {
      fprintf (stderr, "%*s Fuse:", spaces, "");
      spaces = 0;

      for (rc = 0; rc < N_RELOAD_REG; rc++)
	{
	  if (rc != RELOAD_REG_ANY)
	    {
	      char load, store;

	      if (reg_addr[m].fusion_addis_ld[rc] != CODE_FOR_nothing)
		load = 'l';
	      else if (reg_addr[m].fusion_addi_ld[rc] != CODE_FOR_nothing)
		load = 'L';
	      else
		load = '-';

	      if (reg_addr[m].fusion_addis_st[rc] != CODE_FOR_nothing)
		store = 's';
	      else if (reg_addr[m].fusion_addi_st[rc] != CODE_FOR_nothing)
		store = 'S';
	      else
		store = '-';

	      if (load == '-' && store == '-')
		spaces += 5;
	      else
		{
		  fprintf (stderr, "%*s%c=%c%c", (spaces + 1), "",
			   reload_reg_map[rc].name[0], load, store);
		  spaces = 0;
		}
	    }
	}

      if (reg_addr[m].fusion_gpr_ld != CODE_FOR_nothing)
	{
	  fprintf (stderr, "%*sP8gpr", (spaces + 1), "");
	  spaces = 0;
	}
      else
	spaces += sizeof (" P8gpr") - 1;

      if (reg_addr[m].fused_toc)
	{
	  fprintf (stderr, "%*sToc", (spaces + 1), "");
	  spaces = 0;
	}
      else
	spaces += sizeof (" Toc") - 1;
    }
  else
    spaces += sizeof (" Fuse: G=ls F=ls v=ls P8gpr Toc") - 1;

  if (rs6000_vector_unit[m] != VECTOR_NONE
      || rs6000_vector_mem[m] != VECTOR_NONE)
    fprintf (stderr, "%*s vector: arith=%-10s mem=%s",
	     spaces, "",
	     rs6000_debug_vector_unit (rs6000_vector_unit[m]),
	     rs6000_debug_vector_unit (rs6000_vector_mem[m]));

  fputs ("\n", stderr);
}
#define DEBUG_FMT_ID "%-32s= "
#define DEBUG_FMT_D  DEBUG_FMT_ID "%d\n"
#define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
#define DEBUG_FMT_S  DEBUG_FMT_ID "%s\n"
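
/* Illustrative note (not in the original): DEBUG_FMT_ID left-justifies the
   key in a 32-column field, so

       fprintf (stderr, DEBUG_FMT_D, "tls_size", 32);

   prints a line of the form "tls_size ... = 32" with the '=' aligned at
   the same column for every key.  */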
/* Print various interesting information with -mdebug=reg.  */
static void
rs6000_debug_reg_global (void)
{
  static const char *const tf[2] = { "false", "true" };
  const char *nl = (const char *)0;
  int m;
  size_t m1, m2, v;
  char costly_num[20];
  char nop_num[20];
  char flags_buffer[40];
  const char *costly_str;
  const char *nop_str;
  const char *trace_str;
  const char *abi_str;
  const char *cmodel_str;
  struct cl_target_option cl_opts;

  /* Modes we want tieable information on.  */
  static const machine_mode print_tieable_modes[] = {
    QImode,
    HImode,
    SImode,
    DImode,
    TImode,
    PTImode,
    SFmode,
    DFmode,
    TFmode,
    IFmode,
    KFmode,
    SDmode,
    DDmode,
    TDmode,
    V16QImode,
    V8HImode,
    V4SImode,
    V2DImode,
    V1TImode,
    V32QImode,
    V16HImode,
    V8SImode,
    V4DImode,
    V2TImode,
    V4SFmode,
    V2DFmode,
    V8SFmode,
    V4DFmode,
    CCmode,
    CCUNSmode,
    CCEQmode,
  };

  /* Virtual regs we are interested in.  */
  const static struct {
    int regno;			/* register number.  */
    const char *name;		/* register name.  */
  } virtual_regs[] = {
    { STACK_POINTER_REGNUM,			"stack pointer:" },
    { TOC_REGNUM,				"toc:          " },
    { STATIC_CHAIN_REGNUM,			"static chain: " },
    { RS6000_PIC_OFFSET_TABLE_REGNUM,		"pic offset:   " },
    { HARD_FRAME_POINTER_REGNUM,		"hard frame:   " },
    { ARG_POINTER_REGNUM,			"arg pointer:  " },
    { FRAME_POINTER_REGNUM,			"frame pointer:" },
    { FIRST_PSEUDO_REGISTER,			"first pseudo: " },
    { FIRST_VIRTUAL_REGISTER,			"first virtual:" },
    { VIRTUAL_INCOMING_ARGS_REGNUM,		"incoming_args:" },
    { VIRTUAL_STACK_VARS_REGNUM,		"stack_vars:   " },
    { VIRTUAL_STACK_DYNAMIC_REGNUM,		"stack_dynamic:" },
    { VIRTUAL_OUTGOING_ARGS_REGNUM,		"outgoing_args:" },
    { VIRTUAL_CFA_REGNUM,			"cfa (frame):  " },
    { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM,	"stack boundary:" },
    { LAST_VIRTUAL_REGISTER,			"last virtual: " },
  };

  fputs ("\nHard register information:\n", stderr);
  rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
  rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
  rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
			  LAST_ALTIVEC_REGNO,
			  "vs");
  rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
  rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
  rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
  rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
  rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
  rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");

  fputs ("\nVirtual/stack/frame registers:\n", stderr);
  for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
    fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name,
	     virtual_regs[v].regno);

  fprintf (stderr,
	   "\n"
	   "d  reg_class = %s\n"
	   "f  reg_class = %s\n"
	   "v  reg_class = %s\n"
	   "wa reg_class = %s\n"
	   "wb reg_class = %s\n"
	   "wd reg_class = %s\n"
	   "we reg_class = %s\n"
	   "wf reg_class = %s\n"
	   "wg reg_class = %s\n"
	   "wh reg_class = %s\n"
	   "wi reg_class = %s\n"
	   "wj reg_class = %s\n"
	   "wk reg_class = %s\n"
	   "wl reg_class = %s\n"
	   "wm reg_class = %s\n"
	   "wo reg_class = %s\n"
	   "wp reg_class = %s\n"
	   "wq reg_class = %s\n"
	   "wr reg_class = %s\n"
	   "ws reg_class = %s\n"
	   "wt reg_class = %s\n"
	   "wu reg_class = %s\n"
	   "wv reg_class = %s\n"
	   "ww reg_class = %s\n"
	   "wx reg_class = %s\n"
	   "wy reg_class = %s\n"
	   "wz reg_class = %s\n"
	   "wA reg_class = %s\n"
	   "wH reg_class = %s\n"
	   "wI reg_class = %s\n"
	   "wJ reg_class = %s\n"
	   "wK reg_class = %s\n"
	   "\n",
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wb]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_we]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wo]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wp]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wq]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wA]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wH]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wI]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wJ]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wK]]);

  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    rs6000_debug_print_mode (m);

  fputs ("\n", stderr);

  for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
    {
      machine_mode mode1 = print_tieable_modes[m1];
      bool first_time = true;

      nl = (const char *)0;
      for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
	{
	  machine_mode mode2 = print_tieable_modes[m2];
	  if (mode1 != mode2 && rs6000_modes_tieable_p (mode1, mode2))
	    {
	      if (first_time)
		{
		  fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
		  nl = "\n";
		  first_time = false;
		}

	      fprintf (stderr, " %s", GET_MODE_NAME (mode2));
	    }
	}

      if (!first_time)
	fputs ("\n", stderr);
    }

  if (nl)
    fputs (nl, stderr);

  if (rs6000_recip_control)
    {
      fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_recip_bits[m])
	  fprintf (stderr,
		   "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
		   GET_MODE_NAME (m),
		   (RS6000_RECIP_AUTO_RE_P (m)
		    ? "auto"
		    : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
		   (RS6000_RECIP_AUTO_RSQRTE_P (m)
		    ? "auto"
		    : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));

      fputs ("\n", stderr);
    }

  if (rs6000_cpu_index >= 0)
    {
      const char *name = processor_target_table[rs6000_cpu_index].name;
      HOST_WIDE_INT flags
	= processor_target_table[rs6000_cpu_index].target_enable;

      sprintf (flags_buffer, "-mcpu=%s flags", name);
      rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
    }
  else
    fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");

  if (rs6000_tune_index >= 0)
    {
      const char *name = processor_target_table[rs6000_tune_index].name;
      HOST_WIDE_INT flags
	= processor_target_table[rs6000_tune_index].target_enable;

      sprintf (flags_buffer, "-mtune=%s flags", name);
      rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
    }
  else
    fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");

  cl_target_option_save (&cl_opts, &global_options);
  rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
			    rs6000_isa_flags);

  rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
			    rs6000_isa_flags_explicit);

  rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
				rs6000_builtin_mask);

  rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);

  fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
	   OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");

  switch (rs6000_sched_costly_dep)
    {
    case max_dep_latency:
      costly_str = "max_dep_latency";
      break;

    case no_dep_costly:
      costly_str = "no_dep_costly";
      break;

    case all_deps_costly:
      costly_str = "all_deps_costly";
      break;

    case true_store_to_load_dep_costly:
      costly_str = "true_store_to_load_dep_costly";
      break;

    case store_to_load_dep_costly:
      costly_str = "store_to_load_dep_costly";
      break;

    default:
      costly_str = costly_num;
      sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
      break;
    }

  fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);

  switch (rs6000_sched_insert_nops)
    {
    case sched_finish_regroup_exact:
      nop_str = "sched_finish_regroup_exact";
      break;

    case sched_finish_pad_groups:
      nop_str = "sched_finish_pad_groups";
      break;

    case sched_finish_none:
      nop_str = "sched_finish_none";
      break;

    default:
      nop_str = nop_num;
      sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
      break;
    }

  fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);

  switch (rs6000_sdata)
    {
    case SDATA_NONE:
      break;

    case SDATA_DATA:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
      break;

    case SDATA_SYSV:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
      break;

    case SDATA_EABI:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
      break;
    }

  switch (rs6000_traceback)
    {
    case traceback_default:	trace_str = "default";	break;
    case traceback_none:	trace_str = "none";	break;
    case traceback_part:	trace_str = "part";	break;
    case traceback_full:	trace_str = "full";	break;
    default:			trace_str = "unknown";	break;
    }

  fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);

  switch (rs6000_current_cmodel)
    {
    case CMODEL_SMALL:	cmodel_str = "small";	break;
    case CMODEL_MEDIUM:	cmodel_str = "medium";	break;
    case CMODEL_LARGE:	cmodel_str = "large";	break;
    default:		cmodel_str = "unknown";	break;
    }

  fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);

  switch (rs6000_current_abi)
    {
    case ABI_NONE:	abi_str = "none";	break;
    case ABI_AIX:	abi_str = "aix";	break;
    case ABI_ELFv2:	abi_str = "ELFv2";	break;
    case ABI_V4:	abi_str = "V4";		break;
    case ABI_DARWIN:	abi_str = "darwin";	break;
    default:		abi_str = "unknown";	break;
    }

  fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);

  if (rs6000_altivec_abi)
    fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");

  if (rs6000_darwin64_abi)
    fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");

  fprintf (stderr, DEBUG_FMT_S, "soft_float",
	   (TARGET_SOFT_FLOAT ? "true" : "false"));

  if (TARGET_LINK_STACK)
    fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");

  if (TARGET_P8_FUSION)
    {
      char options[80];

      strcpy (options, (TARGET_P9_FUSION) ? "power9" : "power8");
      if (TARGET_TOC_FUSION)
	strcat (options, ", toc");

      if (TARGET_P8_FUSION_SIGN)
	strcat (options, ", sign");

      fprintf (stderr, DEBUG_FMT_S, "fusion", options);
    }

  fprintf (stderr, DEBUG_FMT_S, "plt-format",
	   TARGET_SECURE_PLT ? "secure" : "bss");
  fprintf (stderr, DEBUG_FMT_S, "struct-return",
	   aix_struct_return ? "aix" : "sysv");
  fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
  fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
  fprintf (stderr, DEBUG_FMT_S, "align_branch",
	   tf[!!rs6000_align_branch_targets]);
  fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
  fprintf (stderr, DEBUG_FMT_D, "long_double_size",
	   rs6000_long_double_type_size);
  if (rs6000_long_double_type_size > 64)
    {
      fprintf (stderr, DEBUG_FMT_S, "long double type",
	       TARGET_IEEEQUAD ? "IEEE" : "IBM");
      fprintf (stderr, DEBUG_FMT_S, "default long double type",
	       TARGET_IEEEQUAD_DEFAULT ? "IEEE" : "IBM");
    }
  fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
	   (int)rs6000_sched_restricted_insns_priority);
  fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
	   (int)END_BUILTINS);
  fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
	   (int)RS6000_BUILTIN_COUNT);

  fprintf (stderr, DEBUG_FMT_D, "Enable float128 on VSX",
	   (int)TARGET_FLOAT128_ENABLE_TYPE);
  if (TARGET_VSX)
    fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
	     (int)VECTOR_ELEMENT_SCALAR_64BIT);

  if (TARGET_DIRECT_MOVE_128)
    fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit mfvsrld element",
	     (int)VECTOR_ELEMENT_MFVSRLD_64BIT);
}
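
/* Usage note (added for illustration, not in the original): this dump is
   driven by the -mdebug=reg option, so a compile along the lines of
   "gcc -mcpu=power9 -mdebug=reg -S foo.c" writes the register, mode, and
   option tables above to stderr during option processing.  */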
/* Update the addr mask bits in reg_addr to help secondary reload and the
   legitimate address support figure out the appropriate addressing to
   use.  */

static void
rs6000_setup_reg_addr_masks (void)
{
  ssize_t rc, reg, m, nregs;
  addr_mask_type any_addr_mask, addr_mask;

  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    {
      machine_mode m2 = (machine_mode) m;
      bool complex_p = false;
      bool small_int_p = (m2 == QImode || m2 == HImode || m2 == SImode);
      size_t msize;

      if (COMPLEX_MODE_P (m2))
	{
	  complex_p = true;
	  m2 = GET_MODE_INNER (m2);
	}

      msize = GET_MODE_SIZE (m2);

      /* SDmode is special in that we want to access it only via REG+REG
	 addressing on power7 and above, since we want to use the LFIWZX and
	 STFIWZX instructions to load it.  */
      bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);

      any_addr_mask = 0;
      for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
	{
	  addr_mask = 0;
	  reg = reload_reg_map[rc].reg;

	  /* Can mode values go in the GPR/FPR/Altivec registers?  */
	  if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
	    {
	      bool small_int_vsx_p = (small_int_p
				      && (rc == RELOAD_REG_FPR
					  || rc == RELOAD_REG_VMX));

	      nregs = rs6000_hard_regno_nregs[m][reg];
	      addr_mask |= RELOAD_REG_VALID;

	      /* Indicate if the mode takes more than 1 physical register.  If
		 it takes a single register, indicate it can do REG+REG
		 addressing.  Small integers in VSX registers can only do
		 REG+REG addressing.  */
	      if (small_int_vsx_p)
		addr_mask |= RELOAD_REG_INDEXED;
	      else if (nregs > 1 || m == BLKmode || complex_p)
		addr_mask |= RELOAD_REG_MULTIPLE;
	      else
		addr_mask |= RELOAD_REG_INDEXED;

	      /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
		 addressing.  If we allow scalars into Altivec registers,
		 don't allow PRE_INC, PRE_DEC, or PRE_MODIFY.

		 For VSX systems, we don't allow update addressing for
		 DFmode/SFmode if those registers can go in both the
		 traditional floating point registers and Altivec registers.
		 The load/store instructions for the Altivec registers do not
		 have update forms.  If we allowed update addressing, it seems
		 to break IV-OPT code using floating point if the index type is
		 int instead of long (PR target/81550 and target/84042).  */

	      if (TARGET_UPDATE
		  && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
		  && msize <= 8
		  && !VECTOR_MODE_P (m2)
		  && !FLOAT128_VECTOR_P (m2)
		  && !complex_p
		  && (m != E_DFmode || !TARGET_VSX)
		  && (m != E_SFmode || !TARGET_P8_VECTOR)
		  && !small_int_vsx_p)
		{
		  addr_mask |= RELOAD_REG_PRE_INCDEC;

		  /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
		     we don't allow PRE_MODIFY for some multi-register
		     operations.  */
		  switch (m)
		    {
		    default:
		      addr_mask |= RELOAD_REG_PRE_MODIFY;
		      break;

		    case E_DImode:
		      if (TARGET_POWERPC64)
			addr_mask |= RELOAD_REG_PRE_MODIFY;
		      break;

		    case E_DFmode:
		    case E_DDmode:
		      if (TARGET_HARD_FLOAT)
			addr_mask |= RELOAD_REG_PRE_MODIFY;
		      break;
		    }
		}
	    }

	  /* GPR and FPR registers can do REG+OFFSET addressing, except
	     possibly for SDmode.  ISA 3.0 (i.e. power9) adds D-form addressing
	     for 64-bit scalars and 32-bit SFmode to altivec registers.  */
	  if ((addr_mask != 0) && !indexed_only_p
	      && msize <= 8
	      && (rc == RELOAD_REG_GPR
		  || ((msize == 8 || m2 == SFmode)
		      && (rc == RELOAD_REG_FPR
			  || (rc == RELOAD_REG_VMX && TARGET_P9_VECTOR)))))
	    addr_mask |= RELOAD_REG_OFFSET;

	  /* VSX registers can do REG+OFFSET addressing if ISA 3.0
	     instructions are enabled.  The offset for 128-bit VSX registers is
	     only 12-bits.  While GPRs can handle the full offset range, VSX
	     registers can only handle the restricted range.  */
	  else if ((addr_mask != 0) && !indexed_only_p
		   && msize == 16 && TARGET_P9_VECTOR
		   && (ALTIVEC_OR_VSX_VECTOR_MODE (m2)
		       || (m2 == TImode && TARGET_VSX)))
	    {
	      addr_mask |= RELOAD_REG_OFFSET;
	      if (rc == RELOAD_REG_FPR || rc == RELOAD_REG_VMX)
		addr_mask |= RELOAD_REG_QUAD_OFFSET;
	    }

	  /* VMX registers can do (REG & -16) and ((REG+REG) & -16)
	     addressing on 128-bit types.  */
	  if (rc == RELOAD_REG_VMX && msize == 16
	      && (addr_mask & RELOAD_REG_VALID) != 0)
	    addr_mask |= RELOAD_REG_AND_M16;

	  reg_addr[m].addr_mask[rc] = addr_mask;
	  any_addr_mask |= addr_mask;
	}

      reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
    }
}
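
/* Worked example (illustrative, not in the original): on a 64-bit VSX
   target, DImode in a GPR ends up with the valid, indexed, offset, and
   pre-increment/pre-modify bits set, while DFmode in an Altivec register
   gets no update forms (the VMX load/store instructions have none) and
   only gains the REG+OFFSET bit on ISA 3.0, which is exactly the kind of
   distinction the secondary-reload code needs.  */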
/* Initialize the various global tables that are based on register size.  */
static void
rs6000_init_hard_regno_mode_ok (bool global_init_p)
{
  ssize_t r, m, c;
  int align64;
  int align32;

  /* Precalculate REGNO_REG_CLASS.  */
  rs6000_regno_regclass[0] = GENERAL_REGS;
  for (r = 1; r < 32; ++r)
    rs6000_regno_regclass[r] = BASE_REGS;

  for (r = 32; r < 64; ++r)
    rs6000_regno_regclass[r] = FLOAT_REGS;

  for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
    rs6000_regno_regclass[r] = NO_REGS;

  for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
    rs6000_regno_regclass[r] = ALTIVEC_REGS;

  rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
  for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
    rs6000_regno_regclass[r] = CR_REGS;

  rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
  rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
  rs6000_regno_regclass[CA_REGNO] = NO_REGS;
  rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
  rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;

  /* Precalculate register class to simpler reload register class.  We don't
     need all of the register classes that are combinations of different
     classes, just the simple ones that have constraint letters.  */
  for (c = 0; c < N_REG_CLASSES; c++)
    reg_class_to_reg_type[c] = NO_REG_TYPE;

  reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
  reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
  reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
  reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
  reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;

  if (TARGET_VSX)
    {
      reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
      reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
    }
  else
    {
      reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
      reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
    }

  /* Precalculate the valid memory formats as well as the vector information,
     this must be set up before the rs6000_hard_regno_nregs_internal calls
     below.  */
  gcc_assert ((int)VECTOR_NONE == 0);
  memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
  memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));

  gcc_assert ((int)CODE_FOR_nothing == 0);
  memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));

  gcc_assert ((int)NO_REGS == 0);
  memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
  /* The VSX hardware allows native alignment for vectors, but control whether
     the compiler believes it can use native alignment or still uses 128-bit
     alignment.  */
  if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
    {
      align64 = 64;
      align32 = 32;
    }
  else
    {
      align64 = 128;
      align32 = 128;
    }

  /* KF mode (IEEE 128-bit in VSX registers).  We do not have arithmetic, so
     only set the memory modes.  Include TFmode if -mabi=ieeelongdouble.  */
  if (TARGET_FLOAT128_TYPE)
    {
      rs6000_vector_mem[KFmode] = VECTOR_VSX;
      rs6000_vector_align[KFmode] = 128;

      if (FLOAT128_IEEE_P (TFmode))
	{
	  rs6000_vector_mem[TFmode] = VECTOR_VSX;
	  rs6000_vector_align[TFmode] = 128;
	}
    }

  /* V2DF mode, VSX only.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
      rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
      rs6000_vector_align[V2DFmode] = align64;
    }

  /* V4SF mode, either VSX or Altivec.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
      rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
      rs6000_vector_align[V4SFmode] = align32;
    }
  else if (TARGET_ALTIVEC)
    {
      rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
      rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
      rs6000_vector_align[V4SFmode] = align32;
    }

  /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
     and stores.  */
  if (TARGET_ALTIVEC)
    {
      rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
      rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
      rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
      rs6000_vector_align[V4SImode] = align32;
      rs6000_vector_align[V8HImode] = align32;
      rs6000_vector_align[V16QImode] = align32;

      if (TARGET_VSX)
	{
	  rs6000_vector_mem[V4SImode] = VECTOR_VSX;
	  rs6000_vector_mem[V8HImode] = VECTOR_VSX;
	  rs6000_vector_mem[V16QImode] = VECTOR_VSX;
	}
      else
	{
	  rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
	  rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
	  rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
	}
    }

  /* V2DImode, full mode depends on ISA 2.07 vector mode.  Allow under VSX to
     do insert/splat/extract.  Altivec doesn't have 64-bit integer support.  */
  if (TARGET_VSX)
    {
      rs6000_vector_mem[V2DImode] = VECTOR_VSX;
      rs6000_vector_unit[V2DImode]
	= (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
      rs6000_vector_align[V2DImode] = align64;

      rs6000_vector_mem[V1TImode] = VECTOR_VSX;
      rs6000_vector_unit[V1TImode]
	= (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
      rs6000_vector_align[V1TImode] = 128;
    }

  /* DFmode, see if we want to use the VSX unit.  Memory is handled
     differently, so don't set rs6000_vector_mem.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[DFmode] = VECTOR_VSX;
      rs6000_vector_align[DFmode] = 64;
    }

  /* SFmode, see if we want to use the VSX unit.  */
  if (TARGET_P8_VECTOR)
    {
      rs6000_vector_unit[SFmode] = VECTOR_VSX;
      rs6000_vector_align[SFmode] = 32;
    }

  /* Allow TImode in VSX register and set the VSX memory macros.  */
  if (TARGET_VSX)
    {
      rs6000_vector_mem[TImode] = VECTOR_VSX;
      rs6000_vector_align[TImode] = align64;
    }
  /* Register class constraints for the constraints that depend on compile
     switches.  When the VSX code was added, different constraints were added
     based on the type (DFmode, V2DFmode, V4SFmode).  For the vector types, all
     of the VSX registers are used.  The register classes for scalar floating
     point types is set, based on whether we allow that type into the upper
     (Altivec) registers.  GCC has register classes to target the Altivec
     registers for load/store operations, to select using a VSX memory
     operation instead of the traditional floating point operation.  The
     constraints are:

	d  - Register class to use with traditional DFmode instructions.
	f  - Register class to use with traditional SFmode instructions.
	v  - Altivec register.
	wa - Any VSX register.
	wc - Reserved to represent individual CR bits (used in LLVM).
	wd - Preferred register class for V2DFmode.
	wf - Preferred register class for V4SFmode.
	wg - Float register for power6x move insns.
	wh - FP register for direct move instructions.
	wi - FP or VSX register to hold 64-bit integers for VSX insns.
	wj - FP or VSX register to hold 64-bit integers for direct moves.
	wk - FP or VSX register to hold 64-bit doubles for direct moves.
	wl - Float register if we can do 32-bit signed int loads.
	wm - VSX register for ISA 2.07 direct move operations.
	wn - always NO_REGS.
	wr - GPR if 64-bit mode is permitted.
	ws - Register class to do ISA 2.06 DF operations.
	wt - VSX register for TImode in VSX registers.
	wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
	wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
	ww - Register class to do SF conversions in with VSX operations.
	wx - Float register if we can do 32-bit int stores.
	wy - Register class to do ISA 2.07 SF operations.
	wz - Float register if we can do 32-bit unsigned int loads.
	wH - Altivec register if SImode is allowed in VSX registers.
	wI - VSX register if SImode is allowed in VSX registers.
	wJ - VSX register if QImode/HImode are allowed in VSX registers.
	wK - Altivec register if QImode/HImode are allowed in VSX registers.  */

  if (TARGET_HARD_FLOAT)
    {
      rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;	/* SFmode */
      rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;	/* DFmode */
    }

  if (TARGET_VSX)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;	/* V2DFmode */
      rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;	/* V4SFmode */
      rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS;	/* DFmode */
      rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;	/* DFmode */
      rs6000_constraints[RS6000_CONSTRAINT_wi] = VSX_REGS;	/* DImode */
      rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS;	/* TImode */
    }

  /* Add conditional constraints based on various options, to allow us to
     collapse multiple insn patterns.  */
  if (TARGET_ALTIVEC)
    rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;

  if (TARGET_MFPGPR)						/* DFmode */
    rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;

  if (TARGET_LFIWAX)
    rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS;	/* DImode */

  if (TARGET_DIRECT_MOVE)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wj]			/* DImode */
	= rs6000_constraints[RS6000_CONSTRAINT_wi];
      rs6000_constraints[RS6000_CONSTRAINT_wk]			/* DFmode */
	= rs6000_constraints[RS6000_CONSTRAINT_ws];
      rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
    }

  if (TARGET_POWERPC64)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wA] = BASE_REGS;
    }

  if (TARGET_P8_VECTOR)						/* SFmode */
    {
      rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
    }
  else if (TARGET_VSX)
    rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;

  if (TARGET_STFIWX)
    rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS;	/* DImode */

  if (TARGET_LFIWZX)
    rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS;	/* DImode */

  if (TARGET_FLOAT128_TYPE)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wq] = VSX_REGS;	/* KFmode */
      if (FLOAT128_IEEE_P (TFmode))
	rs6000_constraints[RS6000_CONSTRAINT_wp] = VSX_REGS;	/* TFmode */
    }

  if (TARGET_P9_VECTOR)
    {
      /* Support for new D-form instructions.  */
      rs6000_constraints[RS6000_CONSTRAINT_wb] = ALTIVEC_REGS;

      /* Support for ISA 3.0 (power9) vectors.  */
      rs6000_constraints[RS6000_CONSTRAINT_wo] = VSX_REGS;
    }

  /* Support for new direct moves (ISA 3.0 + 64bit).  */
  if (TARGET_DIRECT_MOVE_128)
    rs6000_constraints[RS6000_CONSTRAINT_we] = VSX_REGS;

  /* Support small integers in VSX registers.  */
  if (TARGET_P8_VECTOR)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wH] = ALTIVEC_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wI] = FLOAT_REGS;
      if (TARGET_P9_VECTOR)
	{
	  rs6000_constraints[RS6000_CONSTRAINT_wJ] = FLOAT_REGS;
	  rs6000_constraints[RS6000_CONSTRAINT_wK] = ALTIVEC_REGS;
	}
    }
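
  /* Illustrative note (not in the original): machine-description patterns
     use these letters so one pattern can serve several configurations.  A
     hypothetical insn operand written as

	 (match_operand:DF 0 "vsx_register_operand" "=ws")

     accepts whatever class this code assigned to RS6000_CONSTRAINT_ws,
     i.e. VSX_REGS when VSX is enabled and NO_REGS (rejecting that
     alternative) otherwise.  */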
  /* Set up the reload helper and direct move functions.  */
  if (TARGET_VSX || TARGET_ALTIVEC)
    {
      if (TARGET_64BIT)
	{
	  reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
	  reg_addr[V16QImode].reload_load  = CODE_FOR_reload_v16qi_di_load;
	  reg_addr[V8HImode].reload_store  = CODE_FOR_reload_v8hi_di_store;
	  reg_addr[V8HImode].reload_load   = CODE_FOR_reload_v8hi_di_load;
	  reg_addr[V4SImode].reload_store  = CODE_FOR_reload_v4si_di_store;
	  reg_addr[V4SImode].reload_load   = CODE_FOR_reload_v4si_di_load;
	  reg_addr[V2DImode].reload_store  = CODE_FOR_reload_v2di_di_store;
	  reg_addr[V2DImode].reload_load   = CODE_FOR_reload_v2di_di_load;
	  reg_addr[V1TImode].reload_store  = CODE_FOR_reload_v1ti_di_store;
	  reg_addr[V1TImode].reload_load   = CODE_FOR_reload_v1ti_di_load;
	  reg_addr[V4SFmode].reload_store  = CODE_FOR_reload_v4sf_di_store;
	  reg_addr[V4SFmode].reload_load   = CODE_FOR_reload_v4sf_di_load;
	  reg_addr[V2DFmode].reload_store  = CODE_FOR_reload_v2df_di_store;
	  reg_addr[V2DFmode].reload_load   = CODE_FOR_reload_v2df_di_load;
	  reg_addr[DFmode].reload_store    = CODE_FOR_reload_df_di_store;
	  reg_addr[DFmode].reload_load     = CODE_FOR_reload_df_di_load;
	  reg_addr[DDmode].reload_store    = CODE_FOR_reload_dd_di_store;
	  reg_addr[DDmode].reload_load     = CODE_FOR_reload_dd_di_load;
	  reg_addr[SFmode].reload_store    = CODE_FOR_reload_sf_di_store;
	  reg_addr[SFmode].reload_load     = CODE_FOR_reload_sf_di_load;

	  if (FLOAT128_VECTOR_P (KFmode))
	    {
	      reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_di_store;
	      reg_addr[KFmode].reload_load  = CODE_FOR_reload_kf_di_load;
	    }

	  if (FLOAT128_VECTOR_P (TFmode))
	    {
	      reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_di_store;
	      reg_addr[TFmode].reload_load  = CODE_FOR_reload_tf_di_load;
	    }

	  /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
	     available.  */
	  if (TARGET_NO_SDMODE_STACK)
	    {
	      reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
	      reg_addr[SDmode].reload_load  = CODE_FOR_reload_sd_di_load;
	    }

	  if (TARGET_VSX)
	    {
	      reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
	      reg_addr[TImode].reload_load  = CODE_FOR_reload_ti_di_load;
	    }

	  if (TARGET_DIRECT_MOVE && !TARGET_DIRECT_MOVE_128)
	    {
	      reg_addr[TImode].reload_gpr_vsx    = CODE_FOR_reload_gpr_from_vsxti;
	      reg_addr[V1TImode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv1ti;
	      reg_addr[V2DFmode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv2df;
	      reg_addr[V2DImode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv2di;
	      reg_addr[V4SFmode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv4sf;
	      reg_addr[V4SImode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv4si;
	      reg_addr[V8HImode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv8hi;
	      reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
	      reg_addr[SFmode].reload_gpr_vsx    = CODE_FOR_reload_gpr_from_vsxsf;

	      reg_addr[TImode].reload_vsx_gpr    = CODE_FOR_reload_vsx_from_gprti;
	      reg_addr[V1TImode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv1ti;
	      reg_addr[V2DFmode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv2df;
	      reg_addr[V2DImode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv2di;
	      reg_addr[V4SFmode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv4sf;
	      reg_addr[V4SImode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv4si;
	      reg_addr[V8HImode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv8hi;
	      reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
	      reg_addr[SFmode].reload_vsx_gpr    = CODE_FOR_reload_vsx_from_gprsf;

	      if (FLOAT128_VECTOR_P (KFmode))
		{
		  reg_addr[KFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxkf;
		  reg_addr[KFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprkf;
		}

	      if (FLOAT128_VECTOR_P (TFmode))
		{
		  reg_addr[TFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxtf;
		  reg_addr[TFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprtf;
		}
	    }
	}
      else
	{
	  reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
	  reg_addr[V16QImode].reload_load  = CODE_FOR_reload_v16qi_si_load;
	  reg_addr[V8HImode].reload_store  = CODE_FOR_reload_v8hi_si_store;
	  reg_addr[V8HImode].reload_load   = CODE_FOR_reload_v8hi_si_load;
	  reg_addr[V4SImode].reload_store  = CODE_FOR_reload_v4si_si_store;
	  reg_addr[V4SImode].reload_load   = CODE_FOR_reload_v4si_si_load;
	  reg_addr[V2DImode].reload_store  = CODE_FOR_reload_v2di_si_store;
	  reg_addr[V2DImode].reload_load   = CODE_FOR_reload_v2di_si_load;
	  reg_addr[V1TImode].reload_store  = CODE_FOR_reload_v1ti_si_store;
	  reg_addr[V1TImode].reload_load   = CODE_FOR_reload_v1ti_si_load;
	  reg_addr[V4SFmode].reload_store  = CODE_FOR_reload_v4sf_si_store;
	  reg_addr[V4SFmode].reload_load   = CODE_FOR_reload_v4sf_si_load;
	  reg_addr[V2DFmode].reload_store  = CODE_FOR_reload_v2df_si_store;
	  reg_addr[V2DFmode].reload_load   = CODE_FOR_reload_v2df_si_load;
	  reg_addr[DFmode].reload_store    = CODE_FOR_reload_df_si_store;
	  reg_addr[DFmode].reload_load     = CODE_FOR_reload_df_si_load;
	  reg_addr[DDmode].reload_store    = CODE_FOR_reload_dd_si_store;
	  reg_addr[DDmode].reload_load     = CODE_FOR_reload_dd_si_load;
	  reg_addr[SFmode].reload_store    = CODE_FOR_reload_sf_si_store;
	  reg_addr[SFmode].reload_load     = CODE_FOR_reload_sf_si_load;

	  if (FLOAT128_VECTOR_P (KFmode))
	    {
	      reg_addr[KFmode].reload_store = CODE_FOR_reload_kf_si_store;
	      reg_addr[KFmode].reload_load  = CODE_FOR_reload_kf_si_load;
	    }

	  if (FLOAT128_IEEE_P (TFmode))
	    {
	      reg_addr[TFmode].reload_store = CODE_FOR_reload_tf_si_store;
	      reg_addr[TFmode].reload_load  = CODE_FOR_reload_tf_si_load;
	    }

	  /* Only provide a reload handler for SDmode if lfiwzx/stfiwx are
	     available.  */
	  if (TARGET_NO_SDMODE_STACK)
	    {
	      reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
	      reg_addr[SDmode].reload_load  = CODE_FOR_reload_sd_si_load;
	    }

	  if (TARGET_VSX)
	    {
	      reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
	      reg_addr[TImode].reload_load  = CODE_FOR_reload_ti_si_load;
	    }

	  if (TARGET_DIRECT_MOVE)
	    {
	      reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
	      reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
	      reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
	    }
	}

      if (TARGET_VSX)
	{
	  reg_addr[DFmode].scalar_in_vmx_p = true;
	  reg_addr[DImode].scalar_in_vmx_p = true;

	  if (TARGET_P8_VECTOR)
	    {
	      reg_addr[SFmode].scalar_in_vmx_p = true;
	      reg_addr[SImode].scalar_in_vmx_p = true;

	      if (TARGET_P9_VECTOR)
		{
		  reg_addr[HImode].scalar_in_vmx_p = true;
		  reg_addr[QImode].scalar_in_vmx_p = true;
		}
	    }
	}
    }
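
  /* Illustrative note (not in the original): the CODE_FOR_reload_*_di_* vs.
     CODE_FOR_reload_*_si_* split above mirrors the pointer size; each entry
     names the insn pattern that secondary reload expands when a value of
     that mode needs a scratch register to be loaded or stored.  */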
3543 /* Setup the fusion operations. */
3544 if (TARGET_P8_FUSION
)
3546 reg_addr
[QImode
].fusion_gpr_ld
= CODE_FOR_fusion_gpr_load_qi
;
3547 reg_addr
[HImode
].fusion_gpr_ld
= CODE_FOR_fusion_gpr_load_hi
;
3548 reg_addr
[SImode
].fusion_gpr_ld
= CODE_FOR_fusion_gpr_load_si
;
3550 reg_addr
[DImode
].fusion_gpr_ld
= CODE_FOR_fusion_gpr_load_di
;
  if (TARGET_P9_FUSION)
    {
      struct fuse_insns
      {
        enum machine_mode mode;                 /* mode of the fused type.  */
        enum machine_mode pmode;                /* pointer mode.  */
        enum rs6000_reload_reg_type rtype;      /* register type.  */
        enum insn_code load;                    /* load insn.  */
        enum insn_code store;                   /* store insn.  */
      };

      static const struct fuse_insns addis_insns[] = {
        { E_SFmode, E_DImode, RELOAD_REG_FPR,
          CODE_FOR_fusion_vsx_di_sf_load,
          CODE_FOR_fusion_vsx_di_sf_store },

        { E_SFmode, E_SImode, RELOAD_REG_FPR,
          CODE_FOR_fusion_vsx_si_sf_load,
          CODE_FOR_fusion_vsx_si_sf_store },

        { E_DFmode, E_DImode, RELOAD_REG_FPR,
          CODE_FOR_fusion_vsx_di_df_load,
          CODE_FOR_fusion_vsx_di_df_store },

        { E_DFmode, E_SImode, RELOAD_REG_FPR,
          CODE_FOR_fusion_vsx_si_df_load,
          CODE_FOR_fusion_vsx_si_df_store },

        { E_DImode, E_DImode, RELOAD_REG_FPR,
          CODE_FOR_fusion_vsx_di_di_load,
          CODE_FOR_fusion_vsx_di_di_store },

        { E_DImode, E_SImode, RELOAD_REG_FPR,
          CODE_FOR_fusion_vsx_si_di_load,
          CODE_FOR_fusion_vsx_si_di_store },

        { E_QImode, E_DImode, RELOAD_REG_GPR,
          CODE_FOR_fusion_gpr_di_qi_load,
          CODE_FOR_fusion_gpr_di_qi_store },

        { E_QImode, E_SImode, RELOAD_REG_GPR,
          CODE_FOR_fusion_gpr_si_qi_load,
          CODE_FOR_fusion_gpr_si_qi_store },

        { E_HImode, E_DImode, RELOAD_REG_GPR,
          CODE_FOR_fusion_gpr_di_hi_load,
          CODE_FOR_fusion_gpr_di_hi_store },

        { E_HImode, E_SImode, RELOAD_REG_GPR,
          CODE_FOR_fusion_gpr_si_hi_load,
          CODE_FOR_fusion_gpr_si_hi_store },

        { E_SImode, E_DImode, RELOAD_REG_GPR,
          CODE_FOR_fusion_gpr_di_si_load,
          CODE_FOR_fusion_gpr_di_si_store },

        { E_SImode, E_SImode, RELOAD_REG_GPR,
          CODE_FOR_fusion_gpr_si_si_load,
          CODE_FOR_fusion_gpr_si_si_store },

        { E_SFmode, E_DImode, RELOAD_REG_GPR,
          CODE_FOR_fusion_gpr_di_sf_load,
          CODE_FOR_fusion_gpr_di_sf_store },

        { E_SFmode, E_SImode, RELOAD_REG_GPR,
          CODE_FOR_fusion_gpr_si_sf_load,
          CODE_FOR_fusion_gpr_si_sf_store },

        { E_DImode, E_DImode, RELOAD_REG_GPR,
          CODE_FOR_fusion_gpr_di_di_load,
          CODE_FOR_fusion_gpr_di_di_store },

        { E_DFmode, E_DImode, RELOAD_REG_GPR,
          CODE_FOR_fusion_gpr_di_df_load,
          CODE_FOR_fusion_gpr_di_df_store },
      };

      machine_mode cur_pmode = Pmode;
      size_t i;

      for (i = 0; i < ARRAY_SIZE (addis_insns); i++)
        {
          machine_mode xmode = addis_insns[i].mode;
          enum rs6000_reload_reg_type rtype = addis_insns[i].rtype;

          if (addis_insns[i].pmode != cur_pmode)
            continue;

          if (rtype == RELOAD_REG_FPR && !TARGET_HARD_FLOAT)
            continue;

          reg_addr[xmode].fusion_addis_ld[rtype] = addis_insns[i].load;
          reg_addr[xmode].fusion_addis_st[rtype] = addis_insns[i].store;

          if (rtype == RELOAD_REG_FPR && TARGET_P9_VECTOR)
            {
              reg_addr[xmode].fusion_addis_ld[RELOAD_REG_VMX]
                = addis_insns[i].load;
              reg_addr[xmode].fusion_addis_st[RELOAD_REG_VMX]
                = addis_insns[i].store;
            }
        }
    }
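
  /* Note that the addis fusion table above is filtered by cur_pmode, so only
     the rows whose pointer mode matches the selected Pmode (SImode for -m32,
     DImode for -m64) actually install load/store insn codes.  */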
  /* Note which types we support fusing TOC setup plus memory insn.  We only do
     fused TOCs for medium/large code models.  */
  if (TARGET_P8_FUSION && TARGET_TOC_FUSION && TARGET_POWERPC64
      && (TARGET_CMODEL != CMODEL_SMALL))
    {
      reg_addr[QImode].fused_toc = true;
      reg_addr[HImode].fused_toc = true;
      reg_addr[SImode].fused_toc = true;
      reg_addr[DImode].fused_toc = true;
      if (TARGET_HARD_FLOAT)
        {
          reg_addr[SFmode].fused_toc = true;
          reg_addr[DFmode].fused_toc = true;
        }
    }
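
  /* With TOC fusion active, a medium/large-model TOC reference such as the
     "addis tmp,r2,sym@toc@ha" / "ld reg,sym@toc@l(tmp)" pair can be treated
     as one fused sequence for the modes flagged above.  */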
  /* Precalculate HARD_REGNO_NREGS.  */
  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      rs6000_hard_regno_nregs[m][r]
        = rs6000_hard_regno_nregs_internal (r, (machine_mode)m);

  /* Precalculate TARGET_HARD_REGNO_MODE_OK.  */
  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      if (rs6000_hard_regno_mode_ok_uncached (r, (machine_mode)m))
        rs6000_hard_regno_mode_ok_p[m][r] = true;
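
  /* For example, with 64-bit GPRs a V4SImode (16-byte) value needs two
     general purpose registers, while a single 128-bit VSX register holds it
     whole; caching the tables avoids recomputing this on every query.  */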
  /* Precalculate CLASS_MAX_NREGS sizes.  */
  for (c = 0; c < LIM_REG_CLASSES; ++c)
    {
      int reg_size;

      if (TARGET_VSX && VSX_REG_CLASS_P (c))
        reg_size = UNITS_PER_VSX_WORD;
      else if (c == ALTIVEC_REGS)
        reg_size = UNITS_PER_ALTIVEC_WORD;
      else if (c == FLOAT_REGS)
        reg_size = UNITS_PER_FP_WORD;
      else
        reg_size = UNITS_PER_WORD;

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
        {
          machine_mode m2 = (machine_mode)m;
          int reg_size2 = reg_size;

          /* TDmode & IBM 128-bit floating point always takes 2 registers,
             even if the internal register size is 32 bits.  */
          if (TARGET_VSX && VSX_REG_CLASS_P (c) && FLOAT128_2REG_P (m))
            reg_size2 = UNITS_PER_FP_WORD;

          rs6000_class_max_nregs[m][c]
            = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
        }
    }
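
  /* The expression above is a ceiling division.  E.g. IBM extended double
     (16 bytes) in FLOAT_REGS with 8-byte FP words gives
     (16 + 8 - 1) / 8 == 2 registers.  */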
  /* Calculate which modes to automatically generate code to use the
     reciprocal divide and square root instructions.  In the future, possibly
     automatically generate the instructions even if the user did not specify
     -mrecip.  The older machines' double-precision reciprocal sqrt estimate is
     not accurate enough.  */
  memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
  if (TARGET_FRES)
    rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (TARGET_FRE)
    rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;

  if (TARGET_FRSQRTES)
    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (TARGET_FRSQRTE)
    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (rs6000_recip_control)
    {
      if (!flag_finite_math_only)
        warning (0, "%qs requires %qs or %qs", "-mrecip", "-ffinite-math",
                 "-ffast-math");
      if (flag_trapping_math)
        warning (0, "%qs requires %qs or %qs", "-mrecip",
                 "-fno-trapping-math", "-ffast-math");
      if (!flag_reciprocal_math)
        warning (0, "%qs requires %qs or %qs", "-mrecip", "-freciprocal-math",
                 "-ffast-math");
      if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
        {
          if (RS6000_RECIP_HAVE_RE_P (SFmode)
              && (rs6000_recip_control & RECIP_SF_DIV) != 0)
            rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

          if (RS6000_RECIP_HAVE_RE_P (DFmode)
              && (rs6000_recip_control & RECIP_DF_DIV) != 0)
            rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

          if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
              && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
            rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

          if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
              && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
            rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

          if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
              && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
            rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

          if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
              && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
            rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

          if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
              && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
            rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

          if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
              && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
            rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
        }
    }
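
  /* To summarize the split above: the RS6000_RECIP_MASK_HAVE_* bits record
     that the hardware has an estimate instruction, while the *_AUTO_* bits
     say the compiler may use it implicitly; AUTO is only set when the
     fast-math style flags make the Newton-Raphson refinement sequences
     safe.  */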
  /* Update the addr mask bits in reg_addr to help secondary reload and the
     legitimate address support figure out the appropriate addressing to
     use.  */
  rs6000_setup_reg_addr_masks ();

  if (global_init_p || TARGET_DEBUG_TARGET)
    {
      if (TARGET_DEBUG_REG)
        rs6000_debug_reg_global ();

      if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
        fprintf (stderr,
                 "SImode variable mult cost       = %d\n"
                 "SImode constant mult cost       = %d\n"
                 "SImode short constant mult cost = %d\n"
                 "DImode multiplication cost      = %d\n"
                 "SImode division cost            = %d\n"
                 "DImode division cost            = %d\n"
                 "Simple fp operation cost        = %d\n"
                 "DFmode multiplication cost      = %d\n"
                 "SFmode division cost            = %d\n"
                 "DFmode division cost            = %d\n"
                 "cache line size                 = %d\n"
                 "l1 cache size                   = %d\n"
                 "l2 cache size                   = %d\n"
                 "simultaneous prefetches         = %d\n"
                 "\n",
                 rs6000_cost->mulsi,
                 rs6000_cost->mulsi_const,
                 rs6000_cost->mulsi_const9,
                 rs6000_cost->muldi,
                 rs6000_cost->divsi,
                 rs6000_cost->divdi,
                 rs6000_cost->fp,
                 rs6000_cost->dmul,
                 rs6000_cost->sdiv,
                 rs6000_cost->ddiv,
                 rs6000_cost->cache_line_size,
                 rs6000_cost->l1_cache_size,
                 rs6000_cost->l2_cache_size,
                 rs6000_cost->simultaneous_prefetches);
    }
}
/* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS.  */

static void
darwin_rs6000_override_options (void)
{
  /* The Darwin ABI always includes AltiVec, can't be (validly) turned
     off.  */
  rs6000_altivec_abi = 1;
  TARGET_ALTIVEC_VRSAVE = 1;
  rs6000_current_abi = ABI_DARWIN;

  if (DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    darwin_one_byte_bool = 1;

  if (TARGET_64BIT && ! TARGET_POWERPC64)
    {
      rs6000_isa_flags |= OPTION_MASK_POWERPC64;
      warning (0, "%qs requires PowerPC64 architecture, enabling", "-m64");
    }

  if (flag_mkernel)
    {
      rs6000_default_long_calls = 1;
      rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
    }

  /* Make -m64 imply -maltivec.  Darwin's 64-bit ABI includes
     AltiVec.  */
  if (!flag_mkernel && !flag_apple_kext
      && TARGET_64BIT
      && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
    rs6000_isa_flags |= OPTION_MASK_ALTIVEC;

  /* Unless the user (not the configurer) has explicitly overridden
     it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
     G4 unless targeting the kernel.  */
  if (!flag_mkernel
      && !flag_apple_kext
      && strverscmp (darwin_macosx_version_min, "10.5") >= 0
      && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
      && ! global_options_set.x_rs6000_cpu_index)
    rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
}
/* If not otherwise specified by a target, make 'long double' equivalent to
   'double'.  */

#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
#endif
/* Return the builtin mask of the various options used that could affect which
   builtins were used.  In the past we used target_flags, but we've run out of
   bits, and some options are no longer in target_flags.  */

HOST_WIDE_INT
rs6000_builtin_mask_calculate (void)
{
  return (((TARGET_ALTIVEC)                 ? RS6000_BTM_ALTIVEC    : 0)
          | ((TARGET_CMPB)                  ? RS6000_BTM_CMPB       : 0)
          | ((TARGET_VSX)                   ? RS6000_BTM_VSX        : 0)
          | ((TARGET_FRE)                   ? RS6000_BTM_FRE        : 0)
          | ((TARGET_FRES)                  ? RS6000_BTM_FRES       : 0)
          | ((TARGET_FRSQRTE)               ? RS6000_BTM_FRSQRTE    : 0)
          | ((TARGET_FRSQRTES)              ? RS6000_BTM_FRSQRTES   : 0)
          | ((TARGET_POPCNTD)               ? RS6000_BTM_POPCNTD    : 0)
          | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL       : 0)
          | ((TARGET_P8_VECTOR)             ? RS6000_BTM_P8_VECTOR  : 0)
          | ((TARGET_P9_VECTOR)             ? RS6000_BTM_P9_VECTOR  : 0)
          | ((TARGET_P9_MISC)               ? RS6000_BTM_P9_MISC    : 0)
          | ((TARGET_MODULO)                ? RS6000_BTM_MODULO     : 0)
          | ((TARGET_64BIT)                 ? RS6000_BTM_64BIT      : 0)
          | ((TARGET_POWERPC64)             ? RS6000_BTM_POWERPC64  : 0)
          | ((TARGET_CRYPTO)                ? RS6000_BTM_CRYPTO     : 0)
          | ((TARGET_HTM)                   ? RS6000_BTM_HTM        : 0)
          | ((TARGET_DFP)                   ? RS6000_BTM_DFP        : 0)
          | ((TARGET_HARD_FLOAT)            ? RS6000_BTM_HARD_FLOAT : 0)
          | ((TARGET_LONG_DOUBLE_128
              && TARGET_HARD_FLOAT
              && !TARGET_IEEEQUAD)          ? RS6000_BTM_LDBL128    : 0)
          | ((TARGET_FLOAT128_TYPE)         ? RS6000_BTM_FLOAT128   : 0)
          | ((TARGET_FLOAT128_HW)           ? RS6000_BTM_FLOAT128_HW : 0));
}
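
/* The mask computed above is compared against each builtin's required-feature
   mask when deciding whether that builtin may be registered or expanded, so
   it must be kept in sync with rs6000_isa_flags whenever options change.  */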
/* Implement TARGET_MD_ASM_ADJUST.  All asm statements are considered
   to clobber the XER[CA] bit because clobbering that bit without telling
   the compiler worked just fine with versions of GCC before GCC 5, and
   breaking a lot of older code in ways that are hard to track down is
   not such a great idea.  */

static rtx_insn *
rs6000_md_asm_adjust (vec<rtx> &/*outputs*/, vec<rtx> &/*inputs*/,
                      vec<const char *> &/*constraints*/,
                      vec<rtx> &clobbers, HARD_REG_SET &clobbered_regs)
{
  clobbers.safe_push (gen_rtx_REG (SImode, CA_REGNO));
  SET_HARD_REG_BIT (clobbered_regs, CA_REGNO);
  return NULL;
}
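
/* In effect every asm behaves as if it had listed XER[CA] as clobbered; for
   example an asm using "addic" (add immediate carrying) stays correct even
   though it silently overwrites the carry bit.  */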
/* Override command line options.

   Combine build-specific configuration information with options
   specified on the command line to set various state variables which
   influence code generation, optimization, and expansion of built-in
   functions.  Assure that command-line configuration preferences are
   compatible with each other and with the build configuration; issue
   warnings while adjusting configuration or error messages while
   rejecting configuration.

   Upon entry to this function:

     This function is called once at the beginning of
     compilation, and then again at the start and end of compiling
     each section of code that has a different configuration, as
     indicated, for example, by adding the

       __attribute__((__target__("cpu=power9")))

     qualifier to a function definition or, for example, by bracketing
     code between

       #pragma GCC target("altivec")

     and

       #pragma GCC reset_options

     directives.  Parameter global_init_p is true for the initial
     invocation, which initializes global variables, and false for all
     subsequent invocations.

     Various global state information is assumed to be valid.  This
     includes OPTION_TARGET_CPU_DEFAULT, representing the name of the
     default CPU specified at build configure time, TARGET_DEFAULT,
     representing the default set of option flags for the default
     target, and global_options_set.x_rs6000_isa_flags, representing
     which options were requested on the command line.

   Upon return from this function:

     rs6000_isa_flags_explicit has a non-zero bit for each flag that
     was set by name on the command line.  Additionally, if certain
     attributes are automatically enabled or disabled by this function
     in order to assure compatibility between options and
     configuration, the flags associated with those attributes are
     also set.  By setting these "explicit bits", we avoid the risk
     that other code might accidentally overwrite these particular
     attributes with "default values".

     The various bits of rs6000_isa_flags are set to indicate the
     target options that have been selected for the most current
     compilation efforts.  This has the effect of also turning on the
     associated TARGET_XXX values since these are macros which are
     generally defined to test the corresponding bit of the
     rs6000_isa_flags variable.

     The variable rs6000_builtin_mask is set to represent the target
     options for the most current compilation efforts, consistent with
     the current contents of rs6000_isa_flags.  This variable controls
     expansion of built-in functions.

     Various other global variables and fields of global structures
     (over 50 in all) are initialized to reflect the desired options
     for the most current compilation efforts.  */
static bool
rs6000_option_override_internal (bool global_init_p)
{
  bool ret = true;

  HOST_WIDE_INT set_masks;
  HOST_WIDE_INT ignore_masks;
  int cpu_index = -1;
  int tune_index;
  struct cl_target_option *main_target_opt
    = ((global_init_p || target_option_default_node == NULL)
       ? NULL : TREE_TARGET_OPTION (target_option_default_node));
  /* Print defaults.  */
  if ((TARGET_DEBUG_REG || TARGET_DEBUG_TARGET) && global_init_p)
    rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);

  /* Remember the explicit arguments.  */
  if (global_init_p)
    rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;

  /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
     library functions, so warn about it.  The flag may be useful for
     performance studies from time to time though, so don't disable it
     entirely.  */
  if (global_options_set.x_rs6000_alignment_flags
      && rs6000_alignment_flags == MASK_ALIGN_POWER
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    warning (0, "%qs is not supported for 64-bit Darwin;"
             " it is incompatible with the installed C and C++ libraries",
             "-malign-power");

  /* Numerous experiments show that IRA-based loop pressure
     calculation works better for RTL loop invariant motion on targets
     with enough (>= 32) registers.  It is an expensive optimization.
     So it is on only for peak performance.  */
  if (optimize >= 3 && global_init_p
      && !global_options_set.x_flag_ira_loop_pressure)
    flag_ira_loop_pressure = 1;
  /* -fsanitize=address needs to turn on -fasynchronous-unwind-tables in order
     for tracebacks to be complete but not if any -fasynchronous-unwind-tables
     options were already specified.  */
  if (flag_sanitize & SANITIZE_USER_ADDRESS
      && !global_options_set.x_flag_asynchronous_unwind_tables)
    flag_asynchronous_unwind_tables = 1;

  /* Set the pointer size.  */
  if (TARGET_64BIT)
    {
      rs6000_pmode = DImode;
      rs6000_pointer_size = 64;
    }
  else
    {
      rs6000_pmode = SImode;
      rs6000_pointer_size = 32;
    }
  /* Some OSs don't support saving the high part of 64-bit registers on context
     switch.  Other OSs don't support saving Altivec registers.  On those OSs,
     we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
     if the user wants either, the user must explicitly specify them and we
     won't interfere with the user's specification.  */

  set_masks = POWERPC_MASKS;
#ifdef OS_MISSING_POWERPC64
  if (OS_MISSING_POWERPC64)
    set_masks &= ~OPTION_MASK_POWERPC64;
#endif
#ifdef OS_MISSING_ALTIVEC
  if (OS_MISSING_ALTIVEC)
    set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX
                   | OTHER_VSX_VECTOR_MASKS);
#endif

  /* Don't override by the processor default if given explicitly.  */
  set_masks &= ~rs6000_isa_flags_explicit;
  /* Process the -mcpu=<xxx> and -mtune=<xxx> argument.  If the user changed
     the cpu in a target attribute or pragma, but did not specify a tuning
     option, use the cpu for the tuning option rather than the option specified
     with -mtune on the command line.  Process a '--with-cpu' configuration
     request as an implicit --cpu.  */
  if (rs6000_cpu_index >= 0)
    cpu_index = rs6000_cpu_index;
  else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
    cpu_index = main_target_opt->x_rs6000_cpu_index;
  else if (OPTION_TARGET_CPU_DEFAULT)
    cpu_index = rs6000_cpu_name_lookup (OPTION_TARGET_CPU_DEFAULT);
  if (cpu_index >= 0)
    {
      const char *unavailable_cpu = NULL;
      switch (processor_target_table[cpu_index].processor)
        {
#ifndef HAVE_AS_POWER9
        case PROCESSOR_POWER9:
          unavailable_cpu = "power9";
          break;
#endif
#ifndef HAVE_AS_POWER8
        case PROCESSOR_POWER8:
          unavailable_cpu = "power8";
          break;
#endif
#ifndef HAVE_AS_POPCNTD
        case PROCESSOR_POWER7:
          unavailable_cpu = "power7";
          break;
#endif
#ifndef HAVE_AS_DFP
        case PROCESSOR_POWER6:
          unavailable_cpu = "power6";
          break;
#endif
#ifndef HAVE_AS_POPCNTB
        case PROCESSOR_POWER5:
          unavailable_cpu = "power5";
          break;
#endif
        default:
          break;
        }
      if (unavailable_cpu)
        {
          cpu_index = -1;
          warning (0, "will not generate %qs instructions because "
                   "assembler lacks %qs support", unavailable_cpu,
                   unavailable_cpu);
        }
    }
  /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
     compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
     with those from the cpu, except for options that were explicitly set.  If
     we don't have a cpu, do not override the target bits set in
     TARGET_DEFAULT.  */
  if (cpu_index >= 0)
    {
      rs6000_cpu_index = cpu_index;
      rs6000_isa_flags &= ~set_masks;
      rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
                           & set_masks);
    }
  else
    {
      /* If no -mcpu=<xxx>, inherit any default options that were cleared via
         POWERPC_MASKS.  Originally, TARGET_DEFAULT was used to initialize
         target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook.  When we
         switched to using rs6000_isa_flags, we need to do the initialization
         here.

         If there is a TARGET_DEFAULT, use that.  Otherwise fall back to using
         -mcpu=powerpc, -mcpu=powerpc64, or -mcpu=powerpc64le defaults.  */
      HOST_WIDE_INT flags;
      if (TARGET_DEFAULT)
        flags = TARGET_DEFAULT;
      else
        {
          /* PowerPC 64-bit LE requires at least ISA 2.07.  */
          const char *default_cpu = (!TARGET_POWERPC64
                                     ? "powerpc"
                                     : (BYTES_BIG_ENDIAN
                                        ? "powerpc64"
                                        : "powerpc64le"));
          int default_cpu_index = rs6000_cpu_name_lookup (default_cpu);
          flags = processor_target_table[default_cpu_index].target_enable;
        }
      rs6000_isa_flags |= (flags & ~rs6000_isa_flags_explicit);
    }
  if (rs6000_tune_index >= 0)
    tune_index = rs6000_tune_index;
  else if (cpu_index >= 0)
    rs6000_tune_index = tune_index = cpu_index;
  else
    {
      size_t i;
      enum processor_type tune_proc
        = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);

      tune_index = -1;
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
        if (processor_target_table[i].processor == tune_proc)
          {
            tune_index = (int) i;
            break;
          }
    }

  if (cpu_index >= 0)
    rs6000_cpu = processor_target_table[cpu_index].processor;
  else
    rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;

  gcc_assert (tune_index >= 0);
  rs6000_tune = processor_target_table[tune_index].processor;
  if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
      || rs6000_cpu == PROCESSOR_PPCE500MC
      || rs6000_cpu == PROCESSOR_PPCE500MC64
      || rs6000_cpu == PROCESSOR_PPCE5500)
    {
      if (TARGET_ALTIVEC)
        error ("AltiVec not supported in this target");
    }
  /* If we are optimizing big endian systems for space, use the load/store
     multiple instructions.  */
  if (BYTES_BIG_ENDIAN && optimize_size)
    rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE;

  /* Don't allow -mmultiple on little endian systems unless the cpu is a 750,
     because the hardware doesn't support the instructions used in little
     endian mode, and causes an alignment trap.  The 750 does not cause an
     alignment trap (except when the target is unaligned).  */

  if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750 && TARGET_MULTIPLE)
    {
      rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
      if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
        warning (0, "%qs is not supported on little endian systems",
                 "-mmultiple");
    }

  /* If little-endian, default to -mstrict-align on older processors.
     Testing for htm matches power8 and later.  */
  if (!BYTES_BIG_ENDIAN
      && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
    rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
  if (!rs6000_fold_gimple)
    fprintf (stderr,
             "gimple folding of rs6000 builtins has been disabled.\n");
  /* Add some warnings for VSX.  */
  if (TARGET_VSX)
    {
      const char *msg = NULL;
      if (!TARGET_HARD_FLOAT)
        {
          if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
            msg = N_("-mvsx requires hardware floating point");
          else
            {
              rs6000_isa_flags &= ~OPTION_MASK_VSX;
              rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
            }
        }
      else if (TARGET_AVOID_XFORM > 0)
        msg = N_("-mvsx needs indexed addressing");
      else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
                                   & OPTION_MASK_ALTIVEC))
        {
          if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
            msg = N_("-mvsx and -mno-altivec are incompatible");
          else
            msg = N_("-mno-altivec disables vsx");
        }

      if (msg)
        {
          warning (0, msg);
          rs6000_isa_flags &= ~OPTION_MASK_VSX;
          rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
        }
    }
  /* If hard-float/altivec/vsx were explicitly turned off then don't allow
     the -mcpu setting to enable options that conflict.  */
  if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
      && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
                                       | OPTION_MASK_ALTIVEC
                                       | OPTION_MASK_VSX)) != 0)
    rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
                           | OPTION_MASK_DIRECT_MOVE)
                          & ~rs6000_isa_flags_explicit);

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);

  /* Handle explicit -mno-{altivec,vsx,power8-vector,power9-vector} and turn
     off all of the options that depend on those flags.  */
  ignore_masks = rs6000_disable_incompatible_switches ();
  /* For the newer switches (vsx, dfp, etc.) set some of the older options,
     unless the user explicitly used the -mno-<option> to disable the code.  */
  if (TARGET_P9_VECTOR || TARGET_MODULO || TARGET_P9_MISC)
    rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
  else if (TARGET_P9_MINMAX)
    {
      if (cpu_index >= 0)
        {
          if (cpu_index == PROCESSOR_POWER9)
            {
              /* legacy behavior: allow -mcpu=power9 with certain
                 capabilities explicitly disabled.  */
              rs6000_isa_flags |= (ISA_3_0_MASKS_SERVER & ~ignore_masks);
            }
          else
            error ("power9 target option is incompatible with %<%s=<xxx>%> "
                   "for <xxx> less than power9", "-mcpu");
        }
      else if ((ISA_3_0_MASKS_SERVER & rs6000_isa_flags_explicit)
               != (ISA_3_0_MASKS_SERVER & rs6000_isa_flags
                   & rs6000_isa_flags_explicit))
        {
          /* Enforce that none of the ISA_3_0_MASKS_SERVER flags
             were explicitly cleared.  */
          error ("%qs incompatible with explicitly disabled options",
                 "-mpower9-minmax");
        }
      else
        rs6000_isa_flags |= ISA_3_0_MASKS_SERVER;
    }
  else if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
    rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~ignore_masks);
  else if (TARGET_VSX)
    rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~ignore_masks);
  else if (TARGET_POPCNTD)
    rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~ignore_masks);
  else if (TARGET_DFP)
    rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~ignore_masks);
  else if (TARGET_CMPB)
    rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~ignore_masks);
  else if (TARGET_FPRND)
    rs6000_isa_flags |= (ISA_2_4_MASKS & ~ignore_masks);
  else if (TARGET_POPCNTB)
    rs6000_isa_flags |= (ISA_2_2_MASKS & ~ignore_masks);
  else if (TARGET_ALTIVEC)
    rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~ignore_masks);
  if (TARGET_CRYPTO && !TARGET_ALTIVEC)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
        error ("%qs requires %qs", "-mcrypto", "-maltivec");
      rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
    }

  if (TARGET_DIRECT_MOVE && !TARGET_VSX)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
        error ("%qs requires %qs", "-mdirect-move", "-mvsx");
      rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
    }

  if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
        error ("%qs requires %qs", "-mpower8-vector", "-maltivec");
      rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
    }

  if (TARGET_P8_VECTOR && !TARGET_VSX)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
          && (rs6000_isa_flags_explicit & OPTION_MASK_VSX))
        error ("%qs requires %qs", "-mpower8-vector", "-mvsx");
      else if ((rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR) == 0)
        {
          rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
          if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
            rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
        }
      else
        {
          /* OPTION_MASK_P8_VECTOR is explicit, and OPTION_MASK_VSX is
             not explicit.  */
          rs6000_isa_flags |= OPTION_MASK_VSX;
          rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
        }
    }

  if (TARGET_DFP && !TARGET_HARD_FLOAT)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
        error ("%qs requires %qs", "-mhard-dfp", "-mhard-float");
      rs6000_isa_flags &= ~OPTION_MASK_DFP;
    }
  /* The quad memory instructions only work in 64-bit mode.  In 32-bit mode,
     silently turn off quad memory mode.  */
  if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
        warning (0, N_("-mquad-memory requires 64-bit mode"));

      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
        warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));

      rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
                            | OPTION_MASK_QUAD_MEMORY_ATOMIC);
    }

  /* Non-atomic quad memory load/store are disabled for little endian, since
     the words are reversed, but atomic operations can still be done by
     swapping the words.  */
  if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
        warning (0, N_("-mquad-memory is not available in little endian "
                       "mode"));

      rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
    }

  /* Assume if the user asked for normal quad memory instructions, they want
     the atomic versions as well, unless they explicitly told us not to use
     quad word atomic instructions.  */
  if (TARGET_QUAD_MEMORY
      && !TARGET_QUAD_MEMORY_ATOMIC
      && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
    rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
  /* If we can shrink-wrap the TOC register save separately, then use
     -msave-toc-indirect unless explicitly disabled.  */
  if ((rs6000_isa_flags_explicit & OPTION_MASK_SAVE_TOC_INDIRECT) == 0
      && flag_shrink_wrap_separate
      && optimize_function_for_speed_p (cfun))
    rs6000_isa_flags |= OPTION_MASK_SAVE_TOC_INDIRECT;
  /* Enable power8 fusion if we are tuning for power8, even if we aren't
     generating power8 instructions.  */
  if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
    rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
                         & OPTION_MASK_P8_FUSION);

  /* Setting additional fusion flags turns on base fusion.  */
  if (!TARGET_P8_FUSION && (TARGET_P8_FUSION_SIGN || TARGET_TOC_FUSION))
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
        {
          if (TARGET_P8_FUSION_SIGN)
            error ("%qs requires %qs", "-mpower8-fusion-sign",
                   "-mpower8-fusion");

          if (TARGET_TOC_FUSION)
            error ("%qs requires %qs", "-mtoc-fusion", "-mpower8-fusion");

          rs6000_isa_flags &= ~OPTION_MASK_P8_FUSION;
        }
      else
        rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
    }

  /* Power9 fusion is a superset over power8 fusion.  */
  if (TARGET_P9_FUSION && !TARGET_P8_FUSION)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION)
        {
          /* We prefer to not mention undocumented options in
             error messages.  However, if users have managed to select
             power9-fusion without selecting power8-fusion, they
             already know about undocumented flags.  */
          error ("%qs requires %qs", "-mpower9-fusion", "-mpower8-fusion");
          rs6000_isa_flags &= ~OPTION_MASK_P9_FUSION;
        }
      else
        rs6000_isa_flags |= OPTION_MASK_P8_FUSION;
    }

  /* Enable power9 fusion if we are tuning for power9, even if we aren't
     generating power9 instructions.  */
  if (!(rs6000_isa_flags_explicit & OPTION_MASK_P9_FUSION))
    rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
                         & OPTION_MASK_P9_FUSION);
  /* Power8 does not fuse sign extended loads with the addis.  If we are
     optimizing at high levels for speed, convert a sign extended load into a
     zero extending load, and an explicit sign extension.  */
  if (TARGET_P8_FUSION
      && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
      && optimize_function_for_speed_p (cfun)
      && optimize >= 3)
    rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;

  /* TOC fusion requires 64-bit and medium/large code model.  */
  if (TARGET_TOC_FUSION && !TARGET_POWERPC64)
    {
      rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
      if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
        warning (0, N_("-mtoc-fusion requires 64-bit"));
    }

  if (TARGET_TOC_FUSION && (TARGET_CMODEL == CMODEL_SMALL))
    {
      rs6000_isa_flags &= ~OPTION_MASK_TOC_FUSION;
      if ((rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION) != 0)
        warning (0, N_("-mtoc-fusion requires medium/large code model"));
    }

  /* Turn on -mtoc-fusion by default if p8-fusion and 64-bit medium/large code
     model are in use.  */
  if (TARGET_P8_FUSION && !TARGET_TOC_FUSION && TARGET_POWERPC64
      && (TARGET_CMODEL != CMODEL_SMALL)
      && !(rs6000_isa_flags_explicit & OPTION_MASK_TOC_FUSION))
    rs6000_isa_flags |= OPTION_MASK_TOC_FUSION;
  /* ISA 3.0 vector instructions include ISA 2.07.  */
  if (TARGET_P9_VECTOR && !TARGET_P8_VECTOR)
    {
      /* We prefer to not mention undocumented options in
         error messages.  However, if users have managed to select
         power9-vector without selecting power8-vector, they
         already know about undocumented flags.  */
      if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR)
          && (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR))
        error ("%qs requires %qs", "-mpower9-vector", "-mpower8-vector");
      else if ((rs6000_isa_flags_explicit & OPTION_MASK_P9_VECTOR) == 0)
        {
          rs6000_isa_flags &= ~OPTION_MASK_P9_VECTOR;
          if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
            rs6000_isa_flags_explicit |= OPTION_MASK_P9_VECTOR;
        }
      else
        {
          /* OPTION_MASK_P9_VECTOR is explicit and
             OPTION_MASK_P8_VECTOR is not explicit.  */
          rs6000_isa_flags |= OPTION_MASK_P8_VECTOR;
          rs6000_isa_flags_explicit |= OPTION_MASK_P8_VECTOR;
        }
    }
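
  /* The net effect of the vector option blocks above is roughly the
     implication chain -mpower9-vector -> -mpower8-vector -> -mvsx ->
     -maltivec, applied unless the user explicitly broke a link with a
     -mno-* option.  */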
  /* Explicitly set -mallow-movmisalign on if we have full ISA 2.07
     support.  If we only have ISA 2.06 support, and the user did not specify
     the switch, leave it set to -1 so the movmisalign patterns are enabled,
     but we don't enable the full vectorization support.  */
  if (TARGET_ALLOW_MOVMISALIGN == -1 && TARGET_P8_VECTOR && TARGET_DIRECT_MOVE)
    TARGET_ALLOW_MOVMISALIGN = 1;

  else if (TARGET_ALLOW_MOVMISALIGN && !TARGET_VSX)
    {
      if (TARGET_ALLOW_MOVMISALIGN > 0
          && global_options_set.x_TARGET_ALLOW_MOVMISALIGN)
        error ("%qs requires %qs", "-mallow-movmisalign", "-mvsx");

      TARGET_ALLOW_MOVMISALIGN = 0;
    }

  /* Determine when unaligned vector accesses are permitted, and when
     they are preferred over masked Altivec loads.  Note that if
     TARGET_ALLOW_MOVMISALIGN has been disabled by the user, then
     TARGET_EFFICIENT_UNALIGNED_VSX must be as well.  The converse is
     not true.  */
  if (TARGET_EFFICIENT_UNALIGNED_VSX)
    {
      if (!TARGET_VSX)
        {
          if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
            error ("%qs requires %qs", "-mefficient-unaligned-vsx", "-mvsx");

          rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
        }
      else if (!TARGET_ALLOW_MOVMISALIGN)
        {
          if (rs6000_isa_flags_explicit & OPTION_MASK_EFFICIENT_UNALIGNED_VSX)
            error ("%qs requires %qs", "-munefficient-unaligned-vsx",
                   "-mallow-movmisalign");

          rs6000_isa_flags &= ~OPTION_MASK_EFFICIENT_UNALIGNED_VSX;
        }
    }
  /* Use long double size to select the appropriate long double.  We use
     TYPE_PRECISION to differentiate the 3 different long double types.  We map
     128 into the precision used for TFmode.  */
  int default_long_double_size = (RS6000_DEFAULT_LONG_DOUBLE_SIZE == 64
                                  ? 64
                                  : FLOAT_PRECISION_TFmode);

  /* Set long double size before the IEEE 128-bit tests.  */
  if (!global_options_set.x_rs6000_long_double_type_size)
    {
      if (main_target_opt != NULL
          && (main_target_opt->x_rs6000_long_double_type_size
              != default_long_double_size))
        error ("target attribute or pragma changes long double size");
      else
        rs6000_long_double_type_size = default_long_double_size;
    }
  else if (rs6000_long_double_type_size == 128)
    rs6000_long_double_type_size = FLOAT_PRECISION_TFmode;
  /* Set -mabi=ieeelongdouble on some old targets.  In the future, power server
     systems will also set long double to be IEEE 128-bit.  AIX and Darwin
     explicitly redefine TARGET_IEEEQUAD and TARGET_IEEEQUAD_DEFAULT to 0, so
     those systems will not pick up this default.  Warn if the user changes the
     default unless -Wno-psabi.  */
  if (!global_options_set.x_rs6000_ieeequad)
    rs6000_ieeequad = TARGET_IEEEQUAD_DEFAULT;

  else if (rs6000_ieeequad != TARGET_IEEEQUAD_DEFAULT && TARGET_LONG_DOUBLE_128)
    {
      static bool warned_change_long_double;
      if (!warned_change_long_double)
        {
          warned_change_long_double = true;
          if (TARGET_IEEEQUAD)
            warning (OPT_Wpsabi, "Using IEEE extended precision long double");
          else
            warning (OPT_Wpsabi, "Using IBM extended precision long double");
        }
    }
  /* Enable the default support for IEEE 128-bit floating point on Linux VSX
     systems.  In GCC 7, we would enable the IEEE 128-bit floating point
     infrastructure (-mfloat128-type) but not enable the actual __float128 type
     unless the user used the explicit -mfloat128.  In GCC 8, we enable both
     the keyword as well as the type.  */
  TARGET_FLOAT128_TYPE = TARGET_FLOAT128_ENABLE_TYPE && TARGET_VSX;

  /* IEEE 128-bit floating point requires VSX support.  */
  if (TARGET_FLOAT128_KEYWORD)
    {
      if (!TARGET_VSX)
        {
          if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) != 0)
            error ("%qs requires VSX support", "-mfloat128");

          TARGET_FLOAT128_TYPE = 0;
          rs6000_isa_flags &= ~(OPTION_MASK_FLOAT128_KEYWORD
                                | OPTION_MASK_FLOAT128_HW);
        }
      else if (!TARGET_FLOAT128_TYPE)
        {
          TARGET_FLOAT128_TYPE = 1;
          warning (0, "The -mfloat128 option may not be fully supported");
        }
    }

  /* Enable the __float128 keyword under Linux by default.  */
  if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_KEYWORD
      && (rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_KEYWORD) == 0)
    rs6000_isa_flags |= OPTION_MASK_FLOAT128_KEYWORD;
  /* If we are supporting the float128 type and full ISA 3.0 support,
     enable -mfloat128-hardware by default.  However, don't enable the
     __float128 keyword if it was explicitly turned off.  64-bit mode is needed
     because sometimes the compiler wants to put things in an integer
     container, and if we don't have __int128 support, it is impossible.  */
  if (TARGET_FLOAT128_TYPE && !TARGET_FLOAT128_HW && TARGET_64BIT
      && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) == ISA_3_0_MASKS_IEEE
      && !(rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW))
    rs6000_isa_flags |= OPTION_MASK_FLOAT128_HW;

  if (TARGET_FLOAT128_HW
      && (rs6000_isa_flags & ISA_3_0_MASKS_IEEE) != ISA_3_0_MASKS_IEEE)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
        error ("%qs requires full ISA 3.0 support", "-mfloat128-hardware");

      rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
    }

  if (TARGET_FLOAT128_HW && !TARGET_64BIT)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_FLOAT128_HW) != 0)
        error ("%qs requires %qs", "-mfloat128-hardware", "-m64");

      rs6000_isa_flags &= ~OPTION_MASK_FLOAT128_HW;
    }
  /* Print the options after updating the defaults.  */
  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);

  /* E500mc does "better" if we inline more aggressively.  Respect the
     user's opinion, though.  */
  if (rs6000_block_move_inline_limit == 0
      && (rs6000_tune == PROCESSOR_PPCE500MC
          || rs6000_tune == PROCESSOR_PPCE500MC64
          || rs6000_tune == PROCESSOR_PPCE5500
          || rs6000_tune == PROCESSOR_PPCE6500))
    rs6000_block_move_inline_limit = 128;

  /* store_one_arg depends on expand_block_move to handle at least the
     size of reg_parm_stack_space.  */
  if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
    rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
  if (global_init_p)
    {
      /* If the appropriate debug option is enabled, replace the target hooks
         with debug versions that call the real version and then print
         debugging information.  */
      if (TARGET_DEBUG_COST)
        {
          targetm.rtx_costs = rs6000_debug_rtx_costs;
          targetm.address_cost = rs6000_debug_address_cost;
          targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
        }

      if (TARGET_DEBUG_ADDR)
        {
          targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
          targetm.legitimize_address = rs6000_debug_legitimize_address;
          rs6000_secondary_reload_class_ptr
            = rs6000_debug_secondary_reload_class;
          targetm.secondary_memory_needed
            = rs6000_debug_secondary_memory_needed;
          targetm.can_change_mode_class
            = rs6000_debug_can_change_mode_class;
          rs6000_preferred_reload_class_ptr
            = rs6000_debug_preferred_reload_class;
          rs6000_legitimize_reload_address_ptr
            = rs6000_debug_legitimize_reload_address;
          rs6000_mode_dependent_address_ptr
            = rs6000_debug_mode_dependent_address;
        }
      if (rs6000_veclibabi_name)
        {
          if (strcmp (rs6000_veclibabi_name, "mass") == 0)
            rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
          else
            {
              error ("unknown vectorization library ABI type (%qs) for "
                     "%qs switch", rs6000_veclibabi_name, "-mveclibabi=");
              ret = false;
            }
        }
    }
  /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
     target attribute or pragma which automatically enables both options,
     unless the altivec ABI was set.  This is set by default for 64-bit, but
     not for 32-bit.  */
  if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
    {
      TARGET_FLOAT128_TYPE = 0;
      rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC
                             | OPTION_MASK_FLOAT128_KEYWORD)
                            & ~rs6000_isa_flags_explicit);
    }
  /* Enable Altivec ABI for AIX -maltivec.  */
  if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
        error ("target attribute or pragma changes AltiVec ABI");
      else
        rs6000_altivec_abi = 1;
    }

  /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux.  For
     PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI.  It can
     be explicitly overridden in either case.  */
  if (TARGET_ELF)
    {
      if (!global_options_set.x_rs6000_altivec_abi
          && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
        {
          if (main_target_opt != NULL
              && !main_target_opt->x_rs6000_altivec_abi)
            error ("target attribute or pragma changes AltiVec ABI");
          else
            rs6000_altivec_abi = 1;
        }
    }
  /* Set the Darwin64 ABI as default for 64-bit Darwin.
     So far, the only darwin64 targets are also MACH-O.  */
  if (TARGET_MACHO
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
        error ("target attribute or pragma changes darwin64 ABI");
      else
        {
          rs6000_darwin64_abi = 1;
          /* Default to natural alignment, for better performance.  */
          rs6000_alignment_flags = MASK_ALIGN_NATURAL;
        }
    }
  /* Place FP constants in the constant pool instead of TOC
     if section anchors are enabled.  */
  if (flag_section_anchors
      && !global_options_set.x_TARGET_NO_FP_IN_TOC)
    TARGET_NO_FP_IN_TOC = 1;

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
  SUBSUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUB3TARGET_OVERRIDE_OPTIONS
  SUB3TARGET_OVERRIDE_OPTIONS;
#endif

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
  rs6000_always_hint = (rs6000_tune != PROCESSOR_POWER4
                        && rs6000_tune != PROCESSOR_POWER5
                        && rs6000_tune != PROCESSOR_POWER6
                        && rs6000_tune != PROCESSOR_POWER7
                        && rs6000_tune != PROCESSOR_POWER8
                        && rs6000_tune != PROCESSOR_POWER9
                        && rs6000_tune != PROCESSOR_PPCA2
                        && rs6000_tune != PROCESSOR_CELL
                        && rs6000_tune != PROCESSOR_PPC476);
  rs6000_sched_groups = (rs6000_tune == PROCESSOR_POWER4
                         || rs6000_tune == PROCESSOR_POWER5
                         || rs6000_tune == PROCESSOR_POWER7
                         || rs6000_tune == PROCESSOR_POWER8);
  rs6000_align_branch_targets = (rs6000_tune == PROCESSOR_POWER4
                                 || rs6000_tune == PROCESSOR_POWER5
                                 || rs6000_tune == PROCESSOR_POWER6
                                 || rs6000_tune == PROCESSOR_POWER7
                                 || rs6000_tune == PROCESSOR_POWER8
                                 || rs6000_tune == PROCESSOR_POWER9
                                 || rs6000_tune == PROCESSOR_PPCE500MC
                                 || rs6000_tune == PROCESSOR_PPCE500MC64
                                 || rs6000_tune == PROCESSOR_PPCE5500
                                 || rs6000_tune == PROCESSOR_PPCE6500);
  /* Allow debug switches to override the above settings.  These are set to -1
     in rs6000.opt to indicate the user hasn't directly set the switch.  */
  if (TARGET_ALWAYS_HINT >= 0)
    rs6000_always_hint = TARGET_ALWAYS_HINT;

  if (TARGET_SCHED_GROUPS >= 0)
    rs6000_sched_groups = TARGET_SCHED_GROUPS;

  if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
    rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;

  rs6000_sched_restricted_insns_priority
    = (rs6000_sched_groups ? 1 : 0);
  /* Handle -msched-costly-dep option.  */
  rs6000_sched_costly_dep
    = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);

  if (rs6000_sched_costly_dep_str)
    {
      if (! strcmp (rs6000_sched_costly_dep_str, "no"))
        rs6000_sched_costly_dep = no_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
        rs6000_sched_costly_dep = all_deps_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
        rs6000_sched_costly_dep = true_store_to_load_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
        rs6000_sched_costly_dep = store_to_load_dep_costly;
      else
        rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
                                   atoi (rs6000_sched_costly_dep_str));
    }
  /* Handle -minsert-sched-nops option.  */
  rs6000_sched_insert_nops
    = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);

  if (rs6000_sched_insert_nops_str)
    {
      if (! strcmp (rs6000_sched_insert_nops_str, "no"))
        rs6000_sched_insert_nops = sched_finish_none;
      else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
        rs6000_sched_insert_nops = sched_finish_pad_groups;
      else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
        rs6000_sched_insert_nops = sched_finish_regroup_exact;
      else
        rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
                                    atoi (rs6000_sched_insert_nops_str));
    }
  /* Handle stack protector.  */
  if (!global_options_set.x_rs6000_stack_protector_guard)
#ifdef TARGET_THREAD_SSP_OFFSET
    rs6000_stack_protector_guard = SSP_TLS;
#else
    rs6000_stack_protector_guard = SSP_GLOBAL;
#endif

#ifdef TARGET_THREAD_SSP_OFFSET
  rs6000_stack_protector_guard_offset = TARGET_THREAD_SSP_OFFSET;
  rs6000_stack_protector_guard_reg = TARGET_64BIT ? 13 : 2;
#endif

  if (global_options_set.x_rs6000_stack_protector_guard_offset_str)
    {
      char *endp;
      const char *str = rs6000_stack_protector_guard_offset_str;

      errno = 0;
      long offset = strtol (str, &endp, 0);
      if (!*str || *endp || errno)
        error ("%qs is not a valid number in %qs", str,
               "-mstack-protector-guard-offset=");

      if (!IN_RANGE (offset, -0x8000, 0x7fff)
          || (TARGET_64BIT && (offset & 3)))
        error ("%qs is not a valid offset in %qs", str,
               "-mstack-protector-guard-offset=");

      rs6000_stack_protector_guard_offset = offset;
    }

  if (global_options_set.x_rs6000_stack_protector_guard_reg_str)
    {
      const char *str = rs6000_stack_protector_guard_reg_str;
      int reg = decode_reg_name (str);

      if (!IN_RANGE (reg, 1, 31))
        error ("%qs is not a valid base register in %qs", str,
               "-mstack-protector-guard-reg=");

      rs6000_stack_protector_guard_reg = reg;
    }

  if (rs6000_stack_protector_guard == SSP_TLS
      && !IN_RANGE (rs6000_stack_protector_guard_reg, 1, 31))
    error ("%qs needs a valid base register", "-mstack-protector-guard=tls");
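
  /* Illustrative command line (the values here are examples only):
     "-mstack-protector-guard=tls -mstack-protector-guard-reg=13
      -mstack-protector-guard-offset=0x28" would read the canary from
     offset 0x28 off of register 13.  */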
#ifdef TARGET_REGNAMES
  /* If the user desires alternate register names, copy in the
     alternate names now.  */
  if (TARGET_REGNAMES)
    memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
#endif
  /* Set aix_struct_return last, after the ABI is determined.
     If -maix-struct-return or -msvr4-struct-return was explicitly
     used, don't override with the ABI default.  */
  if (!global_options_set.x_aix_struct_return)
    aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);

#if 0
  /* IBM XL compiler defaults to unsigned bitfields.  */
  if (TARGET_XL_COMPAT)
    flag_signed_bitfields = 0;
#endif

  if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
    REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;

  ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
  /* We can only guarantee the availability of DI pseudo-ops when
     assembling for 64-bit targets.  */
  if (!TARGET_64BIT)
    {
      targetm.asm_out.aligned_op.di = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }
  /* Set branch target alignment, if not optimizing for size.  */
  if (!optimize_size)
    {
      /* Cell wants to be aligned 8byte for dual issue.  Titan wants to be
         aligned 8byte to avoid misprediction by the branch predictor.  */
      if (rs6000_tune == PROCESSOR_TITAN
          || rs6000_tune == PROCESSOR_CELL)
        {
          if (flag_align_functions && !str_align_functions)
            str_align_functions = "8";
          if (flag_align_jumps && !str_align_jumps)
            str_align_jumps = "8";
          if (flag_align_loops && !str_align_loops)
            str_align_loops = "8";
        }
      if (rs6000_align_branch_targets)
        {
          if (flag_align_functions && !str_align_functions)
            str_align_functions = "16";
          if (flag_align_jumps && !str_align_jumps)
            str_align_jumps = "16";
          if (flag_align_loops && !str_align_loops)
            {
              can_override_loop_align = 1;
              str_align_loops = "16";
            }
        }

      if (flag_align_jumps && !str_align_jumps)
        str_align_jumps = "16";
      if (flag_align_loops && !str_align_loops)
        str_align_loops = "16";
    }
  /* Arrange to save and restore machine status around nested functions.  */
  init_machine_status = rs6000_init_machine_status;

  /* We should always be splitting complex arguments, but we can't break
     Linux and Darwin ABIs at the moment.  For now, only AIX is fixed.  */
  if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
    targetm.calls.split_complex_arg = NULL;

  /* The AIX and ELFv1 ABIs define standard function descriptors.  */
  if (DEFAULT_ABI == ABI_AIX)
    targetm.calls.custom_function_descriptors = 0;
  /* Initialize rs6000_cost with the appropriate target costs.  */
  if (optimize_size)
    rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
  else
    switch (rs6000_tune)
      {
      case PROCESSOR_RS64A:
        rs6000_cost = &rs64a_cost;
        break;

      case PROCESSOR_MPCCORE:
        rs6000_cost = &mpccore_cost;
        break;

      case PROCESSOR_PPC403:
        rs6000_cost = &ppc403_cost;
        break;

      case PROCESSOR_PPC405:
        rs6000_cost = &ppc405_cost;
        break;

      case PROCESSOR_PPC440:
        rs6000_cost = &ppc440_cost;
        break;

      case PROCESSOR_PPC476:
        rs6000_cost = &ppc476_cost;
        break;

      case PROCESSOR_PPC601:
        rs6000_cost = &ppc601_cost;
        break;

      case PROCESSOR_PPC603:
        rs6000_cost = &ppc603_cost;
        break;

      case PROCESSOR_PPC604:
        rs6000_cost = &ppc604_cost;
        break;

      case PROCESSOR_PPC604e:
        rs6000_cost = &ppc604e_cost;
        break;

      case PROCESSOR_PPC620:
        rs6000_cost = &ppc620_cost;
        break;

      case PROCESSOR_PPC630:
        rs6000_cost = &ppc630_cost;
        break;

      case PROCESSOR_CELL:
        rs6000_cost = &ppccell_cost;
        break;

      case PROCESSOR_PPC750:
      case PROCESSOR_PPC7400:
        rs6000_cost = &ppc750_cost;
        break;

      case PROCESSOR_PPC7450:
        rs6000_cost = &ppc7450_cost;
        break;

      case PROCESSOR_PPC8540:
      case PROCESSOR_PPC8548:
        rs6000_cost = &ppc8540_cost;
        break;

      case PROCESSOR_PPCE300C2:
      case PROCESSOR_PPCE300C3:
        rs6000_cost = &ppce300c2c3_cost;
        break;

      case PROCESSOR_PPCE500MC:
        rs6000_cost = &ppce500mc_cost;
        break;

      case PROCESSOR_PPCE500MC64:
        rs6000_cost = &ppce500mc64_cost;
        break;

      case PROCESSOR_PPCE5500:
        rs6000_cost = &ppce5500_cost;
        break;

      case PROCESSOR_PPCE6500:
        rs6000_cost = &ppce6500_cost;
        break;

      case PROCESSOR_TITAN:
        rs6000_cost = &titan_cost;
        break;

      case PROCESSOR_POWER4:
      case PROCESSOR_POWER5:
        rs6000_cost = &power4_cost;
        break;

      case PROCESSOR_POWER6:
        rs6000_cost = &power6_cost;
        break;

      case PROCESSOR_POWER7:
        rs6000_cost = &power7_cost;
        break;

      case PROCESSOR_POWER8:
        rs6000_cost = &power8_cost;
        break;

      case PROCESSOR_POWER9:
        rs6000_cost = &power9_cost;
        break;

      case PROCESSOR_PPCA2:
        rs6000_cost = &ppca2_cost;
        break;

      default:
        gcc_unreachable ();
      }
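
  /* rs6000_cost now points at the per-processor cost table consulted by the
     rtx cost and scheduling hooks, and it also seeds the cache-related
     --param defaults set up below.  */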
  if (global_init_p)
    {
      maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
                             rs6000_cost->simultaneous_prefetches,
                             global_options.x_param_values,
                             global_options_set.x_param_values);
      maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
                             global_options.x_param_values,
                             global_options_set.x_param_values);
      maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
                             rs6000_cost->cache_line_size,
                             global_options.x_param_values,
                             global_options_set.x_param_values);
      maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
                             global_options.x_param_values,
                             global_options_set.x_param_values);

      /* Increase loop peeling limits based on performance analysis.  */
      maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
                             global_options.x_param_values,
                             global_options_set.x_param_values);
      maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
                             global_options.x_param_values,
                             global_options_set.x_param_values);

      /* Use the 'model' -fsched-pressure algorithm by default.  */
      maybe_set_param_value (PARAM_SCHED_PRESSURE_ALGORITHM,
                             SCHED_PRESSURE_MODEL,
                             global_options.x_param_values,
                             global_options_set.x_param_values);

      /* If using typedef char *va_list, signal that
         __builtin_va_start (&ap, 0) can be optimized to
         ap = __builtin_next_arg (0).  */
      if (DEFAULT_ABI != ABI_V4)
        targetm.expand_builtin_va_start = NULL;
    }
  /* If not explicitly specified via option, decide whether to generate indexed
     load/store instructions.  A value of -1 indicates that the
     initial value of this variable has not been overwritten.  During
     compilation, TARGET_AVOID_XFORM is either 0 or 1.  */
  if (TARGET_AVOID_XFORM == -1)
    /* Avoid indexed addressing when targeting Power6 in order to avoid the
       DERAT mispredict penalty.  However the LVE and STVE altivec instructions
       need indexed accesses and the type used is the scalar type of the
       element being loaded or stored.  */
    TARGET_AVOID_XFORM = (rs6000_tune == PROCESSOR_POWER6 && TARGET_CMPB
                          && !TARGET_ALTIVEC);
  /* Set the -mrecip options.  */
  if (rs6000_recip_name)
    {
      char *p = ASTRDUP (rs6000_recip_name);
      char *q;
      unsigned int mask, i;
      bool invert;

      while ((q = strtok (p, ",")) != NULL)
        {
          p = NULL;
          if (*q == '!')
            {
              invert = true;
              q++;
            }
          else
            invert = false;

          if (!strcmp (q, "default"))
            mask = ((TARGET_RECIP_PRECISION)
                    ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
          else
            {
              for (i = 0; i < ARRAY_SIZE (recip_options); i++)
                if (!strcmp (q, recip_options[i].string))
                  {
                    mask = recip_options[i].mask;
                    break;
                  }

              if (i == ARRAY_SIZE (recip_options))
                {
                  error ("unknown option for %<%s=%s%>", "-mrecip", q);
                  invert = false;
                  mask = 0;
                  ret = false;
                }
            }

          if (invert)
            rs6000_recip_control &= ~mask;
          else
            rs6000_recip_control |= mask;
        }
    }
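
  /* Example: "-mrecip=rsqrt,!divd" enables the reciprocal square root
     estimate sequences but keeps the double precision divide estimates
     disabled; each comma-separated token is looked up in the recip_options
     table, with a leading '!' clearing the bits instead of setting them.  */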
  /* Set the builtin mask of the various options used that could affect which
     builtins were used.  In the past we used target_flags, but we've run out
     of bits, and some options are no longer in target_flags.  */
  rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
    rs6000_print_builtin_options (stderr, 0, "builtin mask",
                                  rs6000_builtin_mask);
  /* Initialize all of the registers.  */
  rs6000_init_hard_regno_mode_ok (global_init_p);

  /* Save the initial options in case the user does function specific
     options.  */
  if (global_init_p)
    target_option_default_node = target_option_current_node
      = build_target_option_node (&global_options);

  /* If not explicitly specified via option, decide whether to generate the
     extra blr's required to preserve the link stack on some cpus (eg, 476).  */
  if (TARGET_LINK_STACK == -1)
    SET_TARGET_LINK_STACK (rs6000_tune == PROCESSOR_PPC476 && flag_pic);

  /* Deprecate use of -mno-speculate-indirect-jumps.  */
  if (!rs6000_speculate_indirect_jumps)
    warning (0, "%qs is deprecated and not recommended in any circumstances",
             "-mno-speculate-indirect-jumps");

  return ret;
}
/* Implement TARGET_OPTION_OVERRIDE.  On the RS/6000 this is used to
   define the target cpu type.  */

static void
rs6000_option_override (void)
{
  (void) rs6000_option_override_internal (true);
}
/* Implement targetm.vectorize.builtin_mask_for_load.  */

static tree
rs6000_builtin_mask_for_load (void)
{
  /* Don't use lvsl/vperm for P8 and similarly efficient machines.  */
  if ((TARGET_ALTIVEC && !TARGET_VSX)
      || (TARGET_VSX && !TARGET_EFFICIENT_UNALIGNED_VSX))
    return altivec_builtin_mask_for_load;
  else
    return 0;
}
/* Implement LOOP_ALIGN.  */

int
rs6000_loop_align (rtx label)
{
  basic_block bb;
  int ninsns;

  /* Don't override loop alignment if -falign-loops was specified.  */
  if (!can_override_loop_align)
    return align_loops_log;

  bb = BLOCK_FOR_INSN (label);
  ninsns = num_loop_insns (bb->loop_father);

  /* Align small loops to 32 bytes to fit in an icache sector; otherwise
     return the default.  */
  if (ninsns > 4 && ninsns <= 8
      && (rs6000_tune == PROCESSOR_POWER4
          || rs6000_tune == PROCESSOR_POWER5
          || rs6000_tune == PROCESSOR_POWER6
          || rs6000_tune == PROCESSOR_POWER7
          || rs6000_tune == PROCESSOR_POWER8))
    return 5;
  else
    return align_loops_log;
}
/* Implement TARGET_LOOP_ALIGN_MAX_SKIP.  */

static int
rs6000_loop_align_max_skip (rtx_insn *label)
{
  return (1 << rs6000_loop_align (label)) - 1;
}
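
/* For instance, a loop alignment of 5 (a 32-byte boundary) allows the
   assembler to skip at most (1 << 5) - 1 == 31 bytes of padding.  */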
/* Return true iff the data reference of TYPE can reach vector alignment (16)
   after applying N number of iterations.  This routine does not determine
   how many iterations are required to reach desired alignment.  */

static bool
rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
{
  if (is_packed)
    return false;

  if (TARGET_32BIT)
    {
      if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
        return true;

      if (rs6000_alignment_flags == MASK_ALIGN_POWER)
        return true;

      return false;
    }
  else
    {
      if (TARGET_MACHO)
        return false;

      /* Assuming that all other types are naturally aligned.  CHECKME!  */
      return true;
    }
}
/* Return true if the vector misalignment factor is supported by the
   target.  */
static bool
rs6000_builtin_support_vector_misalignment (machine_mode mode,
                                            const_tree type,
                                            int misalignment,
                                            bool is_packed)
{
  if (TARGET_VSX)
    {
      if (TARGET_EFFICIENT_UNALIGNED_VSX)
        return true;

      /* Return if movmisalign pattern is not supported for this mode.  */
      if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
        return false;

      if (misalignment == -1)
        {
          /* Misalignment factor is unknown at compile time but we know
             it's word aligned.  */
          if (rs6000_vector_alignment_reachable (type, is_packed))
            {
              int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));

              if (element_size == 64 || element_size == 32)
                return true;
            }

          return false;
        }

      /* VSX supports word-aligned vector.  */
      if (misalignment % 4 == 0)
        return true;
    }
  return false;
}
/* Implement targetm.vectorize.builtin_vectorization_cost.  */
static int
rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
				   tree vectype, int misalign)
{
  unsigned elements;
  tree elem_type;

  switch (type_of_cost)
    {
      case scalar_stmt:
      case scalar_load:
      case scalar_store:
      case vector_stmt:
      case vector_load:
      case vector_store:
      case vec_to_scalar:
      case scalar_to_vec:
      case cond_branch_not_taken:
	return 1;

      case vec_perm:
	if (TARGET_VSX)
	  return 3;
	else
	  return 1;

      case vec_promote_demote:
	if (TARGET_VSX)
	  return 4;
	else
	  return 1;

      case cond_branch_taken:
	return 3;

      case unaligned_load:
      case vector_gather_load:
	if (TARGET_EFFICIENT_UNALIGNED_VSX)
	  return 1;

	if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
	  {
	    elements = TYPE_VECTOR_SUBPARTS (vectype);
	    if (elements == 2)
	      /* Double word aligned.  */
	      return 2;

	    if (elements == 4)
	      {
		switch (misalign)
		  {
		    case 8:
		      /* Double word aligned.  */
		      return 2;

		    case -1:
		      /* Unknown misalignment.  */
		    case 4:
		    case 12:
		      /* Word aligned.  */
		      return 22;

		    default:
		      gcc_unreachable ();
		  }
	      }
	  }

	if (TARGET_ALTIVEC)
	  /* Misaligned loads are not supported.  */
	  gcc_unreachable ();

	return 2;

      case unaligned_store:
      case vector_scatter_store:
	if (TARGET_EFFICIENT_UNALIGNED_VSX)
	  return 1;

	if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
	  {
	    elements = TYPE_VECTOR_SUBPARTS (vectype);
	    if (elements == 2)
	      /* Double word aligned.  */
	      return 2;

	    if (elements == 4)
	      {
		switch (misalign)
		  {
		    case 8:
		      /* Double word aligned.  */
		      return 2;

		    case -1:
		      /* Unknown misalignment.  */
		    case 4:
		    case 12:
		      /* Word aligned.  */
		      return 23;

		    default:
		      gcc_unreachable ();
		  }
	      }
	  }

	if (TARGET_ALTIVEC)
	  /* Misaligned stores are not supported.  */
	  gcc_unreachable ();

	return 2;

      case vec_construct:
	/* This is a rough approximation assuming non-constant elements
	   constructed into a vector via element insertion.  FIXME:
	   vec_construct is not granular enough for uniformly good
	   decisions.  If the initialization is a splat, this is
	   cheaper than we estimate.  Improve this someday.  */
	elem_type = TREE_TYPE (vectype);
	/* 32-bit vectors loaded into registers are stored as double
	   precision, so we need 2 permutes, 2 converts, and 1 merge
	   to construct a vector of short floats from them.  */
	if (SCALAR_FLOAT_TYPE_P (elem_type)
	    && TYPE_PRECISION (elem_type) == 32)
	  return 5;
	/* On POWER9, integer vector types are built up in GPRs and then
	   use a direct move (2 cycles).  For POWER8 this is even worse,
	   as we need two direct moves and a merge, and the direct moves
	   are five cycles.  */
	else if (INTEGRAL_TYPE_P (elem_type))
	  {
	    if (TARGET_P9_VECTOR)
	      return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 2;
	    else
	      return TYPE_VECTOR_SUBPARTS (vectype) - 1 + 5;
	  }
	else
	  /* V2DFmode doesn't need a direct move.  */
	  return 2;

      default:
	gcc_unreachable ();
    }
}
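/* Worked example (illustrative, using the costs assigned above): an
   unaligned_load of a V4SF vector with TARGET_VSX and TARGET_ALLOW_MOVMISALIGN
   has TYPE_VECTOR_SUBPARTS == 4; misalign == 8 (double word aligned) costs 2,
   while misalign == 4, 12, or -1 (unknown) costs 22.  With
   TARGET_EFFICIENT_UNALIGNED_VSX (e.g. POWER9) the same load costs 1.  */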
/* Implement targetm.vectorize.preferred_simd_mode.  */

static machine_mode
rs6000_preferred_simd_mode (scalar_mode mode)
{
  if (TARGET_VSX)
    switch (mode)
      {
      case E_DFmode:
	return V2DFmode;
      default:;
      }
  if (TARGET_ALTIVEC || TARGET_VSX)
    switch (mode)
      {
      case E_SFmode:
	return V4SFmode;
      case E_TImode:
	return V1TImode;
      case E_DImode:
	return V2DImode;
      case E_SImode:
	return V4SImode;
      case E_HImode:
	return V8HImode;
      case E_QImode:
	return V16QImode;
      default:;
      }
  return word_mode;
}
typedef struct _rs6000_cost_data
{
  struct loop *loop_info;
  unsigned cost[3];
} rs6000_cost_data;
/* Test for likely overcommitment of vector hardware resources.  If a
   loop iteration is relatively large, and too large a percentage of
   instructions in the loop are vectorized, the cost model may not
   adequately reflect delays from unavailable vector resources.
   Penalize the loop body cost for this case.  */

static void
rs6000_density_test (rs6000_cost_data *data)
{
  const int DENSITY_PCT_THRESHOLD = 85;
  const int DENSITY_SIZE_THRESHOLD = 70;
  const int DENSITY_PENALTY = 10;
  struct loop *loop = data->loop_info;
  basic_block *bbs = get_loop_body (loop);
  int nbbs = loop->num_nodes;
  int vec_cost = data->cost[vect_body], not_vec_cost = 0;
  int i, density_pct;

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

	  if (!STMT_VINFO_RELEVANT_P (stmt_info)
	      && !STMT_VINFO_IN_PATTERN_P (stmt_info))
	    not_vec_cost++;
	}
    }

  free (bbs);
  density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);

  if (density_pct > DENSITY_PCT_THRESHOLD
      && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
    {
      data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
      if (dump_enabled_p ())
	dump_printf_loc (MSG_NOTE, vect_location,
			 "density %d%%, cost %d exceeds threshold, penalizing "
			 "loop body cost by %d%%", density_pct,
			 vec_cost + not_vec_cost, DENSITY_PENALTY);
    }
}
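/* Worked example (illustrative): with vec_cost == 80 and not_vec_cost == 10,
   density_pct == (80 * 100) / 90 == 88, which exceeds the 85% threshold, and
   the loop size 90 exceeds the size threshold of 70, so the body cost is
   penalized to 80 * (100 + 10) / 100 == 88.  */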
/* Implement targetm.vectorize.init_cost.  */

/* For each vectorized loop, this var holds TRUE iff a non-memory vector
   instruction is needed by the vectorization.  */
static bool rs6000_vect_nonmem;

static void *
rs6000_init_cost (struct loop *loop_info)
{
  rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
  data->loop_info = loop_info;
  data->cost[vect_prologue] = 0;
  data->cost[vect_body]     = 0;
  data->cost[vect_epilogue] = 0;
  rs6000_vect_nonmem = false;
  return data;
}
/* Implement targetm.vectorize.add_stmt_cost.  */

static unsigned
rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
		      struct _stmt_vec_info *stmt_info, int misalign,
		      enum vect_cost_model_location where)
{
  rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
  unsigned retval = 0;

  if (flag_vect_cost_model)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
							 misalign);
      /* Statements in an inner loop relative to the loop being
	 vectorized are weighted more heavily.  The value here is
	 arbitrary and could potentially be improved with analysis.  */
      if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
	count *= 50;  /* FIXME.  */

      retval = (unsigned) (count * stmt_cost);
      cost_data->cost[where] += retval;

      /* Check whether we're doing something other than just a copy loop.
	 Not all such loops may be profitably vectorized; see
	 rs6000_finish_cost.  */
      if ((kind == vec_to_scalar || kind == vec_perm
	   || kind == vec_promote_demote || kind == vec_construct
	   || kind == scalar_to_vec)
	  || (where == vect_body && kind == vector_stmt))
	rs6000_vect_nonmem = true;
    }

  return retval;
}
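/* Illustrative example: a vector_stmt of cost 1 that sits in an inner loop
   relative to the loop being vectorized, with where == vect_body, has its
   count scaled by 50, so retval == 50 is accumulated into cost[vect_body],
   and rs6000_vect_nonmem is set because a non-memory vector op was seen.  */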
/* Implement targetm.vectorize.finish_cost.  */

static void
rs6000_finish_cost (void *data, unsigned *prologue_cost,
		    unsigned *body_cost, unsigned *epilogue_cost)
{
  rs6000_cost_data *cost_data = (rs6000_cost_data*) data;

  if (cost_data->loop_info)
    rs6000_density_test (cost_data);

  /* Don't vectorize minimum-vectorization-factor, simple copy loops
     that require versioning for any reason.  The vectorization is at
     best a wash inside the loop, and the versioning checks make
     profitability highly unlikely and potentially quite harmful.  */
  if (cost_data->loop_info)
    {
      loop_vec_info vec_info = loop_vec_info_for_loop (cost_data->loop_info);
      if (!rs6000_vect_nonmem
	  && LOOP_VINFO_VECT_FACTOR (vec_info) == 2
	  && LOOP_REQUIRES_VERSIONING (vec_info))
	cost_data->cost[vect_body] += 10000;
    }

  *prologue_cost = cost_data->cost[vect_prologue];
  *body_cost     = cost_data->cost[vect_body];
  *epilogue_cost = cost_data->cost[vect_epilogue];
}
/* Implement targetm.vectorize.destroy_cost_data.  */

static void
rs6000_destroy_cost_data (void *data)
{
  free (data);
}
/* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
   library with vectorized intrinsics.  */

static tree
rs6000_builtin_vectorized_libmass (combined_fn fn, tree type_out,
				   tree type_in)
{
  char name[32];
  const char *suffix = NULL;
  tree fntype, new_fndecl, bdecl = NULL_TREE;
  int n_args = 1;
  const char *bname;
  machine_mode el_mode, in_mode;
  int n, in_n;

  /* Libmass is suitable for unsafe math only as it does not correctly support
     parts of IEEE with the required precision such as denormals.  Only support
     it if we have VSX to use the simd d2 or f4 functions.
     XXX: Add variable length support.  */
  if (!flag_unsafe_math_optimizations || !TARGET_VSX)
    return NULL_TREE;

  el_mode = TYPE_MODE (TREE_TYPE (type_out));
  n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);
  if (el_mode != in_mode
      || n != in_n)
    return NULL_TREE;

  switch (fn)
    {
    CASE_CFN_ATAN2:
    CASE_CFN_HYPOT:
    CASE_CFN_POW:
      n_args = 2;
      gcc_fallthrough ();

    CASE_CFN_ACOS:
    CASE_CFN_ACOSH:
    CASE_CFN_ASIN:
    CASE_CFN_ASINH:
    CASE_CFN_ATAN:
    CASE_CFN_ATANH:
    CASE_CFN_CBRT:
    CASE_CFN_COS:
    CASE_CFN_COSH:
    CASE_CFN_ERF:
    CASE_CFN_ERFC:
    CASE_CFN_EXP2:
    CASE_CFN_EXP:
    CASE_CFN_EXPM1:
    CASE_CFN_LGAMMA:
    CASE_CFN_LOG10:
    CASE_CFN_LOG1P:
    CASE_CFN_LOG2:
    CASE_CFN_LOG:
    CASE_CFN_SIN:
    CASE_CFN_SINH:
    CASE_CFN_SQRT:
    CASE_CFN_TAN:
    CASE_CFN_TANH:
      if (el_mode == DFmode && n == 2)
	{
	  bdecl = mathfn_built_in (double_type_node, fn);
	  suffix = "d2";			/* pow -> powd2 */
	}
      else if (el_mode == SFmode && n == 4)
	{
	  bdecl = mathfn_built_in (float_type_node, fn);
	  suffix = "4";				/* powf -> powf4 */
	}
      else
	return NULL_TREE;
      if (!bdecl)
	return NULL_TREE;
      break;

    default:
      return NULL_TREE;
    }

  gcc_assert (suffix != NULL);
  bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
  if (!bname)
    return NULL_TREE;

  strcpy (name, bname + sizeof ("__builtin_") - 1);
  strcat (name, suffix);

  if (n_args == 1)
    fntype = build_function_type_list (type_out, type_in, NULL);
  else if (n_args == 2)
    fntype = build_function_type_list (type_out, type_in, type_in, NULL);
  else
    gcc_unreachable ();

  /* Build a function declaration for the vectorized function.  */
  new_fndecl = build_decl (BUILTINS_LOCATION,
			   FUNCTION_DECL, get_identifier (name), fntype);
  TREE_PUBLIC (new_fndecl) = 1;
  DECL_EXTERNAL (new_fndecl) = 1;
  DECL_IS_NOVOPS (new_fndecl) = 1;
  TREE_READONLY (new_fndecl) = 1;

  return new_fndecl;
}
/* Returns a function decl for a vectorized version of the builtin function
   with builtin function code FN and the result vector type TYPE, or NULL_TREE
   if it is not available.  */

static tree
rs6000_builtin_vectorized_function (unsigned int fn, tree type_out,
				    tree type_in)
{
  machine_mode in_mode, out_mode;
  int in_n, out_n;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
	     combined_fn_name (combined_fn (fn)),
	     GET_MODE_NAME (TYPE_MODE (type_out)),
	     GET_MODE_NAME (TYPE_MODE (type_in)));

  if (TREE_CODE (type_out) != VECTOR_TYPE
      || TREE_CODE (type_in) != VECTOR_TYPE)
    return NULL_TREE;

  out_mode = TYPE_MODE (TREE_TYPE (type_out));
  out_n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);

  switch (fn)
    {
    CASE_CFN_COPYSIGN:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
	  && out_mode == DFmode && out_n == 2
	  && in_mode == DFmode && in_n == 2)
	return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
      if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
      break;
    CASE_CFN_CEIL:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
	  && out_mode == DFmode && out_n == 2
	  && in_mode == DFmode && in_n == 2)
	return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
      if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
      break;
    CASE_CFN_FLOOR:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
	  && out_mode == DFmode && out_n == 2
	  && in_mode == DFmode && in_n == 2)
	return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
      if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
      break;
    CASE_CFN_FMA:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
	  && out_mode == DFmode && out_n == 2
	  && in_mode == DFmode && in_n == 2)
	return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
      if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
      break;
    CASE_CFN_TRUNC:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
	  && out_mode == DFmode && out_n == 2
	  && in_mode == DFmode && in_n == 2)
	return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
      if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
      break;
    CASE_CFN_NEARBYINT:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
	  && flag_unsafe_math_optimizations
	  && out_mode == DFmode && out_n == 2
	  && in_mode == DFmode && in_n == 2)
	return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
	  && flag_unsafe_math_optimizations
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
      break;
    CASE_CFN_RINT:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
	  && !flag_trapping_math
	  && out_mode == DFmode && out_n == 2
	  && in_mode == DFmode && in_n == 2)
	return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
      if (VECTOR_UNIT_VSX_P (V4SFmode)
	  && !flag_trapping_math
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
      break;
    default:
      break;
    }

  /* Generate calls to libmass if appropriate.  */
  if (rs6000_veclib_handler)
    return rs6000_veclib_handler (combined_fn (fn), type_out, type_in);

  return NULL_TREE;
}
/* Implement TARGET_VECTORIZE_BUILTIN_MD_VECTORIZED_FUNCTION.  */

static tree
rs6000_builtin_md_vectorized_function (tree fndecl, tree type_out,
				       tree type_in)
{
  machine_mode in_mode, out_mode;
  int in_n, out_n;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin_md_vectorized_function (%s, %s, %s)\n",
	     IDENTIFIER_POINTER (DECL_NAME (fndecl)),
	     GET_MODE_NAME (TYPE_MODE (type_out)),
	     GET_MODE_NAME (TYPE_MODE (type_in)));

  if (TREE_CODE (type_out) != VECTOR_TYPE
      || TREE_CODE (type_in) != VECTOR_TYPE)
    return NULL_TREE;

  out_mode = TYPE_MODE (TREE_TYPE (type_out));
  out_n = TYPE_VECTOR_SUBPARTS (type_out);
  in_mode = TYPE_MODE (TREE_TYPE (type_in));
  in_n = TYPE_VECTOR_SUBPARTS (type_in);

  enum rs6000_builtins fn
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  switch (fn)
    {
    case RS6000_BUILTIN_RSQRTF:
      if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
      break;
    case RS6000_BUILTIN_RSQRT:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
	  && out_mode == DFmode && out_n == 2
	  && in_mode == DFmode && in_n == 2)
	return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
      break;
    case RS6000_BUILTIN_RECIPF:
      if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
	  && out_mode == SFmode && out_n == 4
	  && in_mode == SFmode && in_n == 4)
	return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
      break;
    case RS6000_BUILTIN_RECIP:
      if (VECTOR_UNIT_VSX_P (V2DFmode)
	  && out_mode == DFmode && out_n == 2
	  && in_mode == DFmode && in_n == 2)
	return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
      break;
    default:
      break;
    }
  return NULL_TREE;
}
/* Default CPU string for rs6000*_file_start functions.  */
static const char *rs6000_default_cpu;

/* Do anything needed at the start of the asm file.  */

static void
rs6000_file_start (void)
{
  char buffer[80];
  const char *start = buffer;
  FILE *file = asm_out_file;

  rs6000_default_cpu = TARGET_CPU_DEFAULT;

  default_file_start ();

  if (flag_verbose_asm)
    {
      sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);

      if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
	{
	  fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
	  start = "";
	}

      if (global_options_set.x_rs6000_cpu_index)
	{
	  fprintf (file, "%s -mcpu=%s", start,
		   processor_target_table[rs6000_cpu_index].name);
	  start = "";
	}

      if (global_options_set.x_rs6000_tune_index)
	{
	  fprintf (file, "%s -mtune=%s", start,
		   processor_target_table[rs6000_tune_index].name);
	  start = "";
	}

      if (PPC405_ERRATUM77)
	{
	  fprintf (file, "%s PPC405CR_ERRATUM77", start);
	  start = "";
	}

#ifdef USING_ELFOS_H
      switch (rs6000_sdata)
	{
	case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
	case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
	case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
	case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
	}

      if (rs6000_sdata && g_switch_value)
	{
	  fprintf (file, "%s -G %d", start,
		   g_switch_value);
	  start = "";
	}
#endif

      if (*start == '\0')
	putc ('\n', file);
    }

#ifdef USING_ELFOS_H
  if (!(rs6000_default_cpu && rs6000_default_cpu[0])
      && !global_options_set.x_rs6000_cpu_index)
    {
      fputs ("\t.machine ", asm_out_file);
      if ((rs6000_isa_flags & OPTION_MASK_MODULO) != 0)
	fputs ("power9\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_DIRECT_MOVE) != 0)
	fputs ("power8\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_POPCNTD) != 0)
	fputs ("power7\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_CMPB) != 0)
	fputs ("power6\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_POPCNTB) != 0)
	fputs ("power5\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_MFCRF) != 0)
	fputs ("power4\n", asm_out_file);
      else if ((rs6000_isa_flags & OPTION_MASK_POWERPC64) != 0)
	fputs ("ppc64\n", asm_out_file);
      else
	fputs ("ppc\n", asm_out_file);
    }
#endif

  if (DEFAULT_ABI == ABI_ELFv2)
    fprintf (file, "\t.abiversion 2\n");
}
/* Return nonzero if this function is known to have a null epilogue.  */

int
direct_return (void)
{
  if (reload_completed)
    {
      rs6000_stack_t *info = rs6000_stack_info ();

      if (info->first_gp_reg_save == 32
	  && info->first_fp_reg_save == 64
	  && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
	  && ! info->lr_save_p
	  && ! info->cr_save_p
	  && info->vrsave_size == 0
	  && ! info->push_p)
	return 1;
    }

  return 0;
}
/* Return the number of instructions it takes to form a constant in an
   integer register.  */

int
num_insns_constant_wide (HOST_WIDE_INT value)
{
  /* signed constant loadable with addi */
  if (((unsigned HOST_WIDE_INT) value + 0x8000) < 0x10000)
    return 1;

  /* constant loadable with addis */
  else if ((value & 0xffff) == 0
	   && (value >> 31 == -1 || value >> 31 == 0))
    return 1;

  else if (TARGET_POWERPC64)
    {
      HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
      HOST_WIDE_INT high = value >> 31;

      if (high == 0 || high == -1)
	return 2;

      high >>= 1;

      if (low == 0)
	return num_insns_constant_wide (high) + 1;
      else if (high == 0)
	return num_insns_constant_wide (low) + 1;
      else
	return (num_insns_constant_wide (high)
		+ num_insns_constant_wide (low) + 1);
    }

  else
    return 2;
}
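/* Worked examples (illustrative): 0x7fff fits the addi test and needs 1 insn;
   0x12340000 has zero low bits and fits addis, also 1 insn; 0x12345678 needs
   addis+ori, 2 insns; on 64-bit, 0x1234567800000000 splits into low == 0 and
   a 32-bit high part, giving num_insns_constant_wide (high) + 1 == 3 insns
   (addis, ori, then a shift).  */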
int
num_insns_constant (rtx op, machine_mode mode)
{
  HOST_WIDE_INT low, high;

  switch (GET_CODE (op))
    {
    case CONST_INT:
      if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
	  && rs6000_is_valid_and_mask (op, mode))
	return 2;
      else
	return num_insns_constant_wide (INTVAL (op));

    case CONST_WIDE_INT:
      {
	int i;
	int ins = CONST_WIDE_INT_NUNITS (op) - 1;
	for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
	  ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
	return ins;
      }

    case CONST_DOUBLE:
      if (mode == SFmode || mode == SDmode)
	{
	  long l;

	  if (DECIMAL_FLOAT_MODE_P (mode))
	    REAL_VALUE_TO_TARGET_DECIMAL32
	      (*CONST_DOUBLE_REAL_VALUE (op), l);
	  else
	    REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (op), l);
	  return num_insns_constant_wide ((HOST_WIDE_INT) l);
	}

      long l[2];
      if (DECIMAL_FLOAT_MODE_P (mode))
	REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (op), l);
      else
	REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (op), l);
      high = l[WORDS_BIG_ENDIAN == 0];
      low  = l[WORDS_BIG_ENDIAN != 0];

      if (TARGET_32BIT)
	return (num_insns_constant_wide (low)
		+ num_insns_constant_wide (high));
      else
	{
	  if ((high == 0 && low >= 0)
	      || (high == -1 && low < 0))
	    return num_insns_constant_wide (low);

	  else if (rs6000_is_valid_and_mask (op, mode))
	    return 2;

	  else if (low == 0)
	    return num_insns_constant_wide (high) + 1;

	  else
	    return (num_insns_constant_wide (high)
		    + num_insns_constant_wide (low) + 1);
	}

    default:
      gcc_unreachable ();
    }
}
/* Interpret element ELT of the CONST_VECTOR OP as an integer value.
   If the mode of OP is MODE_VECTOR_INT, this simply returns the
   corresponding element of the vector, but for V4SFmode, the
   corresponding "float" is interpreted as an SImode integer.  */

HOST_WIDE_INT
const_vector_elt_as_int (rtx op, unsigned int elt)
{
  rtx tmp;

  /* We can't handle V2DImode and V2DFmode vector constants here yet.  */
  gcc_assert (GET_MODE (op) != V2DImode
	      && GET_MODE (op) != V2DFmode);

  tmp = CONST_VECTOR_ELT (op, elt);
  if (GET_MODE (op) == V4SFmode)
    tmp = gen_lowpart (SImode, tmp);
  return INTVAL (tmp);
}
/* Return true if OP can be synthesized with a particular vspltisb, vspltish
   or vspltisw instruction.  OP is a CONST_VECTOR.  Which instruction is used
   depends on STEP and COPIES, one of which will be 1.  If COPIES > 1,
   all items are set to the same value and contain COPIES replicas of the
   vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
   operand and the others are set to the value of the operand's msb.  */

static bool
vspltis_constant (rtx op, unsigned step, unsigned copies)
{
  machine_mode mode = GET_MODE (op);
  machine_mode inner = GET_MODE_INNER (mode);

  unsigned i;
  unsigned nunits;
  unsigned bitsize;
  unsigned mask;

  HOST_WIDE_INT val;
  HOST_WIDE_INT splat_val;
  HOST_WIDE_INT msb_val;

  if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
    return false;

  nunits = GET_MODE_NUNITS (mode);
  bitsize = GET_MODE_BITSIZE (inner);
  mask = GET_MODE_MASK (inner);

  val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
  splat_val = val;
  msb_val = val >= 0 ? 0 : -1;

  /* Construct the value to be splatted, if possible.  If not, return 0.  */
  for (i = 2; i <= copies; i *= 2)
    {
      HOST_WIDE_INT small_val;
      bitsize /= 2;
      small_val = splat_val >> bitsize;
      mask >>= bitsize;
      if (splat_val != ((HOST_WIDE_INT)
			((unsigned HOST_WIDE_INT) small_val << bitsize)
			| (small_val & mask)))
	return false;
      splat_val = small_val;
    }

  /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw].  */
  if (EASY_VECTOR_15 (splat_val))
    ;

  /* Also check if we can splat, and then add the result to itself.  Do so if
     the value is positive, or if the splat instruction is using OP's mode;
     for splat_val < 0, the splat and the add should use the same mode.  */
  else if (EASY_VECTOR_15_ADD_SELF (splat_val)
	   && (splat_val >= 0 || (step == 1 && copies == 1)))
    ;

  /* Also check if we are loading up the most significant bit which can be done
     by loading up -1 and shifting the value left by -1.  */
  else if (EASY_VECTOR_MSB (splat_val, inner))
    ;

  else
    return false;

  /* Check if VAL is present in every STEP-th element, and the
     other elements are filled with its most significant bit.  */
  for (i = 1; i < nunits; ++i)
    {
      HOST_WIDE_INT desired_val;
      unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
      if ((i & (step - 1)) == 0)
	desired_val = val;
      else
	desired_val = msb_val;

      if (desired_val != const_vector_elt_as_int (op, elt))
	return false;
    }

  return true;
}
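/* Worked example (illustrative): for the V4SImode constant
   { 0x50005, 0x50005, 0x50005, 0x50005 } with step == 1 and copies == 2,
   the loop above halves bitsize to 16 and verifies that 0x00050005 is two
   replicas of the small value 5; since EASY_VECTOR_15 (5) holds, the
   constant can be generated with "vspltish 5".  */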
/* Like vspltis_constant, but allow the value to be shifted left with a VSLDOI
   instruction, filling in the bottom elements with 0 or -1.

   Return 0 if the constant cannot be generated with VSLDOI.  Return positive
   for the number of zeroes to shift in, or negative for the number of 0xff
   bytes to shift in.

   OP is a CONST_VECTOR.  */

int
vspltis_shifted (rtx op)
{
  machine_mode mode = GET_MODE (op);
  machine_mode inner = GET_MODE_INNER (mode);

  unsigned i, j;
  unsigned nunits;
  unsigned mask;

  HOST_WIDE_INT val;

  if (mode != V16QImode && mode != V8HImode && mode != V4SImode)
    return false;

  /* We need to create pseudo registers to do the shift, so don't recognize
     shift vector constants after reload.  */
  if (!can_create_pseudo_p ())
    return false;

  nunits = GET_MODE_NUNITS (mode);
  mask = GET_MODE_MASK (inner);

  val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? 0 : nunits - 1);

  /* Check if the value can really be the operand of a vspltis[bhw].  */
  if (EASY_VECTOR_15 (val))
    ;

  /* Also check if we are loading up the most significant bit which can be done
     by loading up -1 and shifting the value left by -1.  */
  else if (EASY_VECTOR_MSB (val, inner))
    ;

  else
    return 0;

  /* Check if VAL is present in every STEP-th element until we find elements
     that are 0 or all 1 bits.  */
  for (i = 1; i < nunits; ++i)
    {
      unsigned elt = BYTES_BIG_ENDIAN ? i : nunits - 1 - i;
      HOST_WIDE_INT elt_val = const_vector_elt_as_int (op, elt);

      /* If the value isn't the splat value, check for the remaining elements
	 being 0/-1.  */
      if (val != elt_val)
	{
	  if (elt_val == 0)
	    {
	      for (j = i+1; j < nunits; ++j)
		{
		  unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
		  if (const_vector_elt_as_int (op, elt2) != 0)
		    return 0;
		}

	      return (nunits - i) * GET_MODE_SIZE (inner);
	    }

	  else if ((elt_val & mask) == mask)
	    {
	      for (j = i+1; j < nunits; ++j)
		{
		  unsigned elt2 = BYTES_BIG_ENDIAN ? j : nunits - 1 - j;
		  if ((const_vector_elt_as_int (op, elt2) & mask) != mask)
		    return 0;
		}

	      return -((nunits - i) * GET_MODE_SIZE (inner));
	    }

	  else
	    return 0;
	}
    }

  /* If all elements are equal, we don't need to do VSLDOI.  */
  return 0;
}
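/* Worked example (illustrative): the big-endian V16QImode constant
   { 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0 } splats the value 1 in
   the first 8 elements and zeroes thereafter, so the function returns
   (16 - 8) * 1 == 8: generate "vspltisb 1" and shift in 8 zero bytes with
   VSLDOI.  */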
/* Return true if OP is of the given MODE and can be synthesized
   with a vspltisb, vspltish or vspltisw.  */

bool
easy_altivec_constant (rtx op, machine_mode mode)
{
  unsigned step, copies;

  if (mode == VOIDmode)
    mode = GET_MODE (op);
  else if (mode != GET_MODE (op))
    return false;

  /* V2DI/V2DF was added with VSX.  Only allow 0 and all 1's as easy
     constants.  */
  if (mode == V2DFmode)
    return zero_constant (op, mode);

  else if (mode == V2DImode)
    {
      if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
	  || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
	return false;

      if (zero_constant (op, mode))
	return true;

      if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
	  && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
	return true;

      return false;
    }

  /* V1TImode is a special container for TImode.  Ignore for now.  */
  else if (mode == V1TImode)
    return false;

  /* Start with a vspltisw.  */
  step = GET_MODE_NUNITS (mode) / 4;
  copies = 1;

  if (vspltis_constant (op, step, copies))
    return true;

  /* Then try with a vspltish.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return true;

  /* And finally a vspltisb.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return true;

  if (vspltis_shifted (op) != 0)
    return true;

  return false;
}
/* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
   result is OP.  Abort if it is not possible.  */

rtx
gen_easy_altivec_constant (rtx op)
{
  machine_mode mode = GET_MODE (op);
  int nunits = GET_MODE_NUNITS (mode);
  rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
  unsigned step = nunits / 4;
  unsigned copies = 1;

  /* Start with a vspltisw.  */
  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));

  /* Then try with a vspltish.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));

  /* And finally a vspltisb.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));

  gcc_unreachable ();
}
/* Return true if OP is of the given MODE and can be synthesized with ISA 3.0
   instructions (xxspltib, vupkhsb/vextsb2w/vextsb2d).

   Return the number of instructions needed (1 or 2) into the address pointed
   to via NUM_INSNS_PTR.

   Return the constant that is being split via CONSTANT_PTR.  */

bool
xxspltib_constant_p (rtx op,
		     machine_mode mode,
		     int *num_insns_ptr,
		     int *constant_ptr)
{
  size_t nunits = GET_MODE_NUNITS (mode);
  size_t i;
  HOST_WIDE_INT value;
  rtx element;

  /* Set the returned values to out of bound values.  */
  *num_insns_ptr = -1;
  *constant_ptr = 256;

  if (!TARGET_P9_VECTOR)
    return false;

  if (mode == VOIDmode)
    mode = GET_MODE (op);

  else if (mode != GET_MODE (op) && GET_MODE (op) != VOIDmode)
    return false;

  /* Handle (vec_duplicate <constant>).  */
  if (GET_CODE (op) == VEC_DUPLICATE)
    {
      if (mode != V16QImode && mode != V8HImode && mode != V4SImode
	  && mode != V2DImode)
	return false;

      element = XEXP (op, 0);
      if (!CONST_INT_P (element))
	return false;

      value = INTVAL (element);
      if (!IN_RANGE (value, -128, 127))
	return false;
    }

  /* Handle (const_vector [...]).  */
  else if (GET_CODE (op) == CONST_VECTOR)
    {
      if (mode != V16QImode && mode != V8HImode && mode != V4SImode
	  && mode != V2DImode)
	return false;

      element = CONST_VECTOR_ELT (op, 0);
      if (!CONST_INT_P (element))
	return false;

      value = INTVAL (element);
      if (!IN_RANGE (value, -128, 127))
	return false;

      for (i = 1; i < nunits; i++)
	{
	  element = CONST_VECTOR_ELT (op, i);
	  if (!CONST_INT_P (element))
	    return false;

	  if (value != INTVAL (element))
	    return false;
	}
    }

  /* Handle integer constants being loaded into the upper part of the VSX
     register as a scalar.  If the value isn't 0/-1, only allow it if the mode
     can go in Altivec registers.  Prefer VSPLTISW/VUPKHSW over XXSPLTIB.  */
  else if (CONST_INT_P (op))
    {
      if (!SCALAR_INT_MODE_P (mode))
	return false;

      value = INTVAL (op);
      if (!IN_RANGE (value, -128, 127))
	return false;

      if (!IN_RANGE (value, -1, 0))
	{
	  if (!(reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID))
	    return false;

	  if (EASY_VECTOR_15 (value))
	    return false;
	}
    }

  else
    return false;

  /* See if we could generate vspltisw/vspltish directly instead of xxspltib +
     sign extend.  Special case 0/-1 to allow getting any VSX register instead
     of an Altivec register.  */
  if ((mode == V4SImode || mode == V8HImode) && !IN_RANGE (value, -1, 0)
      && EASY_VECTOR_15 (value))
    return false;

  /* Return # of instructions and the constant byte for XXSPLTIB.  */
  if (mode == V16QImode)
    *num_insns_ptr = 1;

  else if (IN_RANGE (value, -1, 0))
    *num_insns_ptr = 1;

  else
    *num_insns_ptr = 2;

  *constant_ptr = (int) value;
  return true;
}
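/* Illustrative examples: a V16QImode splat of 20 gives *num_insns_ptr == 1
   and *constant_ptr == 20 (a single "xxspltib").  A V8HImode splat of 100
   needs the sign-extend step as well, so *num_insns_ptr == 2.  A V8HImode
   splat of 5 returns false, since EASY_VECTOR_15 holds and a plain
   "vspltish" is preferred.  */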
const char *
output_vec_const_move (rtx *operands)
{
  int shift;
  machine_mode mode;
  rtx dest, vec;

  dest = operands[0];
  vec = operands[1];
  mode = GET_MODE (dest);

  if (TARGET_VSX)
    {
      bool dest_vmx_p = ALTIVEC_REGNO_P (REGNO (dest));
      int xxspltib_value = 256;
      int num_insns = -1;

      if (zero_constant (vec, mode))
	{
	  if (TARGET_P9_VECTOR)
	    return "xxspltib %x0,0";

	  else if (dest_vmx_p)
	    return "vspltisw %0,0";

	  else
	    return "xxlxor %x0,%x0,%x0";
	}

      if (all_ones_constant (vec, mode))
	{
	  if (TARGET_P9_VECTOR)
	    return "xxspltib %x0,255";

	  else if (dest_vmx_p)
	    return "vspltisw %0,-1";

	  else if (TARGET_P8_VECTOR)
	    return "xxlorc %x0,%x0,%x0";

	  else
	    gcc_unreachable ();
	}

      if (TARGET_P9_VECTOR
	  && xxspltib_constant_p (vec, mode, &num_insns, &xxspltib_value))
	{
	  if (num_insns == 1)
	    {
	      operands[2] = GEN_INT (xxspltib_value & 0xff);
	      return "xxspltib %x0,%2";
	    }

	  return "#";
	}
    }

  if (TARGET_ALTIVEC)
    {
      rtx splat_vec;

      gcc_assert (ALTIVEC_REGNO_P (REGNO (dest)));
      if (zero_constant (vec, mode))
	return "vspltisw %0,0";

      if (all_ones_constant (vec, mode))
	return "vspltisw %0,-1";

      /* Do we need to construct a value using VSLDOI?  */
      shift = vspltis_shifted (vec);
      if (shift != 0)
	return "#";

      splat_vec = gen_easy_altivec_constant (vec);
      gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
      operands[1] = XEXP (splat_vec, 0);
      if (!EASY_VECTOR_15 (INTVAL (operands[1])))
	return "#";

      switch (GET_MODE (splat_vec))
	{
	case E_V4SImode:
	  return "vspltisw %0,%1";

	case E_V8HImode:
	  return "vspltish %0,%1";

	case E_V16QImode:
	  return "vspltisb %0,%1";

	default:
	  gcc_unreachable ();
	}
    }

  gcc_unreachable ();
}
/* Initialize vector TARGET to VALS.  */

void
rs6000_expand_vector_init (rtx target, rtx vals)
{
  machine_mode mode = GET_MODE (target);
  machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0, one_var = -1;
  bool all_same = true, all_const_zero = true;
  rtx x, mem;
  int i;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
	++n_var, one_var = i;
      else if (x != CONST0_RTX (inner_mode))
	all_const_zero = false;

      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
	all_same = false;
    }

  if (n_var == 0)
    {
      rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
      bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
      if ((int_vector_p || TARGET_VSX) && all_const_zero)
	{
	  /* Zero register.  */
	  emit_move_insn (target, CONST0_RTX (mode));
	  return;
	}
      else if (int_vector_p && easy_vector_constant (const_vec, mode))
	{
	  /* Splat immediate.  */
	  emit_insn (gen_rtx_SET (target, const_vec));
	  return;
	}
      else
	{
	  /* Load from constant pool.  */
	  emit_move_insn (target, const_vec);
	  return;
	}
    }

  /* Double word values on VSX can use xxpermdi or lxvdsx.  */
  if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
    {
      rtx op[2];
      size_t i;
      size_t num_elements = all_same ? 1 : 2;
      for (i = 0; i < num_elements; i++)
	{
	  op[i] = XVECEXP (vals, 0, i);
	  /* Just in case there is a SUBREG with a smaller mode, do a
	     conversion.  */
	  if (GET_MODE (op[i]) != inner_mode)
	    {
	      rtx tmp = gen_reg_rtx (inner_mode);
	      convert_move (tmp, op[i], 0);
	      op[i] = tmp;
	    }
	  /* Allow load with splat double word.  */
	  else if (MEM_P (op[i]))
	    {
	      if (!all_same)
		op[i] = force_reg (inner_mode, op[i]);
	    }
	  else if (!REG_P (op[i]))
	    op[i] = force_reg (inner_mode, op[i]);
	}

      if (all_same)
	{
	  if (mode == V2DFmode)
	    emit_insn (gen_vsx_splat_v2df (target, op[0]));
	  else
	    emit_insn (gen_vsx_splat_v2di (target, op[0]));
	}
      else
	{
	  if (mode == V2DFmode)
	    emit_insn (gen_vsx_concat_v2df (target, op[0], op[1]));
	  else
	    emit_insn (gen_vsx_concat_v2di (target, op[0], op[1]));
	}
      return;
    }

  /* Special case initializing vector int if we are on 64-bit systems with
     direct move or we have the ISA 3.0 instructions.  */
  if (mode == V4SImode && VECTOR_MEM_VSX_P (V4SImode)
      && TARGET_DIRECT_MOVE_64BIT)
    {
      if (all_same)
	{
	  rtx element0 = XVECEXP (vals, 0, 0);
	  if (MEM_P (element0))
	    element0 = rs6000_address_for_fpconvert (element0);
	  else
	    element0 = force_reg (SImode, element0);

	  if (TARGET_P9_VECTOR)
	    emit_insn (gen_vsx_splat_v4si (target, element0));
	  else
	    {
	      rtx tmp = gen_reg_rtx (DImode);
	      emit_insn (gen_zero_extendsidi2 (tmp, element0));
	      emit_insn (gen_vsx_splat_v4si_di (target, tmp));
	    }
	  return;
	}
      else
	{
	  rtx elements[4];
	  size_t i;

	  for (i = 0; i < 4; i++)
	    {
	      elements[i] = XVECEXP (vals, 0, i);
	      if (!CONST_INT_P (elements[i]) && !REG_P (elements[i]))
		elements[i] = copy_to_mode_reg (SImode, elements[i]);
	    }

	  emit_insn (gen_vsx_init_v4si (target, elements[0], elements[1],
					elements[2], elements[3]));
	  return;
	}
    }

  /* With single precision floating point on VSX, know that internally single
     precision is actually represented as a double, and either make 2 V2DF
     vectors, and convert these vectors to single precision, or do one
     conversion, and splat the result to the other elements.  */
  if (mode == V4SFmode && VECTOR_MEM_VSX_P (V4SFmode))
    {
      if (all_same)
	{
	  rtx element0 = XVECEXP (vals, 0, 0);

	  if (TARGET_P9_VECTOR)
	    {
	      if (MEM_P (element0))
		element0 = rs6000_address_for_fpconvert (element0);

	      emit_insn (gen_vsx_splat_v4sf (target, element0));
	    }

	  else
	    {
	      rtx freg = gen_reg_rtx (V4SFmode);
	      rtx sreg = force_reg (SFmode, element0);
	      rtx cvt  = (TARGET_XSCVDPSPN
			  ? gen_vsx_xscvdpspn_scalar (freg, sreg)
			  : gen_vsx_xscvdpsp_scalar (freg, sreg));

	      emit_insn (cvt);
	      emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg,
						      const0_rtx));
	    }
	}
      else
	{
	  rtx dbl_even = gen_reg_rtx (V2DFmode);
	  rtx dbl_odd  = gen_reg_rtx (V2DFmode);
	  rtx flt_even = gen_reg_rtx (V4SFmode);
	  rtx flt_odd  = gen_reg_rtx (V4SFmode);
	  rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
	  rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
	  rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
	  rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));

	  /* Use VMRGEW if we can instead of doing a permute.  */
	  if (TARGET_P8_VECTOR)
	    {
	      emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op2));
	      emit_insn (gen_vsx_concat_v2sf (dbl_odd, op1, op3));
	      emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
	      emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
	      if (BYTES_BIG_ENDIAN)
		emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_even, flt_odd));
	      else
		emit_insn (gen_p8_vmrgew_v4sf_direct (target, flt_odd, flt_even));
	    }
	  else
	    {
	      emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
	      emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
	      emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
	      emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
	      rs6000_expand_extract_even (target, flt_even, flt_odd);
	    }
	}
      return;
    }

  /* Special case initializing vector short/char that are splats if we are on
     64-bit systems with direct move.  */
  if (all_same && TARGET_DIRECT_MOVE_64BIT
      && (mode == V16QImode || mode == V8HImode))
    {
      rtx op0 = XVECEXP (vals, 0, 0);
      rtx di_tmp = gen_reg_rtx (DImode);

      if (!REG_P (op0))
	op0 = force_reg (GET_MODE_INNER (mode), op0);

      if (mode == V16QImode)
	{
	  emit_insn (gen_zero_extendqidi2 (di_tmp, op0));
	  emit_insn (gen_vsx_vspltb_di (target, di_tmp));
	  return;
	}

      if (mode == V8HImode)
	{
	  emit_insn (gen_zero_extendhidi2 (di_tmp, op0));
	  emit_insn (gen_vsx_vsplth_di (target, di_tmp));
	  return;
	}
    }

  /* Store value to stack temp.  Load vector element.  Splat.  However, splat
     of 64-bit items is not supported on Altivec.  */
  if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
    {
      mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
      emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
		      XVECEXP (vals, 0, 0));
      x = gen_rtx_UNSPEC (VOIDmode,
			  gen_rtvec (1, const0_rtx), UNSPEC_LVE);
      emit_insn (gen_rtx_PARALLEL (VOIDmode,
				   gen_rtvec (2,
					      gen_rtx_SET (target, mem),
					      x)));
      x = gen_rtx_VEC_SELECT (inner_mode, target,
			      gen_rtx_PARALLEL (VOIDmode,
						gen_rtvec (1, const0_rtx)));
      emit_insn (gen_rtx_SET (target, gen_rtx_VEC_DUPLICATE (mode, x)));
      return;
    }

  /* One field is non-constant.  Load constant then overwrite
     varying field.  */
  if (n_var == 1)
    {
      rtx copy = copy_rtx (vals);

      /* Load constant part of vector, substitute neighboring value for
	 varying element.  */
      XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
      rs6000_expand_vector_init (target, copy);

      /* Insert variable.  */
      rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
      return;
    }

  /* Construct the vector in memory one field at a time
     and load the whole vector.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  for (i = 0; i < n_elts; i++)
    emit_move_insn (adjust_address_nv (mem, inner_mode,
				       i * GET_MODE_SIZE (inner_mode)),
		    XVECEXP (vals, 0, i));
  emit_move_insn (target, mem);
}
/* Set field ELT of TARGET to VAL.  */

void
rs6000_expand_vector_set (rtx target, rtx val, int elt)
{
  machine_mode mode = GET_MODE (target);
  machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx reg = gen_reg_rtx (mode);
  rtx mask, mem, x;
  int width = GET_MODE_SIZE (inner_mode);
  int i;

  val = force_reg (GET_MODE (val), val);

  if (VECTOR_MEM_VSX_P (mode))
    {
      rtx insn = NULL_RTX;
      rtx elt_rtx = GEN_INT (elt);

      if (mode == V2DFmode)
	insn = gen_vsx_set_v2df (target, target, val, elt_rtx);

      else if (mode == V2DImode)
	insn = gen_vsx_set_v2di (target, target, val, elt_rtx);

      else if (TARGET_P9_VECTOR && TARGET_POWERPC64)
	{
	  if (mode == V4SImode)
	    insn = gen_vsx_set_v4si_p9 (target, target, val, elt_rtx);
	  else if (mode == V8HImode)
	    insn = gen_vsx_set_v8hi_p9 (target, target, val, elt_rtx);
	  else if (mode == V16QImode)
	    insn = gen_vsx_set_v16qi_p9 (target, target, val, elt_rtx);
	  else if (mode == V4SFmode)
	    insn = gen_vsx_set_v4sf_p9 (target, target, val, elt_rtx);
	}

      if (insn)
	{
	  emit_insn (insn);
	  return;
	}
    }

  /* Simplify setting single element vectors like V1TImode.  */
  if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
    {
      emit_move_insn (target, gen_lowpart (mode, val));
      return;
    }

  /* Load single variable value.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
  emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
  x = gen_rtx_UNSPEC (VOIDmode,
		      gen_rtvec (1, const0_rtx), UNSPEC_LVE);
  emit_insn (gen_rtx_PARALLEL (VOIDmode,
			       gen_rtvec (2,
					  gen_rtx_SET (reg, mem),
					  x)));

  /* Linear sequence.  */
  mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
  for (i = 0; i < 16; ++i)
    XVECEXP (mask, 0, i) = GEN_INT (i);

  /* Set permute mask to insert element into target.  */
  for (i = 0; i < width; ++i)
    XVECEXP (mask, 0, elt*width + i)
      = GEN_INT (i + 0x10);
  x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));

  if (BYTES_BIG_ENDIAN)
    x = gen_rtx_UNSPEC (mode,
			gen_rtvec (3, target, reg,
				   force_reg (V16QImode, x)),
			UNSPEC_VPERM);
  else
    {
      if (TARGET_P9_VECTOR)
	x = gen_rtx_UNSPEC (mode,
			    gen_rtvec (3, reg, target,
				       force_reg (V16QImode, x)),
			    UNSPEC_VPERMR);
      else
	{
	  /* Invert selector.  We prefer to generate VNAND on P8 so
	     that future fusion opportunities can kick in, but must
	     generate VNOR elsewhere.  */
	  rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
	  rtx iorx = (TARGET_P8_VECTOR
		      ? gen_rtx_IOR (V16QImode, notx, notx)
		      : gen_rtx_AND (V16QImode, notx, notx));
	  rtx tmp = gen_reg_rtx (V16QImode);
	  emit_insn (gen_rtx_SET (tmp, iorx));

	  /* Permute with operands reversed and adjusted selector.  */
	  x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
			      UNSPEC_VPERM);
	}
    }

  emit_insn (gen_rtx_SET (target, x));
}
/* Extract field ELT from VEC into TARGET.  */

void
rs6000_expand_vector_extract (rtx target, rtx vec, rtx elt)
{
  machine_mode mode = GET_MODE (vec);
  machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx mem;

  if (VECTOR_MEM_VSX_P (mode) && CONST_INT_P (elt))
    {
      switch (mode)
	{
	default:
	  break;
	case E_V1TImode:
	  gcc_assert (INTVAL (elt) == 0 && inner_mode == TImode);
	  emit_move_insn (target, gen_lowpart (TImode, vec));
	  return;
	case E_V2DFmode:
	  emit_insn (gen_vsx_extract_v2df (target, vec, elt));
	  return;
	case E_V2DImode:
	  emit_insn (gen_vsx_extract_v2di (target, vec, elt));
	  return;
	case E_V4SFmode:
	  emit_insn (gen_vsx_extract_v4sf (target, vec, elt));
	  return;
	case E_V16QImode:
	  if (TARGET_DIRECT_MOVE_64BIT)
	    {
	      emit_insn (gen_vsx_extract_v16qi (target, vec, elt));
	      return;
	    }
	  else
	    break;
	case E_V8HImode:
	  if (TARGET_DIRECT_MOVE_64BIT)
	    {
	      emit_insn (gen_vsx_extract_v8hi (target, vec, elt));
	      return;
	    }
	  else
	    break;
	case E_V4SImode:
	  if (TARGET_DIRECT_MOVE_64BIT)
	    {
	      emit_insn (gen_vsx_extract_v4si (target, vec, elt));
	      return;
	    }
	  break;
	}
    }
  else if (VECTOR_MEM_VSX_P (mode) && !CONST_INT_P (elt)
	   && TARGET_DIRECT_MOVE_64BIT)
    {
      if (GET_MODE (elt) != DImode)
	{
	  rtx tmp = gen_reg_rtx (DImode);
	  convert_move (tmp, elt, 0);
	  elt = tmp;
	}
      else if (!REG_P (elt))
	elt = force_reg (DImode, elt);

      switch (mode)
	{
	case E_V2DFmode:
	  emit_insn (gen_vsx_extract_v2df_var (target, vec, elt));
	  return;

	case E_V2DImode:
	  emit_insn (gen_vsx_extract_v2di_var (target, vec, elt));
	  return;

	case E_V4SFmode:
	  emit_insn (gen_vsx_extract_v4sf_var (target, vec, elt));
	  return;

	case E_V4SImode:
	  emit_insn (gen_vsx_extract_v4si_var (target, vec, elt));
	  return;

	case E_V8HImode:
	  emit_insn (gen_vsx_extract_v8hi_var (target, vec, elt));
	  return;

	case E_V16QImode:
	  emit_insn (gen_vsx_extract_v16qi_var (target, vec, elt));
	  return;

	default:
	  gcc_unreachable ();
	}
    }

  gcc_assert (CONST_INT_P (elt));

  /* Allocate mode-sized buffer.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));

  emit_move_insn (mem, vec);

  /* Add offset to field within buffer matching vector element.  */
  mem = adjust_address_nv (mem, inner_mode,
			   INTVAL (elt) * GET_MODE_SIZE (inner_mode));

  emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
}
/* Helper function to return the register number of a RTX.  */

static inline int
regno_or_subregno (rtx op)
{
  if (REG_P (op))
    return REGNO (op);
  else if (SUBREG_P (op))
    return subreg_regno (op);
  else
    gcc_unreachable ();
}
/* Adjust a memory address (MEM) of a vector type to point to a scalar field
   within the vector (ELEMENT) with a mode (SCALAR_MODE).  Use a base register
   temporary (BASE_TMP) to fixup the address.  Return the new memory address
   that is valid for reads or writes to a given register (SCALAR_REG).  */

static rtx
rs6000_adjust_vec_address (rtx scalar_reg,
			   rtx mem,
			   rtx element,
			   rtx base_tmp,
			   machine_mode scalar_mode)
{
  unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
  rtx addr = XEXP (mem, 0);
  rtx element_offset;
  rtx new_addr;
  bool valid_addr_p;

  /* Vector addresses should not have PRE_INC, PRE_DEC, or PRE_MODIFY.  */
  gcc_assert (GET_RTX_CLASS (GET_CODE (addr)) != RTX_AUTOINC);

  /* Calculate what we need to add to the address to get the element
     address.  */
  if (CONST_INT_P (element))
    element_offset = GEN_INT (INTVAL (element) * scalar_size);
  else
    {
      int byte_shift = exact_log2 (scalar_size);
      gcc_assert (byte_shift >= 0);

      if (byte_shift == 0)
	element_offset = element;

      else
	{
	  if (TARGET_POWERPC64)
	    emit_insn (gen_ashldi3 (base_tmp, element, GEN_INT (byte_shift)));
	  else
	    emit_insn (gen_ashlsi3 (base_tmp, element, GEN_INT (byte_shift)));

	  element_offset = base_tmp;
	}
    }

  /* Create the new address pointing to the element within the vector.  If we
     are adding 0, we don't have to change the address.  */
  if (element_offset == const0_rtx)
    new_addr = addr;

  /* A simple indirect address can be converted into a reg + offset
     address.  */
  else if (REG_P (addr) || SUBREG_P (addr))
    new_addr = gen_rtx_PLUS (Pmode, addr, element_offset);

  /* Optimize D-FORM addresses with constant offset with a constant element, to
     include the element offset in the address directly.  */
  else if (GET_CODE (addr) == PLUS)
    {
      rtx op0 = XEXP (addr, 0);
      rtx op1 = XEXP (addr, 1);
      rtx insn;

      gcc_assert (REG_P (op0) || SUBREG_P (op0));
      if (CONST_INT_P (op1) && CONST_INT_P (element_offset))
	{
	  HOST_WIDE_INT offset = INTVAL (op1) + INTVAL (element_offset);
	  rtx offset_rtx = GEN_INT (offset);

	  if (IN_RANGE (offset, -32768, 32767)
	      && (scalar_size < 8 || (offset & 0x3) == 0))
	    new_addr = gen_rtx_PLUS (Pmode, op0, offset_rtx);
	  else
	    {
	      emit_move_insn (base_tmp, offset_rtx);
	      new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
	    }
	}
      else
	{
	  bool op1_reg_p = (REG_P (op1) || SUBREG_P (op1));
	  bool ele_reg_p = (REG_P (element_offset) || SUBREG_P (element_offset));

	  /* Note, ADDI requires the register being added to be a base
	     register.  If the register was R0, load it up into the temporary
	     and do the add.  */
	  if (op1_reg_p
	      && (ele_reg_p || reg_or_subregno (op1) != FIRST_GPR_REGNO))
	    {
	      insn = gen_add3_insn (base_tmp, op1, element_offset);
	      gcc_assert (insn != NULL_RTX);
	      emit_insn (insn);
	    }

	  else if (ele_reg_p
		   && reg_or_subregno (element_offset) != FIRST_GPR_REGNO)
	    {
	      insn = gen_add3_insn (base_tmp, element_offset, op1);
	      gcc_assert (insn != NULL_RTX);
	      emit_insn (insn);
	    }

	  else
	    {
	      emit_move_insn (base_tmp, op1);
	      emit_insn (gen_add2_insn (base_tmp, element_offset));
	    }

	  new_addr = gen_rtx_PLUS (Pmode, op0, base_tmp);
	}
    }

  else
    {
      emit_move_insn (base_tmp, addr);
      new_addr = gen_rtx_PLUS (Pmode, base_tmp, element_offset);
    }

  /* If we have a PLUS, we need to see whether the particular register class
     allows for D-FORM or X-FORM addressing.  */
  if (GET_CODE (new_addr) == PLUS)
    {
      rtx op1 = XEXP (new_addr, 1);
      addr_mask_type addr_mask;
      int scalar_regno = regno_or_subregno (scalar_reg);

      gcc_assert (scalar_regno < FIRST_PSEUDO_REGISTER);
      if (INT_REGNO_P (scalar_regno))
	addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_GPR];

      else if (FP_REGNO_P (scalar_regno))
	addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_FPR];

      else if (ALTIVEC_REGNO_P (scalar_regno))
	addr_mask = reg_addr[scalar_mode].addr_mask[RELOAD_REG_VMX];

      else
	gcc_unreachable ();

      if (REG_P (op1) || SUBREG_P (op1))
	valid_addr_p = (addr_mask & RELOAD_REG_INDEXED) != 0;
      else
	valid_addr_p = (addr_mask & RELOAD_REG_OFFSET) != 0;
    }

  else if (REG_P (new_addr) || SUBREG_P (new_addr))
    valid_addr_p = true;

  else
    valid_addr_p = false;

  if (!valid_addr_p)
    {
      emit_move_insn (base_tmp, new_addr);
      new_addr = base_tmp;
    }

  return change_address (mem, scalar_mode, new_addr);
}
/* Split a variable vec_extract operation into the component instructions.  */

void
rs6000_split_vec_extract_var (rtx dest, rtx src, rtx element, rtx tmp_gpr,
			      rtx tmp_altivec)
{
  machine_mode mode = GET_MODE (src);
  machine_mode scalar_mode = GET_MODE (dest);
  unsigned scalar_size = GET_MODE_SIZE (scalar_mode);
  int byte_shift = exact_log2 (scalar_size);

  gcc_assert (byte_shift >= 0);

  /* If we are given a memory address, optimize to load just the element.  We
     don't have to adjust the vector element number on little endian
     systems.  */
  if (MEM_P (src))
    {
      gcc_assert (REG_P (tmp_gpr));
      emit_move_insn (dest, rs6000_adjust_vec_address (dest, src, element,
						       tmp_gpr, scalar_mode));
      return;
    }

  else if (REG_P (src) || SUBREG_P (src))
    {
      int bit_shift = byte_shift + 3;
      rtx element2;
      int dest_regno = regno_or_subregno (dest);
      int src_regno = regno_or_subregno (src);
      int element_regno = regno_or_subregno (element);

      gcc_assert (REG_P (tmp_gpr));

      /* See if we want to generate VEXTU{B,H,W}{L,R}X if the destination is in
	 a general purpose register.  */
      if (TARGET_P9_VECTOR
	  && (mode == V16QImode || mode == V8HImode || mode == V4SImode)
	  && INT_REGNO_P (dest_regno)
	  && ALTIVEC_REGNO_P (src_regno)
	  && INT_REGNO_P (element_regno))
	{
	  rtx dest_si = gen_rtx_REG (SImode, dest_regno);
	  rtx element_si = gen_rtx_REG (SImode, element_regno);

	  if (mode == V16QImode)
	    emit_insn (BYTES_BIG_ENDIAN
		       ? gen_vextublx (dest_si, element_si, src)
		       : gen_vextubrx (dest_si, element_si, src));

	  else if (mode == V8HImode)
	    {
	      rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
	      emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const1_rtx));
	      emit_insn (BYTES_BIG_ENDIAN
			 ? gen_vextuhlx (dest_si, tmp_gpr_si, src)
			 : gen_vextuhrx (dest_si, tmp_gpr_si, src));
	    }

	  else
	    {
	      rtx tmp_gpr_si = gen_rtx_REG (SImode, REGNO (tmp_gpr));
	      emit_insn (gen_ashlsi3 (tmp_gpr_si, element_si, const2_rtx));
	      emit_insn (BYTES_BIG_ENDIAN
			 ? gen_vextuwlx (dest_si, tmp_gpr_si, src)
			 : gen_vextuwrx (dest_si, tmp_gpr_si, src));
	    }

	  return;
	}

      gcc_assert (REG_P (tmp_altivec));

      /* For little endian, adjust element ordering.  For V2DI/V2DF, we can use
	 an XOR, otherwise we need to subtract.  The shift amount is so VSLO
	 will shift the element into the upper position (adding 3 to convert a
	 byte shift into a bit shift).  */
      if (scalar_size == 8)
	{
	  if (!BYTES_BIG_ENDIAN)
	    {
	      emit_insn (gen_xordi3 (tmp_gpr, element, const1_rtx));
	      element2 = tmp_gpr;
	    }
	  else
	    element2 = element;

	  /* Generate RLDIC directly to shift left 6 bits and retrieve 1
	     bit.  */
	  emit_insn (gen_rtx_SET (tmp_gpr,
				  gen_rtx_AND (DImode,
					       gen_rtx_ASHIFT (DImode,
							       element2,
							       GEN_INT (6)),
					       GEN_INT (64))));
	}
      else
	{
	  if (!BYTES_BIG_ENDIAN)
	    {
	      rtx num_ele_m1 = GEN_INT (GET_MODE_NUNITS (mode) - 1);

	      emit_insn (gen_anddi3 (tmp_gpr, element, num_ele_m1));
	      emit_insn (gen_subdi3 (tmp_gpr, num_ele_m1, tmp_gpr));
	      element2 = tmp_gpr;
	    }
	  else
	    element2 = element;

	  emit_insn (gen_ashldi3 (tmp_gpr, element2, GEN_INT (bit_shift)));
	}

      /* Get the value into the lower byte of the Altivec register where VSLO
	 expects it.  */
      if (TARGET_P9_VECTOR)
	emit_insn (gen_vsx_splat_v2di (tmp_altivec, tmp_gpr));
      else if (can_create_pseudo_p ())
	emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_gpr, tmp_gpr));
      else
	{
	  rtx tmp_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
	  emit_move_insn (tmp_di, tmp_gpr);
	  emit_insn (gen_vsx_concat_v2di (tmp_altivec, tmp_di, tmp_di));
	}

      /* Do the VSLO to get the value into the final location.  */
      switch (mode)
	{
	case E_V2DFmode:
	  emit_insn (gen_vsx_vslo_v2df (dest, src, tmp_altivec));
	  return;

	case E_V2DImode:
	  emit_insn (gen_vsx_vslo_v2di (dest, src, tmp_altivec));
	  return;

	case E_V4SFmode:
	  {
	    rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
	    rtx tmp_altivec_v4sf = gen_rtx_REG (V4SFmode, REGNO (tmp_altivec));
	    rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
	    emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
					  tmp_altivec));

	    emit_insn (gen_vsx_xscvspdp_scalar2 (dest, tmp_altivec_v4sf));
	    return;
	  }

	case E_V4SImode:
	case E_V8HImode:
	case E_V16QImode:
	  {
	    rtx tmp_altivec_di = gen_rtx_REG (DImode, REGNO (tmp_altivec));
	    rtx src_v2di = gen_rtx_REG (V2DImode, REGNO (src));
	    rtx tmp_gpr_di = gen_rtx_REG (DImode, REGNO (dest));
	    emit_insn (gen_vsx_vslo_v2di (tmp_altivec_di, src_v2di,
					  tmp_altivec));
	    emit_move_insn (tmp_gpr_di, tmp_altivec_di);
	    emit_insn (gen_ashrdi3 (tmp_gpr_di, tmp_gpr_di,
				    GEN_INT (64 - (8 * scalar_size))));
	    return;
	  }

	default:
	  gcc_unreachable ();
	}
    }

  else
    gcc_unreachable ();
}
/* Helper function for rs6000_split_v4si_init to build up a DImode value from
   two SImode values.  */

static void
rs6000_split_v4si_init_di_reg (rtx dest, rtx si1, rtx si2, rtx tmp)
{
  const unsigned HOST_WIDE_INT mask_32bit = HOST_WIDE_INT_C (0xffffffff);

  if (CONST_INT_P (si1) && CONST_INT_P (si2))
    {
      unsigned HOST_WIDE_INT const1 = (UINTVAL (si1) & mask_32bit) << 32;
      unsigned HOST_WIDE_INT const2 = UINTVAL (si2) & mask_32bit;

      emit_move_insn (dest, GEN_INT (const1 | const2));
      return;
    }

  /* Put si1 into upper 32-bits of dest.  */
  if (CONST_INT_P (si1))
    emit_move_insn (dest, GEN_INT ((UINTVAL (si1) & mask_32bit) << 32));
  else
    {
      /* Generate RLDIC.  */
      rtx si1_di = gen_rtx_REG (DImode, regno_or_subregno (si1));
      rtx shift_rtx = gen_rtx_ASHIFT (DImode, si1_di, GEN_INT (32));
      rtx mask_rtx = GEN_INT (mask_32bit << 32);
      rtx and_rtx = gen_rtx_AND (DImode, shift_rtx, mask_rtx);
      gcc_assert (!reg_overlap_mentioned_p (dest, si1));
      emit_insn (gen_rtx_SET (dest, and_rtx));
    }

  /* Put si2 into the temporary.  */
  gcc_assert (!reg_overlap_mentioned_p (dest, tmp));
  if (CONST_INT_P (si2))
    emit_move_insn (tmp, GEN_INT (UINTVAL (si2) & mask_32bit));
  else
    emit_insn (gen_zero_extendsidi2 (tmp, si2));

  /* Combine the two parts.  */
  emit_insn (gen_iordi3 (dest, dest, tmp));
}
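/* Worked example (illustrative): for si1 == 3 and si2 == 4, both constants,
   the combined DImode value is (3 << 32) | 4 == 0x0000000300000004, emitted
   as a single move.  If si1 is a register instead, it is shifted left 32
   bits with RLDIC, si2 is zero-extended into TMP, and the halves are ORed
   together.  */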
/* Split a V4SI initialization.  */

void
rs6000_split_v4si_init (rtx operands[])
{
  rtx dest = operands[0];

  /* Destination is a GPR, build up the two DImode parts in place.  */
  if (REG_P (dest) || SUBREG_P (dest))
    {
      int d_regno = regno_or_subregno (dest);
      rtx scalar1 = operands[1];
      rtx scalar2 = operands[2];
      rtx scalar3 = operands[3];
      rtx scalar4 = operands[4];
      rtx tmp1 = operands[5];
      rtx tmp2 = operands[6];

      /* Even though we only need one temporary (plus the destination, which
	 has an early clobber constraint), try to use two temporaries, one for
	 each double word created.  That way the 2nd insn scheduling pass can
	 rearrange things so the two parts are done in parallel.  */
      if (BYTES_BIG_ENDIAN)
	{
	  rtx di_lo = gen_rtx_REG (DImode, d_regno);
	  rtx di_hi = gen_rtx_REG (DImode, d_regno + 1);
	  rs6000_split_v4si_init_di_reg (di_lo, scalar1, scalar2, tmp1);
	  rs6000_split_v4si_init_di_reg (di_hi, scalar3, scalar4, tmp2);
	}
      else
	{
	  rtx di_lo = gen_rtx_REG (DImode, d_regno + 1);
	  rtx di_hi = gen_rtx_REG (DImode, d_regno);
	  rs6000_split_v4si_init_di_reg (di_lo, scalar4, scalar3, tmp1);
	  rs6000_split_v4si_init_di_reg (di_hi, scalar2, scalar1, tmp2);
	}
      return;
    }

  else
    gcc_unreachable ();
}
/* Return alignment of TYPE.  Existing alignment is ALIGN.  HOW
   selects whether the alignment is abi mandated, optional, or
   both abi and optional alignment.  */

unsigned int
rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
{
  if (how != align_opt)
    {
      if (TREE_CODE (type) == VECTOR_TYPE && align < 128)
	align = 128;
    }

  if (how != align_abi)
    {
      if (TREE_CODE (type) == ARRAY_TYPE
	  && TYPE_MODE (TREE_TYPE (type)) == QImode)
	{
	  if (align < BITS_PER_WORD)
	    align = BITS_PER_WORD;
	}
    }

  return align;
}
/* Implement TARGET_SLOW_UNALIGNED_ACCESS.  Altivec vector memory
   instructions simply ignore the low bits; VSX memory instructions
   are aligned to 4 or 8 bytes.  */

static bool
rs6000_slow_unaligned_access (machine_mode mode, unsigned int align)
{
  return (STRICT_ALIGNMENT
	  || (!TARGET_EFFICIENT_UNALIGNED_VSX
	      && ((SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && align < 32)
		  || ((VECTOR_MODE_P (mode) || FLOAT128_VECTOR_P (mode))
		      && (int) align < VECTOR_ALIGN (mode)))));
}
/* Previous GCC releases forced all vector types to have 16-byte alignment.  */

bool
rs6000_special_adjust_field_align_p (tree type, unsigned int computed)
{
  if (TARGET_ALTIVEC && TREE_CODE (type) == VECTOR_TYPE)
    {
      if (computed != 128)
	{
	  static bool warned;
	  if (!warned && warn_psabi)
	    {
	      warned = true;
	      inform (input_location,
		      "the layout of aggregates containing vectors with"
		      " %d-byte alignment has changed in GCC 5",
		      computed / BITS_PER_UNIT);
	    }
	}
      /* In current GCC there is no special case.  */
      return false;
    }

  return false;
}
/* AIX increases natural record alignment to doubleword if the first
   field is an FP double while the FP fields remain word aligned.  */

unsigned int
rs6000_special_round_type_align (tree type, unsigned int computed,
				 unsigned int specified)
{
  unsigned int align = MAX (computed, specified);
  tree field = TYPE_FIELDS (type);

  /* Skip all non field decls */
  while (field != NULL && TREE_CODE (field) != FIELD_DECL)
    field = DECL_CHAIN (field);

  if (field != NULL && field != type)
    {
      type = TREE_TYPE (field);
      while (TREE_CODE (type) == ARRAY_TYPE)
	type = TREE_TYPE (type);

      if (type != error_mark_node && TYPE_MODE (type) == DFmode)
	align = MAX (align, 64);
    }

  return align;
}
/* Darwin increases record alignment to the natural alignment of
   the first field.  */

unsigned int
darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
					unsigned int specified)
{
  unsigned int align = MAX (computed, specified);

  if (TYPE_PACKED (type))
    return align;

  /* Find the first field, looking down into aggregates.  */
  do {
    tree field = TYPE_FIELDS (type);
    /* Skip all non field decls */
    while (field != NULL && TREE_CODE (field) != FIELD_DECL)
      field = DECL_CHAIN (field);
    if (! field)
      break;
    /* A packed field does not contribute any extra alignment.  */
    if (DECL_PACKED (field))
      return align;
    type = TREE_TYPE (field);
    while (TREE_CODE (type) == ARRAY_TYPE)
      type = TREE_TYPE (type);
  } while (AGGREGATE_TYPE_P (type));

  if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
    align = MAX (align, TYPE_ALIGN (type));

  return align;
}
/* Return 1 for an operand in small memory on V.4/eabi.  */

int
small_data_operand (rtx op ATTRIBUTE_UNUSED,
		    machine_mode mode ATTRIBUTE_UNUSED)
{
#if TARGET_ELF
  rtx sym_ref;

  if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
    return 0;

  if (DEFAULT_ABI != ABI_V4)
    return 0;

  if (GET_CODE (op) == SYMBOL_REF)
    sym_ref = op;

  else if (GET_CODE (op) != CONST
	   || GET_CODE (XEXP (op, 0)) != PLUS
	   || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
	   || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
    return 0;

  else
    {
      rtx sum = XEXP (op, 0);
      HOST_WIDE_INT summand;

      /* We have to be careful here, because it is the referenced address
	 that must be 32k from _SDA_BASE_, not just the symbol.  */
      summand = INTVAL (XEXP (sum, 1));
      if (summand < 0 || summand > g_switch_value)
	return 0;

      sym_ref = XEXP (sum, 0);
    }

  return SYMBOL_REF_SMALL_P (sym_ref);
#else
  return 0;
#endif
}

/* Return true if either operand is a general purpose register.  */

bool
gpr_or_gpr_p (rtx op0, rtx op1)
{
  return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
	  || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
}

/* Return true if this is a move direct operation between GPR registers and
   floating point/VSX registers.  */

bool
direct_move_p (rtx op0, rtx op1)
{
  int regno0, regno1;

  if (!REG_P (op0) || !REG_P (op1))
    return false;

  if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
    return false;

  regno0 = REGNO (op0);
  regno1 = REGNO (op1);
  if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
    return false;

  if (INT_REGNO_P (regno0))
    return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);

  else if (INT_REGNO_P (regno1))
    {
      if (TARGET_MFPGPR && FP_REGNO_P (regno0))
	return true;

      else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
	return true;
    }

  return false;
}
/* Return true if the OFFSET is valid for the quad address instructions that
   use d-form (register + offset) addressing.  */

static inline bool
quad_address_offset_p (HOST_WIDE_INT offset)
{
  return (IN_RANGE (offset, -32768, 32767) && ((offset) & 0xf) == 0);
}
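
/* Illustrative sketch (not part of the original GCC sources): the DQ-form
   test above accepts a 16-bit signed offset whose low four bits are zero.
   The standalone program below, using hypothetical names, mirrors that
   check on plain integers; it is compiled out via #if 0.  */
#if 0
#include <stdio.h>

/* Same test as quad_address_offset_p, on a plain long.  */
static int
demo_quad_offset_ok (long offset)
{
  return offset >= -32768 && offset <= 32767 && (offset & 0xf) == 0;
}

int
main (void)
{
  printf ("%d\n", demo_quad_offset_ok (32752));  /* 1: in range, 16-aligned */
  printf ("%d\n", demo_quad_offset_ok (32760));  /* 0: only 8-byte aligned */
  printf ("%d\n", demo_quad_offset_ok (-32768)); /* 1: lowest valid offset */
  return 0;
}
#endif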
/* Return true if the ADDR is an acceptable address for a quad memory
   operation of mode MODE (either LQ/STQ for general purpose registers, or
   LXV/STXV for vector registers under ISA 3.0).  STRICT requires strict
   register checking for the base register.  */

bool
quad_address_p (rtx addr, machine_mode mode, bool strict)
{
  rtx op0, op1;

  if (GET_MODE_SIZE (mode) != 16)
    return false;

  if (legitimate_indirect_address_p (addr, strict))
    return true;

  if (VECTOR_MODE_P (mode) && !mode_supports_dq_form (mode))
    return false;

  if (GET_CODE (addr) != PLUS)
    return false;

  op0 = XEXP (addr, 0);
  if (!REG_P (op0) || !INT_REG_OK_FOR_BASE_P (op0, strict))
    return false;

  op1 = XEXP (addr, 1);
  if (!CONST_INT_P (op1))
    return false;

  return quad_address_offset_p (INTVAL (op1));
}

/* Return true if this is a load or store quad operation.  This function does
   not handle the atomic quad memory instructions.  */

bool
quad_load_store_p (rtx op0, rtx op1)
{
  bool ret;

  if (!TARGET_QUAD_MEMORY)
    ret = false;

  else if (REG_P (op0) && MEM_P (op1))
    ret = (quad_int_reg_operand (op0, GET_MODE (op0))
	   && quad_memory_operand (op1, GET_MODE (op1))
	   && !reg_overlap_mentioned_p (op0, op1));

  else if (MEM_P (op0) && REG_P (op1))
    ret = (quad_memory_operand (op0, GET_MODE (op0))
	   && quad_int_reg_operand (op1, GET_MODE (op1)));

  else
    ret = false;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\n========== quad_load_store, return %s\n",
	       ret ? "true" : "false");
      debug_rtx (gen_rtx_SET (op0, op1));
    }

  return ret;
}

/* Given an address, return a constant offset term if one exists.  */

static rtx
address_offset (rtx op)
{
  if (GET_CODE (op) == PRE_INC
      || GET_CODE (op) == PRE_DEC)
    op = XEXP (op, 0);
  else if (GET_CODE (op) == PRE_MODIFY
	   || GET_CODE (op) == LO_SUM)
    op = XEXP (op, 1);

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  if (GET_CODE (op) == PLUS)
    op = XEXP (op, 1);

  if (CONST_INT_P (op))
    return op;

  return NULL_RTX;
}
/* Return true if the MEM operand is a memory operand suitable for use
   with a (full width, possibly multiple) gpr load/store.  On
   powerpc64 this means the offset must be divisible by 4.
   Implements 'Y' constraint.

   Accept direct, indexed, offset, lo_sum and tocref.  Since this is
   a constraint function we know the operand has satisfied a suitable
   memory predicate.  Also accept some odd rtl generated by reload
   (see rs6000_legitimize_reload_address for various forms).  It is
   important that reload rtl be accepted by appropriate constraints
   but not by the operand predicate.

   Offsetting a lo_sum should not be allowed, except where we know by
   alignment that a 32k boundary is not crossed, but see the ???
   comment in rs6000_legitimize_reload_address.  Note that by
   "offsetting" here we mean a further offset to access parts of the
   MEM.  It's fine to have a lo_sum where the inner address is offset
   from a sym, since the same sym+offset will appear in the high part
   of the address calculation.  */

bool
mem_operand_gpr (rtx op, machine_mode mode)
{
  unsigned HOST_WIDE_INT offset;
  int extra;
  rtx addr = XEXP (op, 0);

  /* PR85755: Allow PRE_INC and PRE_DEC addresses.  */
  if (TARGET_UPDATE
      && (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
      && mode_supports_pre_incdec_p (mode)
      && legitimate_indirect_address_p (XEXP (addr, 0), false))
    return true;

  /* Don't allow non-offsettable addresses.  See PRs 83969 and 84279.  */
  if (!rs6000_offsettable_memref_p (op, mode, false))
    return false;

  op = address_offset (addr);
  if (op == NULL_RTX)
    return true;

  offset = INTVAL (op);
  if (TARGET_POWERPC64 && (offset & 3) != 0)
    return false;

  extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
  if (extra < 0)
    extra = 0;

  if (GET_CODE (addr) == LO_SUM)
    /* For lo_sum addresses, we must allow any offset except one that
       causes a wrap, so test only the low 16 bits.  */
    offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;

  return offset + 0x8000 < 0x10000u - extra;
}
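
/* Illustrative sketch (not part of the original GCC sources): the lo_sum
   handling above uses ((x & 0xffff) ^ 0x8000) - 0x8000 to sign-extend the
   low 16 bits of an offset.  A minimal standalone demonstration, compiled
   out via #if 0:  */
#if 0
#include <stdio.h>

int
main (void)
{
  long offsets[] = { 0x12345, 0x1fffc, 0x18000 };
  for (int i = 0; i < 3; i++)
    {
      long low = ((offsets[i] & 0xffff) ^ 0x8000) - 0x8000;
      /* Prints: 0x12345 -> 9029 (0x2345), 0x1fffc -> -4,
	 0x18000 -> -32768.  */
      printf ("%#lx -> %ld\n", offsets[i], low);
    }
  return 0;
}
#endif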
/* As above, but for DS-FORM VSX insns.  Unlike mem_operand_gpr,
   enforce an offset divisible by 4 even for 32-bit.  */

bool
mem_operand_ds_form (rtx op, machine_mode mode)
{
  unsigned HOST_WIDE_INT offset;
  int extra;
  rtx addr = XEXP (op, 0);

  if (!offsettable_address_p (false, mode, addr))
    return false;

  op = address_offset (addr);
  if (op == NULL_RTX)
    return true;

  offset = INTVAL (op);
  if ((offset & 3) != 0)
    return false;

  extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
  if (extra < 0)
    extra = 0;

  if (GET_CODE (addr) == LO_SUM)
    /* For lo_sum addresses, we must allow any offset except one that
       causes a wrap, so test only the low 16 bits.  */
    offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;

  return offset + 0x8000 < 0x10000u - extra;
}

/* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p.  */

static bool
reg_offset_addressing_ok_p (machine_mode mode)
{
  switch (mode)
    {
    case E_V16QImode:
    case E_V8HImode:
    case E_V4SFmode:
    case E_V4SImode:
    case E_V2DFmode:
    case E_V2DImode:
    case E_V1TImode:
    case E_TImode:
    case E_TFmode:
    case E_KFmode:
      /* AltiVec/VSX vector modes.  Only reg+reg addressing was valid until
	 the ISA 3.0 vector d-form addressing mode was added.  While TImode
	 is not a vector mode, if we want to use the VSX registers to move
	 it around, we need to restrict ourselves to reg+reg addressing.
	 Similarly for IEEE 128-bit floating point that is passed in a
	 single vector register.  */
      if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
	return mode_supports_dq_form (mode);
      break;

    case E_SDmode:
      /* If we can do direct load/stores of SDmode, restrict it to reg+reg
	 addressing for the LFIWZX and STFIWX instructions.  */
      if (TARGET_NO_SDMODE_STACK)
	return false;
      break;

    default:
      break;
    }

  return true;
}

static bool
virtual_stack_registers_memory_p (rtx op)
{
  int regnum;

  if (GET_CODE (op) == REG)
    regnum = REGNO (op);

  else if (GET_CODE (op) == PLUS
	   && GET_CODE (XEXP (op, 0)) == REG
	   && GET_CODE (XEXP (op, 1)) == CONST_INT)
    regnum = REGNO (XEXP (op, 0));

  else
    return false;

  return (regnum >= FIRST_VIRTUAL_REGISTER
	  && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
}
/* Return true if a MODE sized memory accesses to OP plus OFFSET
   is known to not straddle a 32k boundary.  This function is used
   to determine whether -mcmodel=medium code can use TOC pointer
   relative addressing for OP.  This means the alignment of the TOC
   pointer must also be taken into account, and unfortunately that is
   only 8 bytes.  */

#ifndef POWERPC64_TOC_POINTER_ALIGNMENT
#define POWERPC64_TOC_POINTER_ALIGNMENT 8
#endif

static bool
offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
			     machine_mode mode)
{
  tree decl;
  unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;

  if (GET_CODE (op) != SYMBOL_REF)
    return false;

  /* ISA 3.0 vector d-form addressing is restricted, don't allow
     SYMBOL_REF.  */
  if (mode_supports_dq_form (mode))
    return false;

  dsize = GET_MODE_SIZE (mode);
  decl = SYMBOL_REF_DECL (op);
  if (!decl)
    {
      if (dsize == 0)
	return false;

      /* -fsection-anchors loses the original SYMBOL_REF_DECL when
	 replacing memory addresses with an anchor plus offset.  We
	 could find the decl by rummaging around in the block->objects
	 VEC for the given offset but that seems like too much work.  */
      dalign = BITS_PER_UNIT;
      if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
	  && SYMBOL_REF_ANCHOR_P (op)
	  && SYMBOL_REF_BLOCK (op) != NULL)
	{
	  struct object_block *block = SYMBOL_REF_BLOCK (op);

	  dalign = block->alignment;
	  offset += SYMBOL_REF_BLOCK_OFFSET (op);
	}
      else if (CONSTANT_POOL_ADDRESS_P (op))
	{
	  /* It would be nice to have get_pool_align()..  */
	  machine_mode cmode = get_pool_mode (op);

	  dalign = GET_MODE_ALIGNMENT (cmode);
	}
    }
  else if (DECL_P (decl))
    {
      dalign = DECL_ALIGN (decl);

      if (dsize == 0)
	{
	  /* Allow BLKmode when the entire object is known to not
	     cross a 32k boundary.  */
	  if (!DECL_SIZE_UNIT (decl))
	    return false;

	  if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
	    return false;

	  dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
	  if (dsize > 32768)
	    return false;

	  dalign /= BITS_PER_UNIT;
	  if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
	    dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
	  return dalign >= dsize;
	}
    }
  else
    gcc_unreachable ();

  /* Find how many bits of the alignment we know for this access.  */
  dalign /= BITS_PER_UNIT;
  if (dalign > POWERPC64_TOC_POINTER_ALIGNMENT)
    dalign = POWERPC64_TOC_POINTER_ALIGNMENT;
  mask = dalign - 1;
  lsb = offset & -offset;
  mask &= lsb - 1;
  dalign = mask + 1;

  return dalign >= dsize;
}
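
/* Illustrative sketch (not part of the original GCC sources): in the
   function above, offset & -offset isolates the lowest set bit of the
   offset, which caps the alignment a nonzero offset can preserve.
   Compiled out via #if 0:  */
#if 0
#include <stdio.h>

int
main (void)
{
  unsigned long offs[] = { 8, 24, 0x30 };
  for (int i = 0; i < 3; i++)
    /* Prints: 8 -> 8, 24 -> 8, 48 -> 16.  */
    printf ("%lu -> %lu\n", offs[i], offs[i] & -offs[i]);
  return 0;
}
#endif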
static bool
constant_pool_expr_p (rtx op)
{
  rtx base, offset;

  split_const (op, &base, &offset);
  return (GET_CODE (base) == SYMBOL_REF
	  && CONSTANT_POOL_ADDRESS_P (base)
	  && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
}

/* These are only used to pass through from print_operand/print_operand_address
   to rs6000_output_addr_const_extra over the intervening function
   output_addr_const which is not target code.  */
static const_rtx tocrel_base_oac, tocrel_offset_oac;

/* Return true if OP is a toc pointer relative address (the output
   of create_TOC_reference).  If STRICT, do not match non-split
   -mcmodel=large/medium toc pointer relative addresses.  If the pointers
   are non-NULL, place base and offset pieces in TOCREL_BASE_RET and
   TOCREL_OFFSET_RET respectively.  */

bool
toc_relative_expr_p (const_rtx op, bool strict, const_rtx *tocrel_base_ret,
		     const_rtx *tocrel_offset_ret)
{
  if (!TARGET_TOC)
    return false;

  if (TARGET_CMODEL != CMODEL_SMALL)
    {
      /* When strict ensure we have everything tidy.  */
      if (strict
	  && !(GET_CODE (op) == LO_SUM
	       && REG_P (XEXP (op, 0))
	       && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict)))
	return false;

      /* When not strict, allow non-split TOC addresses and also allow
	 (lo_sum (high ..)) TOC addresses created during reload.  */
      if (GET_CODE (op) == LO_SUM)
	op = XEXP (op, 1);
    }

  const_rtx tocrel_base = op;
  const_rtx tocrel_offset = const0_rtx;

  if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
    {
      tocrel_base = XEXP (op, 0);
      tocrel_offset = XEXP (op, 1);
    }

  if (tocrel_base_ret)
    *tocrel_base_ret = tocrel_base;
  if (tocrel_offset_ret)
    *tocrel_offset_ret = tocrel_offset;

  return (GET_CODE (tocrel_base) == UNSPEC
	  && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
}

/* Return true if X is a constant pool address, and also for cmodel=medium
   if X is a toc-relative address known to be offsettable within MODE.  */

bool
legitimate_constant_pool_address_p (const_rtx x, machine_mode mode,
				    bool strict)
{
  const_rtx tocrel_base, tocrel_offset;
  return (toc_relative_expr_p (x, strict, &tocrel_base, &tocrel_offset)
	  && (TARGET_CMODEL != CMODEL_MEDIUM
	      || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
	      || mode == QImode
	      || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
					      INTVAL (tocrel_offset), mode)));
}

static bool
legitimate_small_data_p (machine_mode mode, rtx x)
{
  return (DEFAULT_ABI == ABI_V4
	  && !flag_pic && !TARGET_TOC
	  && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
	  && small_data_operand (x, mode));
}
bool
rs6000_legitimate_offset_address_p (machine_mode mode, rtx x,
				    bool strict, bool worst_case)
{
  unsigned HOST_WIDE_INT offset;
  unsigned int extra;

  if (GET_CODE (x) != PLUS)
    return false;
  if (!REG_P (XEXP (x, 0)))
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
    return false;
  if (mode_supports_dq_form (mode))
    return quad_address_p (x, mode, strict);
  if (!reg_offset_addressing_ok_p (mode))
    return virtual_stack_registers_memory_p (x);
  if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
    return true;
  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return false;

  offset = INTVAL (XEXP (x, 1));
  extra = 0;
  switch (mode)
    {
    case E_DFmode:
    case E_DDmode:
    case E_DImode:
      /* If we are using VSX scalar loads, restrict ourselves to reg+reg
	 addressing.  */
      if (VECTOR_MEM_VSX_P (mode))
	return false;

      if (!worst_case)
	break;
      if (!TARGET_POWERPC64)
	extra = 4;
      else if (offset & 3)
	return false;
      break;

    case E_TFmode:
    case E_IFmode:
    case E_KFmode:
    case E_TDmode:
    case E_TImode:
    case E_PTImode:
      extra = 8;
      if (!worst_case)
	break;
      if (!TARGET_POWERPC64)
	extra = 12;
      else if (offset & 3)
	return false;
      break;

    default:
      break;
    }

  offset += 0x8000;
  return offset < 0x10000 - extra;
}
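
/* Illustrative sketch (not part of the original GCC sources): the final
   range check above maps signed 16-bit displacements onto an unsigned
   comparison, shrunk by EXTRA bytes for accesses spanning several
   registers.  Standalone demo with a hypothetical helper, compiled out
   via #if 0:  */
#if 0
#include <stdio.h>

static int
demo_fits (long offset, unsigned extra)
{
  /* Adding 0x8000 maps [-0x8000, 0x7fff - extra] onto
     [0, 0x10000 - extra).  */
  return (unsigned long) (offset + 0x8000) < 0x10000u - extra;
}

int
main (void)
{
  printf ("%d\n", demo_fits (32767, 0));  /* 1 */
  printf ("%d\n", demo_fits (32767, 8));  /* 0: last word out of range */
  printf ("%d\n", demo_fits (-32768, 8)); /* 1 */
  return 0;
}
#endif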
bool
legitimate_indexed_address_p (rtx x, int strict)
{
  rtx op0, op1;

  if (GET_CODE (x) != PLUS)
    return false;

  op0 = XEXP (x, 0);
  op1 = XEXP (x, 1);

  return (REG_P (op0) && REG_P (op1)
	  && ((INT_REG_OK_FOR_BASE_P (op0, strict)
	       && INT_REG_OK_FOR_INDEX_P (op1, strict))
	      || (INT_REG_OK_FOR_BASE_P (op1, strict)
		  && INT_REG_OK_FOR_INDEX_P (op0, strict))));
}

bool
avoiding_indexed_address_p (machine_mode mode)
{
  /* Avoid indexed addressing for modes that have non-indexed
     load/store instruction forms.  */
  return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
}

bool
legitimate_indirect_address_p (rtx x, int strict)
{
  return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
}

bool
macho_lo_sum_memory_operand (rtx x, machine_mode mode)
{
  if (!TARGET_MACHO || !flag_pic
      || mode != SImode || GET_CODE (x) != MEM)
    return false;
  x = XEXP (x, 0);

  if (GET_CODE (x) != LO_SUM)
    return false;
  if (GET_CODE (XEXP (x, 0)) != REG)
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
    return false;
  x = XEXP (x, 1);

  return CONSTANT_P (x);
}

static bool
legitimate_lo_sum_address_p (machine_mode mode, rtx x, int strict)
{
  if (GET_CODE (x) != LO_SUM)
    return false;
  if (GET_CODE (XEXP (x, 0)) != REG)
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
    return false;
  /* quad word addresses are restricted, and we can't use LO_SUM.  */
  if (mode_supports_dq_form (mode))
    return false;
  x = XEXP (x, 1);

  if (TARGET_ELF || TARGET_MACHO)
    {
      bool large_toc_ok;

      if (DEFAULT_ABI == ABI_V4 && flag_pic)
	return false;
      /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
	 push_reload from reload pass code.  LEGITIMIZE_RELOAD_ADDRESS
	 recognizes some LO_SUM addresses as valid although this
	 function says opposite.  In most cases, LRA through different
	 transformations can generate correct code for address reloads.
	 It cannot manage only some LO_SUM cases.  So we need to add
	 code analogous to one in rs6000_legitimize_reload_address for
	 LO_SUM here saying that some addresses are still valid.  */
      large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
		      && small_toc_ref (x, VOIDmode));
      if (TARGET_TOC && ! large_toc_ok)
	return false;
      if (GET_MODE_NUNITS (mode) != 1)
	return false;
      if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
	  && !(/* ??? Assume floating point reg based on mode?  */
	       TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
	return false;

      return CONSTANT_P (x) || large_toc_ok;
    }

  return false;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This is used from only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was
   called.  In some cases it is useful to look at this to decide what
   needs to be done.

   It is always safe for this function to do nothing.  It exists to
   recognize opportunities to optimize the output.

   On RS/6000, first check for the sum of a register with a constant
   integer that is out of range.  If so, generate code to add the
   constant with the low-order 16 bits masked to the register and force
   this result into another register (this can be done with `cau').
   Then generate an address of REG+(CONST&0xffff), allowing for the
   possibility of bit 16 being a one.

   Then check for the sum of a register and something not constant, try to
   load the other things into a register and return the sum.  */

static rtx
rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			   machine_mode mode)
{
  unsigned int extra;

  if (!reg_offset_addressing_ok_p (mode)
      || mode_supports_dq_form (mode))
    {
      if (virtual_stack_registers_memory_p (x))
	return x;

      /* In theory we should not be seeing addresses of the form reg+0,
	 but just in case it is generated, optimize it away.  */
      if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
	return force_reg (Pmode, XEXP (x, 0));

      /* For TImode with load/store quad, restrict addresses to just a single
	 pointer, so it works with both GPRs and VSX registers.  */
      /* Make sure both operands are registers.  */
      else if (GET_CODE (x) == PLUS
	       && (mode != TImode || !TARGET_VSX))
	return gen_rtx_PLUS (Pmode,
			     force_reg (Pmode, XEXP (x, 0)),
			     force_reg (Pmode, XEXP (x, 1)));
      else
	return force_reg (Pmode, x);
    }
  if (GET_CODE (x) == SYMBOL_REF)
    {
      enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
      if (model != 0)
	return rs6000_legitimize_tls_address (x, model);
    }

  extra = 0;
  switch (mode)
    {
    case E_TFmode:
    case E_TDmode:
    case E_TImode:
    case E_PTImode:
    case E_IFmode:
    case E_KFmode:
      /* As in legitimate_offset_address_p we do not assume
	 worst-case.  The mode here is just a hint as to the registers
	 used.  A TImode is usually in gprs, but may actually be in
	 fprs.  Leave worst-case scenario for reload to handle via
	 insn constraints.  PTImode is only GPRs.  */
      extra = 8;
      break;
    default:
      break;
    }

  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
	  >= 0x10000 - extra))
    {
      HOST_WIDE_INT high_int, low_int;
      rtx sum;
      low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
      if (low_int >= 0x8000 - extra)
	low_int = 0;
      high_int = INTVAL (XEXP (x, 1)) - low_int;
      sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
					 GEN_INT (high_int)), 0);
      return plus_constant (Pmode, sum, low_int);
    }
  else if (GET_CODE (x) == PLUS
	   && GET_CODE (XEXP (x, 0)) == REG
	   && GET_CODE (XEXP (x, 1)) != CONST_INT
	   && GET_MODE_NUNITS (mode) == 1
	   && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
	       || (/* ??? Assume floating point reg based on mode?  */
		   TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode)))
	   && !avoiding_indexed_address_p (mode))
    {
      return gen_rtx_PLUS (Pmode, XEXP (x, 0),
			   force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
    }
  else if ((TARGET_ELF
#if TARGET_MACHO
	    || !MACHO_DYNAMIC_NO_PIC_P
#endif
	    )
	   && TARGET_32BIT
	   && TARGET_NO_TOC
	   && !flag_pic
	   && GET_CODE (x) != CONST_INT
	   && GET_CODE (x) != CONST_WIDE_INT
	   && GET_CODE (x) != CONST_DOUBLE
	   && CONSTANT_P (x)
	   && GET_MODE_NUNITS (mode) == 1
	   && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
	       || (/* ??? Assume floating point reg based on mode?  */
		   TARGET_HARD_FLOAT && (mode == DFmode || mode == DDmode))))
    {
      rtx reg = gen_reg_rtx (Pmode);
      if (TARGET_ELF)
	emit_insn (gen_elf_high (reg, x));
      else
	emit_insn (gen_macho_high (reg, x));
      return gen_rtx_LO_SUM (Pmode, reg, x);
    }
  else if (TARGET_TOC
	   && GET_CODE (x) == SYMBOL_REF
	   && constant_pool_expr_p (x)
	   && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
    return create_TOC_reference (x, NULL_RTX);
  else
    return x;
}
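
/* Illustrative sketch (not part of the original GCC sources): the first
   PLUS case above splits an out-of-range displacement VAL into HIGH + LOW,
   where LOW fits the signed 16-bit D field and HIGH is addis material.
   Standalone arithmetic demo, compiled out via #if 0:  */
#if 0
#include <stdio.h>

int
main (void)
{
  long val = 0x12345678;
  long low = ((val & 0xffff) ^ 0x8000) - 0x8000;
  long high = val - low;
  /* Prints: high=0x12340000 low=0x5678 sum=0x12345678.  */
  printf ("high=%#lx low=%#lx sum=%#lx\n", high, low, high + low);
  return 0;
}
#endif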
/* Debug version of rs6000_legitimize_address.  */
static rtx
rs6000_debug_legitimize_address (rtx x, rtx oldx, machine_mode mode)
{
  rtx ret;
  rtx_insn *insns;

  start_sequence ();
  ret = rs6000_legitimize_address (x, oldx, mode);
  insns = get_insns ();
  end_sequence ();

  if (ret != x)
    {
      fprintf (stderr,
	       "\nrs6000_legitimize_address: mode %s, old code %s, "
	       "new code %s, modified\n",
	       GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
	       GET_RTX_NAME (GET_CODE (ret)));

      fprintf (stderr, "Original address:\n");
      debug_rtx (x);

      fprintf (stderr, "oldx:\n");
      debug_rtx (oldx);

      fprintf (stderr, "New address:\n");
      debug_rtx (ret);

      if (insns)
	{
	  fprintf (stderr, "Insns added:\n");
	  debug_rtx_list (insns, 20);
	}
    }
  else
    {
      fprintf (stderr,
	       "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
	       GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));

      debug_rtx (x);
    }

  if (insns)
    emit_insn (insns);

  return ret;
}

/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static void
rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.long\t", file);
      break;
    case 8:
      fputs (DOUBLE_INT_ASM_OP, file);
      break;
    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  if (TARGET_ELF)
    fputs ("@dtprel+0x8000", file);
  else if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF)
    {
      switch (SYMBOL_REF_TLS_MODEL (x))
	{
	case 0:
	  break;
	case TLS_MODEL_LOCAL_EXEC:
	  fputs ("@le", file);
	  break;
	case TLS_MODEL_INITIAL_EXEC:
	  fputs ("@ie", file);
	  break;
	case TLS_MODEL_GLOBAL_DYNAMIC:
	case TLS_MODEL_LOCAL_DYNAMIC:
	  fputs ("@m", file);
	  break;
	default:
	  gcc_unreachable ();
	}
    }
}

/* Return true if X is a symbol that refers to real (rather than emulated)
   TLS.  */

static bool
rs6000_real_tls_symbol_ref_p (rtx x)
{
  return (GET_CODE (x) == SYMBOL_REF
	  && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
}
/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  */

static rtx
rs6000_delegitimize_address (rtx orig_x)
{
  rtx x, y, offset;

  orig_x = delegitimize_mem_from_attrs (orig_x);
  x = orig_x;
  if (MEM_P (x))
    x = XEXP (x, 0);

  y = x;
  if (TARGET_CMODEL != CMODEL_SMALL
      && GET_CODE (y) == LO_SUM)
    y = XEXP (y, 1);

  offset = NULL_RTX;
  if (GET_CODE (y) == PLUS
      && GET_MODE (y) == Pmode
      && CONST_INT_P (XEXP (y, 1)))
    {
      offset = XEXP (y, 1);
      y = XEXP (y, 0);
    }

  if (GET_CODE (y) == UNSPEC
      && XINT (y, 1) == UNSPEC_TOCREL)
    {
      y = XVECEXP (y, 0, 0);

      /* Do not associate thread-local symbols with the original
	 constant pool symbol.  */
      if (TARGET_XCOFF
	  && GET_CODE (y) == SYMBOL_REF
	  && CONSTANT_POOL_ADDRESS_P (y)
	  && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
	return orig_x;

      if (offset != NULL_RTX)
	y = gen_rtx_PLUS (Pmode, y, offset);
      if (!MEM_P (orig_x))
	return y;
      else
	return replace_equiv_address_nv (orig_x, y);
    }

  if (TARGET_MACHO
      && GET_CODE (orig_x) == LO_SUM
      && GET_CODE (XEXP (orig_x, 1)) == CONST)
    {
      y = XEXP (XEXP (orig_x, 1), 0);
      if (GET_CODE (y) == UNSPEC
	  && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
	return XVECEXP (y, 0, 0);
    }

  return orig_x;
}

/* Return true if X shouldn't be emitted into the debug info.
   The linker doesn't like .toc section references from
   .debug_* sections, so reject .toc section symbols.  */

static bool
rs6000_const_not_ok_for_debug_p (rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    return true;
  if (GET_CODE (x) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (x))
    {
      rtx c = get_pool_constant (x);
      machine_mode cmode = get_pool_mode (x);
      if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
	return true;
    }

  return false;
}
/* Implement the TARGET_LEGITIMATE_COMBINED_INSN hook.  */

static bool
rs6000_legitimate_combined_insn (rtx_insn *insn)
{
  int icode = INSN_CODE (insn);

  /* Reject creating doloop insns.  Combine should not be allowed
     to create these for a number of reasons:
     1) In a nested loop, if combine creates one of these in an
     outer loop and the register allocator happens to allocate ctr
     to the outer loop insn, then the inner loop can't use ctr.
     Inner loops ought to be more highly optimized.
     2) Combine often wants to create one of these from what was
     originally a three insn sequence, first combining the three
     insns to two, then to ctrsi/ctrdi.  When ctrsi/ctrdi is not
     allocated ctr, the splitter takes us back to the three insn
     sequence.  It's better to stop combine at the two insn
     sequence.
     3) Faced with not being able to allocate ctr for ctrsi/ctrdi
     insns, the register allocator sometimes uses floating point
     or vector registers for the pseudo.  Since ctrsi/ctrdi is a
     jump insn and output reloads are not implemented for jumps,
     the ctrsi/ctrdi splitters need to handle all possible cases.
     That's a pain, and it gets to be seriously difficult when a
     splitter that runs after reload needs memory to transfer from
     a gpr to fpr.  See PR70098 and PR71763 which are not fixed
     for the difficult case.  It's better to not create problems
     in the first place.  */
  if (icode != CODE_FOR_nothing
      && (icode == CODE_FOR_bdz_si
	  || icode == CODE_FOR_bdz_di
	  || icode == CODE_FOR_bdnz_si
	  || icode == CODE_FOR_bdnz_di
	  || icode == CODE_FOR_bdztf_si
	  || icode == CODE_FOR_bdztf_di
	  || icode == CODE_FOR_bdnztf_si
	  || icode == CODE_FOR_bdnztf_di))
    return false;

  return true;
}
/* Construct the SYMBOL_REF for the tls_get_addr function.  */

static GTY(()) rtx rs6000_tls_symbol;
static rtx
rs6000_tls_get_addr (void)
{
  if (!rs6000_tls_symbol)
    rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");

  return rs6000_tls_symbol;
}

/* Construct the SYMBOL_REF for TLS GOT references.  */

static GTY(()) rtx rs6000_got_symbol;
static rtx
rs6000_got_sym (void)
{
  if (!rs6000_got_symbol)
    {
      rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
    }

  return rs6000_got_symbol;
}
/* AIX Thread-Local Address support.  */

static rtx
rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
{
  rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
  const char *name;
  char *tlsname;

  name = XSTR (addr, 0);
  /* Append TLS CSECT qualifier, unless the symbol already is qualified
     or the symbol will be in TLS private data section.  */
  if (name[strlen (name) - 1] != ']'
      && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
	  || bss_initializer_p (SYMBOL_REF_DECL (addr))))
    {
      tlsname = XALLOCAVEC (char, strlen (name) + 4);
      strcpy (tlsname, name);
      strcat (tlsname,
	      bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
      tlsaddr = copy_rtx (addr);
      XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
    }
  else
    tlsaddr = addr;

  /* Place addr into TOC constant pool.  */
  sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);

  /* Output the TOC entry and create the MEM referencing the value.  */
  if (constant_pool_expr_p (XEXP (sym, 0))
      && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)),
					  Pmode))
    {
      tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
      mem = gen_const_mem (Pmode, tocref);
      set_mem_alias_set (mem, get_TOC_alias_set ());
    }
  else
    return sym;

  /* Use global-dynamic for local-dynamic.  */
  if (model == TLS_MODEL_GLOBAL_DYNAMIC
      || model == TLS_MODEL_LOCAL_DYNAMIC)
    {
      /* Create new TOC reference for @m symbol.  */
      name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
      tlsname = XALLOCAVEC (char, strlen (name) + 1);
      strcpy (tlsname, "*LCM");
      strcat (tlsname, name + 3);
      rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
      SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
      tocref = create_TOC_reference (modaddr, NULL_RTX);
      rtx modmem = gen_const_mem (Pmode, tocref);
      set_mem_alias_set (modmem, get_TOC_alias_set ());

      rtx modreg = gen_reg_rtx (Pmode);
      emit_insn (gen_rtx_SET (modreg, modmem));

      tmpreg = gen_reg_rtx (Pmode);
      emit_insn (gen_rtx_SET (tmpreg, mem));

      dest = gen_reg_rtx (Pmode);
      if (TARGET_32BIT)
	emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
      else
	emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
      return dest;
    }
  /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13.  */
  else if (TARGET_32BIT)
    {
      tlsreg = gen_reg_rtx (SImode);
      emit_insn (gen_tls_get_tpointer (tlsreg));
    }
  else
    tlsreg = gen_rtx_REG (DImode, 13);

  /* Load the TOC value into temporary register.  */
  tmpreg = gen_reg_rtx (Pmode);
  emit_insn (gen_rtx_SET (tmpreg, mem));
  set_unique_reg_note (get_last_insn (), REG_EQUAL,
		       gen_rtx_MINUS (Pmode, addr, tlsreg));

  /* Add TOC symbol value to TLS pointer.  */
  dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));

  return dest;
}
/* ADDR contains a thread-local SYMBOL_REF.  Generate code to compute
   this (thread-local) address.  */

static rtx
rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
{
  rtx dest, insn;

  if (TARGET_XCOFF)
    return rs6000_legitimize_tls_address_aix (addr, model);

  dest = gen_reg_rtx (Pmode);
  if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
    {
      rtx tlsreg;

      if (TARGET_64BIT)
	{
	  tlsreg = gen_rtx_REG (Pmode, 13);
	  insn = gen_tls_tprel_64 (dest, tlsreg, addr);
	}
      else
	{
	  tlsreg = gen_rtx_REG (Pmode, 2);
	  insn = gen_tls_tprel_32 (dest, tlsreg, addr);
	}
      emit_insn (insn);
    }
  else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
    {
      rtx tlsreg, tmp;

      tmp = gen_reg_rtx (Pmode);
      if (TARGET_64BIT)
	{
	  tlsreg = gen_rtx_REG (Pmode, 13);
	  insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
	}
      else
	{
	  tlsreg = gen_rtx_REG (Pmode, 2);
	  insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
	}
      emit_insn (insn);
      if (TARGET_64BIT)
	insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
      else
	insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
      emit_insn (insn);
    }
  else
    {
      rtx r3, got, tga, tmp1, tmp2, call_insn;

      /* We currently use relocations like @got@tlsgd for tls, which
	 means the linker will handle allocation of tls entries, placing
	 them in the .got section.  So use a pointer to the .got section,
	 not one to secondary TOC sections used by 64-bit -mminimal-toc,
	 or to secondary GOT sections used by 32-bit -fPIC.  */
      if (TARGET_64BIT)
	got = gen_rtx_REG (Pmode, 2);
      else
	{
	  if (flag_pic == 1)
	    got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
	  else
	    {
	      rtx gsym = rs6000_got_sym ();
	      got = gen_reg_rtx (Pmode);
	      if (flag_pic == 0)
		rs6000_emit_move (got, gsym, Pmode);
	      else
		{
		  rtx mem, lab;

		  tmp1 = gen_reg_rtx (Pmode);
		  tmp2 = gen_reg_rtx (Pmode);
		  mem = gen_const_mem (Pmode, tmp1);
		  lab = gen_label_rtx ();
		  emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
		  emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
		  if (TARGET_LINK_STACK)
		    emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
		  emit_move_insn (tmp2, mem);
		  rtx_insn *last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
		  set_unique_reg_note (last, REG_EQUAL, gsym);
		}
	    }
	}

      if (model == TLS_MODEL_GLOBAL_DYNAMIC)
	{
	  tga = rs6000_tls_get_addr ();
	  emit_library_call_value (tga, dest, LCT_CONST, Pmode);

	  r3 = gen_rtx_REG (Pmode, 3);
	  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
	    {
	      if (TARGET_64BIT)
		insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
	      else
		insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
	    }
	  else if (DEFAULT_ABI == ABI_V4)
	    insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
	  else
	    gcc_unreachable ();
	  call_insn = last_call_insn ();
	  PATTERN (call_insn) = insn;
	  if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
		     pic_offset_table_rtx);
	}
      else if (model == TLS_MODEL_LOCAL_DYNAMIC)
	{
	  tga = rs6000_tls_get_addr ();
	  tmp1 = gen_reg_rtx (Pmode);
	  emit_library_call_value (tga, tmp1, LCT_CONST, Pmode);

	  r3 = gen_rtx_REG (Pmode, 3);
	  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
	    {
	      if (TARGET_64BIT)
		insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
	      else
		insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
	    }
	  else if (DEFAULT_ABI == ABI_V4)
	    insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
	  else
	    gcc_unreachable ();
	  call_insn = last_call_insn ();
	  PATTERN (call_insn) = insn;
	  if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	    use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
		     pic_offset_table_rtx);

	  if (rs6000_tls_size == 16)
	    {
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_64 (dest, tmp1, addr);
	      else
		insn = gen_tls_dtprel_32 (dest, tmp1, addr);
	    }
	  else if (rs6000_tls_size == 32)
	    {
	      tmp2 = gen_reg_rtx (Pmode);
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
	      else
		insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
	      emit_insn (insn);
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
	      else
		insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
	    }
	  else
	    {
	      tmp2 = gen_reg_rtx (Pmode);
	      if (TARGET_64BIT)
		insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
	      else
		insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
	      emit_insn (insn);
	      insn = gen_rtx_SET (dest, gen_rtx_PLUS (Pmode, tmp2, tmp1));
	    }
	  emit_insn (insn);
	}
      else
	{
	  /* IE, or 64-bit offset LE.  */
	  tmp2 = gen_reg_rtx (Pmode);
	  if (TARGET_64BIT)
	    insn = gen_tls_got_tprel_64 (tmp2, got, addr);
	  else
	    insn = gen_tls_got_tprel_32 (tmp2, got, addr);
	  emit_insn (insn);

	  if (TARGET_64BIT)
	    insn = gen_tls_tls_64 (dest, tmp2, addr);
	  else
	    insn = gen_tls_tls_32 (dest, tmp2, addr);
	  emit_insn (insn);
	}
    }

  return dest;
}
/* Only create the global variable for the stack protect guard if we are using
   the global flavor of that guard.  */
static tree
rs6000_init_stack_protect_guard (void)
{
  if (rs6000_stack_protector_guard == SSP_GLOBAL)
    return default_stack_protect_guard ();

  return NULL_TREE;
}

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */

static bool
rs6000_cannot_force_const_mem (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  if (GET_CODE (x) == HIGH
      && GET_CODE (XEXP (x, 0)) == UNSPEC)
    return true;

  /* A TLS symbol in the TOC cannot contain a sum.  */
  if (GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
    return true;

  /* Do not place an ELF TLS symbol in the constant pool.  */
  return TARGET_ELF && tls_referenced_p (x);
}

/* Return true iff the given SYMBOL_REF refers to a constant pool entry
   that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
   can be addressed relative to the toc pointer.  */

static bool
use_toc_relative_ref (rtx sym, machine_mode mode)
{
  return ((constant_pool_expr_p (sym)
	   && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
					       get_pool_mode (sym)))
	  || (TARGET_CMODEL == CMODEL_MEDIUM
	      && SYMBOL_REF_LOCAL_P (sym)
	      && GET_MODE_SIZE (mode) <= POWERPC64_TOC_POINTER_ALIGNMENT));
}
/* Our implementation of LEGITIMIZE_RELOAD_ADDRESS.  Returns a value to
   replace the input X, or the original X if no replacement is called for.
   The output parameter *WIN is 1 if the calling macro should goto WIN,
   0 if it should not.

   For RS/6000, we wish to handle large displacements off a base
   register by splitting the addend across an addi/addis and the mem insn.
   This cuts number of extra insns needed from 3 to 1.

   On Darwin, we use this to generate code for floating point constants.
   A movsf_low is generated so we wind up with 2 instructions rather than 3.
   The Darwin code is inside #if TARGET_MACHO because only then are the
   machopic_* functions defined.  */
static rtx
rs6000_legitimize_reload_address (rtx x, machine_mode mode,
				  int opnum, int type,
				  int ind_levels ATTRIBUTE_UNUSED, int *win)
{
  bool reg_offset_p = reg_offset_addressing_ok_p (mode);
  bool quad_offset_p = mode_supports_dq_form (mode);

  /* Nasty hack for vsx_splat_v2df/v2di load from mem, which takes a
     DFmode/DImode MEM.  Ditto for ISA 3.0 vsx_splat_v4sf/v4si.  */
  if (reg_offset_p
      && opnum == 1
      && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
	  || (mode == DImode && recog_data.operand_mode[0] == V2DImode)
	  || (mode == SFmode && recog_data.operand_mode[0] == V4SFmode
	      && TARGET_P9_VECTOR)
	  || (mode == SImode && recog_data.operand_mode[0] == V4SImode
	      && TARGET_P9_VECTOR)))
    reg_offset_p = false;

  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      if (TARGET_DEBUG_ADDR)
	{
	  fprintf (stderr, "\nlegitimize_reload_address push_reload #1:\n");
	  debug_rtx (x);
	}
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  /* Likewise for (lo_sum (high ...) ...) output we have generated.  */
  if (GET_CODE (x) == LO_SUM
      && GET_CODE (XEXP (x, 0)) == HIGH)
    {
      if (TARGET_DEBUG_ADDR)
	{
	  fprintf (stderr, "\nlegitimize_reload_address push_reload #2:\n");
	  debug_rtx (x);
	}
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

#if TARGET_MACHO
  if (DEFAULT_ABI == ABI_DARWIN && flag_pic
      && GET_CODE (x) == LO_SUM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
      && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
      && machopic_operand_p (XEXP (x, 1)))
    {
      /* Result of previous invocation of this function on Darwin
	 floating point constant.  */
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }
#endif

  if (TARGET_CMODEL != CMODEL_SMALL
      && reg_offset_p
      && !quad_offset_p
      && small_toc_ref (x, VOIDmode))
    {
      rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
      x = gen_rtx_LO_SUM (Pmode, hi, x);
      if (TARGET_DEBUG_ADDR)
	{
	  fprintf (stderr, "\nlegitimize_reload_address push_reload #3:\n");
	  debug_rtx (x);
	}
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  if (GET_CODE (x) == PLUS
      && REG_P (XEXP (x, 0))
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
      && CONST_INT_P (XEXP (x, 1))
      && reg_offset_p
      && (quad_offset_p || !VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
	= (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow or quad addresses with one of the
	 four least significant bits set.  */
      if (high + low != val
	  || (quad_offset_p && (low & 0xf)))
	{
	  *win = 0;
	  return x;
	}

      /* Reload the high part into a base reg; leave the low part
	 in the mem directly.  */

      x = gen_rtx_PLUS (GET_MODE (x),
			gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
				      GEN_INT (high)),
			GEN_INT (low));

      if (TARGET_DEBUG_ADDR)
	{
	  fprintf (stderr, "\nlegitimize_reload_address push_reload #4:\n");
	  debug_rtx (x);
	}
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  if (GET_CODE (x) == SYMBOL_REF
      && reg_offset_p
      && !quad_offset_p
      && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
#if TARGET_MACHO
      && DEFAULT_ABI == ABI_DARWIN
      && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
      && machopic_symbol_defined_p (x)
#else
      && DEFAULT_ABI == ABI_V4
      && !flag_pic
#endif
      /* Don't do this for TFmode or TDmode, since the result isn't
	 offsettable.  The same goes for DImode without 64-bit gprs and
	 DFmode and DDmode without fprs.
	 ??? Assume floating point reg based on mode?  This assumption is
	 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
	 where reload ends up doing a DFmode load of a constant from
	 mem using two gprs.  Unfortunately, at this point reload
	 hasn't yet selected regs so poking around in reload data
	 won't help and even if we could figure out the regs reliably,
	 we'd still want to allow this transformation when the mem is
	 naturally aligned.  Since we say the address is good here, we
	 can't disable offsets from LO_SUMs in mem_operand_gpr.
	 FIXME: Allow offset from lo_sum for other modes too, when
	 mem is sufficiently aligned.

	 Also disallow this if the type can go in VMX/Altivec registers,
	 since those registers do not have d-form (reg+offset) address
	 modes.  */
      && !reg_addr[mode].scalar_in_vmx_p
      && mode != TFmode
      && mode != TDmode
      && mode != IFmode
      && mode != KFmode
      && (mode != TImode || !TARGET_VSX)
      && mode != PTImode
      && (mode != DImode || TARGET_POWERPC64)
      && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
	  || TARGET_HARD_FLOAT))
    {
#if TARGET_MACHO
      if (flag_pic)
	{
	  rtx offset = machopic_gen_offset (x);
	  x = gen_rtx_LO_SUM (GET_MODE (x),
			      gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
					    gen_rtx_HIGH (Pmode, offset)),
			      offset);
	}
      else
#endif
	x = gen_rtx_LO_SUM (GET_MODE (x),
			    gen_rtx_HIGH (Pmode, x), x);

      if (TARGET_DEBUG_ADDR)
	{
	  fprintf (stderr, "\nlegitimize_reload_address push_reload #5:\n");
	  debug_rtx (x);
	}
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  /* Reload an offset address wrapped by an AND that represents the
     masking of the lower bits.  Strip the outer AND and let reload
     convert the offset address into an indirect address.  For VSX,
     force reload to create the address with an AND in a separate
     register, because we can't guarantee an altivec register will
     be used.  */
  if (VECTOR_MEM_ALTIVEC_P (mode)
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -16)
    {
      x = XEXP (x, 0);
      *win = 1;
      return x;
    }

  if (TARGET_TOC
      && reg_offset_p
      && !quad_offset_p
      && GET_CODE (x) == SYMBOL_REF
      && use_toc_relative_ref (x, mode))
    {
      x = create_TOC_reference (x, NULL_RTX);
      if (TARGET_CMODEL != CMODEL_SMALL)
	{
	  if (TARGET_DEBUG_ADDR)
	    {
	      fprintf (stderr, "\nlegitimize_reload_address push_reload #6:\n");
	      debug_rtx (x);
	    }
	  push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		       BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		       opnum, (enum reload_type) type);
	}
      *win = 1;
      return x;
    }
  *win = 0;
  return x;
}
/* Debug version of rs6000_legitimize_reload_address.  */
static rtx
rs6000_debug_legitimize_reload_address (rtx x, machine_mode mode,
					int opnum, int type,
					int ind_levels, int *win)
{
  rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
					      ind_levels, win);
  fprintf (stderr,
	   "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
	   "type = %d, ind_levels = %d, win = %d, original addr:\n",
	   GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
  debug_rtx (x);

  if (x == ret)
    fprintf (stderr, "Same address returned\n");
  else if (!ret)
    fprintf (stderr, "NULL returned\n");
  else
    {
      fprintf (stderr, "New address:\n");
      debug_rtx (ret);
    }

  return ret;
}
/* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
   that is a valid memory address for an instruction.
   The MODE argument is the machine mode for the MEM expression
   that wants to use this address.

   On the RS/6000, there are four valid addresses: a SYMBOL_REF that
   refers to a constant pool entry of an address (or the sum of it
   plus a constant), a short (16-bit signed) constant plus a register,
   the sum of two registers, or a register indirect, possibly with an
   auto-increment.  For DFmode, DDmode and DImode with a constant plus
   register, we must ensure that both words are addressable or PowerPC64
   with offset word aligned.

   For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
   32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
   because adjacent memory cells are accessed by adding word-sized offsets
   during assembly output.  */
static bool
rs6000_legitimate_address_p (machine_mode mode, rtx x, bool reg_ok_strict)
{
  bool reg_offset_p = reg_offset_addressing_ok_p (mode);
  bool quad_offset_p = mode_supports_dq_form (mode);

  /* If this is an unaligned stvx/ldvx type address, discard the outer AND.  */
  if (VECTOR_MEM_ALTIVEC_P (mode)
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -16)
    x = XEXP (x, 0);

  if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
    return 0;
  if (legitimate_indirect_address_p (x, reg_ok_strict))
    return 1;
  if (TARGET_UPDATE
      && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
      && mode_supports_pre_incdec_p (mode)
      && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
    return 1;
  /* Handle restricted vector d-form offsets in ISA 3.0.  */
  if (quad_offset_p)
    {
      if (quad_address_p (x, mode, reg_ok_strict))
	return 1;
    }
  else if (virtual_stack_registers_memory_p (x))
    return 1;

  else if (reg_offset_p)
    {
      if (legitimate_small_data_p (mode, x))
	return 1;
      if (legitimate_constant_pool_address_p (x, mode,
					      reg_ok_strict || lra_in_progress))
	return 1;
      if (reg_addr[mode].fused_toc && GET_CODE (x) == UNSPEC
	  && XINT (x, 1) == UNSPEC_FUSION_ADDIS)
	return 1;
    }

  /* For TImode, if we have TImode in VSX registers, only allow register
     indirect addresses.  This will allow the values to go in either GPRs
     or VSX registers without reloading.  The vector types would tend to
     go into VSX registers, so we allow REG+REG, while TImode seems
     somewhat split, in that some uses are GPR based, and some VSX based.  */
  /* FIXME: We could loosen this by changing the following to
       if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX)
     but currently we cannot allow REG+REG addressing for TImode.  See
     PR72827 for complete details on how this ends up hoodwinking DSE.  */
  if (mode == TImode && TARGET_VSX)
    return 0;
  /* If not REG_OK_STRICT (before reload) let pass any stack offset.  */
  if (! reg_ok_strict
      && reg_offset_p
      && GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && (XEXP (x, 0) == virtual_stack_vars_rtx
	  || XEXP (x, 0) == arg_pointer_rtx)
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return 1;
  if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
    return 1;
  if (!FLOAT128_2REG_P (mode)
      && (TARGET_HARD_FLOAT
	  || TARGET_POWERPC64
	  || (mode != DFmode && mode != DDmode))
      && (TARGET_POWERPC64 || mode != DImode)
      && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
      && mode != PTImode
      && !avoiding_indexed_address_p (mode)
      && legitimate_indexed_address_p (x, reg_ok_strict))
    return 1;
  if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
      && mode_supports_pre_modify_p (mode)
      && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
      && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
					      reg_ok_strict, false)
	  || (!avoiding_indexed_address_p (mode)
	      && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
      && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
    return 1;
  if (reg_offset_p && !quad_offset_p
      && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
    return 1;
  return 0;
}
/* Debug version of rs6000_legitimate_address_p.  */
static bool
rs6000_debug_legitimate_address_p (machine_mode mode, rtx x,
				   bool reg_ok_strict)
{
  bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
  fprintf (stderr,
	   "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
	   "strict = %d, reload = %s, code = %s\n",
	   ret ? "true" : "false",
	   GET_MODE_NAME (mode),
	   reg_ok_strict,
	   (reload_completed ? "after" : "before"),
	   GET_RTX_NAME (GET_CODE (x)));
  debug_rtx (x);

  return ret;
}

/* Implement TARGET_MODE_DEPENDENT_ADDRESS_P.  */

static bool
rs6000_mode_dependent_address_p (const_rtx addr,
				 addr_space_t as ATTRIBUTE_UNUSED)
{
  return rs6000_mode_dependent_address_ptr (addr);
}

/* Go to LABEL if ADDR (a legitimate address expression)
   has an effect that depends on the machine mode it is used for.

   On the RS/6000 this is true of all integral offsets (since AltiVec
   and VSX modes don't allow them) or is a pre-increment or decrement.

   ??? Except that due to conceptual problems in offsettable_address_p
   we can't really report the problems of integral offsets.  So leave
   this assuming that the adjustable offset must be valid for the
   sub-words of a TFmode operand, which is what we had before.  */

static bool
rs6000_mode_dependent_address (const_rtx addr)
{
  switch (GET_CODE (addr))
    {
    case PLUS:
      /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
	 is considered a legitimate address before reload, so there
	 are no offset restrictions in that case.  Note that this
	 condition is safe in strict mode because any address involving
	 virtual_stack_vars_rtx or arg_pointer_rtx would already have
	 been rejected as illegitimate.  */
      if (XEXP (addr, 0) != virtual_stack_vars_rtx
	  && XEXP (addr, 0) != arg_pointer_rtx
	  && GET_CODE (XEXP (addr, 1)) == CONST_INT)
	{
	  unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
	  return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
	}
      break;

    case LO_SUM:
      /* Anything in the constant pool is sufficiently aligned that
	 all bytes have the same high part address.  */
      return !legitimate_constant_pool_address_p (addr, QImode, false);

    /* Auto-increment cases are now treated generically in recog.c.  */
    case PRE_MODIFY:
      return TARGET_UPDATE;

    /* AND is only allowed in Altivec loads.  */
    case AND:
      return true;

    default:
      break;
    }

  return false;
}

/* Debug version of rs6000_mode_dependent_address.  */
static bool
rs6000_debug_mode_dependent_address (const_rtx addr)
{
  bool ret = rs6000_mode_dependent_address (addr);

  fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
	   ret ? "true" : "false");
  debug_rtx (CONST_CAST_RTX (addr));

  return ret;
}
/* Implement FIND_BASE_TERM.  */

rtx
rs6000_find_base_term (rtx op)
{
  rtx base;

  base = op;
  if (GET_CODE (base) == CONST)
    base = XEXP (base, 0);
  if (GET_CODE (base) == PLUS)
    base = XEXP (base, 0);
  if (GET_CODE (base) == UNSPEC)
    switch (XINT (base, 1))
      {
      case UNSPEC_TOCREL:
      case UNSPEC_MACHOPIC_OFFSET:
	/* OP represents SYM [+ OFFSET] - ANCHOR.  SYM is the base term
	   for aliasing purposes.  */
	return XVECEXP (base, 0, 0);
      }

  return op;
}

/* More elaborate version of recog's offsettable_memref_p predicate
   that works around the ??? note of rs6000_mode_dependent_address.
   In particular it accepts

     (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))

   in 32-bit mode, that the recog predicate rejects.  */

static bool
rs6000_offsettable_memref_p (rtx op, machine_mode reg_mode, bool strict)
{
  bool worst_case;

  if (!MEM_P (op))
    return false;

  /* First mimic offsettable_memref_p.  */
  if (offsettable_address_p (strict, GET_MODE (op), XEXP (op, 0)))
    return true;

  /* offsettable_address_p invokes rs6000_mode_dependent_address, but
     the latter predicate knows nothing about the mode of the memory
     reference and, therefore, assumes that it is the largest supported
     mode (TFmode).  As a consequence, legitimate offsettable memory
     references are rejected.  rs6000_legitimate_offset_address_p contains
     the correct logic for the PLUS case of rs6000_mode_dependent_address,
     at least with a little bit of help here given that we know the
     actual registers used.  */
  worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
		|| GET_MODE_SIZE (reg_mode) == 4);
  return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
					     strict, worst_case);
}

/* Determine the reassociation width to be used in reassociate_bb.
   This takes into account how many parallel operations we
   can actually do of a given type, and also the latency.
   P8:
     int add/sub 6/cycle
     mul 2/cycle
     vect add/sub/mul 2/cycle
     fp   add/sub/mul 2/cycle
     dfp  1/cycle
*/
static int
rs6000_reassociation_width (unsigned int opc ATTRIBUTE_UNUSED,
			    machine_mode mode)
{
  switch (rs6000_tune)
    {
    case PROCESSOR_POWER8:
    case PROCESSOR_POWER9:
      if (DECIMAL_FLOAT_MODE_P (mode))
	return 1;
      if (VECTOR_MODE_P (mode))
	return 4;
      if (INTEGRAL_MODE_P (mode))
	return 1;
      if (FLOAT_MODE_P (mode))
	return 4;
      break;
    default:
      break;
    }
  return 1;
}
/* Change register usage conditional on target flags.  */
static void
rs6000_conditional_register_usage (void)
{
  int i;

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_conditional_register_usage called\n");

  /* Set MQ register fixed (already call_used) so that it will not be
     allocated.  */
  fixed_regs[64] = 1;

  /* 64-bit AIX and Linux reserve GPR13 for thread-private data.  */
  if (TARGET_64BIT)
    fixed_regs[13] = call_used_regs[13]
      = call_really_used_regs[13] = 1;

  /* Conditionally disable FPRs.  */
  if (TARGET_SOFT_FLOAT)
    for (i = 32; i < 64; i++)
      fixed_regs[i] = call_used_regs[i]
	= call_really_used_regs[i] = 1;

  /* The TOC register is not killed across calls in a way that is
     visible to the compiler.  */
  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    call_really_used_regs[2] = 0;

  if (DEFAULT_ABI == ABI_V4 && flag_pic == 2)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (DEFAULT_ABI == ABI_V4 && flag_pic == 1)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (DEFAULT_ABI == ABI_DARWIN && flag_pic)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (TARGET_TOC && TARGET_MINIMAL_TOC)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (!TARGET_ALTIVEC && !TARGET_VSX)
    {
      for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
	fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
      call_really_used_regs[VRSAVE_REGNO] = 1;
    }

  if (TARGET_ALTIVEC || TARGET_VSX)
    global_regs[VSCR_REGNO] = 1;

  if (TARGET_ALTIVEC_ABI)
    {
      for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
	call_used_regs[i] = call_really_used_regs[i] = 1;

      /* AIX reserves VR20:31 in non-extended ABI mode.  */
      if (TARGET_XCOFF)
	for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
	  fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
    }
}
/* Output insns to set DEST equal to the constant SOURCE as a series of
   lis, ori and shl instructions and return TRUE.  */

bool
rs6000_emit_set_const (rtx dest, rtx source)
{
  machine_mode mode = GET_MODE (dest);
  rtx temp, set;
  rtx_insn *insn;
  HOST_WIDE_INT c;

  gcc_checking_assert (CONST_INT_P (source));
  c = INTVAL (source);
  switch (mode)
    {
    case E_QImode:
    case E_HImode:
      emit_insn (gen_rtx_SET (dest, source));
      return true;

    case E_SImode:
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);

      emit_insn (gen_rtx_SET (copy_rtx (temp),
			      GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
      emit_insn (gen_rtx_SET (dest,
			      gen_rtx_IOR (SImode, copy_rtx (temp),
					   GEN_INT (c & 0xffff))));
      break;

    case E_DImode:
      if (!TARGET_POWERPC64)
	{
	  rtx hi, lo;

	  hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
				      DImode);
	  lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
				      DImode);
	  emit_move_insn (hi, GEN_INT (c >> 32));
	  c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
	  emit_move_insn (lo, GEN_INT (c));
	}
      else
	rs6000_emit_set_long_const (dest, c);
      break;

    default:
      gcc_unreachable ();
    }

  insn = get_last_insn ();
  set = single_set (insn);
  if (! CONSTANT_P (SET_SRC (set)))
    set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));

  return true;
}
/* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
   Output insns to set DEST equal to the constant C as a series of
   lis, ori and shl instructions.  */

static void
rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
{
  rtx temp;
  HOST_WIDE_INT ud1, ud2, ud3, ud4;

  ud1 = c & 0xffff;
  c = c >> 16;
  ud2 = c & 0xffff;
  c = c >> 16;
  ud3 = c & 0xffff;
  c = c >> 16;
  ud4 = c & 0xffff;

  if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
      || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
    emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));

  else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
           || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
                      GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
      if (ud1 != 0)
        emit_move_insn (dest,
                        gen_rtx_IOR (DImode, copy_rtx (temp),
                                     GEN_INT (ud1)));
    }
  else if (ud3 == 0 && ud4 == 0)
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      gcc_assert (ud2 & 0x8000);
      emit_move_insn (copy_rtx (temp),
                      GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
      if (ud1 != 0)
        emit_move_insn (copy_rtx (temp),
                        gen_rtx_IOR (DImode, copy_rtx (temp),
                                     GEN_INT (ud1)));
      emit_move_insn (dest,
                      gen_rtx_ZERO_EXTEND (DImode,
                                           gen_lowpart (SImode,
                                                        copy_rtx (temp))));
    }
  else if ((ud4 == 0xffff && (ud3 & 0x8000))
           || (ud4 == 0 && ! (ud3 & 0x8000)))
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      emit_move_insn (copy_rtx (temp),
                      GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
      if (ud2 != 0)
        emit_move_insn (copy_rtx (temp),
                        gen_rtx_IOR (DImode, copy_rtx (temp),
                                     GEN_INT (ud2)));
      emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
                      gen_rtx_ASHIFT (DImode, copy_rtx (temp),
                                      GEN_INT (16)));
      if (ud1 != 0)
        emit_move_insn (dest,
                        gen_rtx_IOR (DImode, copy_rtx (temp),
                                     GEN_INT (ud1)));
    }
  else
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      emit_move_insn (copy_rtx (temp),
                      GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
      if (ud3 != 0)
        emit_move_insn (copy_rtx (temp),
                        gen_rtx_IOR (DImode, copy_rtx (temp),
                                     GEN_INT (ud3)));

      emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
                      gen_rtx_ASHIFT (DImode, copy_rtx (temp),
                                      GEN_INT (32)));
      if (ud2 != 0)
        emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
                        gen_rtx_IOR (DImode, copy_rtx (temp),
                                     GEN_INT (ud2 << 16)));
      if (ud1 != 0)
        emit_move_insn (dest,
                        gen_rtx_IOR (DImode, copy_rtx (temp),
                                     GEN_INT (ud1)));
    }
}
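/* Worked example for the final (general) case above (illustrative note,
   not part of the original source): c = 0x123456789abcdef0 splits into
   ud4 = 0x1234, ud3 = 0x5678, ud2 = 0x9abc, ud1 = 0xdef0, and the moves
   emitted correspond to

        lis   rT,0x1234         load ud4 << 16, sign-extended
        ori   rT,rT,0x5678      IOR in ud3
        sldi  rT,rT,32          shift into the high doubleword
        oris  rT,rT,0x9abc      IOR in ud2 << 16
        ori   rD,rT,0xdef0      IOR in ud1

   The (x ^ 0x80000000) - 0x80000000 idiom sign-extends a 32-bit value
   so that every intermediate CONST_INT is canonical on a 64-bit
   host.  */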
/* Helper for the following.  Get rid of [r+r] memory refs
   in cases where it won't work (TImode, TFmode, TDmode, PTImode).  */

static void
rs6000_eliminate_indexed_memrefs (rtx operands[2])
{
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (XEXP (operands[0], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
                                               GET_MODE (operands[0]), false))
    operands[0]
      = replace_equiv_address (operands[0],
                               copy_addr_to_reg (XEXP (operands[0], 0)));

  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
                                               GET_MODE (operands[1]), false))
    operands[1]
      = replace_equiv_address (operands[1],
                               copy_addr_to_reg (XEXP (operands[1], 0)));
}
/* Generate a vector of constants to permute MODE for a little-endian
   storage operation by swapping the two halves of a vector.  */
static rtvec
rs6000_const_vec (machine_mode mode)
{
  int i, subparts;
  rtvec v;

  switch (mode)
    {
    case E_V1TImode:
      subparts = 1;
      break;
    case E_V2DFmode:
    case E_V2DImode:
      subparts = 2;
      break;
    case E_V4SFmode:
    case E_V4SImode:
      subparts = 4;
      break;
    case E_V8HImode:
      subparts = 8;
      break;
    case E_V16QImode:
      subparts = 16;
      break;
    default:
      gcc_unreachable ();
    }

  v = rtvec_alloc (subparts);

  for (i = 0; i < subparts / 2; ++i)
    RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
  for (i = subparts / 2; i < subparts; ++i)
    RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);

  return v;
}
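/* Illustrative note (not part of the original source): for V4SImode the
   vector built above is {2, 3, 0, 1}, so a VEC_SELECT with this
   permutation swaps the two 64-bit halves of the vector, which is
   exactly the doubleword swap that lxvd2x/stxvd2x perform on a
   little-endian memory image.  */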
/* Emit an lxvd2x, stxvd2x, or xxpermdi instruction for a VSX load or
   store operation.  */
static void
rs6000_emit_le_vsx_permute (rtx dest, rtx source, machine_mode mode)
{
  /* Scalar permutations are easier to express in integer modes rather than
     floating-point modes, so cast them here.  We use V1TImode instead
     of TImode to ensure that the values don't go through GPRs.  */
  if (FLOAT128_VECTOR_P (mode))
    {
      dest = gen_lowpart (V1TImode, dest);
      source = gen_lowpart (V1TImode, source);
      mode = V1TImode;
    }

  /* Use ROTATE instead of VEC_SELECT if the mode contains only a single
     scalar.  */
  if (mode == TImode || mode == V1TImode)
    emit_insn (gen_rtx_SET (dest, gen_rtx_ROTATE (mode, source,
                                                  GEN_INT (64))));
  else
    {
      rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
      emit_insn (gen_rtx_SET (dest, gen_rtx_VEC_SELECT (mode, source, par)));
    }
}
/* Emit a little-endian load from vector memory location SOURCE to VSX
   register DEST in mode MODE.  The load is done with two permuting
   insn's that represent an lxvd2x and xxpermdi.  */
static void
rs6000_emit_le_vsx_load (rtx dest, rtx source, machine_mode mode)
{
  /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
     V1TImode).  */
  if (mode == TImode || mode == V1TImode)
    {
      mode = V2DImode;
      dest = gen_lowpart (V2DImode, dest);
      source = adjust_address (source, V2DImode, 0);
    }

  rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
  rs6000_emit_le_vsx_permute (tmp, source, mode);
  rs6000_emit_le_vsx_permute (dest, tmp, mode);
}
/* Emit a little-endian store to vector memory location DEST from VSX
   register SOURCE in mode MODE.  The store is done with two permuting
   insn's that represent an xxpermdi and an stxvd2x.  */
static void
rs6000_emit_le_vsx_store (rtx dest, rtx source, machine_mode mode)
{
  /* This should never be called during or after LRA, because it does
     not re-permute the source register.  It is intended only for use
     during expand.  */
  gcc_assert (!lra_in_progress && !reload_completed);

  /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
     V1TImode).  */
  if (mode == TImode || mode == V1TImode)
    {
      mode = V2DImode;
      dest = adjust_address (dest, V2DImode, 0);
      source = gen_lowpart (V2DImode, source);
    }

  rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
  rs6000_emit_le_vsx_permute (tmp, source, mode);
  rs6000_emit_le_vsx_permute (dest, tmp, mode);
}
/* Emit a sequence representing a little-endian VSX load or store,
   moving data from SOURCE to DEST in mode MODE.  This is done
   separately from rs6000_emit_move to ensure it is called only
   during expand.  LE VSX loads and stores introduced later are
   handled with a split.  The expand-time RTL generation allows
   us to optimize away redundant pairs of register-permutes.  */
void
rs6000_emit_le_vsx_move (rtx dest, rtx source, machine_mode mode)
{
  gcc_assert (!BYTES_BIG_ENDIAN
              && VECTOR_MEM_VSX_P (mode)
              && !TARGET_P9_VECTOR
              && !gpr_or_gpr_p (dest, source)
              && (MEM_P (source) ^ MEM_P (dest)));

  if (MEM_P (source))
    {
      gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
      rs6000_emit_le_vsx_load (dest, source, mode);
    }
  else
    {
      if (!REG_P (source))
        source = force_reg (mode, source);
      rs6000_emit_le_vsx_store (dest, source, mode);
    }
}
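/* Illustrative note (not part of the original source): a LE VSX load is
   modeled as lxvd2x (which swaps the two doublewords of the memory
   image) followed by a register permute that swaps them back, and a
   store as the mirror image.  Emitting both permutes explicitly at
   expand time lets later passes cancel adjacent swap pairs, so in a
   simple vector copy the load's permute and the store's permute can be
   optimized away entirely.  */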
/* Return whether a SFmode or SImode move can be done without converting one
   mode to another.  This arises when we have:

        (SUBREG:SF (REG:SI ...))
        (SUBREG:SI (REG:SF ...))

   and one of the values is in a floating point/vector register, where SFmode
   scalars are stored in DFmode format.  */

bool
valid_sf_si_move (rtx dest, rtx src, machine_mode mode)
{
  if (TARGET_ALLOW_SF_SUBREG)
    return true;

  if (mode != SFmode && GET_MODE_CLASS (mode) != MODE_INT)
    return true;

  if (!SUBREG_P (src) || !sf_subreg_operand (src, mode))
    return true;

  /* Allow (set (SUBREG:SI (REG:SF)) (SUBREG:SI (REG:SF))).  */
  if (SUBREG_P (dest))
    {
      rtx dest_subreg = SUBREG_REG (dest);
      rtx src_subreg = SUBREG_REG (src);
      return GET_MODE (dest_subreg) == GET_MODE (src_subreg);
    }

  return false;
}
/* Helper function to change moves with:

        (SUBREG:SF (REG:SI)) and
        (SUBREG:SI (REG:SF))

   into separate UNSPEC insns.  In the PowerPC architecture, scalar SFmode
   values are stored as DFmode values in the VSX registers.  We need to convert
   the bits before we can use a direct move or operate on the bits in the
   vector register as an integer type.

   Skip things like (set (SUBREG:SI (...) (SUBREG:SI (...)).  */

static bool
rs6000_emit_move_si_sf_subreg (rtx dest, rtx source, machine_mode mode)
{
  if (TARGET_DIRECT_MOVE_64BIT && !lra_in_progress && !reload_completed
      && (!SUBREG_P (dest) || !sf_subreg_operand (dest, mode))
      && SUBREG_P (source) && sf_subreg_operand (source, mode))
    {
      rtx inner_source = SUBREG_REG (source);
      machine_mode inner_mode = GET_MODE (inner_source);

      if (mode == SImode && inner_mode == SFmode)
        {
          emit_insn (gen_movsi_from_sf (dest, inner_source));
          return true;
        }

      if (mode == SFmode && inner_mode == SImode)
        {
          emit_insn (gen_movsf_from_si (dest, inner_source));
          return true;
        }
    }

  return false;
}
/* Emit a move from SOURCE to DEST in mode MODE.  */
void
rs6000_emit_move (rtx dest, rtx source, machine_mode mode)
{
  rtx operands[2];
  operands[0] = dest;
  operands[1] = source;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr,
               "\nrs6000_emit_move: mode = %s, lra_in_progress = %d, "
               "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
               GET_MODE_NAME (mode),
               lra_in_progress,
               reload_completed,
               can_create_pseudo_p ());
      debug_rtx (dest);
      fprintf (stderr, "source:\n");
      debug_rtx (source);
    }

  /* Sanity checks.  Check that we get CONST_DOUBLE only when we should.  */
  if (CONST_WIDE_INT_P (operands[1])
      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      /* This should be fixed with the introduction of CONST_WIDE_INT.  */
      gcc_unreachable ();
    }
#ifdef HAVE_AS_GNU_ATTRIBUTE
  /* If we use a long double type, set the flags in .gnu_attribute that say
     what the long double type is.  This is to allow the linker's warning
     message for the wrong long double to be useful, even if the function does
     not do a call (for example, doing a 128-bit add on power9 if the long
     double type is IEEE 128-bit).  Do not set this if __ibm128 or __float128
     are used if they aren't the default long double type.  */
  if (rs6000_gnu_attr && (HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT))
    {
      if (TARGET_LONG_DOUBLE_128 && (mode == TFmode || mode == TCmode))
        rs6000_passes_float = rs6000_passes_long_double = true;

      else if (!TARGET_LONG_DOUBLE_128 && (mode == DFmode || mode == DCmode))
        rs6000_passes_float = rs6000_passes_long_double = true;
    }
#endif
  /* See if we need to special case SImode/SFmode SUBREG moves.  */
  if ((mode == SImode || mode == SFmode) && SUBREG_P (source)
      && rs6000_emit_move_si_sf_subreg (dest, source, mode))
    return;

  /* Check if GCC is setting up a block move that will end up using FP
     registers as temporaries.  We must make sure this is acceptable.  */
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (operands[1]) == MEM
      && mode == DImode
      && (rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[0]))
          || rs6000_slow_unaligned_access (DImode, MEM_ALIGN (operands[1])))
      && ! (rs6000_slow_unaligned_access (SImode,
                                          (MEM_ALIGN (operands[0]) > 32
                                           ? 32 : MEM_ALIGN (operands[0])))
            || rs6000_slow_unaligned_access (SImode,
                                             (MEM_ALIGN (operands[1]) > 32
                                              ? 32 : MEM_ALIGN (operands[1]))))
      && ! MEM_VOLATILE_P (operands[0])
      && ! MEM_VOLATILE_P (operands[1]))
    {
      emit_move_insn (adjust_address (operands[0], SImode, 0),
                      adjust_address (operands[1], SImode, 0));
      emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
                      adjust_address (copy_rtx (operands[1]), SImode, 4));
      return;
    }

  if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
      && !gpc_reg_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);
  /* Recognize the case where operand[1] is a reference to thread-local
     data and load its address to a register.  */
  if (tls_referenced_p (operands[1]))
    {
      enum tls_model model;
      rtx tmp = operands[1];
      rtx addend = NULL;

      if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
        {
          addend = XEXP (XEXP (tmp, 0), 1);
          tmp = XEXP (XEXP (tmp, 0), 0);
        }

      gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
      model = SYMBOL_REF_TLS_MODEL (tmp);
      gcc_assert (model != 0);

      tmp = rs6000_legitimize_tls_address (tmp, model);
      if (addend)
        {
          tmp = gen_rtx_PLUS (mode, tmp, addend);
          tmp = force_operand (tmp, operands[0]);
        }
      operands[1] = tmp;
    }
  /* 128-bit constant floating-point values on Darwin should really be loaded
     as two parts.  However, this premature splitting is a problem when DFmode
     values can go into Altivec registers.  */
  if (FLOAT128_IBM_P (mode) && !reg_addr[DFmode].scalar_in_vmx_p
      && GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
                        simplify_gen_subreg (DFmode, operands[1], mode, 0),
                        DFmode);
      rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
                                             GET_MODE_SIZE (DFmode)),
                        simplify_gen_subreg (DFmode, operands[1], mode,
                                             GET_MODE_SIZE (DFmode)),
                        DFmode);
      return;
    }
  /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
     p1:SD) if p1 is not of floating point class and p0 is spilled as
     we can have no analogous movsd_store for this.  */
  if (lra_in_progress && mode == DDmode
      && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
      && reg_preferred_class (REGNO (operands[0])) == NO_REGS
      && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
      && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
    {
      enum reg_class cl;
      int regno = REGNO (SUBREG_REG (operands[1]));

      if (regno >= FIRST_PSEUDO_REGISTER)
        {
          cl = reg_preferred_class (regno);
          regno = reg_renumber[regno];
          if (regno < 0)
            regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
        }
      if (regno >= 0 && ! FP_REGNO_P (regno))
        {
          mode = SDmode;
          operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
          operands[1] = SUBREG_REG (operands[1]);
        }
    }
  if (lra_in_progress
      && mode == SDmode
      && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
      && reg_preferred_class (REGNO (operands[0])) == NO_REGS
      && (REG_P (operands[1])
          || (GET_CODE (operands[1]) == SUBREG
              && REG_P (SUBREG_REG (operands[1])))))
    {
      int regno = REGNO (GET_CODE (operands[1]) == SUBREG
                         ? SUBREG_REG (operands[1]) : operands[1]);
      enum reg_class cl;

      if (regno >= FIRST_PSEUDO_REGISTER)
        {
          cl = reg_preferred_class (regno);
          gcc_assert (cl != NO_REGS);
          regno = reg_renumber[regno];
          if (regno < 0)
            regno = ira_class_hard_regs[cl][0];
        }
      if (FP_REGNO_P (regno))
        {
          if (GET_MODE (operands[0]) != DDmode)
            operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
          emit_insn (gen_movsd_store (operands[0], operands[1]));
        }
      else if (INT_REGNO_P (regno))
        emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
      else
        gcc_unreachable ();
      return;
    }
  /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
     p:DD)) if p0 is not of floating point class and p1 is spilled as
     we can have no analogous movsd_load for this.  */
  if (lra_in_progress && mode == DDmode
      && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
      && GET_MODE (SUBREG_REG (operands[0])) == SDmode
      && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
      && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
    {
      enum reg_class cl;
      int regno = REGNO (SUBREG_REG (operands[0]));

      if (regno >= FIRST_PSEUDO_REGISTER)
        {
          cl = reg_preferred_class (regno);
          regno = reg_renumber[regno];
          if (regno < 0)
            regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
        }
      if (regno >= 0 && ! FP_REGNO_P (regno))
        {
          mode = SDmode;
          operands[0] = SUBREG_REG (operands[0]);
          operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
        }
    }
  if (lra_in_progress
      && mode == SDmode
      && (REG_P (operands[0])
          || (GET_CODE (operands[0]) == SUBREG
              && REG_P (SUBREG_REG (operands[0]))))
      && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
      && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
    {
      int regno = REGNO (GET_CODE (operands[0]) == SUBREG
                         ? SUBREG_REG (operands[0]) : operands[0]);
      enum reg_class cl;

      if (regno >= FIRST_PSEUDO_REGISTER)
        {
          cl = reg_preferred_class (regno);
          gcc_assert (cl != NO_REGS);
          regno = reg_renumber[regno];
          if (regno < 0)
            regno = ira_class_hard_regs[cl][0];
        }
      if (FP_REGNO_P (regno))
        {
          if (GET_MODE (operands[1]) != DDmode)
            operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
          emit_insn (gen_movsd_load (operands[0], operands[1]));
        }
      else if (INT_REGNO_P (regno))
        emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
      else
        gcc_unreachable ();
      return;
    }
  /* FIXME:  In the long term, this switch statement should go away
     and be replaced by a sequence of tests based on things like
     mode == Pmode.  */
  switch (mode)
    {
    case E_HImode:
    case E_QImode:
      if (CONSTANT_P (operands[1])
          && GET_CODE (operands[1]) != CONST_INT)
        operands[1] = force_const_mem (mode, operands[1]);
      break;

    case E_TFmode:
    case E_TDmode:
    case E_IFmode:
    case E_KFmode:
      if (FLOAT128_2REG_P (mode))
        rs6000_eliminate_indexed_memrefs (operands);
      /* fall through */

    case E_DFmode:
    case E_DDmode:
    case E_SFmode:
    case E_SDmode:
      if (CONSTANT_P (operands[1])
          && ! easy_fp_constant (operands[1], mode))
        operands[1] = force_const_mem (mode, operands[1]);
      break;

    case E_V16QImode:
    case E_V8HImode:
    case E_V4SFmode:
    case E_V4SImode:
    case E_V2DFmode:
    case E_V2DImode:
    case E_V1TImode:
      if (CONSTANT_P (operands[1])
          && !easy_vector_constant (operands[1], mode))
        operands[1] = force_const_mem (mode, operands[1]);
      break;

    case E_SImode:
    case E_DImode:
      /* Use default pattern for address of ELF small data */
      if (TARGET_ELF
          && mode == Pmode
          && DEFAULT_ABI == ABI_V4
          && (GET_CODE (operands[1]) == SYMBOL_REF
              || GET_CODE (operands[1]) == CONST)
          && small_data_operand (operands[1], mode))
        {
          emit_insn (gen_rtx_SET (operands[0], operands[1]));
          return;
        }

      if (DEFAULT_ABI == ABI_V4
          && mode == Pmode && mode == SImode
          && flag_pic == 1 && got_operand (operands[1], mode))
        {
          emit_insn (gen_movsi_got (operands[0], operands[1]));
          return;
        }

      if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
          && TARGET_NO_TOC
          && ! flag_pic
          && mode == Pmode
          && CONSTANT_P (operands[1])
          && GET_CODE (operands[1]) != HIGH
          && GET_CODE (operands[1]) != CONST_INT)
        {
          rtx target = (!can_create_pseudo_p ()
                        ? operands[0]
                        : gen_reg_rtx (mode));

          /* If this is a function address on -mcall-aixdesc,
             convert it to the address of the descriptor.  */
          if (DEFAULT_ABI == ABI_AIX
              && GET_CODE (operands[1]) == SYMBOL_REF
              && XSTR (operands[1], 0)[0] == '.')
            {
              const char *name = XSTR (operands[1], 0);
              rtx new_ref;
              while (*name == '.')
                name++;
              new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
              CONSTANT_POOL_ADDRESS_P (new_ref)
                = CONSTANT_POOL_ADDRESS_P (operands[1]);
              SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
              SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
              SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
              operands[1] = new_ref;
            }

          if (DEFAULT_ABI == ABI_DARWIN)
            {
#if TARGET_MACHO
              if (MACHO_DYNAMIC_NO_PIC_P)
                {
                  /* Take care of any required data indirection.  */
                  operands[1] = rs6000_machopic_legitimize_pic_address (
                                  operands[1], mode, operands[0]);
                  if (operands[0] != operands[1])
                    emit_insn (gen_rtx_SET (operands[0], operands[1]));
                  return;
                }
#endif
              emit_insn (gen_macho_high (target, operands[1]));
              emit_insn (gen_macho_low (operands[0], target, operands[1]));
              return;
            }

          emit_insn (gen_elf_high (target, operands[1]));
          emit_insn (gen_elf_low (operands[0], target, operands[1]));
          return;
        }

      /* If this is a SYMBOL_REF that refers to a constant pool entry,
         and we have put it in the TOC, we just need to make a TOC-relative
         reference to it.  */
      if (TARGET_TOC
          && GET_CODE (operands[1]) == SYMBOL_REF
          && use_toc_relative_ref (operands[1], mode))
        operands[1] = create_TOC_reference (operands[1], operands[0]);
      else if (mode == Pmode
               && CONSTANT_P (operands[1])
               && GET_CODE (operands[1]) != HIGH
               && ((GET_CODE (operands[1]) != CONST_INT
                    && ! easy_fp_constant (operands[1], mode))
                   || (GET_CODE (operands[1]) == CONST_INT
                       && (num_insns_constant (operands[1], mode)
                           > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
                   || (GET_CODE (operands[0]) == REG
                       && FP_REGNO_P (REGNO (operands[0]))))
               && !toc_relative_expr_p (operands[1], false, NULL, NULL)
               && (TARGET_CMODEL == CMODEL_SMALL
                   || can_create_pseudo_p ()
                   || (REG_P (operands[0])
                       && INT_REG_OK_FOR_BASE_P (operands[0], true))))
        {
#if TARGET_MACHO
          /* Darwin uses a special PIC legitimizer.  */
          if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
            {
              operands[1] =
                rs6000_machopic_legitimize_pic_address (operands[1], mode,
                                                        operands[0]);
              if (operands[0] != operands[1])
                emit_insn (gen_rtx_SET (operands[0], operands[1]));
              break;
            }
#endif

          /* If we are to limit the number of things we put in the TOC and
             this is a symbol plus a constant we can add in one insn,
             just put the symbol in the TOC and add the constant.  */
          if (GET_CODE (operands[1]) == CONST
              && TARGET_NO_SUM_IN_TOC
              && GET_CODE (XEXP (operands[1], 0)) == PLUS
              && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
              && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
                  || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
              && ! side_effects_p (operands[0]))
            {
              rtx sym =
                force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
              rtx other = XEXP (XEXP (operands[1], 0), 1);

              sym = force_reg (mode, sym);
              emit_insn (gen_add3_insn (operands[0], sym, other));
              return;
            }

          operands[1] = force_const_mem (mode, operands[1]);

          if (TARGET_TOC
              && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
              && use_toc_relative_ref (XEXP (operands[1], 0), mode))
            {
              rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
                                                 operands[0]);
              operands[1] = gen_const_mem (mode, tocref);
              set_mem_alias_set (operands[1], get_TOC_alias_set ());
            }
        }
      break;

    case E_TImode:
      if (!VECTOR_MEM_VSX_P (TImode))
        rs6000_eliminate_indexed_memrefs (operands);
      break;

    case E_PTImode:
      rs6000_eliminate_indexed_memrefs (operands);
      break;

    default:
      fatal_insn ("bad move", gen_rtx_SET (dest, source));
    }

  /* Above, we may have called force_const_mem which may have returned
     an invalid address.  If we can, fix this up; otherwise, reload will
     have to deal with it.  */
  if (GET_CODE (operands[1]) == MEM)
    operands[1] = validize_mem (operands[1]);

  emit_insn (gen_rtx_SET (operands[0], operands[1]));
}
/* Nonzero if we can use a floating-point register to pass this arg.  */
#define USE_FP_FOR_ARG_P(CUM,MODE)		\
  (SCALAR_FLOAT_MODE_NOT_VECTOR_P (MODE)	\
   && (CUM)->fregno <= FP_ARG_MAX_REG		\
   && TARGET_HARD_FLOAT)

/* Nonzero if we can use an AltiVec register to pass this arg.  */
#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED)		\
  (ALTIVEC_OR_VSX_VECTOR_MODE (MODE)			\
   && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG		\
   && TARGET_ALTIVEC_ABI				\
   && (NAMED))
/* Walk down the type tree of TYPE counting consecutive base elements.
   If *MODEP is VOIDmode, then set it to the first valid floating point
   or vector type.  If a non-floating point or vector type is found, or
   if a floating point or vector type that doesn't match a non-VOIDmode
   *MODEP is found, then return -1, otherwise return the count in the
   sub-tree.  */

static int
rs6000_aggregate_candidate (const_tree type, machine_mode *modep)
{
  machine_mode mode;
  HOST_WIDE_INT size;

  switch (TREE_CODE (type))
    {
    case REAL_TYPE:
      mode = TYPE_MODE (type);
      if (!SCALAR_FLOAT_MODE_P (mode))
        return -1;

      if (*modep == VOIDmode)
        *modep = mode;

      if (*modep == mode)
        return 1;

      break;

    case COMPLEX_TYPE:
      mode = TYPE_MODE (TREE_TYPE (type));
      if (!SCALAR_FLOAT_MODE_P (mode))
        return -1;

      if (*modep == VOIDmode)
        *modep = mode;

      if (*modep == mode)
        return 2;

      break;

    case VECTOR_TYPE:
      if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
        return -1;

      /* Use V4SImode as representative of all 128-bit vector types.  */
      size = int_size_in_bytes (type);
      switch (size)
        {
        case 16:
          mode = V4SImode;
          break;
        default:
          return -1;
        }

      if (*modep == VOIDmode)
        *modep = mode;

      /* Vector modes are considered to be opaque: two vectors are
         equivalent for the purposes of being homogeneous aggregates
         if they are the same size.  */
      if (*modep == mode)
        return 1;

      break;

    case ARRAY_TYPE:
      {
        int count;
        tree index = TYPE_DOMAIN (type);

        /* Can't handle incomplete types nor sizes that are not
           fixed.  */
        if (!COMPLETE_TYPE_P (type)
            || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
          return -1;

        count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
        if (count == -1
            || !index
            || !TYPE_MAX_VALUE (index)
            || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
            || !TYPE_MIN_VALUE (index)
            || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
            || count < 0)
          return -1;

        count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
                      - tree_to_uhwi (TYPE_MIN_VALUE (index)));

        /* There must be no padding.  */
        if (wi::to_wide (TYPE_SIZE (type))
            != count * GET_MODE_BITSIZE (*modep))
          return -1;

        return count;
      }

    case RECORD_TYPE:
      {
        int count = 0;
        int sub_count;
        tree field;

        /* Can't handle incomplete types nor sizes that are not
           fixed.  */
        if (!COMPLETE_TYPE_P (type)
            || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
          return -1;

        for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
          {
            if (TREE_CODE (field) != FIELD_DECL)
              continue;

            sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
            if (sub_count < 0)
              return -1;
            count += sub_count;
          }

        /* There must be no padding.  */
        if (wi::to_wide (TYPE_SIZE (type))
            != count * GET_MODE_BITSIZE (*modep))
          return -1;

        return count;
      }

    case UNION_TYPE:
    case QUAL_UNION_TYPE:
      {
        /* These aren't very interesting except in a degenerate case.  */
        int count = 0;
        int sub_count;
        tree field;

        /* Can't handle incomplete types nor sizes that are not
           fixed.  */
        if (!COMPLETE_TYPE_P (type)
            || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
          return -1;

        for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
          {
            if (TREE_CODE (field) != FIELD_DECL)
              continue;

            sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
            if (sub_count < 0)
              return -1;
            count = count > sub_count ? count : sub_count;
          }

        /* There must be no padding.  */
        if (wi::to_wide (TYPE_SIZE (type))
            != count * GET_MODE_BITSIZE (*modep))
          return -1;

        return count;
      }

    default:
      break;
    }

  return -1;
}
/* If an argument, whose type is described by TYPE and MODE, is a homogeneous
   float or vector aggregate that shall be passed in FP/vector registers
   according to the ELFv2 ABI, return the homogeneous element mode in
   *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.

   Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE.  */

static bool
rs6000_discover_homogeneous_aggregate (machine_mode mode, const_tree type,
                                       machine_mode *elt_mode,
                                       int *n_elts)
{
  /* Note that we do not accept complex types at the top level as
     homogeneous aggregates; these types are handled via the
     targetm.calls.split_complex_arg mechanism.  Complex types
     can be elements of homogeneous aggregates, however.  */
  if (TARGET_HARD_FLOAT && DEFAULT_ABI == ABI_ELFv2 && type
      && AGGREGATE_TYPE_P (type))
    {
      machine_mode field_mode = VOIDmode;
      int field_count = rs6000_aggregate_candidate (type, &field_mode);

      if (field_count > 0)
        {
          int reg_size = ALTIVEC_OR_VSX_VECTOR_MODE (field_mode) ? 16 : 8;
          int field_size = ROUND_UP (GET_MODE_SIZE (field_mode), reg_size);

          /* The ELFv2 ABI allows homogeneous aggregates to occupy
             up to AGGR_ARG_NUM_REG registers.  */
          if (field_count * field_size <= AGGR_ARG_NUM_REG * reg_size)
            {
              if (elt_mode)
                *elt_mode = field_mode;
              if (n_elts)
                *n_elts = field_count;
              return true;
            }
        }
    }

  if (elt_mode)
    *elt_mode = mode;
  if (n_elts)
    *n_elts = 1;
  return false;
}
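/* Worked example (illustrative note, not part of the original source):
   for

        struct pt { double x; double y; };

   rs6000_aggregate_candidate returns 2 with field_mode == DFmode, so
   under ELFv2 this reports a homogeneous aggregate with *elt_mode =
   DFmode and *n_elts = 2, and the struct is passed in two consecutive
   FPRs instead of GPRs.  A struct of three V4SI vectors likewise yields
   *elt_mode = V4SImode and *n_elts = 3, as long as the total fits in
   AGGR_ARG_NUM_REG registers.  */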
/* Return a nonzero value to say to return the function value in
   memory, just as large structures are always returned.  TYPE will be
   the data type of the value, and FNTYPE will be the type of the
   function doing the returning, or @code{NULL} for libcalls.

   The AIX ABI for the RS/6000 specifies that all structures are
   returned in memory.  The Darwin ABI does the same.

   For the Darwin 64 Bit ABI, a function result can be returned in
   registers or in memory, depending on the size of the return data
   type.  If it is returned in registers, the value occupies the same
   registers as it would if it were the first and only function
   argument.  Otherwise, the function places its result in memory at
   the location pointed to by GPR3.

   The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
   but a draft put them in memory, and GCC used to implement the draft
   instead of the final standard.  Therefore, aix_struct_return
   controls this instead of DEFAULT_ABI; V.4 targets needing backward
   compatibility can change DRAFT_V4_STRUCT_RET to override the
   default, and -m switches get the final word.  See
   rs6000_option_override_internal for more details.

   The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
   long double support is enabled.  These values are returned in memory.

   int_size_in_bytes returns -1 for variable size objects, which go in
   memory always.  The cast to unsigned makes -1 > 8.  */

static bool
rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* For the Darwin64 ABI, test if we can fit the return value in regs.  */
  if (TARGET_MACHO
      && rs6000_darwin64_abi
      && TREE_CODE (type) == RECORD_TYPE
      && int_size_in_bytes (type) > 0)
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed
         as an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
      if (valret)
        return false;
      /* Otherwise fall through to more conventional ABI rules.  */
    }

  /* The ELFv2 ABI returns homogeneous VFP aggregates in registers */
  if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
                                             NULL, NULL))
    return false;

  /* The ELFv2 ABI returns aggregates up to 16B in registers */
  if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
      && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
    return false;

  if (AGGREGATE_TYPE_P (type)
      && (aix_struct_return
          || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
    return true;

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
    return false;

  /* Return synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_return_big_vectors = false;
      if (!warned_for_return_big_vectors)
        {
          warning (OPT_Wpsabi, "GCC vector returned by reference: "
                   "non-standard ABI extension with no compatibility "
                   "guarantee");
          warned_for_return_big_vectors = true;
        }
      return true;
    }

  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
      && FLOAT128_IEEE_P (TYPE_MODE (type)))
    return true;

  return false;
}
/* Specify whether values returned in registers should be at the most
   significant end of a register.  We want aggregates returned by
   value to match the way aggregates are passed to functions.  */

static bool
rs6000_return_in_msb (const_tree valtype)
{
  return (DEFAULT_ABI == ABI_ELFv2
          && BYTES_BIG_ENDIAN
          && AGGREGATE_TYPE_P (valtype)
          && (rs6000_function_arg_padding (TYPE_MODE (valtype), valtype)
              == PAD_UPWARD));
}
#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Return TRUE if a call to function FNDECL may be one that
   potentially affects the function calling ABI of the object file.  */

static bool
call_ABI_of_interest (tree fndecl)
{
  if (rs6000_gnu_attr && symtab->state == EXPANSION)
    {
      struct cgraph_node *c_node;

      /* Libcalls are always interesting.  */
      if (fndecl == NULL_TREE)
        return true;

      /* Any call to an external function is interesting.  */
      if (DECL_EXTERNAL (fndecl))
        return true;

      /* Interesting functions that we are emitting in this object file.  */
      c_node = cgraph_node::get (fndecl);
      c_node = c_node->ultimate_alias_target ();
      return !c_node->only_called_directly_p ();
    }
  return false;
}
#endif
/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.

   For incoming args we set the number of arguments in the prototype large
   so we never return a PARALLEL.  */

void
init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
                      rtx libname ATTRIBUTE_UNUSED, int incoming,
                      int libcall, int n_named_args,
                      tree fndecl ATTRIBUTE_UNUSED,
                      machine_mode return_mode ATTRIBUTE_UNUSED)
{
  static CUMULATIVE_ARGS zero_cumulative;

  *cum = zero_cumulative;
  cum->words = 0;
  cum->fregno = FP_ARG_MIN_REG;
  cum->vregno = ALTIVEC_ARG_MIN_REG;
  cum->prototype = (fntype && prototype_p (fntype));
  cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
                      ? CALL_LIBCALL : CALL_NORMAL);
  cum->sysv_gregno = GP_ARG_MIN_REG;
  cum->stdarg = stdarg_p (fntype);
  cum->libcall = libcall;

  cum->nargs_prototype = 0;
  if (incoming || cum->prototype)
    cum->nargs_prototype = n_named_args;

  /* Check for a longcall attribute.  */
  if ((!fntype && rs6000_default_long_calls)
      || (fntype
          && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
          && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
    cum->call_cookie |= CALL_LONG;

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr, "\ninit_cumulative_args:");
      if (fntype)
        {
          tree ret_type = TREE_TYPE (fntype);
          fprintf (stderr, " ret code = %s,",
                   get_tree_code_name (TREE_CODE (ret_type)));
        }

      if (cum->call_cookie & CALL_LONG)
        fprintf (stderr, " longcall,");

      fprintf (stderr, " proto = %d, nargs = %d\n",
               cum->prototype, cum->nargs_prototype);
    }

#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4))
    {
      cum->escapes = call_ABI_of_interest (fndecl);
      if (cum->escapes)
        {
          tree return_type;

          if (fntype)
            {
              return_type = TREE_TYPE (fntype);
              return_mode = TYPE_MODE (return_type);
            }
          else
            return_type = lang_hooks.types.type_for_mode (return_mode, 0);

          if (return_type != NULL)
            {
              if (TREE_CODE (return_type) == RECORD_TYPE
                  && TYPE_TRANSPARENT_AGGR (return_type))
                {
                  return_type = TREE_TYPE (first_field (return_type));
                  return_mode = TYPE_MODE (return_type);
                }
              if (AGGREGATE_TYPE_P (return_type)
                  && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
                      <= 8))
                rs6000_returns_struct = true;
            }
          if (SCALAR_FLOAT_MODE_P (return_mode))
            {
              rs6000_passes_float = true;
              if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
                  && (FLOAT128_IBM_P (return_mode)
                      || FLOAT128_IEEE_P (return_mode)
                      || (return_type != NULL
                          && (TYPE_MAIN_VARIANT (return_type)
                              == long_double_type_node))))
                rs6000_passes_long_double = true;

              /* Note if we passed or return a IEEE 128-bit type.  We changed
                 the mangling for these types, and we may need to make an alias
                 with the old mangling.  */
              if (FLOAT128_IEEE_P (return_mode))
                rs6000_passes_ieee128 = true;
            }
          if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode))
            rs6000_passes_vector = true;
        }
    }
#endif

  if (fntype
      && !TARGET_ALTIVEC
      && TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
    {
      error ("cannot return value in vector register because"
             " altivec instructions are disabled, use %qs"
             " to enable them", "-maltivec");
    }
}
/* The mode the ABI uses for a word.  This is not the same as word_mode
   for -m32 -mpowerpc64.  This is used to implement various target hooks.  */

static scalar_int_mode
rs6000_abi_word_mode (void)
{
  return TARGET_32BIT ? SImode : DImode;
}
/* Implement the TARGET_OFFLOAD_OPTIONS hook.  */
static char *
rs6000_offload_options (void)
{
  if (TARGET_64BIT)
    return xstrdup ("-foffload-abi=lp64");
  else
    return xstrdup ("-foffload-abi=ilp32");
}
/* On rs6000, function arguments are promoted, as are function return
   values.  */

static machine_mode
rs6000_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
                              machine_mode mode,
                              int *punsignedp ATTRIBUTE_UNUSED,
                              const_tree, int)
{
  PROMOTE_MODE (mode, *punsignedp, type);

  return mode;
}
/* Return true if TYPE must be passed on the stack and not in registers.  */

static bool
rs6000_must_pass_in_stack (machine_mode mode, const_tree type)
{
  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
    return must_pass_in_stack_var_size (mode, type);
  else
    return must_pass_in_stack_var_size_or_pad (mode, type);
}
static bool
is_complex_IBM_long_double (machine_mode mode)
{
  return mode == ICmode || (mode == TCmode && FLOAT128_IBM_P (TCmode));
}
/* Whether ABI_V4 passes MODE args to a function in floating point
   registers.  */

static bool
abi_v4_pass_in_fpr (machine_mode mode, bool named)
{
  if (!TARGET_HARD_FLOAT)
    return false;
  if (mode == DFmode)
    return true;
  if (mode == SFmode && named)
    return true;
  /* ABI_V4 passes complex IBM long double in 8 gprs.
     Stupid, but we can't change the ABI now.  */
  if (is_complex_IBM_long_double (mode))
    return false;
  if (FLOAT128_2REG_P (mode))
    return true;
  if (DECIMAL_FLOAT_MODE_P (mode))
    return true;
  return false;
}
/* Implement TARGET_FUNCTION_ARG_PADDING.

   For the AIX ABI structs are always stored left shifted in their
   argument slot.  */

static pad_direction
rs6000_function_arg_padding (machine_mode mode, const_tree type)
{
#ifndef AGGREGATE_PADDING_FIXED
#define AGGREGATE_PADDING_FIXED 0
#endif
#ifndef AGGREGATES_PAD_UPWARD_ALWAYS
#define AGGREGATES_PAD_UPWARD_ALWAYS 0
#endif

  if (!AGGREGATE_PADDING_FIXED)
    {
      /* GCC used to pass structures of the same size as integer types as
         if they were in fact integers, ignoring TARGET_FUNCTION_ARG_PADDING.
         i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
         passed padded downward, except that -mstrict-align further
         muddied the water in that multi-component structures of 2 and 4
         bytes in size were passed padded upward.

         The following arranges for best compatibility with previous
         versions of gcc, but removes the -mstrict-align dependency.  */
      if (BYTES_BIG_ENDIAN)
        {
          HOST_WIDE_INT size = 0;

          if (mode == BLKmode)
            {
              if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
                size = int_size_in_bytes (type);
            }
          else
            size = GET_MODE_SIZE (mode);

          if (size == 1 || size == 2 || size == 4)
            return PAD_DOWNWARD;
        }
      return PAD_UPWARD;
    }

  if (AGGREGATES_PAD_UPWARD_ALWAYS)
    {
      if (type != 0 && AGGREGATE_TYPE_P (type))
        return PAD_UPWARD;
    }

  /* Fall back to the default.  */
  return default_function_arg_padding (mode, type);
}
/* If defined, a C expression that gives the alignment boundary, in bits,
   of an argument with the specified mode and type.  If it is not defined,
   PARM_BOUNDARY is used for all arguments.

   V.4 wants long longs and doubles to be double word aligned.  Just
   testing the mode size is a boneheaded way to do this as it means
   that other types such as complex int are also double word aligned.
   However, we're stuck with this because changing the ABI might break
   existing library interfaces.

   Quadword align Altivec/VSX vectors.
   Quadword align large synthetic vector types.  */

static unsigned int
rs6000_function_arg_boundary (machine_mode mode, const_tree type)
{
  machine_mode elt_mode;
  int n_elts;

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  if (DEFAULT_ABI == ABI_V4
      && (GET_MODE_SIZE (mode) == 8
          || (TARGET_HARD_FLOAT
              && !is_complex_IBM_long_double (mode)
              && FLOAT128_2REG_P (mode))))
    return 64;
  else if (FLOAT128_VECTOR_P (mode))
    return 128;
  else if (type && TREE_CODE (type) == VECTOR_TYPE
           && int_size_in_bytes (type) >= 8
           && int_size_in_bytes (type) < 16)
    return 64;
  else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
           || (type && TREE_CODE (type) == VECTOR_TYPE
               && int_size_in_bytes (type) >= 16))
    return 128;

  /* Aggregate types that need > 8 byte alignment are quadword-aligned
     in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
     -mcompat-align-parm is used.  */
  if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
       || DEFAULT_ABI == ABI_ELFv2)
      && type && TYPE_ALIGN (type) > 64)
    {
      /* "Aggregate" means any AGGREGATE_TYPE except for single-element
         or homogeneous float/vector aggregates here.  We already handled
         vector aggregates above, but still need to check for float here. */
      bool aggregate_p = (AGGREGATE_TYPE_P (type)
                          && !SCALAR_FLOAT_MODE_P (elt_mode));

      /* We used to check for BLKmode instead of the above aggregate type
         check.  Warn when this results in any difference to the ABI.  */
      if (aggregate_p != (mode == BLKmode))
        {
          static bool warned;
          if (!warned && warn_psabi)
            {
              warned = true;
              inform (input_location,
                      "the ABI of passing aggregates with %d-byte alignment"
                      " has changed in GCC 5",
                      (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
            }
        }

      if (aggregate_p)
        return 128;
    }

  /* Similar for the Darwin64 ABI.  Note that for historical reasons we
     implement the "aggregate type" check as a BLKmode check here; this
     means certain aggregate types are in fact not aligned.  */
  if (TARGET_MACHO && rs6000_darwin64_abi
      && mode == BLKmode
      && type && TYPE_ALIGN (type) > 64)
    return 128;

  return PARM_BOUNDARY;
}
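/* Worked examples (illustrative note, not part of the original source):
   a V4SImode argument matches the ALTIVEC_OR_VSX_VECTOR_MODE test and
   is given 128-bit alignment; a "long long" under the V.4 ABI has
   GET_MODE_SIZE (mode) == 8 and is given 64-bit alignment; a plain
   SImode argument falls through to PARM_BOUNDARY.  */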
/* The offset in words to the start of the parameter save area.  */

static unsigned int
rs6000_parm_offset (void)
{
  return (DEFAULT_ABI == ABI_V4 ? 2
          : DEFAULT_ABI == ABI_ELFv2 ? 4
          : 6);
}
/* For a function parm of MODE and TYPE, return the starting word in
   the parameter area.  NWORDS of the parameter area are already used.  */

static unsigned int
rs6000_parm_start (machine_mode mode, const_tree type,
                   unsigned int nwords)
{
  unsigned int align;

  align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
  return nwords + (-(rs6000_parm_offset () + nwords) & align);
}
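/* Worked example (illustrative note, not part of the original source):
   under ELFv2 the parameter save area starts at word offset 4.  For a
   16-byte-aligned argument on a 64-bit target, align = 128/64 - 1 = 1,
   so with nwords = 1 the result is 1 + (-(4 + 1) & 1) = 2: one padding
   word is inserted so the argument starts on an even doubleword.  */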
/* Compute the size (in words) of a function argument.  */

static unsigned long
rs6000_arg_size (machine_mode mode, const_tree type)
{
  unsigned long size;

  if (mode != BLKmode)
    size = GET_MODE_SIZE (mode);
  else
    size = int_size_in_bytes (type);

  if (TARGET_32BIT)
    return (size + 3) >> 2;
  else
    return (size + 7) >> 3;
}
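/* Worked example (illustrative note, not part of the original source):
   a 16-byte BLKmode struct occupies (16 + 3) >> 2 = 4 words under a
   32-bit ABI and (16 + 7) >> 3 = 2 words under a 64-bit ABI; the
   rounding makes a 5-byte argument still consume whole words (2 and 1
   words respectively).  */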
/* Use this to flush pending int fields.  */

static void
rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
                                          HOST_WIDE_INT bitpos, int final)
{
  unsigned int startbit, endbit;
  int intregs, intoffset;

  /* Handle the situations where a float is taking up the first half
     of the GPR, and the other half is empty (typically due to
     alignment restrictions).  We can detect this by a 8-byte-aligned
     int field, or by seeing that this is the final flush for this
     argument.  Count the word and continue on.  */
  if (cum->floats_in_gpr == 1
      && (cum->intoffset % 64 == 0
          || (cum->intoffset == -1 && final)))
    {
      cum->words++;
      cum->floats_in_gpr = 0;
    }

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;
  cum->floats_in_gpr = 0;

  if (intoffset % BITS_PER_WORD != 0)
    {
      unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
      if (!int_mode_for_size (bits, 0).exists ())
        {
          /* We couldn't find an appropriate mode, which happens,
             e.g., in packed structs when there are 3 bytes to load.
             Back intoffset back to the beginning of the word in this
             case.  */
          intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
        }
    }

  startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
  endbit = ROUND_UP (bitpos, BITS_PER_WORD);
  intregs = (endbit - startbit) / BITS_PER_WORD;
  cum->words += intregs;
  /* words should be unsigned. */
  if ((unsigned) cum->words < (endbit / BITS_PER_WORD))
    {
      int pad = (endbit / BITS_PER_WORD) - cum->words;
      cum->words += pad;
    }
}
/* The darwin64 ABI calls for us to recurse down through structs,
   looking for elements passed in registers.  Unfortunately, we have
   to track int register count here also because of misalignments
   in powerpc alignment mode.  */

static void
rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
                                            const_tree type,
                                            HOST_WIDE_INT startbitpos)
{
  tree f;

  for (f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
        HOST_WIDE_INT bitpos = startbitpos;
        tree ftype = TREE_TYPE (f);
        machine_mode mode;
        if (ftype == error_mark_node)
          continue;
        mode = TYPE_MODE (ftype);

        if (DECL_SIZE (f) != 0
            && tree_fits_uhwi_p (bit_position (f)))
          bitpos += int_bit_position (f);

        /* ??? FIXME: else assume zero offset.  */

        if (TREE_CODE (ftype) == RECORD_TYPE)
          rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
        else if (USE_FP_FOR_ARG_P (cum, mode))
          {
            unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
            rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
            cum->fregno += n_fpregs;
            /* Single-precision floats present a special problem for
               us, because they are smaller than an 8-byte GPR, and so
               the structure-packing rules combined with the standard
               varargs behavior mean that we want to pack float/float
               and float/int combinations into a single register's
               space.  This is complicated by the arg advance flushing,
               which works on arbitrarily large groups of int-type
               fields.  */
            if (mode == SFmode)
              {
                if (cum->floats_in_gpr == 1)
                  {
                    /* Two floats in a word; count the word and reset
                       the float count.  */
                    cum->words++;
                    cum->floats_in_gpr = 0;
                  }
                else if (bitpos % 64 == 0)
                  {
                    /* A float at the beginning of an 8-byte word;
                       count it and put off adjusting cum->words until
                       we see if a arg advance flush is going to do it
                       for us.  */
                    cum->floats_in_gpr++;
                  }
                else
                  {
                    /* The float is at the end of a word, preceded
                       by integer fields, so the arg advance flush
                       just above has already set cum->words and
                       everything is taken care of.  */
                  }
              }
            else
              cum->words += n_fpregs;
          }
        else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
          {
            rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
            cum->vregno++;
            cum->words += 2;
          }
        else if (cum->intoffset == -1)
          cum->intoffset = bitpos;
      }
}
/* Check for an item that needs to be considered specially under the darwin 64
   bit ABI.  These are record types where the mode is BLK or the structure is
   8 bytes in size.  */
static int
rs6000_darwin64_struct_check_p (machine_mode mode, const_tree type)
{
  return rs6000_darwin64_abi
         && ((mode == BLKmode
              && TREE_CODE (type) == RECORD_TYPE
              && int_size_in_bytes (type) > 0)
             || (type && TREE_CODE (type) == RECORD_TYPE
                 && int_size_in_bytes (type) == 8)) ? 1 : 0;
}
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

static void
rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, machine_mode mode,
                               const_tree type, bool named, int depth)
{
  machine_mode elt_mode;
  int n_elts;

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  /* Only tick off an argument if we're not recursing.  */
  if (depth == 0)
    cum->nargs_prototype--;

#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (TARGET_ELF && (TARGET_64BIT || DEFAULT_ABI == ABI_V4)
      && cum->escapes)
    {
      if (SCALAR_FLOAT_MODE_P (mode))
        {
          rs6000_passes_float = true;
          if ((HAVE_LD_PPC_GNU_ATTR_LONG_DOUBLE || TARGET_64BIT)
              && (FLOAT128_IBM_P (mode)
                  || FLOAT128_IEEE_P (mode)
                  || (type != NULL
                      && TYPE_MAIN_VARIANT (type) == long_double_type_node)))
            rs6000_passes_long_double = true;

          /* Note if we passed or return a IEEE 128-bit type.  We changed the
             mangling for these types, and we may need to make an alias with
             the old mangling.  */
          if (FLOAT128_IEEE_P (mode))
            rs6000_passes_ieee128 = true;
        }
      if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
        rs6000_passes_vector = true;
    }
#endif

  if (TARGET_ALTIVEC_ABI
      && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
          || (type && TREE_CODE (type) == VECTOR_TYPE
              && int_size_in_bytes (type) == 16)))
    {
      bool stack = false;

      if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
        {
          cum->vregno += n_elts;

          if (!TARGET_ALTIVEC)
            error ("cannot pass argument in vector register because"
                   " altivec instructions are disabled, use %qs"
                   " to enable them", "-maltivec");

          /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
             even if it is going to be passed in a vector register.
             Darwin does the same for variable-argument functions.  */
          if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
               && TARGET_64BIT)
              || (cum->stdarg && DEFAULT_ABI != ABI_V4))
            stack = true;
        }
      else
        stack = true;

      if (stack)
        {
          int align;

          /* Vector parameters must be 16-byte aligned.  In 32-bit
             mode this means we need to take into account the offset
             to the parameter save area.  In 64-bit mode, they just
             have to start on an even word, since the parameter save
             area is 16-byte aligned.  */
          if (TARGET_32BIT)
            align = -(rs6000_parm_offset () + cum->words) & 3;
          else
            align = cum->words & 1;
          cum->words += align + rs6000_arg_size (mode, type);

          if (TARGET_DEBUG_ARG)
            {
              fprintf (stderr, "function_adv: words = %2d, align=%d, ",
                       cum->words, align);
              fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
                       cum->nargs_prototype, cum->prototype,
                       GET_MODE_NAME (mode));
            }
        }
    }
  else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    {
      int size = int_size_in_bytes (type);
      /* Variable sized types have size == -1 and are
         treated as if consisting entirely of ints.
         Pad to 16 byte boundary if needed.  */
      if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
          && (cum->words % 2) != 0)
        cum->words++;
      /* For varargs, we can just go up by the size of the struct. */
      if (!named)
        cum->words += (size + 7) / 8;
      else
        {
          /* It is tempting to say int register count just goes up by
             sizeof(type)/8, but this is wrong in a case such as
             { int; double; int; } [powerpc alignment].  We have to
             grovel through the fields for these too.  */
          cum->intoffset = 0;
          cum->floats_in_gpr = 0;
          rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
          rs6000_darwin64_record_arg_advance_flush (cum,
                                                    size * BITS_PER_UNIT, 1);
        }
      if (TARGET_DEBUG_ARG)
        {
          fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
                   cum->words, TYPE_ALIGN (type), size);
          fprintf (stderr,
                   "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
                   cum->nargs_prototype, cum->prototype,
                   GET_MODE_NAME (mode));
        }
    }
  else if (DEFAULT_ABI == ABI_V4)
    {
      if (abi_v4_pass_in_fpr (mode, named))
        {
          /* _Decimal128 must use an even/odd register pair.  This assumes
             that the register number is odd when fregno is odd.  */
          if (mode == TDmode && (cum->fregno % 2) == 1)
            cum->fregno++;

          if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
              <= FP_ARG_V4_MAX_REG)
            cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
          else
            {
              cum->fregno = FP_ARG_V4_MAX_REG + 1;
              if (mode == DFmode || FLOAT128_IBM_P (mode)
                  || mode == DDmode || mode == TDmode)
                cum->words += cum->words & 1;
              cum->words += rs6000_arg_size (mode, type);
            }
        }
      else
        {
          int n_words = rs6000_arg_size (mode, type);
          int gregno = cum->sysv_gregno;

          /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
             As does any other 2 word item such as complex int due to a
             historical mistake.  */
          if (n_words == 2)
            gregno += (1 - gregno) & 1;

          /* Multi-reg args are not split between registers and stack.  */
          if (gregno + n_words - 1 > GP_ARG_MAX_REG)
            {
              /* Long long is aligned on the stack.  So are other 2 word
                 items such as complex int due to a historical mistake.  */
              if (n_words == 2)
                cum->words += cum->words & 1;
              cum->words += n_words;
            }

          /* Note: continuing to accumulate gregno past when we've started
             spilling to the stack indicates the fact that we've started
             spilling to the stack to expand_builtin_saveregs.  */
          cum->sysv_gregno = gregno + n_words;
        }

      if (TARGET_DEBUG_ARG)
        {
          fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
                   cum->words, cum->fregno);
          fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
                   cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
          fprintf (stderr, "mode = %4s, named = %d\n",
                   GET_MODE_NAME (mode), named);
        }
    }
  else
    {
      int n_words = rs6000_arg_size (mode, type);
      int start_words = cum->words;
      int align_words = rs6000_parm_start (mode, type, start_words);

      cum->words = align_words + n_words;

      if (SCALAR_FLOAT_MODE_P (elt_mode) && TARGET_HARD_FLOAT)
        {
          /* _Decimal128 must be passed in an even/odd float register pair.
             This assumes that the register number is odd when fregno is
             odd.  */
          if (elt_mode == TDmode && (cum->fregno % 2) == 1)
            cum->fregno++;
          cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
        }

      if (TARGET_DEBUG_ARG)
        {
          fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
                   cum->words, cum->fregno);
          fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
                   cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
          fprintf (stderr, "named = %d, align = %d, depth = %d\n",
                   named, align_words - start_words, depth);
        }
    }
}
static void
rs6000_function_arg_advance (cumulative_args_t cum, machine_mode mode,
                             const_tree type, bool named)
{
  rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
                                 0);
}
/* A subroutine of rs6000_darwin64_record_arg.  Assign the bits of the
   structure between cum->intoffset and bitpos to integer registers.  */

static void
rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
                                  HOST_WIDE_INT bitpos, rtx rvec[], int *k)
{
  machine_mode mode;
  unsigned int regno;
  unsigned int startbit, endbit;
  int this_regno, intregs, intoffset;
  rtx reg;

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;

  /* If this is the trailing part of a word, try to only load that
     much into the register.  Otherwise load the whole register.  Note
     that in the latter case we may pick up unwanted bits.  It's not a
     problem at the moment but may wish to revisit.  */

  if (intoffset % BITS_PER_WORD != 0)
    {
      unsigned int bits = BITS_PER_WORD - intoffset % BITS_PER_WORD;
      if (!int_mode_for_size (bits, 0).exists (&mode))
        {
          /* We couldn't find an appropriate mode, which happens,
             e.g., in packed structs when there are 3 bytes to load.
             Back intoffset back to the beginning of the word in this
             case.  */
          intoffset = ROUND_DOWN (intoffset, BITS_PER_WORD);
          mode = word_mode;
        }
    }
  else
    mode = word_mode;

  startbit = ROUND_DOWN (intoffset, BITS_PER_WORD);
  endbit = ROUND_UP (bitpos, BITS_PER_WORD);
  intregs = (endbit - startbit) / BITS_PER_WORD;
  this_regno = cum->words + intoffset / BITS_PER_WORD;

  if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
    cum->use_stack = 1;

  intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
  if (intregs <= 0)
    return;

  intoffset /= BITS_PER_UNIT;
  do
    {
      regno = GP_ARG_MIN_REG + this_regno;
      reg = gen_rtx_REG (mode, regno);
      rvec[(*k)++] =
        gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));

      this_regno += 1;
      intoffset = (intoffset | (UNITS_PER_WORD - 1)) + 1;
      mode = word_mode;
      intregs -= 1;
    }
  while (intregs > 0);
}
/* Recursive workhorse for the following.  */

static void
rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
                                    HOST_WIDE_INT startbitpos, rtx rvec[],
                                    int *k)
{
  tree f;

  for (f = TYPE_FIELDS (type); f; f = DECL_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
        HOST_WIDE_INT bitpos = startbitpos;
        tree ftype = TREE_TYPE (f);
        machine_mode mode;
        if (ftype == error_mark_node)
          continue;
        mode = TYPE_MODE (ftype);

        if (DECL_SIZE (f) != 0
            && tree_fits_uhwi_p (bit_position (f)))
          bitpos += int_bit_position (f);

        /* ??? FIXME: else assume zero offset.  */

        if (TREE_CODE (ftype) == RECORD_TYPE)
          rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
        else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
          {
            unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
#if 0
            switch (mode)
              {
              case E_SCmode: mode = SFmode; break;
              case E_DCmode: mode = DFmode; break;
              case E_TCmode: mode = TFmode; break;
              }
#endif
            rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
            if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
              {
                gcc_assert (cum->fregno == FP_ARG_MAX_REG
                            && (mode == TFmode || mode == TDmode));
                /* Long double or _Decimal128 split over regs and memory.  */
                mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
                cum->use_stack = 1;
              }
            rvec[(*k)++]
              = gen_rtx_EXPR_LIST (VOIDmode,
                                   gen_rtx_REG (mode, cum->fregno++),
                                   GEN_INT (bitpos / BITS_PER_UNIT));
            if (FLOAT128_2REG_P (mode))
              cum->fregno++;
          }
        else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
          {
            rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
            rvec[(*k)++]
              = gen_rtx_EXPR_LIST (VOIDmode,
                                   gen_rtx_REG (mode, cum->vregno++),
                                   GEN_INT (bitpos / BITS_PER_UNIT));
          }
        else if (cum->intoffset == -1)
          cum->intoffset = bitpos;
      }
}
/* For the darwin64 ABI, we want to construct a PARALLEL consisting of
   the register(s) to be used for each field and subfield of a struct
   being passed by value, along with the offset of where the
   register's value may be found in the block.  FP fields go in FP
   registers, vector fields go in vector registers, and everything
   else goes in int registers, packed as in memory.

   This code is also used for function return values.  RETVAL indicates
   whether this is the case.

   Much of this is taken from the SPARC V9 port, which has a similar
   calling convention.  */

static rtx
rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
                            bool named, bool retval)
{
  rtx rvec[FIRST_PSEUDO_REGISTER];
  int k = 1, kbase = 1;
  HOST_WIDE_INT typesize = int_size_in_bytes (type);
  /* This is a copy; modifications are not visible to our caller.  */
  CUMULATIVE_ARGS copy_cum = *orig_cum;
  CUMULATIVE_ARGS *cum = &copy_cum;

  /* Pad to 16 byte boundary if needed.  */
  if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
      && (cum->words % 2) != 0)
    cum->words++;

  cum->intoffset = 0;
  cum->use_stack = 0;
  cum->named = named;

  /* Put entries into rvec[] for individual FP and vector fields, and
     for the chunks of memory that go in int regs.  Note we start at
     element 1; 0 is reserved for an indication of using memory, and
     may or may not be filled in below.  */
  rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0,
                                      rvec, &k);
  rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);

  /* If any part of the struct went on the stack put all of it there.
     This hack is because the generic code for
     FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
     parts of the struct are not at the beginning.  */
  if (cum->use_stack)
    {
      if (retval)
        return NULL_RTX;    /* doesn't go in registers at all */
      kbase = 0;
      rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
    }
  if (k > 1 || cum->use_stack)
    return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
  else
    return NULL_RTX;
}
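
/* Editorial sketch, not part of the original source: for a hypothetical
   Darwin64 argument such as

       struct { double d; int i; } x;

   passed first with FP registers free, the PARALLEL built above would
   pair each piece with its byte offset within the in-memory struct,
   roughly

       (parallel:BLK [(expr_list (reg:DF f1) (const_int 0))
                      (expr_list (reg:SI r4) (const_int 8))])

   The exact register numbers and chunk modes depend on how many argument
   registers CUM has already consumed, so treat this only as an
   illustration of the (register, offset) pairing.  */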
/* Determine where to place an argument in 64-bit mode with 32-bit ABI.  */

static rtx
rs6000_mixed_function_arg (machine_mode mode, const_tree type,
                           int align_words)
{
  int n_units;
  int i, k;
  rtx rvec[GP_ARG_NUM_REG + 1];

  if (align_words >= GP_ARG_NUM_REG)
    return NULL_RTX;

  n_units = rs6000_arg_size (mode, type);

  /* Optimize the simple case where the arg fits in one gpr, except in
     the case of BLKmode due to assign_parms assuming that registers are
     BITS_PER_WORD wide.  */
  if (n_units == 0
      || (n_units == 1 && mode != BLKmode))
    return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);

  k = 0;
  if (align_words + n_units > GP_ARG_NUM_REG)
    /* Not all of the arg fits in gprs.  Say that it goes in memory too,
       using a magic NULL_RTX component.
       This is not strictly correct.  Only some of the arg belongs in
       memory, not all of it.  However, the normal scheme using
       function_arg_partial_nregs can result in unusual subregs, eg.
       (subreg:SI (reg:DF) 4), which are not handled well.  The code to
       store the whole arg to memory is often more efficient than code
       to store pieces, and we know that space is available in the right
       place for the whole arg.  */
    rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);

  i = 0;
  do
    {
      rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
      rtx off = GEN_INT (i++ * 4);
      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
    }
  while (++align_words < GP_ARG_NUM_REG && --n_units != 0);

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}
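
/* Editorial sketch, not part of the original source: with -m32
   -mpowerpc64, a DFmode argument whose first word lands in r10
   (align_words == 7) has n_units == 2, so align_words + n_units exceeds
   GP_ARG_NUM_REG (8) and the function returns approximately

       (parallel:DF [(expr_list (nil) (const_int 0))
                     (expr_list (reg:SI r10) (const_int 0))])

   i.e. the magic NULL_RTX element plus the single SImode piece that is
   in a register; the other half of the value lives in memory.  */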
/* We have an argument of MODE and TYPE that goes into FPRs or VRs,
   but must also be copied into the parameter save area starting at
   offset ALIGN_WORDS.  Fill in RVEC with the elements corresponding
   to the GPRs and/or memory.  Return the number of elements used.  */

static int
rs6000_psave_function_arg (machine_mode mode, const_tree type,
                           int align_words, rtx *rvec)
{
  int k = 0;

  if (align_words < GP_ARG_NUM_REG)
    {
      int n_words = rs6000_arg_size (mode, type);

      if (align_words + n_words > GP_ARG_NUM_REG
          || mode == BLKmode
          || (TARGET_32BIT && TARGET_POWERPC64))
        {
          /* If this is partially on the stack, then we only
             include the portion actually in registers here.  */
          machine_mode rmode = TARGET_32BIT ? SImode : DImode;
          int i = 0;

          if (align_words + n_words > GP_ARG_NUM_REG)
            /* Not all of the arg fits in gprs.  Say that it goes in memory
               too, using a magic NULL_RTX component.  Also see comment in
               rs6000_mixed_function_arg for why the normal
               function_arg_partial_nregs scheme doesn't work in this case. */
            rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);

          do
            {
              rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
              rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
              rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
            }
          while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
        }
      else
        {
          /* The whole arg fits in gprs.  */
          rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
          rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
        }
    }
  else
    /* It's entirely in memory.  */
    rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);

  return k;
}
/* RVEC is a vector of K components of an argument of mode MODE.
   Construct the final function_arg return value from it.  */

static rtx
rs6000_finish_function_arg (machine_mode mode, rtx *rvec, int k)
{
  gcc_assert (k >= 1);

  /* Avoid returning a PARALLEL in the trivial cases.  */
  if (k == 1)
    {
      if (XEXP (rvec[0], 0) == NULL_RTX)
        return NULL_RTX;

      if (GET_MODE (XEXP (rvec[0], 0)) == mode)
        return XEXP (rvec[0], 0);
    }

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}
/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.  It is
    not modified in this routine.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On RS/6000 the first eight words of non-FP are normally in registers
   and the rest are pushed.  Under AIX, the first 13 FP args are in registers.
   Under V.4, the first 8 FP args are in registers.

   If this is floating-point and no prototype is specified, we use
   both an FP and integer register (or possibly FP reg and stack).  Library
   functions (when CALL_LIBCALL is set) always have the proper types for args,
   so we can pass the FP value just in one register.  emit_library_function
   doesn't support PARALLEL anyway.

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

static rtx
rs6000_function_arg (cumulative_args_t cum_v, machine_mode mode,
                     const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  enum rs6000_abi abi = DEFAULT_ABI;
  machine_mode elt_mode;
  int n_elts;

  /* Return a marker to indicate whether CR1 needs to set or clear the
     bit that V.4 uses to say fp args were passed in registers.
     Assume that we don't need the marker for software floating point,
     or compiler generated library calls.  */
  if (mode == VOIDmode)
    {
      if (abi == ABI_V4
          && (cum->call_cookie & CALL_LIBCALL) == 0
          && (cum->stdarg
              || (cum->nargs_prototype < 0
                  && (cum->prototype || TARGET_NO_PROTOTYPE)))
          && TARGET_HARD_FLOAT)
        return GEN_INT (cum->call_cookie
                        | ((cum->fregno == FP_ARG_MIN_REG)
                           ? CALL_V4_SET_FP_ARGS
                           : CALL_V4_CLEAR_FP_ARGS));

      return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
    }

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    {
      rtx rslt = rs6000_darwin64_record_arg (cum, type, named,
                                             /*retval= */false);
      if (rslt != NULL_RTX)
        return rslt;
      /* Else fall through to usual handling.  */
    }

  if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
    {
      rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
      rtx r, off;
      int i, k = 0;

      /* Do we also need to pass this argument in the parameter save area?
         Library support functions for IEEE 128-bit are assumed to not need
         the value passed both in GPRs and in vector registers.  */
      if (TARGET_64BIT && !cum->prototype
          && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
        {
          int align_words = ROUND_UP (cum->words, 2);
          k = rs6000_psave_function_arg (mode, type, align_words, rvec);
        }

      /* Describe where this argument goes in the vector registers.  */
      for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
        {
          r = gen_rtx_REG (elt_mode, cum->vregno + i);
          off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
          rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
        }

      return rs6000_finish_function_arg (mode, rvec, k);
    }
  else if (TARGET_ALTIVEC_ABI
           && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
               || (type && TREE_CODE (type) == VECTOR_TYPE
                   && int_size_in_bytes (type) == 16)))
    {
      if (named || abi == ABI_V4)
        return NULL_RTX;
      else
        {
          /* Vector parameters to varargs functions under AIX or Darwin
             get passed in memory and possibly also in GPRs.  */
          int align, align_words, n_words;
          machine_mode part_mode;

          /* Vector parameters must be 16-byte aligned.  In 32-bit
             mode this means we need to take into account the offset
             to the parameter save area.  In 64-bit mode, they just
             have to start on an even word, since the parameter save
             area is 16-byte aligned.  */
          if (TARGET_32BIT)
            align = -(rs6000_parm_offset () + cum->words) & 3;
          else
            align = cum->words & 1;
          align_words = cum->words + align;

          /* Out of registers?  Memory, then.  */
          if (align_words >= GP_ARG_NUM_REG)
            return NULL_RTX;

          if (TARGET_32BIT && TARGET_POWERPC64)
            return rs6000_mixed_function_arg (mode, type, align_words);

          /* The vector value goes in GPRs.  Only the part of the
             value in GPRs is reported here.  */
          part_mode = mode;
          n_words = rs6000_arg_size (mode, type);
          if (align_words + n_words > GP_ARG_NUM_REG)
            /* Fortunately, there are only two possibilities, the value
               is either wholly in GPRs or half in GPRs and half not.  */
            part_mode = DImode;

          return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
        }
    }

  else if (abi == ABI_V4)
    {
      if (abi_v4_pass_in_fpr (mode, named))
        {
          /* _Decimal128 must use an even/odd register pair.  This assumes
             that the register number is odd when fregno is odd.  */
          if (mode == TDmode && (cum->fregno % 2) == 1)
            cum->fregno++;

          if (cum->fregno + (FLOAT128_2REG_P (mode) ? 1 : 0)
              <= FP_ARG_V4_MAX_REG)
            return gen_rtx_REG (mode, cum->fregno);
          else
            return NULL_RTX;
        }
      else
        {
          int n_words = rs6000_arg_size (mode, type);
          int gregno = cum->sysv_gregno;

          /* Long long is put in (r3,r4), (r5,r6), (r7,r8) or (r9,r10).
             As does any other 2 word item such as complex int due to a
             historical mistake.  */
          if (n_words == 2)
            gregno += (1 - gregno) & 1;

          /* Multi-reg args are not split between registers and stack.  */
          if (gregno + n_words - 1 > GP_ARG_MAX_REG)
            return NULL_RTX;

          if (TARGET_32BIT && TARGET_POWERPC64)
            return rs6000_mixed_function_arg (mode, type,
                                              gregno - GP_ARG_MIN_REG);
          return gen_rtx_REG (mode, gregno);
        }
    }
  else
    {
      int align_words = rs6000_parm_start (mode, type, cum->words);

      /* _Decimal128 must be passed in an even/odd float register pair.
         This assumes that the register number is odd when fregno is odd.  */
      if (elt_mode == TDmode && (cum->fregno % 2) == 1)
        cum->fregno++;

      if (USE_FP_FOR_ARG_P (cum, elt_mode))
        {
          rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
          rtx r, off;
          int i, k = 0;
          unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
          int fpr_words;

          /* Do we also need to pass this argument in the parameter
             save area?  */
          if (type && (cum->nargs_prototype <= 0
                       || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
                           && TARGET_XL_COMPAT
                           && align_words >= GP_ARG_NUM_REG)))
            k = rs6000_psave_function_arg (mode, type, align_words, rvec);

          /* Describe where this argument goes in the fprs.  */
          for (i = 0; i < n_elts
                      && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
            {
              /* Check if the argument is split over registers and memory.
                 This can only ever happen for long double or _Decimal128;
                 complex types are handled via split_complex_arg.  */
              machine_mode fmode = elt_mode;
              if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
                {
                  gcc_assert (FLOAT128_2REG_P (fmode));
                  fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
                }

              r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
              off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
              rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
            }

          /* If there were not enough FPRs to hold the argument, the rest
             usually goes into memory.  However, if the current position
             is still within the register parameter area, a portion may
             actually have to go into GPRs.

             Note that it may happen that the portion of the argument
             passed in the first "half" of the first GPR was already
             passed in the last FPR as well.

             For unnamed arguments, we already set up GPRs to cover the
             whole argument in rs6000_psave_function_arg, so there is
             nothing further to do at this point.  */
          fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
          if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
              && cum->nargs_prototype > 0)
            {
              static bool warned;

              machine_mode rmode = TARGET_32BIT ? SImode : DImode;
              int n_words = rs6000_arg_size (mode, type);

              align_words += fpr_words;
              n_words -= fpr_words;

              do
                {
                  r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
                  off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
                  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
                }
              while (++align_words < GP_ARG_NUM_REG && --n_words != 0);

              if (!warned && warn_psabi)
                {
                  warned = true;
                  inform (input_location,
                          "the ABI of passing homogeneous float aggregates"
                          " has changed in GCC 5");
                }
            }

          return rs6000_finish_function_arg (mode, rvec, k);
        }
      else if (align_words < GP_ARG_NUM_REG)
        {
          if (TARGET_32BIT && TARGET_POWERPC64)
            return rs6000_mixed_function_arg (mode, type, align_words);

          return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
        }
      else
        return NULL_RTX;
    }
}
/* For an arg passed partly in registers and partly in memory, this is
   the number of bytes passed in registers.  For args passed entirely in
   registers or entirely in memory, zero.  When an arg is described by a
   PARALLEL, perhaps using more than one register type, this function
   returns the number of bytes used by the first element of the PARALLEL.  */

static int
rs6000_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
                          tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  bool passed_in_gprs = true;
  int ret = 0;
  int align_words;
  machine_mode elt_mode;
  int n_elts;

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  if (DEFAULT_ABI == ABI_V4)
    return 0;

  if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
    {
      /* If we are passing this arg in the fixed parameter save area (gprs or
         memory) as well as VRs, we do not use the partial bytes mechanism;
         instead, rs6000_function_arg will return a PARALLEL including a memory
         element as necessary.  Library support functions for IEEE 128-bit are
         assumed to not need the value passed both in GPRs and in vector
         registers.  */
      if (TARGET_64BIT && !cum->prototype
          && (!cum->libcall || !FLOAT128_VECTOR_P (elt_mode)))
        return 0;

      /* Otherwise, we pass in VRs only.  Check for partial copies.  */
      passed_in_gprs = false;
      if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
        ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
    }

  /* In this complicated case we just disable the partial_nregs code.  */
  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    return 0;

  align_words = rs6000_parm_start (mode, type, cum->words);

  if (USE_FP_FOR_ARG_P (cum, elt_mode))
    {
      unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;

      /* If we are passing this arg in the fixed parameter save area
         (gprs or memory) as well as FPRs, we do not use the partial
         bytes mechanism; instead, rs6000_function_arg will return a
         PARALLEL including a memory element as necessary.  */
      if (type
          && (cum->nargs_prototype <= 0
              || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
                  && TARGET_XL_COMPAT
                  && align_words >= GP_ARG_NUM_REG)))
        return 0;

      /* Otherwise, we pass in FPRs only.  Check for partial copies.  */
      passed_in_gprs = false;
      if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
        {
          /* Compute number of bytes / words passed in FPRs.  If there
             is still space available in the register parameter area
             *after* that amount, a part of the argument will be passed
             in GPRs.  In that case, the total amount passed in any
             registers is equal to the amount that would have been passed
             in GPRs if everything were passed there, so we fall back to
             the GPR code below to compute the appropriate value.  */
          int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
                     * MIN (8, GET_MODE_SIZE (elt_mode)));
          int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);

          if (align_words + fpr_words < GP_ARG_NUM_REG)
            passed_in_gprs = true;
          else
            ret = fpr;
        }
    }

  if (passed_in_gprs
      && align_words < GP_ARG_NUM_REG
      && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
    ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);

  if (ret != 0 && TARGET_DEBUG_ARG)
    fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);

  return ret;
}
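
/* Worked example (editorial, not part of the original source): on a
   64-bit target, a 40-byte BLKmode argument starting at
   align_words == 6 has rs6000_arg_size == 5 words; 6 + 5 > 8
   (GP_ARG_NUM_REG), so the GPR clause above returns
   (8 - 6) * 8 == 16 bytes passed in registers (r9 and r10), with the
   remaining 24 bytes in the parameter save area.  */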
/* A C expression that indicates when an argument must be passed by
   reference.  If nonzero for an argument, a copy of that argument is
   made in memory and a pointer to the argument is passed instead of
   the argument itself.  The pointer is passed in whatever way is
   appropriate for passing a pointer to that type.

   Under V.4, aggregates and long double are passed by reference.

   As an extension to all 32-bit ABIs, AltiVec vectors are passed by
   reference unless the AltiVec vector extension ABI is in force.

   As an extension to all ABIs, variable sized types are passed by
   reference.  */

static bool
rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
                          machine_mode mode, const_tree type,
                          bool named ATTRIBUTE_UNUSED)
{
  if (!type)
    return 0;

  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD
      && FLOAT128_IEEE_P (TYPE_MODE (type)))
    {
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: V4 IEEE 128-bit\n");
      return 1;
    }

  if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
    {
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
      return 1;
    }

  if (int_size_in_bytes (type) < 0)
    {
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
      return 1;
    }

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
    {
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
      return 1;
    }

  /* Pass synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_pass_big_vectors = false;
      if (TARGET_DEBUG_ARG)
        fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
      if (!warned_for_pass_big_vectors)
        {
          warning (OPT_Wpsabi, "GCC vector passed by reference: "
                   "non-standard ABI extension with no compatibility "
                   "guarantee");
          warned_for_pass_big_vectors = true;
        }
      return 1;
    }

  return 0;
}
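
/* Editorial examples, not part of the original source: under the V.4
   ABI "struct S { int i; };" is an aggregate and is passed by
   reference, as is __float128 when TARGET_IEEEQUAD; on any ABI a GCC
   vector wider than the hardware supports, e.g.

       typedef int v8si __attribute__ ((vector_size (32)));

   is a "synthetic vector" and is passed by reference, with a one-time
   -Wpsabi warning.  */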
/* Process parameter of type TYPE after ARGS_SO_FAR parameters were
   already processed.  Return true if the parameter must be passed
   (fully or partially) on the stack.  */

static bool
rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
{
  machine_mode mode;
  int unsignedp;
  rtx entry_parm;

  /* Catch errors.  */
  if (type == NULL || type == error_mark_node)
    return true;

  /* Handle types with no storage requirement.  */
  if (TYPE_MODE (type) == VOIDmode)
    return false;

  /* Handle complex types.  */
  if (TREE_CODE (type) == COMPLEX_TYPE)
    return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
            || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));

  /* Handle transparent aggregates.  */
  if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
      && TYPE_TRANSPARENT_AGGR (type))
    type = TREE_TYPE (first_field (type));

  /* See if this arg was passed by invisible reference.  */
  if (pass_by_reference (get_cumulative_args (args_so_far),
                         TYPE_MODE (type), type, true))
    type = build_pointer_type (type);

  /* Find mode as it is passed by the ABI.  */
  unsignedp = TYPE_UNSIGNED (type);
  mode = promote_mode (type, TYPE_MODE (type), &unsignedp);

  /* If we must pass in stack, we need a stack.  */
  if (rs6000_must_pass_in_stack (mode, type))
    return true;

  /* If there is no incoming register, we need a stack.  */
  entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
  if (entry_parm == NULL)
    return true;

  /* Likewise if we need to pass both in registers and on the stack.  */
  if (GET_CODE (entry_parm) == PARALLEL
      && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
    return true;

  /* Also true if we're partially in registers and partially not.  */
  if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
    return true;

  /* Update info on where next arg arrives in registers.  */
  rs6000_function_arg_advance (args_so_far, mode, type, true);
  return false;
}
/* Return true if FUN has no prototype, has a variable argument
   list, or passes any parameter in memory.  */

static bool
rs6000_function_parms_need_stack (tree fun, bool incoming)
{
  tree fntype, result;
  CUMULATIVE_ARGS args_so_far_v;
  cumulative_args_t args_so_far;

  if (!fun)
    /* Must be a libcall, all of which only use reg parms.  */
    return false;

  fntype = fun;
  if (!TYPE_P (fun))
    fntype = TREE_TYPE (fun);

  /* Varargs functions need the parameter save area.  */
  if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
    return true;

  INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
  args_so_far = pack_cumulative_args (&args_so_far_v);

  /* When incoming, we will have been passed the function decl.
     It is necessary to use the decl to handle K&R style functions,
     where TYPE_ARG_TYPES may not be available.  */
  if (incoming)
    {
      gcc_assert (DECL_P (fun));
      result = DECL_RESULT (fun);
    }
  else
    result = TREE_TYPE (fntype);

  if (result && aggregate_value_p (result, fntype))
    {
      if (!TYPE_P (result))
        result = TREE_TYPE (result);
      result = build_pointer_type (result);
      rs6000_parm_needs_stack (args_so_far, result);
    }

  if (incoming)
    {
      tree parm;

      for (parm = DECL_ARGUMENTS (fun);
           parm && parm != void_list_node;
           parm = TREE_CHAIN (parm))
        if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
          return true;
    }
  else
    {
      function_args_iterator args_iter;
      tree arg_type;

      FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
        if (rs6000_parm_needs_stack (args_so_far, arg_type))
          return true;
    }

  return false;
}
/* Return the size of the REG_PARM_STACK_SPACE area for FUN.  This is
   usually a constant depending on the ABI.  However, in the ELFv2 ABI
   the register parameter area is optional when calling a function that
   has a prototype in scope, has no variable argument list, and passes
   all parameters in registers.  */

int
rs6000_reg_parm_stack_space (tree fun, bool incoming)
{
  int reg_parm_stack_space;

  switch (DEFAULT_ABI)
    {
    default:
      reg_parm_stack_space = 0;
      break;

    case ABI_AIX:
    case ABI_DARWIN:
      reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
      break;

    case ABI_ELFv2:
      /* ??? Recomputing this every time is a bit expensive.  Is there
         a place to cache this information?  */
      if (rs6000_function_parms_need_stack (fun, incoming))
        reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
      else
        reg_parm_stack_space = 0;
      break;
    }

  return reg_parm_stack_space;
}
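
/* Worked example (editorial, not part of the original source): for
   "int f (int, double)" compiled for ELFv2, both parameters are passed
   in registers (r3 and f1), a prototype is in scope and there are no
   varargs, so this function returns 0 and no register parameter area
   is needed; under AIX it would unconditionally return 64 (64-bit) or
   32 (32-bit) bytes.  */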
static void
rs6000_move_block_from_reg (int regno, rtx x, int nregs)
{
  int i;
  machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;

  if (nregs == 0)
    return;

  for (i = 0; i < nregs; i++)
    {
      rtx tem = adjust_address_nv (x, reg_mode,
                                   i * GET_MODE_SIZE (reg_mode));
      if (reload_completed)
        {
          if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
            tem = NULL_RTX;
          else
            tem = simplify_gen_subreg (reg_mode, x, BLKmode,
                                       i * GET_MODE_SIZE (reg_mode));
        }
      else
        tem = replace_equiv_address (tem, XEXP (tem, 0));

      gcc_assert (tem);

      emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
    }
}
/* Perform any actions needed for a function that is receiving a
   variable number of arguments.

   CUM is as above.

   MODE and TYPE are the mode and type of the current parameter.

   PRETEND_SIZE is a variable that should be set to the amount of stack
   that must be pushed by the prolog to pretend that our caller pushed
   it.

   Normally, this macro will push all remaining incoming registers on the
   stack and set PRETEND_SIZE to the length of the registers pushed.  */

static void
setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
                        tree type, int *pretend_size ATTRIBUTE_UNUSED,
                        int no_rtl)
{
  CUMULATIVE_ARGS next_cum;
  int reg_size = TARGET_32BIT ? 4 : 8;
  rtx save_area = NULL_RTX, mem;
  int first_reg_offset;
  alias_set_type set;

  /* Skip the last named argument.  */
  next_cum = *get_cumulative_args (cum);
  rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);

  if (DEFAULT_ABI == ABI_V4)
    {
      first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;

      if (! no_rtl)
        {
          int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
          HOST_WIDE_INT offset = 0;

          /* Try to optimize the size of the varargs save area.
             The ABI requires that ap.reg_save_area is doubleword
             aligned, but we don't need to allocate space for all
             the bytes, only those to which we actually will save
             anything.  */
          if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
            gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
          if (TARGET_HARD_FLOAT
              && next_cum.fregno <= FP_ARG_V4_MAX_REG
              && cfun->va_list_fpr_size)
            {
              if (gpr_reg_num)
                fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
                           * UNITS_PER_FP_WORD;
              if (cfun->va_list_fpr_size
                  < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
                fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
              else
                fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
                            * UNITS_PER_FP_WORD;
            }
          if (gpr_reg_num)
            {
              offset = -((first_reg_offset * reg_size) & ~7);
              if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
                {
                  gpr_reg_num = cfun->va_list_gpr_size;
                  if (reg_size == 4 && (first_reg_offset & 1))
                    gpr_reg_num++;
                }
              gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
            }
          else if (fpr_size)
            offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
                       * UNITS_PER_FP_WORD
                     - (int) (GP_ARG_NUM_REG * reg_size);

          if (gpr_size + fpr_size)
            {
              rtx reg_save_area
                = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
              gcc_assert (GET_CODE (reg_save_area) == MEM);
              reg_save_area = XEXP (reg_save_area, 0);
              if (GET_CODE (reg_save_area) == PLUS)
                {
                  gcc_assert (XEXP (reg_save_area, 0)
                              == virtual_stack_vars_rtx);
                  gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
                  offset += INTVAL (XEXP (reg_save_area, 1));
                }
              else
                gcc_assert (reg_save_area == virtual_stack_vars_rtx);
            }

          cfun->machine->varargs_save_offset = offset;
          save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
        }
    }
  else
    {
      first_reg_offset = next_cum.words;
      save_area = crtl->args.internal_arg_pointer;

      if (targetm.calls.must_pass_in_stack (mode, type))
        first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
    }

  set = get_varargs_alias_set ();
  if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
      && cfun->va_list_gpr_size)
    {
      int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;

      if (va_list_gpr_counter_field)
        /* V4 va_list_gpr_size counts number of registers needed.  */
        n_gpr = cfun->va_list_gpr_size;
      else
        /* char * va_list instead counts number of bytes needed.  */
        n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;

      if (nregs > n_gpr)
        nregs = n_gpr;

      mem = gen_rtx_MEM (BLKmode,
                         plus_constant (Pmode, save_area,
                                        first_reg_offset * reg_size));
      MEM_NOTRAP_P (mem) = 1;
      set_mem_alias_set (mem, set);
      set_mem_align (mem, BITS_PER_WORD);

      rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
                                  nregs);
    }

  /* Save FP registers if needed.  */
  if (DEFAULT_ABI == ABI_V4
      && TARGET_HARD_FLOAT
      && ! no_rtl
      && next_cum.fregno <= FP_ARG_V4_MAX_REG
      && cfun->va_list_fpr_size)
    {
      int fregno = next_cum.fregno, nregs;
      rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
      rtx lab = gen_label_rtx ();
      int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
                                               * UNITS_PER_FP_WORD);

      emit_jump_insn
        (gen_rtx_SET (pc_rtx,
                      gen_rtx_IF_THEN_ELSE (VOIDmode,
                                            gen_rtx_NE (VOIDmode, cr1,
                                                        const0_rtx),
                                            gen_rtx_LABEL_REF (VOIDmode, lab),
                                            pc_rtx)));

      for (nregs = 0;
           fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
           fregno++, off += UNITS_PER_FP_WORD, nregs++)
        {
          mem = gen_rtx_MEM (TARGET_HARD_FLOAT ? DFmode : SFmode,
                             plus_constant (Pmode, save_area, off));
          MEM_NOTRAP_P (mem) = 1;
          set_mem_alias_set (mem, set);
          set_mem_align (mem, GET_MODE_ALIGNMENT (
                         TARGET_HARD_FLOAT ? DFmode : SFmode));
          emit_move_insn (mem, gen_rtx_REG (
                          TARGET_HARD_FLOAT ? DFmode : SFmode, fregno));
        }

      emit_label (lab);
    }
}
/* Create the va_list data type.  */

static tree
rs6000_build_builtin_va_list (void)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;

  /* For AIX, prefer 'char *' because that's what the system
     header files like.  */
  if (DEFAULT_ABI != ABI_V4)
    return build_pointer_type (char_type_node);

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
                          get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
                      unsigned_char_type_node);
  f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
                      unsigned_char_type_node);
  /* Give the two bytes of padding a name, so that -Wpadded won't warn on
     every user file.  */
  f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                      get_identifier ("reserved"), short_unsigned_type_node);
  f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                      get_identifier ("overflow_arg_area"),
                      ptr_type_node);
  f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
                      get_identifier ("reg_save_area"),
                      ptr_type_node);

  va_list_gpr_counter_field = f_gpr;
  va_list_fpr_counter_field = f_fpr;

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_res) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  DECL_CHAIN (f_gpr) = f_fpr;
  DECL_CHAIN (f_fpr) = f_res;
  DECL_CHAIN (f_res) = f_ovf;
  DECL_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}
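
/* For reference (editorial note, not part of the original source), the
   record built above corresponds to the C-level layout mandated by the
   32-bit SVR4 ABI:

       typedef struct __va_list_tag
       {
         unsigned char gpr;          (index of the next save-area GPR, 0..8)
         unsigned char fpr;          (index of the next save-area FPR, 0..8)
         unsigned short reserved;    (the named padding created above)
         void *overflow_arg_area;    (arguments passed on the stack)
         void *reg_save_area;        (where r3..r10 / f1..f8 were dumped)
       } __va_list_tag;

   and the returned type is the one-element array __va_list_tag[1].  */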
/* Implement va_start.  */

static void
rs6000_va_start (tree valist, rtx nextarg)
{
  HOST_WIDE_INT words, n_gpr, n_fpr;
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;

  /* Only SVR4 needs something special.  */
  if (DEFAULT_ABI != ABI_V4)
    {
      std_expand_builtin_va_start (valist, nextarg);
      return;
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_simple_mem_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
                f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
                f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
                f_sav, NULL_TREE);

  /* Count number of gp and fp argument registers used.  */
  words = crtl->args.info.words;
  n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
               GP_ARG_NUM_REG);
  n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
               FP_ARG_NUM_REG);

  if (TARGET_DEBUG_ARG)
    fprintf (stderr, "va_start: words = " HOST_WIDE_INT_PRINT_DEC", n_gpr = "
             HOST_WIDE_INT_PRINT_DEC", n_fpr = " HOST_WIDE_INT_PRINT_DEC"\n",
             words, n_gpr, n_fpr);

  if (cfun->va_list_gpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
                  build_int_cst (NULL_TREE, n_gpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  if (cfun->va_list_fpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
                  build_int_cst (NULL_TREE, n_fpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

#ifdef HAVE_AS_GNU_ATTRIBUTE
      if (call_ABI_of_interest (cfun->decl))
        rs6000_passes_float = true;
#endif
    }

  /* Find the overflow area.  */
  t = make_tree (TREE_TYPE (ovf), crtl->args.internal_arg_pointer);
  if (words != 0)
    t = fold_build_pointer_plus_hwi (t, words * MIN_UNITS_PER_WORD);
  t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  /* If there were no va_arg invocations, don't set up the register
     save area.  */
  if (!cfun->va_list_gpr_size
      && !cfun->va_list_fpr_size
      && n_gpr < GP_ARG_NUM_REG
      && n_fpr < FP_ARG_V4_MAX_REG)
    return;

  /* Find the register save area.  */
  t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
  if (cfun->machine->varargs_save_offset)
    t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
  t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
/* Implement va_arg.  */

static tree
rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
                        gimple_seq *post_p)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, reg, t, u;
  int size, rsize, n_reg, sav_ofs, sav_scale;
  tree lab_false, lab_over, addr;
  int align;
  tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
  int regalign = 0;
  gimple *stmt;

  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
    {
      t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
      return build_va_arg_indirect_ref (t);
    }

  /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
     earlier version of gcc, with the property that it always applied alignment
     adjustments to the va-args (even for zero-sized types).  The cheapest way
     to deal with this is to replicate the effect of the part of
     std_gimplify_va_arg_expr that carries out the align adjust, for the case
     of relevance.
     We don't need to check for pass-by-reference because of the test above.
     We can return a simplified answer, since we know there's no offset to
     add.  */

  if (((TARGET_MACHO
        && rs6000_darwin64_abi)
       || DEFAULT_ABI == ABI_ELFv2
       || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
      && integer_zerop (TYPE_SIZE (type)))
    {
      unsigned HOST_WIDE_INT align, boundary;
      tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
      align = PARM_BOUNDARY / BITS_PER_UNIT;
      boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
      if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
        boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
      boundary /= BITS_PER_UNIT;
      if (boundary > align)
        {
          tree t;
          /* This updates arg ptr by the amount that would be necessary
             to align the zero-sized (but not zero-alignment) item.  */
          t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
                      fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
          gimplify_and_add (t, pre_p);

          t = fold_convert (sizetype, valist_tmp);
          t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
                      fold_convert (TREE_TYPE (valist),
                                    fold_build2 (BIT_AND_EXPR, sizetype, t,
                                                 size_int (-boundary))));
          t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
          gimplify_and_add (t, pre_p);
        }
      /* Since it is zero-sized there's no increment for the item itself. */
      valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
      return build_va_arg_indirect_ref (valist_tmp);
    }

  if (DEFAULT_ABI != ABI_V4)
    {
      if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
        {
          tree elem_type = TREE_TYPE (type);
          machine_mode elem_mode = TYPE_MODE (elem_type);
          int elem_size = GET_MODE_SIZE (elem_mode);

          if (elem_size < UNITS_PER_WORD)
            {
              tree real_part, imag_part;
              gimple_seq post = NULL;

              real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
                                                  &post);
              /* Copy the value into a temporary, lest the formal temporary
                 be reused out from under us.  */
              real_part = get_initialized_tmp_var (real_part, pre_p, &post);
              gimple_seq_add_seq (pre_p, post);

              imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
                                                  post_p);

              return build2 (COMPLEX_EXPR, type, real_part, imag_part);
            }
        }

      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
                f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
                f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
                f_sav, NULL_TREE);

  size = int_size_in_bytes (type);
  rsize = (size + 3) / 4;
  int pad = 4 * rsize - size;
  align = 1;

  machine_mode mode = TYPE_MODE (type);
  if (abi_v4_pass_in_fpr (mode, false))
    {
      /* FP args go in FP registers, if present.  */
      reg = fpr;
      n_reg = (size + 7) / 8;
      sav_ofs = (TARGET_HARD_FLOAT ? 8 : 4) * 4;
      sav_scale = (TARGET_HARD_FLOAT ? 8 : 4);
      if (mode != SFmode && mode != SDmode)
        align = 8;
    }
  else
    {
      /* Otherwise into GP registers.  */
      reg = gpr;
      n_reg = rsize;
      sav_ofs = 0;
      sav_scale = 4;
      if (n_reg == 2)
        align = 8;
    }

  /* Pull the value out of the saved registers....  */

  lab_over = NULL;
  addr = create_tmp_var (ptr_type_node, "addr");

  /*  AltiVec vectors never go in registers when -mabi=altivec.  */
  if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
    align = 16;
  else
    {
      lab_false = create_artificial_label (input_location);
      lab_over = create_artificial_label (input_location);

      /* Long long is aligned in the registers.  As are any other 2 gpr
         item such as complex int due to a historical mistake.  */
      u = reg;
      if (n_reg == 2 && reg == gpr)
        {
          regalign = 1;
          u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
                      build_int_cst (TREE_TYPE (reg), n_reg - 1));
          u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
                      unshare_expr (reg), u);
        }
      /* _Decimal128 is passed in even/odd fpr pairs; the stored
         reg number is 0 for f1, so we want to make it odd.  */
      else if (reg == fpr && mode == TDmode)
        {
          t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
                      build_int_cst (TREE_TYPE (reg), 1));
          u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
        }

      t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
      t = build2 (GE_EXPR, boolean_type_node, u, t);
      u = build1 (GOTO_EXPR, void_type_node, lab_false);
      t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
      gimplify_and_add (t, pre_p);

      t = sav;
      if (sav_ofs)
        t = fold_build_pointer_plus_hwi (sav, sav_ofs);

      u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
                  build_int_cst (TREE_TYPE (reg), n_reg));
      u = fold_convert (sizetype, u);
      u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
      t = fold_build_pointer_plus (t, u);

      /* _Decimal32 varargs are located in the second word of the 64-bit
         FP register for 32-bit binaries.  */
      if (TARGET_32BIT && TARGET_HARD_FLOAT && mode == SDmode)
        t = fold_build_pointer_plus_hwi (t, size);

      /* Args are passed right-aligned.  */
      if (BYTES_BIG_ENDIAN)
        t = fold_build_pointer_plus_hwi (t, pad);

      gimplify_assign (addr, t, pre_p);

      gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));

      stmt = gimple_build_label (lab_false);
      gimple_seq_add_stmt (pre_p, stmt);

      if ((n_reg == 2 && !regalign) || n_reg > 2)
        {
          /* Ensure that we don't find any more args in regs.
             Alignment has been taken care of for the special cases.  */
          gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
        }
    }

  /* ... otherwise out of the overflow area.  */

  /* Care for on-stack alignment if needed.  */
  t = ovf;
  if (align != 1)
    {
      t = fold_build_pointer_plus_hwi (t, align - 1);
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
                  build_int_cst (TREE_TYPE (t), -align));
    }

  /* Args are passed right-aligned.  */
  if (BYTES_BIG_ENDIAN)
    t = fold_build_pointer_plus_hwi (t, pad);

  gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);

  gimplify_assign (unshare_expr (addr), t, pre_p);

  t = fold_build_pointer_plus_hwi (t, size);
  gimplify_assign (unshare_expr (ovf), t, pre_p);

  if (lab_over)
    {
      stmt = gimple_build_label (lab_over);
      gimple_seq_add_stmt (pre_p, stmt);
    }

  if (STRICT_ALIGNMENT
      && (TYPE_ALIGN (type)
          > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
    {
      /* The value (of type complex double, for example) may not be
         aligned in memory in the saved registers, so copy via a
         temporary.  (This is the same code as used for SPARC.)  */
      tree tmp = create_tmp_var (type, "va_arg_tmp");
      tree dest_addr = build_fold_addr_expr (tmp);

      tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
                                   3, dest_addr, addr, size_int (rsize * 4));
      TREE_ADDRESSABLE (tmp) = 1;

      gimplify_and_add (copy, pre_p);
      addr = dest_addr;
    }

  addr = fold_convert (ptrtype, addr);
  return build_va_arg_indirect_ref (addr);
}
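
/* Worked example (editorial, not part of the original source): for
   va_arg (ap, double) under the V.4 hard-float ABI, the code above
   selects reg = fpr, n_reg = 1, sav_ofs = 32 and sav_scale = 8, so the
   in-register branch computes

       addr = reg_save_area + 32 + fpr++ * 8

   (the eight GPRs occupy the first 32 bytes of the save area), and
   control falls through to the overflow area once fpr reaches 8.  */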
static void
def_builtin (const char *name, tree type, enum rs6000_builtins code)
{
  tree t;
  unsigned classify = rs6000_builtin_info[(int)code].attr;
  const char *attr_string = "";

  gcc_assert (name != NULL);
  gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));

  if (rs6000_builtin_decls[(int)code])
    fatal_error (input_location,
                 "internal error: builtin function %qs already processed",
                 name);

  rs6000_builtin_decls[(int)code] = t =
    add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);

  /* Set any special attributes.  */
  if ((classify & RS6000_BTC_CONST) != 0)
    {
      /* const function, function only depends on the inputs.  */
      TREE_READONLY (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", const";
    }
  else if ((classify & RS6000_BTC_PURE) != 0)
    {
      /* pure function, function can read global memory, but does not set any
         global memory.  */
      DECL_PURE_P (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", pure";
    }
  else if ((classify & RS6000_BTC_FP) != 0)
    {
      /* Function is a math function.  If rounding mode is on, then treat the
         function as not reading global memory, but it can have arbitrary side
         effects.  If it is off, then assume the function is a const function.
         This mimics the ATTR_MATHFN_FPROUNDING attribute in
         builtin-attribute.def that is used for the math functions. */
      TREE_NOTHROW (t) = 1;
      if (flag_rounding_math)
        {
          DECL_PURE_P (t) = 1;
          DECL_IS_NOVOPS (t) = 1;
          attr_string = ", fp, pure";
        }
      else
        {
          TREE_READONLY (t) = 1;
          attr_string = ", fp, const";
        }
    }
  else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
    gcc_unreachable ();

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
             (int)code, name, attr_string);
}
/* Simple ternary operations: VECd = foo (VECa, VECb, VECc).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_3arg[] =
{
#include "rs6000-builtin.def"
};

/* DST operations: void foo (void *, const int, const char).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_dst[] =
{
#include "rs6000-builtin.def"
};

/* Simple binary operations: VECc = foo (VECa, VECb).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_2arg[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

/* AltiVec predicates.  */

static const struct builtin_description bdesc_altivec_preds[] =
{
#include "rs6000-builtin.def"
};

/* ABS* operations.  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_abs[] =
{
#include "rs6000-builtin.def"
};

/* Simple unary operations: VECb = foo (unsigned literal) or VECb =
   foo (VECa, unsigned literal).  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_1arg[] =
{
#include "rs6000-builtin.def"
};

/* Simple no-argument operations: result = __builtin_darn_32 ().  */

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_0arg[] =
{
#include "rs6000-builtin.def"
};

/* HTM builtins.  */
#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_0(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_htm[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_0
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_X
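
/* Editorial illustration, not part of the original source: with the
   RS6000_BUILTIN_2 definition active, an (abbreviated) entry in
   rs6000-builtin.def such as

       RS6000_BUILTIN_2 (ALTIVEC_BUILTIN_VADDUBM, "vaddubm", MASK, ATTR,
                         ICODE)

   expands to the initializer

       { MASK, ICODE, "vaddubm", ALTIVEC_BUILTIN_VADDUBM },

   inside bdesc_2arg, while every other RS6000_BUILTIN_* entry expands
   to nothing; each table therefore collects exactly one class of
   builtin.  */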
/* Return true if a builtin function is overloaded.  */
bool
rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
{
  return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
}

const char *
rs6000_overloaded_builtin_name (enum rs6000_builtins fncode)
{
  return rs6000_builtin_info[(int)fncode].name;
}
/* Expand an expression EXP that calls a builtin without arguments.  */
static rtx
rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
{
  rtx pat;
  machine_mode tmode = insn_data[icode].operand[0].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  pat = GEN_FCN (icode) (target);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode mode0 = insn_data[icode].operand[0].mode;
  machine_mode mode1 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (GET_CODE (op0) != CONST_INT
      || INTVAL (op0) > 255
      || INTVAL (op0) < 0)
    {
      error ("argument 1 must be an 8-bit field value");
      return const0_rtx;
    }

  if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (op0, op1);
  if (!pat)
    return const0_rtx;
  emit_insn (pat);

  return NULL_RTX;
}
static rtx
rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vspltisb
      || icode == CODE_FOR_altivec_vspltish
      || icode == CODE_FOR_altivec_vspltisw)
    {
      /* Only allow 5-bit *signed* literals.  */
      if (GET_CODE (op0) != CONST_INT
          || INTVAL (op0) > 15
          || INTVAL (op0) < -16)
        {
          error ("argument 1 must be a 5-bit signed literal");
          return CONST0_RTX (tmode);
        }
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch1, scratch2;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  scratch1 = gen_reg_rtx (mode0);
  scratch2 = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;
  machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vcfux
      || icode == CODE_FOR_altivec_vcfsx
      || icode == CODE_FOR_altivec_vctsxs
      || icode == CODE_FOR_altivec_vctuxs
      || icode == CODE_FOR_altivec_vspltb
      || icode == CODE_FOR_altivec_vsplth
      || icode == CODE_FOR_altivec_vspltw)
    {
      /* Only allow 5-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
          || TREE_INT_CST_LOW (arg1) & ~0x1f)
        {
          error ("argument 2 must be a 5-bit unsigned literal");
          return CONST0_RTX (tmode);
        }
    }
  else if (icode == CODE_FOR_dfptstsfi_eq_dd
           || icode == CODE_FOR_dfptstsfi_lt_dd
           || icode == CODE_FOR_dfptstsfi_gt_dd
           || icode == CODE_FOR_dfptstsfi_unordered_dd
           || icode == CODE_FOR_dfptstsfi_eq_td
           || icode == CODE_FOR_dfptstsfi_lt_td
           || icode == CODE_FOR_dfptstsfi_gt_td
           || icode == CODE_FOR_dfptstsfi_unordered_td)
    {
      /* Only allow 6-bit unsigned literals.  */
      STRIP_NOPS (arg0);
      if (TREE_CODE (arg0) != INTEGER_CST
          || !IN_RANGE (TREE_INT_CST_LOW (arg0), 0, 63))
        {
          error ("argument 1 must be a 6-bit unsigned literal");
          return CONST0_RTX (tmode);
        }
    }
  else if (icode == CODE_FOR_xststdcqp_kf
           || icode == CODE_FOR_xststdcqp_tf
           || icode == CODE_FOR_xststdcdp
           || icode == CODE_FOR_xststdcsp
           || icode == CODE_FOR_xvtstdcdp
           || icode == CODE_FOR_xvtstdcsp)
    {
      /* Only allow 7-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
          || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 127))
        {
          error ("argument 2 must be a 7-bit unsigned literal");
          return CONST0_RTX (tmode);
        }
    }
  else if (icode == CODE_FOR_unpackv1ti
           || icode == CODE_FOR_unpackkf
           || icode == CODE_FOR_unpacktf
           || icode == CODE_FOR_unpackif
           || icode == CODE_FOR_unpacktd)
    {
      /* Only allow 1-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
          || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
        {
          error ("argument 2 must be a 1-bit unsigned literal");
          return CONST0_RTX (tmode);
        }
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
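/* Illustrative example (not from this file; assumes -maltivec): the
   binop literal classes above surface through <altivec.h> intrinsics
   such as

     vector float f = vec_ctf (vsi, 3);    vcfsx: the scale factor must be
                                           a 5-bit unsigned literal
     vector int   s = vec_splat (vsi, 1);  vspltw: the element selector is
                                           likewise a 5-bit unsigned literal

   where a non-constant or out-of-range literal argument reaches the
   error () calls above instead of generating rtl.  */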
static rtx
altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch;
  tree cr6_form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  machine_mode tmode = SImode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;
  machine_mode mode1 = insn_data[icode].operand[2].mode;
  int cr6_form_int;

  if (TREE_CODE (cr6_form) != INTEGER_CST)
    {
      error ("argument 1 of %qs must be a constant",
             "__builtin_altivec_predicate");
      return const0_rtx;
    }
  else
    cr6_form_int = TREE_INT_CST_LOW (cr6_form);

  gcc_assert (mode0 == mode1);

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  /* Note that for many of the relevant operations (e.g. cmpne or
     cmpeq) with float or double operands, it makes more sense for the
     mode of the allocated scratch register to select a vector of
     integer.  But the choice to copy the mode of operand 0 was made
     long ago and there are no plans to change it.  */
  scratch = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  /* The vec_any* and vec_all* predicates use the same opcodes for two
     different operations, but the bits in CR6 will be different
     depending on what information we want.  So we have to play tricks
     with CR6 to get the right bits out.

     If you think this is disgusting, look at the specs for the
     AltiVec predicates.  */
  switch (cr6_form_int)
    {
    case 0:
      emit_insn (gen_cr6_test_for_zero (target));
      break;
    case 1:
      emit_insn (gen_cr6_test_for_zero_reverse (target));
      break;
    case 2:
      emit_insn (gen_cr6_test_for_lt (target));
      break;
    case 3:
      emit_insn (gen_cr6_test_for_lt_reverse (target));
      break;
    default:
      error ("argument 1 of %qs is out of range",
             "__builtin_altivec_predicate");
      break;
    }

  return target;
}
rtx
swap_endian_selector_for_mode (machine_mode mode)
{
  unsigned int swap1[16] = {15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0};
  unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
  unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
  unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};

  unsigned int *swaparray, i;
  rtx perm[16];

  switch (mode)
    {
    case E_V1TImode:
      swaparray = swap1;
      break;
    case E_V2DFmode:
    case E_V2DImode:
      swaparray = swap2;
      break;
    case E_V4SFmode:
    case E_V4SImode:
      swaparray = swap4;
      break;
    case E_V8HImode:
      swaparray = swap8;
      break;
    default:
      gcc_unreachable ();
    }

  for (i = 0; i < 16; ++i)
    perm[i] = GEN_INT (swaparray[i]);

  return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode,
						     gen_rtvec_v (16, perm)));
}
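/* Worked example: for V4SImode the selector chosen above is swap4, i.e.
   the byte permutation {3,2,1,0, 7,6,5,4, 11,10,9,8, 15,14,13,12}.  Used
   as a vperm control vector, it reverses the four bytes inside each
   32-bit element, switching the elements between big- and little-endian
   byte order while leaving the element order itself unchanged.  */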
static rtx
altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
{
  rtx pat, addr, rawaddr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = Pmode;
  machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  /* For LVX, express the RTL accurately by ANDing the address with -16.
     LVXL and LVE*X expand to use UNSPECs to hide their special behavior,
     so the raw address is fine.  */
  if (icode == CODE_FOR_altivec_lvx_v1ti
      || icode == CODE_FOR_altivec_lvx_v2df
      || icode == CODE_FOR_altivec_lvx_v2di
      || icode == CODE_FOR_altivec_lvx_v4sf
      || icode == CODE_FOR_altivec_lvx_v4si
      || icode == CODE_FOR_altivec_lvx_v8hi
      || icode == CODE_FOR_altivec_lvx_v16qi)
    {
      if (op0 == const0_rtx)
	rawaddr = op1;
      else
	{
	  op0 = copy_to_mode_reg (mode0, op0);
	  rawaddr = gen_rtx_PLUS (Pmode, op1, op0);
	}
      addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
      addr = gen_rtx_MEM (blk ? BLKmode : tmode, addr);

      emit_insn (gen_rtx_SET (target, addr));
    }
  else
    {
      if (op0 == const0_rtx)
	addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
      else
	{
	  op0 = copy_to_mode_reg (mode0, op0);
	  addr = gen_rtx_MEM (blk ? BLKmode : tmode,
			      gen_rtx_PLUS (Pmode, op1, op0));
	}

      pat = GEN_FCN (icode) (target, addr);
      if (! pat)
	return 0;
      emit_insn (pat);
    }

  return target;
}
static rtx
altivec_expand_stxvl_builtin (enum insn_code icode, tree exp)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  machine_mode mode0 = insn_data[icode].operand[0].mode;
  machine_mode mode1 = insn_data[icode].operand[1].mode;
  machine_mode mode2 = insn_data[icode].operand[2].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return NULL_RTX;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return NULL_RTX;

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);
  if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
    op2 = copy_to_mode_reg (mode2, op2);

  pat = GEN_FCN (icode) (op0, op1, op2);
  if (pat)
    emit_insn (pat);

  return NULL_RTX;
}
static rtx
altivec_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat, addr, rawaddr;
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode smode = insn_data[icode].operand[1].mode;
  machine_mode mode1 = Pmode;
  machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  op2 = copy_to_mode_reg (mode2, op2);

  /* For STVX, express the RTL accurately by ANDing the address with -16.
     STVXL and STVE*X expand to use UNSPECs to hide their special behavior,
     so the raw address is fine.  */
  if (icode == CODE_FOR_altivec_stvx_v2df
      || icode == CODE_FOR_altivec_stvx_v2di
      || icode == CODE_FOR_altivec_stvx_v4sf
      || icode == CODE_FOR_altivec_stvx_v4si
      || icode == CODE_FOR_altivec_stvx_v8hi
      || icode == CODE_FOR_altivec_stvx_v16qi)
    {
      if (op1 == const0_rtx)
	rawaddr = op2;
      else
	{
	  op1 = copy_to_mode_reg (mode1, op1);
	  rawaddr = gen_rtx_PLUS (Pmode, op2, op1);
	}

      addr = gen_rtx_AND (Pmode, rawaddr, gen_rtx_CONST_INT (Pmode, -16));
      addr = gen_rtx_MEM (tmode, addr);

      op0 = copy_to_mode_reg (tmode, op0);

      emit_insn (gen_rtx_SET (addr, op0));
    }
  else
    {
      if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
	op0 = copy_to_mode_reg (smode, op0);

      if (op1 == const0_rtx)
	addr = gen_rtx_MEM (tmode, op2);
      else
	{
	  op1 = copy_to_mode_reg (mode1, op1);
	  addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op2, op1));
	}

      pat = GEN_FCN (icode) (addr, op0);
      if (pat)
	emit_insn (pat);
    }

  return NULL_RTX;
}
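/* Likewise for stores (illustrative, not from this file): stvx also
   drops the low four address bits, so with -maltivec

     vec_st (v, off, ptr);   stores to (ptr + off) & ~15

   which is exactly the truncation the gen_rtx_AND above encodes in
   the rtl.  */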
/* Return the appropriate SPR number associated with the given builtin.  */
static inline HOST_WIDE_INT
htm_spr_num (enum rs6000_builtins code)
{
  if (code == HTM_BUILTIN_GET_TFHAR
      || code == HTM_BUILTIN_SET_TFHAR)
    return TFHAR_SPR;
  else if (code == HTM_BUILTIN_GET_TFIAR
	   || code == HTM_BUILTIN_SET_TFIAR)
    return TFIAR_SPR;
  else if (code == HTM_BUILTIN_GET_TEXASR
	   || code == HTM_BUILTIN_SET_TEXASR)
    return TEXASR_SPR;
  gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
	      || code == HTM_BUILTIN_SET_TEXASRU);
  return TEXASRU_SPR;
}
/* Return the appropriate SPR regno associated with the given builtin.  */
static inline HOST_WIDE_INT
htm_spr_regno (enum rs6000_builtins code)
{
  if (code == HTM_BUILTIN_GET_TFHAR
      || code == HTM_BUILTIN_SET_TFHAR)
    return TFHAR_REGNO;
  else if (code == HTM_BUILTIN_GET_TFIAR
	   || code == HTM_BUILTIN_SET_TFIAR)
    return TFIAR_REGNO;
  gcc_assert (code == HTM_BUILTIN_GET_TEXASR
	      || code == HTM_BUILTIN_SET_TEXASR
	      || code == HTM_BUILTIN_GET_TEXASRU
	      || code == HTM_BUILTIN_SET_TEXASRU);
  return TEXASR_REGNO;
}
/* Return the correct ICODE value depending on whether we are
   setting or reading the HTM SPRs.  */
static inline enum insn_code
rs6000_htm_spr_icode (bool nonvoid)
{
  if (nonvoid)
    return (TARGET_POWERPC64) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
  else
    return (TARGET_POWERPC64) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
}
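/* Illustrative example (not from this file; assumes -mhtm and
   <htmintrin.h>; handle_abort is a placeholder): the helpers above back
   the HTM intrinsics, e.g.

     if (__builtin_tbegin (0))
       {
         ...                  transactional code
         __builtin_tend (0);
       }
     else
       handle_abort ();       tbegin. failed; the CR-to-target code in
                              htm_expand_builtin selected this path

   and __builtin_get_texasr () reads its SPR through the htm_mfspr
   pattern chosen by rs6000_htm_spr_icode.  */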
/* Expand the HTM builtin in EXP and store the result in TARGET.
   Store true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
htm_expand_builtin (tree exp, rtx target, bool * expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  if (!TARGET_POWERPC64
      && (fcode == HTM_BUILTIN_TABORTDC
	  || fcode == HTM_BUILTIN_TABORTDCI))
    {
      size_t uns_fcode = (size_t) fcode;
      const char *name = rs6000_builtin_info[uns_fcode].name;
      error ("builtin %qs is only valid in 64-bit mode", name);
      return const0_rtx;
    }

  /* Expand the HTM builtins.  */
  d = bdesc_htm;
  for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
    if (d->code == fcode)
      {
	rtx op[MAX_HTM_OPERANDS], pat;
	int nopnds = 0;
	tree arg;
	call_expr_arg_iterator iter;
	unsigned attr = rs6000_builtin_info[fcode].attr;
	enum insn_code icode = d->icode;
	const struct insn_operand_data *insn_op;
	bool uses_spr = (attr & RS6000_BTC_SPR);
	rtx cr = NULL_RTX;

	if (uses_spr)
	  icode = rs6000_htm_spr_icode (nonvoid);
	insn_op = &insn_data[icode].operand[0];

	if (nonvoid)
	  {
	    machine_mode tmode = (uses_spr) ? insn_op->mode : E_SImode;
	    if (!target
		|| GET_MODE (target) != tmode
		|| (uses_spr && !(*insn_op->predicate) (target, tmode)))
	      target = gen_reg_rtx (tmode);
	    if (uses_spr)
	      op[nopnds++] = target;
	  }

	FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
	  {
	    if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
	      return const0_rtx;

	    insn_op = &insn_data[icode].operand[nopnds];

	    op[nopnds] = expand_normal (arg);

	    if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
	      {
		if (!strcmp (insn_op->constraint, "n"))
		  {
		    int arg_num = (nonvoid) ? nopnds : nopnds + 1;
		    if (!CONST_INT_P (op[nopnds]))
		      error ("argument %d must be an unsigned literal", arg_num);
		    else
		      error ("argument %d is an unsigned literal that is "
			     "out of range", arg_num);
		    return const0_rtx;
		  }
		op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
	      }

	    nopnds++;
	  }

	/* Handle the builtins for extended mnemonics.  These accept
	   no arguments, but map to builtins that take arguments.  */
	switch (fcode)
	  {
	  case HTM_BUILTIN_TENDALL:  /* Alias for: tend. 1  */
	  case HTM_BUILTIN_TRESUME:  /* Alias for: tsr. 1  */
	    op[nopnds++] = GEN_INT (1);
	    if (flag_checking)
	      attr |= RS6000_BTC_UNARY;
	    break;
	  case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0  */
	    op[nopnds++] = GEN_INT (0);
	    if (flag_checking)
	      attr |= RS6000_BTC_UNARY;
	    break;
	  default:
	    break;
	  }

	/* If this builtin accesses SPRs, then pass in the appropriate
	   SPR number and SPR regno as the last two operands.  */
	if (uses_spr)
	  {
	    machine_mode mode = (TARGET_POWERPC64) ? DImode : SImode;
	    op[nopnds++] = gen_rtx_CONST_INT (mode, htm_spr_num (fcode));
	    op[nopnds++] = gen_rtx_REG (mode, htm_spr_regno (fcode));
	  }
	/* If this builtin accesses a CR, then pass in a scratch
	   CR as the last operand.  */
	else if (attr & RS6000_BTC_CR)
	  {
	    cr = gen_reg_rtx (CCmode);
	    op[nopnds++] = cr;
	  }

	if (flag_checking)
	  {
	    int expected_nopnds = 0;
	    if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
	      expected_nopnds = 1;
	    else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
	      expected_nopnds = 2;
	    else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
	      expected_nopnds = 3;
	    if (!(attr & RS6000_BTC_VOID))
	      expected_nopnds += 1;
	    if (uses_spr)
	      expected_nopnds += 2;

	    gcc_assert (nopnds == expected_nopnds
			&& nopnds <= MAX_HTM_OPERANDS);
	  }

	switch (nopnds)
	  {
	  case 1:
	    pat = GEN_FCN (icode) (op[0]);
	    break;
	  case 2:
	    pat = GEN_FCN (icode) (op[0], op[1]);
	    break;
	  case 3:
	    pat = GEN_FCN (icode) (op[0], op[1], op[2]);
	    break;
	  case 4:
	    pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
	    break;
	  default:
	    gcc_unreachable ();
	  }
	if (!pat)
	  return NULL_RTX;
	emit_insn (pat);

	if (attr & RS6000_BTC_CR)
	  {
	    if (fcode == HTM_BUILTIN_TBEGIN)
	      {
		/* Emit code to set TARGET to true or false depending on
		   whether the tbegin. instruction successfully or failed
		   to start a transaction.  We do this by placing the 1's
		   complement of CR's EQ bit into TARGET.  */
		rtx scratch = gen_reg_rtx (SImode);
		emit_insn (gen_rtx_SET (scratch,
					gen_rtx_EQ (SImode, cr,
						    const0_rtx)));
		emit_insn (gen_rtx_SET (target,
					gen_rtx_XOR (SImode, scratch,
						     GEN_INT (1))));
	      }
	    else
	      {
		/* Emit code to copy the 4-bit condition register field
		   CR into the least significant end of register TARGET.  */
		rtx scratch1 = gen_reg_rtx (SImode);
		rtx scratch2 = gen_reg_rtx (SImode);
		rtx subreg = simplify_gen_subreg (CCmode, scratch1, SImode, 0);
		emit_insn (gen_movcc (subreg, cr));
		emit_insn (gen_lshrsi3 (scratch2, scratch1, GEN_INT (28)));
		emit_insn (gen_andsi3 (target, scratch2, GEN_INT (0xf)));
	      }
	  }

	if (nonvoid)
	  return target;
	return const0_rtx;
      }

  *expandedp = false;
  return NULL_RTX;
}
/* Expand the CPU builtin in FCODE and store the result in TARGET.  */

static rtx
cpu_expand_builtin (enum rs6000_builtins fcode, tree exp ATTRIBUTE_UNUSED,
		    rtx target)
{
  /* __builtin_cpu_init () is a nop, so expand to nothing.  */
  if (fcode == RS6000_BUILTIN_CPU_INIT)
    return const0_rtx;

  if (target == 0 || GET_MODE (target) != SImode)
    target = gen_reg_rtx (SImode);

#ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
  tree arg = TREE_OPERAND (CALL_EXPR_ARG (exp, 0), 0);
  /* Target clones creates an ARRAY_REF instead of STRING_CST, convert it back
     to a STRING_CST.  */
  if (TREE_CODE (arg) == ARRAY_REF
      && TREE_CODE (TREE_OPERAND (arg, 0)) == STRING_CST
      && TREE_CODE (TREE_OPERAND (arg, 1)) == INTEGER_CST
      && compare_tree_int (TREE_OPERAND (arg, 1), 0) == 0)
    arg = TREE_OPERAND (arg, 0);

  if (TREE_CODE (arg) != STRING_CST)
    {
      error ("builtin %qs only accepts a string argument",
	     rs6000_builtin_info[(size_t) fcode].name);
      return const0_rtx;
    }

  if (fcode == RS6000_BUILTIN_CPU_IS)
    {
      const char *cpu = TREE_STRING_POINTER (arg);
      rtx cpuid = NULL_RTX;
      for (size_t i = 0; i < ARRAY_SIZE (cpu_is_info); i++)
	if (strcmp (cpu, cpu_is_info[i].cpu) == 0)
	  {
	    /* The CPUID value in the TCB is offset by _DL_FIRST_PLATFORM.  */
	    cpuid = GEN_INT (cpu_is_info[i].cpuid + _DL_FIRST_PLATFORM);
	    break;
	  }
      if (cpuid == NULL_RTX)
	{
	  /* Invalid CPU argument.  */
	  error ("cpu %qs is an invalid argument to builtin %qs",
		 cpu, rs6000_builtin_info[(size_t) fcode].name);
	  return const0_rtx;
	}

      rtx platform = gen_reg_rtx (SImode);
      rtx tcbmem = gen_const_mem (SImode,
				  gen_rtx_PLUS (Pmode,
						gen_rtx_REG (Pmode, TLS_REGNUM),
						GEN_INT (TCB_PLATFORM_OFFSET)));
      emit_move_insn (platform, tcbmem);
      emit_insn (gen_eqsi3 (target, platform, cpuid));
    }
  else if (fcode == RS6000_BUILTIN_CPU_SUPPORTS)
    {
      const char *hwcap = TREE_STRING_POINTER (arg);
      rtx mask = NULL_RTX;
      int hwcap_offset;
      for (size_t i = 0; i < ARRAY_SIZE (cpu_supports_info); i++)
	if (strcmp (hwcap, cpu_supports_info[i].hwcap) == 0)
	  {
	    mask = GEN_INT (cpu_supports_info[i].mask);
	    hwcap_offset = TCB_HWCAP_OFFSET (cpu_supports_info[i].id);
	    break;
	  }
      if (mask == NULL_RTX)
	{
	  /* Invalid HWCAP argument.  */
	  error ("%s %qs is an invalid argument to builtin %qs",
		 "hwcap", hwcap, rs6000_builtin_info[(size_t) fcode].name);
	  return const0_rtx;
	}

      rtx tcb_hwcap = gen_reg_rtx (SImode);
      rtx tcbmem = gen_const_mem (SImode,
				  gen_rtx_PLUS (Pmode,
						gen_rtx_REG (Pmode, TLS_REGNUM),
						GEN_INT (hwcap_offset)));
      emit_move_insn (tcb_hwcap, tcbmem);
      rtx scratch1 = gen_reg_rtx (SImode);
      emit_insn (gen_rtx_SET (scratch1, gen_rtx_AND (SImode, tcb_hwcap, mask)));
      rtx scratch2 = gen_reg_rtx (SImode);
      emit_insn (gen_eqsi3 (scratch2, scratch1, const0_rtx));
      emit_insn (gen_rtx_SET (target, gen_rtx_XOR (SImode, scratch2, const1_rtx)));
    }
  else
    gcc_unreachable ();

  /* Record that we have expanded a CPU builtin, so that we can later
     emit a reference to the special symbol exported by LIBC to ensure we
     do not link against an old LIBC that doesn't support this feature.  */
  cpu_builtin_p = true;

#else
  warning (0, "builtin %qs needs GLIBC (2.23 and newer) that exports hardware "
	   "capability bits", rs6000_builtin_info[(size_t) fcode].name);

  /* For old LIBCs, always return FALSE.  */
  emit_move_insn (target, GEN_INT (0));
#endif /* TARGET_LIBC_PROVIDES_HWCAP_IN_TCB */

  return target;
}
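/* Illustrative example (not from this file): with glibc 2.23 or newer
   exporting the hwcap words in the TCB, user code can test the runtime
   CPU without any function call:

     __builtin_cpu_init ();                   a nop on powerpc*-linux
     if (__builtin_cpu_is ("power9"))
       ...
     if (__builtin_cpu_supports ("vsx"))
       ...

   Each test compiles to the TCB load plus compare emitted above.  */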
static rtx
rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  machine_mode tmode = insn_data[icode].operand[0].mode;
  machine_mode mode0 = insn_data[icode].operand[1].mode;
  machine_mode mode1 = insn_data[icode].operand[2].mode;
  machine_mode mode2 = insn_data[icode].operand[3].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  /* Check and prepare argument depending on the instruction code.

     Note that a switch statement instead of the sequence of tests
     would be incorrect as many of the CODE_FOR values could be
     CODE_FOR_nothing and that would yield multiple alternatives
     with identical values.  We'd never reach here at runtime in
     this case.  */
  if (icode == CODE_FOR_altivec_vsldoi_v4sf
      || icode == CODE_FOR_altivec_vsldoi_v2df
      || icode == CODE_FOR_altivec_vsldoi_v4si
      || icode == CODE_FOR_altivec_vsldoi_v8hi
      || icode == CODE_FOR_altivec_vsldoi_v16qi)
    {
      /* Only allow 4-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0xf)
	{
	  error ("argument 3 must be a 4-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_vsx_xxpermdi_v2df
	   || icode == CODE_FOR_vsx_xxpermdi_v2di
	   || icode == CODE_FOR_vsx_xxpermdi_v2df_be
	   || icode == CODE_FOR_vsx_xxpermdi_v2di_be
	   || icode == CODE_FOR_vsx_xxpermdi_v1ti
	   || icode == CODE_FOR_vsx_xxpermdi_v4sf
	   || icode == CODE_FOR_vsx_xxpermdi_v4si
	   || icode == CODE_FOR_vsx_xxpermdi_v8hi
	   || icode == CODE_FOR_vsx_xxpermdi_v16qi
	   || icode == CODE_FOR_vsx_xxsldwi_v16qi
	   || icode == CODE_FOR_vsx_xxsldwi_v8hi
	   || icode == CODE_FOR_vsx_xxsldwi_v4si
	   || icode == CODE_FOR_vsx_xxsldwi_v4sf
	   || icode == CODE_FOR_vsx_xxsldwi_v2di
	   || icode == CODE_FOR_vsx_xxsldwi_v2df)
    {
      /* Only allow 2-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x3)
	{
	  error ("argument 3 must be a 2-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_vsx_set_v2df
	   || icode == CODE_FOR_vsx_set_v2di
	   || icode == CODE_FOR_bcdadd
	   || icode == CODE_FOR_bcdadd_lt
	   || icode == CODE_FOR_bcdadd_eq
	   || icode == CODE_FOR_bcdadd_gt
	   || icode == CODE_FOR_bcdsub
	   || icode == CODE_FOR_bcdsub_lt
	   || icode == CODE_FOR_bcdsub_eq
	   || icode == CODE_FOR_bcdsub_gt)
    {
      /* Only allow 1-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x1)
	{
	  error ("argument 3 must be a 1-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_dfp_ddedpd_dd
	   || icode == CODE_FOR_dfp_ddedpd_td)
    {
      /* Only allow 2-bit unsigned literals where the value is 0 or 2.  */
      STRIP_NOPS (arg0);
      if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg0) & ~0x3)
	{
	  error ("argument 1 must be 0 or 2");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_dfp_denbcd_dd
	   || icode == CODE_FOR_dfp_denbcd_td)
    {
      /* Only allow 1-bit unsigned literals.  */
      STRIP_NOPS (arg0);
      if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg0) & ~0x1)
	{
	  error ("argument 1 must be a 1-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_dfp_dscli_dd
	   || icode == CODE_FOR_dfp_dscli_td
	   || icode == CODE_FOR_dfp_dscri_dd
	   || icode == CODE_FOR_dfp_dscri_td)
    {
      /* Only allow 6-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~0x3f)
	{
	  error ("argument 2 must be a 6-bit unsigned literal");
	  return CONST0_RTX (tmode);
	}
    }
  else if (icode == CODE_FOR_crypto_vshasigmaw
	   || icode == CODE_FOR_crypto_vshasigmad)
    {
      /* Check whether the 2nd and 3rd arguments are integer constants and in
	 range and prepare arguments.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (wi::to_wide (arg1), 2))
	{
	  error ("argument 2 must be 0 or 1");
	  return CONST0_RTX (tmode);
	}

      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || wi::geu_p (wi::to_wide (arg2), 16))
	{
	  error ("argument 3 must be in the range 0..15");
	  return CONST0_RTX (tmode);
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);
  if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
    op2 = copy_to_mode_reg (mode2, op2);

  pat = GEN_FCN (icode) (target, op0, op1, op2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
/* Expand the dst builtins.  */
static rtx
altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
			    bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1, arg2;
  machine_mode mode0, mode1;
  rtx pat, op0, op1, op2;
  const struct builtin_description *d;
  size_t i;

  *expandedp = false;

  /* Handle DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    if (d->code == fcode)
      {
	arg0 = CALL_EXPR_ARG (exp, 0);
	arg1 = CALL_EXPR_ARG (exp, 1);
	arg2 = CALL_EXPR_ARG (exp, 2);
	op0 = expand_normal (arg0);
	op1 = expand_normal (arg1);
	op2 = expand_normal (arg2);
	mode0 = insn_data[d->icode].operand[0].mode;
	mode1 = insn_data[d->icode].operand[1].mode;

	/* Invalid arguments, bail out before generating bad rtl.  */
	if (arg0 == error_mark_node
	    || arg1 == error_mark_node
	    || arg2 == error_mark_node)
	  return const0_rtx;

	*expandedp = true;
	STRIP_NOPS (arg2);
	if (TREE_CODE (arg2) != INTEGER_CST
	    || TREE_INT_CST_LOW (arg2) & ~0x3)
	  {
	    error ("argument to %qs must be a 2-bit unsigned literal", d->name);
	    return const0_rtx;
	  }

	if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
	  op0 = copy_to_mode_reg (Pmode, op0);
	if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
	  op1 = copy_to_mode_reg (mode1, op1);

	pat = GEN_FCN (d->icode) (op0, op1, op2);
	if (! pat)
	  return 0;
	emit_insn (pat);

	return NULL_RTX;
      }

  return NULL_RTX;
}
/* Expand vec_init builtin.  */
static rtx
altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
{
  machine_mode tmode = TYPE_MODE (type);
  machine_mode inner_mode = GET_MODE_INNER (tmode);
  int i, n_elt = GET_MODE_NUNITS (tmode);

  gcc_assert (VECTOR_MODE_P (tmode));
  gcc_assert (n_elt == call_expr_nargs (exp));

  if (!target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  /* If we have a vector comprised of a single element, such as V1TImode, do
     the initialization directly.  */
  if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
    {
      rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
      emit_move_insn (target, gen_lowpart (tmode, x));
    }
  else
    {
      rtvec v = rtvec_alloc (n_elt);

      for (i = 0; i < n_elt; ++i)
	{
	  rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
	  RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
	}

      rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
    }

  return target;
}
/* Return the integer constant in ARG.  Constrain it to be in the range
   of the subparts of VEC_TYPE; issue an error if not.  */

static int
get_element_number (tree vec_type, tree arg)
{
  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

  if (!tree_fits_uhwi_p (arg)
      || (elt = tree_to_uhwi (arg), elt > max))
    {
      error ("selector must be an integer constant in the range 0..%wi", max);
      return 0;
    }

  return elt;
}
/* Expand vec_set builtin.  */
static rtx
altivec_expand_vec_set_builtin (tree exp)
{
  machine_mode tmode, mode1;
  tree arg0, arg1, arg2;
  int elt;
  rtx op0, op1;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  arg2 = CALL_EXPR_ARG (exp, 2);

  tmode = TYPE_MODE (TREE_TYPE (arg0));
  mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  gcc_assert (VECTOR_MODE_P (tmode));

  op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
  op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
  elt = get_element_number (TREE_TYPE (arg0), arg2);

  if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
    op1 = convert_modes (mode1, GET_MODE (op1), op1, true);

  op0 = force_reg (tmode, op0);
  op1 = force_reg (mode1, op1);

  rs6000_expand_vector_set (op0, op1, elt);

  return op0;
}
/* Expand vec_ext builtin.  */
static rtx
altivec_expand_vec_ext_builtin (tree exp, rtx target)
{
  machine_mode tmode, mode0;
  tree arg0, arg1;
  rtx op0;
  rtx op1;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);

  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);

  /* Call get_element_number to validate arg1 if it is a constant.  */
  if (TREE_CODE (arg1) == INTEGER_CST)
    (void) get_element_number (TREE_TYPE (arg0), arg1);

  tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  mode0 = TYPE_MODE (TREE_TYPE (arg0));
  gcc_assert (VECTOR_MODE_P (mode0));

  op0 = force_reg (mode0, op0);

  if (optimize || !target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_extract (target, op0, op1);

  return target;
}
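/* Illustrative example (not from this file; assumes -maltivec): these
   expanders back vec_insert and vec_extract from <altivec.h>:

     vector int v = ...;
     int x = vec_extract (v, 3);      constant selector, validated by
                                      get_element_number against 0..3
     v     = vec_insert (42, v, 0);
     int y = vec_extract (v, i);      a non-constant selector skips the
                                      check and is handled by
                                      rs6000_expand_vector_extract

   An out-of-range constant selector is diagnosed at compile time.  */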
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  const struct builtin_description *d;
  size_t i;
  enum insn_code icode;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg0, arg1, arg2;
  rtx op0, pat;
  machine_mode tmode, mode0;
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);

  if (rs6000_overloaded_builtin_p (fcode))
    {
      *expandedp = true;
      error ("unresolved overload for Altivec builtin %qF", fndecl);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, false);
    }

  target = altivec_expand_dst_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  *expandedp = true;

  switch (fcode)
    {
:
14913 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df
, exp
);
14914 case ALTIVEC_BUILTIN_STVX_V2DI
:
14915 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di
, exp
);
14916 case ALTIVEC_BUILTIN_STVX_V4SF
:
14917 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf
, exp
);
14918 case ALTIVEC_BUILTIN_STVX
:
14919 case ALTIVEC_BUILTIN_STVX_V4SI
:
14920 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si
, exp
);
14921 case ALTIVEC_BUILTIN_STVX_V8HI
:
14922 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi
, exp
);
14923 case ALTIVEC_BUILTIN_STVX_V16QI
:
14924 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi
, exp
);
14925 case ALTIVEC_BUILTIN_STVEBX
:
14926 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx
, exp
);
14927 case ALTIVEC_BUILTIN_STVEHX
:
14928 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx
, exp
);
14929 case ALTIVEC_BUILTIN_STVEWX
:
14930 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx
, exp
);
14931 case ALTIVEC_BUILTIN_STVXL_V2DF
:
14932 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df
, exp
);
14933 case ALTIVEC_BUILTIN_STVXL_V2DI
:
14934 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di
, exp
);
14935 case ALTIVEC_BUILTIN_STVXL_V4SF
:
14936 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf
, exp
);
14937 case ALTIVEC_BUILTIN_STVXL
:
14938 case ALTIVEC_BUILTIN_STVXL_V4SI
:
14939 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si
, exp
);
14940 case ALTIVEC_BUILTIN_STVXL_V8HI
:
14941 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi
, exp
);
14942 case ALTIVEC_BUILTIN_STVXL_V16QI
:
14943 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi
, exp
);
14945 case ALTIVEC_BUILTIN_STVLX
:
14946 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx
, exp
);
14947 case ALTIVEC_BUILTIN_STVLXL
:
14948 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl
, exp
);
14949 case ALTIVEC_BUILTIN_STVRX
:
14950 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx
, exp
);
14951 case ALTIVEC_BUILTIN_STVRXL
:
14952 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl
, exp
);
14954 case P9V_BUILTIN_STXVL
:
14955 return altivec_expand_stxvl_builtin (CODE_FOR_stxvl
, exp
);
14957 case P9V_BUILTIN_XST_LEN_R
:
14958 return altivec_expand_stxvl_builtin (CODE_FOR_xst_len_r
, exp
);
14960 case VSX_BUILTIN_STXVD2X_V1TI
:
14961 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti
, exp
);
14962 case VSX_BUILTIN_STXVD2X_V2DF
:
14963 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df
, exp
);
14964 case VSX_BUILTIN_STXVD2X_V2DI
:
14965 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di
, exp
);
14966 case VSX_BUILTIN_STXVW4X_V4SF
:
14967 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf
, exp
);
14968 case VSX_BUILTIN_STXVW4X_V4SI
:
14969 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si
, exp
);
14970 case VSX_BUILTIN_STXVW4X_V8HI
:
14971 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi
, exp
);
14972 case VSX_BUILTIN_STXVW4X_V16QI
:
14973 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi
, exp
);
14975 /* For the following on big endian, it's ok to use any appropriate
14976 unaligned-supporting store, so use a generic expander. For
14977 little-endian, the exact element-reversing instruction must
14979 case VSX_BUILTIN_ST_ELEMREV_V1TI
:
14981 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v1ti
14982 : CODE_FOR_vsx_st_elemrev_v1ti
);
14983 return altivec_expand_stv_builtin (code
, exp
);
14985 case VSX_BUILTIN_ST_ELEMREV_V2DF
:
14987 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v2df
14988 : CODE_FOR_vsx_st_elemrev_v2df
);
14989 return altivec_expand_stv_builtin (code
, exp
);
14991 case VSX_BUILTIN_ST_ELEMREV_V2DI
:
14993 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v2di
14994 : CODE_FOR_vsx_st_elemrev_v2di
);
14995 return altivec_expand_stv_builtin (code
, exp
);
14997 case VSX_BUILTIN_ST_ELEMREV_V4SF
:
14999 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v4sf
15000 : CODE_FOR_vsx_st_elemrev_v4sf
);
15001 return altivec_expand_stv_builtin (code
, exp
);
15003 case VSX_BUILTIN_ST_ELEMREV_V4SI
:
15005 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v4si
15006 : CODE_FOR_vsx_st_elemrev_v4si
);
15007 return altivec_expand_stv_builtin (code
, exp
);
15009 case VSX_BUILTIN_ST_ELEMREV_V8HI
:
15011 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v8hi
15012 : CODE_FOR_vsx_st_elemrev_v8hi
);
15013 return altivec_expand_stv_builtin (code
, exp
);
15015 case VSX_BUILTIN_ST_ELEMREV_V16QI
:
15017 enum insn_code code
= (BYTES_BIG_ENDIAN
? CODE_FOR_vsx_store_v16qi
15018 : CODE_FOR_vsx_st_elemrev_v16qi
);
15019 return altivec_expand_stv_builtin (code
, exp
);
15022 case ALTIVEC_BUILTIN_MFVSCR
:
15023 icode
= CODE_FOR_altivec_mfvscr
;
15024 tmode
= insn_data
[icode
].operand
[0].mode
;
15027 || GET_MODE (target
) != tmode
15028 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
15029 target
= gen_reg_rtx (tmode
);
15031 pat
= GEN_FCN (icode
) (target
);
15037 case ALTIVEC_BUILTIN_MTVSCR
:
15038 icode
= CODE_FOR_altivec_mtvscr
;
15039 arg0
= CALL_EXPR_ARG (exp
, 0);
15040 op0
= expand_normal (arg0
);
15041 mode0
= insn_data
[icode
].operand
[0].mode
;
15043 /* If we got invalid arguments bail out before generating bad rtl. */
15044 if (arg0
== error_mark_node
)
15047 if (! (*insn_data
[icode
].operand
[0].predicate
) (op0
, mode0
))
15048 op0
= copy_to_mode_reg (mode0
, op0
);
15050 pat
= GEN_FCN (icode
) (op0
);
15055 case ALTIVEC_BUILTIN_DSSALL
:
15056 emit_insn (gen_altivec_dssall ());
15059 case ALTIVEC_BUILTIN_DSS
:
15060 icode
= CODE_FOR_altivec_dss
;
15061 arg0
= CALL_EXPR_ARG (exp
, 0);
15063 op0
= expand_normal (arg0
);
15064 mode0
= insn_data
[icode
].operand
[0].mode
;
15066 /* If we got invalid arguments bail out before generating bad rtl. */
15067 if (arg0
== error_mark_node
)
15070 if (TREE_CODE (arg0
) != INTEGER_CST
15071 || TREE_INT_CST_LOW (arg0
) & ~0x3)
15073 error ("argument to %qs must be a 2-bit unsigned literal", "dss");
15077 if (! (*insn_data
[icode
].operand
[0].predicate
) (op0
, mode0
))
15078 op0
= copy_to_mode_reg (mode0
, op0
);
15080 emit_insn (gen_altivec_dss (op0
));
15083 case ALTIVEC_BUILTIN_VEC_INIT_V4SI
:
15084 case ALTIVEC_BUILTIN_VEC_INIT_V8HI
:
15085 case ALTIVEC_BUILTIN_VEC_INIT_V16QI
:
15086 case ALTIVEC_BUILTIN_VEC_INIT_V4SF
:
15087 case VSX_BUILTIN_VEC_INIT_V2DF
:
15088 case VSX_BUILTIN_VEC_INIT_V2DI
:
15089 case VSX_BUILTIN_VEC_INIT_V1TI
:
15090 return altivec_expand_vec_init_builtin (TREE_TYPE (exp
), exp
, target
);
15092 case ALTIVEC_BUILTIN_VEC_SET_V4SI
:
15093 case ALTIVEC_BUILTIN_VEC_SET_V8HI
:
15094 case ALTIVEC_BUILTIN_VEC_SET_V16QI
:
15095 case ALTIVEC_BUILTIN_VEC_SET_V4SF
:
15096 case VSX_BUILTIN_VEC_SET_V2DF
:
15097 case VSX_BUILTIN_VEC_SET_V2DI
:
15098 case VSX_BUILTIN_VEC_SET_V1TI
:
15099 return altivec_expand_vec_set_builtin (exp
);
15101 case ALTIVEC_BUILTIN_VEC_EXT_V4SI
:
15102 case ALTIVEC_BUILTIN_VEC_EXT_V8HI
:
15103 case ALTIVEC_BUILTIN_VEC_EXT_V16QI
:
15104 case ALTIVEC_BUILTIN_VEC_EXT_V4SF
:
15105 case VSX_BUILTIN_VEC_EXT_V2DF
:
15106 case VSX_BUILTIN_VEC_EXT_V2DI
:
15107 case VSX_BUILTIN_VEC_EXT_V1TI
:
15108 return altivec_expand_vec_ext_builtin (exp
, target
);
15110 case P9V_BUILTIN_VEC_EXTRACT4B
:
15111 arg1
= CALL_EXPR_ARG (exp
, 1);
15114 /* Generate a normal call if it is invalid. */
15115 if (arg1
== error_mark_node
)
15116 return expand_call (exp
, target
, false);
15118 if (TREE_CODE (arg1
) != INTEGER_CST
|| TREE_INT_CST_LOW (arg1
) > 12)
15120 error ("second argument to %qs must be 0..12", "vec_vextract4b");
15121 return expand_call (exp
, target
, false);
15125 case P9V_BUILTIN_VEC_INSERT4B
:
15126 arg2
= CALL_EXPR_ARG (exp
, 2);
15129 /* Generate a normal call if it is invalid. */
15130 if (arg2
== error_mark_node
)
15131 return expand_call (exp
, target
, false);
15133 if (TREE_CODE (arg2
) != INTEGER_CST
|| TREE_INT_CST_LOW (arg2
) > 12)
15135 error ("third argument to %qs must be 0..12", "vec_vinsert4b");
15136 return expand_call (exp
, target
, false);
15142 /* Fall through. */
  /* Expand abs* operations.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    if (d->code == fcode)
      return altivec_expand_abs_builtin (d->icode, exp, target);

  /* Expand the AltiVec predicates.  */
  d = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
    if (d->code == fcode)
      return altivec_expand_predicate_builtin (d->icode, exp, target);
  /* LV* are funky.  We initialized them differently.  */
  switch (fcode)
    {
    case ALTIVEC_BUILTIN_LVSL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVSR:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEBX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEHX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEWX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V2DF:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V2DI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V4SF:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVXL:
    case ALTIVEC_BUILTIN_LVXL_V4SI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V8HI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V16QI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V1TI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v1ti,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V2DF:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V2DI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V4SF:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVX:
    case ALTIVEC_BUILTIN_LVX_V4SI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V8HI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V16QI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVLX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVLXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVRX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVRXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
					exp, target, true);
    case VSX_BUILTIN_LXVD2X_V1TI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
					exp, target, false);
    case VSX_BUILTIN_LXVD2X_V2DF:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
					exp, target, false);
    case VSX_BUILTIN_LXVD2X_V2DI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V4SF:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V4SI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V8HI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
					exp, target, false);
    case VSX_BUILTIN_LXVW4X_V16QI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
					exp, target, false);
    /* For the following on big endian, it's ok to use any appropriate
       unaligned-supporting load, so use a generic expander.  For
       little-endian, the exact element-reversing instruction must
       be used.  */
    case VSX_BUILTIN_LD_ELEMREV_V2DF:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2df
			       : CODE_FOR_vsx_ld_elemrev_v2df);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V1TI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v1ti
			       : CODE_FOR_vsx_ld_elemrev_v1ti);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V2DI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v2di
			       : CODE_FOR_vsx_ld_elemrev_v2di);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V4SF:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4sf
			       : CODE_FOR_vsx_ld_elemrev_v4sf);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V4SI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v4si
			       : CODE_FOR_vsx_ld_elemrev_v4si);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V8HI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v8hi
			       : CODE_FOR_vsx_ld_elemrev_v8hi);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    case VSX_BUILTIN_LD_ELEMREV_V16QI:
      {
	enum insn_code code = (BYTES_BIG_ENDIAN ? CODE_FOR_vsx_load_v16qi
			       : CODE_FOR_vsx_ld_elemrev_v16qi);
	return altivec_expand_lv_builtin (code, exp, target, false);
      }
    default:
      break;
      /* Fall through.  */
    }

  *expandedp = false;
  return NULL_RTX;
}
/* Check whether a builtin function is supported in this target
   configuration.  */
bool
rs6000_builtin_is_supported_p (enum rs6000_builtins fncode)
{
  HOST_WIDE_INT fnmask = rs6000_builtin_info[fncode].mask;
  if ((fnmask & rs6000_builtin_mask) != fnmask)
    return false;
  else
    return true;
}
/* Raise an error message for a builtin function that is called without the
   appropriate target options being set.  */

static void
rs6000_invalid_builtin (enum rs6000_builtins fncode)
{
  size_t uns_fncode = (size_t) fncode;
  const char *name = rs6000_builtin_info[uns_fncode].name;
  HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;

  gcc_assert (name != NULL);
  if ((fnmask & RS6000_BTM_CELL) != 0)
    error ("builtin function %qs is only valid for the cell processor", name);
  else if ((fnmask & RS6000_BTM_VSX) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mvsx");
  else if ((fnmask & RS6000_BTM_HTM) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mhtm");
  else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
    error ("builtin function %qs requires the %qs option", name, "-maltivec");
  else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
	   == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
    error ("builtin function %qs requires the %qs and %qs options",
	   name, "-mhard-dfp", "-mpower8-vector");
  else if ((fnmask & RS6000_BTM_DFP) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mhard-dfp");
  else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
    error ("builtin function %qs requires the %qs option", name,
	   "-mpower8-vector");
  else if ((fnmask & (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
	   == (RS6000_BTM_P9_VECTOR | RS6000_BTM_64BIT))
    error ("builtin function %qs requires the %qs and %qs options",
	   name, "-mcpu=power9", "-m64");
  else if ((fnmask & RS6000_BTM_P9_VECTOR) != 0)
    error ("builtin function %qs requires the %qs option", name,
	   "-mcpu=power9");
  else if ((fnmask & (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
	   == (RS6000_BTM_P9_MISC | RS6000_BTM_64BIT))
    error ("builtin function %qs requires the %qs and %qs options",
	   name, "-mcpu=power9", "-m64");
  else if ((fnmask & RS6000_BTM_P9_MISC) == RS6000_BTM_P9_MISC)
    error ("builtin function %qs requires the %qs option", name,
	   "-mcpu=power9");
  else if ((fnmask & RS6000_BTM_LDBL128) == RS6000_BTM_LDBL128)
    {
      if (!TARGET_HARD_FLOAT)
	error ("builtin function %qs requires the %qs option", name,
	       "-mhard-float");
      else
	error ("builtin function %qs requires the %qs option", name,
	       TARGET_IEEEQUAD ? "-mabi=ibmlongdouble" : "-mlong-double-128");
    }
  else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
    error ("builtin function %qs requires the %qs option", name,
	   "-mhard-float");
  else if ((fnmask & RS6000_BTM_FLOAT128_HW) != 0)
    error ("builtin function %qs requires ISA 3.0 IEEE 128-bit floating point",
	   name);
  else if ((fnmask & RS6000_BTM_FLOAT128) != 0)
    error ("builtin function %qs requires the %qs option", name, "-mfloat128");
  else if ((fnmask & (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
	   == (RS6000_BTM_POPCNTD | RS6000_BTM_POWERPC64))
    error ("builtin function %qs requires the %qs (or newer), and "
	   "%qs or %qs options",
	   name, "-mcpu=power7", "-m64", "-mpowerpc64");
  else
    error ("builtin function %qs is not supported with the current options",
	   name);
}
/* Target hook for early folding of built-ins, shamelessly stolen
   from ia64.c.  */

static tree
rs6000_fold_builtin (tree fndecl ATTRIBUTE_UNUSED,
		     int n_args ATTRIBUTE_UNUSED,
		     tree *args ATTRIBUTE_UNUSED,
		     bool ignore ATTRIBUTE_UNUSED)
{
#ifdef SUBTARGET_FOLD_BUILTIN
  return SUBTARGET_FOLD_BUILTIN (fndecl, n_args, args, ignore);
#else
  return NULL_TREE;
#endif
}
/* Helper function to sort out which built-ins may be valid without having
   a LHS.  */
static bool
rs6000_builtin_valid_without_lhs (enum rs6000_builtins fn_code)
{
  switch (fn_code)
    {
    case ALTIVEC_BUILTIN_STVX_V16QI:
    case ALTIVEC_BUILTIN_STVX_V8HI:
    case ALTIVEC_BUILTIN_STVX_V4SI:
    case ALTIVEC_BUILTIN_STVX_V4SF:
    case ALTIVEC_BUILTIN_STVX_V2DI:
    case ALTIVEC_BUILTIN_STVX_V2DF:
      return true;
    default:
      return false;
    }
}
/* Helper function to handle the gimple folding of a vector compare
   operation.  This sets up true/false vectors, and uses the
   VEC_COND_EXPR operation.
   CODE indicates which comparison is to be made. (EQ, GT, ...).
   TYPE indicates the type of the result.  */
static tree
fold_build_vec_cmp (tree_code code, tree type,
		    tree arg0, tree arg1)
{
  tree cmp_type = build_same_sized_truth_vector_type (type);
  tree zero_vec = build_zero_cst (type);
  tree minus_one_vec = build_minus_one_cst (type);
  tree cmp = fold_build2 (code, cmp_type, arg0, arg1);
  return fold_build3 (VEC_COND_EXPR, type, cmp, minus_one_vec, zero_vec);
}
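/* For instance, folding vec_cmpeq (a, b) on vector int through this
   helper produces the equivalent of

     VEC_COND_EXPR <a == b, {-1,-1,-1,-1}, {0,0,0,0}>

   so lanes that compare true become all-ones and the rest all-zeros,
   matching the AltiVec compare semantics.  */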
/* Helper function to handle the in-between steps for the
   vector compare built-ins.  */
static void
fold_compare_helper (gimple_stmt_iterator *gsi, tree_code code, gimple *stmt)
{
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree lhs = gimple_call_lhs (stmt);
  tree cmp = fold_build_vec_cmp (code, TREE_TYPE (lhs), arg0, arg1);
  gimple *g = gimple_build_assign (lhs, cmp);
  gimple_set_location (g, gimple_location (stmt));
  gsi_replace (gsi, g, true);
}
/* Helper function to handle the vector merge[hl] built-ins.  The
   implementation difference between h and l versions for this code are in
   the values used when building of the permute vector for high word versus
   low word merge.  The variance is keyed off the use_high parameter.  */
static void
fold_mergehl_helper (gimple_stmt_iterator *gsi, gimple *stmt, int use_high)
{
  tree arg0 = gimple_call_arg (stmt, 0);
  tree arg1 = gimple_call_arg (stmt, 1);
  tree lhs = gimple_call_lhs (stmt);
  tree lhs_type = TREE_TYPE (lhs);
  tree lhs_type_type = TREE_TYPE (lhs_type);
  int n_elts = TYPE_VECTOR_SUBPARTS (lhs_type);
  int midpoint = n_elts / 2;
  int offset = 0;

  if (use_high == 1)
    offset = midpoint;

  tree_vector_builder elts (lhs_type, VECTOR_CST_NELTS (arg0), 1);

  for (int i = 0; i < midpoint; i++)
    {
      elts.safe_push (build_int_cst (lhs_type_type, offset + i));
      elts.safe_push (build_int_cst (lhs_type_type, offset + n_elts + i));
    }

  tree permute = elts.build ();

  gimple *g = gimple_build_assign (lhs, VEC_PERM_EXPR, arg0, arg1, permute);
  gimple_set_location (g, gimple_location (stmt));
  gsi_replace (gsi, g, true);
}
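/* Worked example: for a V4SI merge, n_elts is 4 and midpoint is 2.  With
   use_high == 0 the permute built above is {0,4,1,5}, interleaving the
   first halves of the two inputs; with use_high == 1 the offset shifts it
   to {2,6,3,7}, interleaving the second halves.  */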
15485 /* Fold a machine-dependent built-in in GIMPLE. (For folding into
15486 a constant, use rs6000_fold_builtin.) */
15489 rs6000_gimple_fold_builtin (gimple_stmt_iterator
*gsi
)
15491 gimple
*stmt
= gsi_stmt (*gsi
);
15492 tree fndecl
= gimple_call_fndecl (stmt
);
15493 gcc_checking_assert (fndecl
&& DECL_BUILT_IN_CLASS (fndecl
) == BUILT_IN_MD
);
15494 enum rs6000_builtins fn_code
15495 = (enum rs6000_builtins
) DECL_FUNCTION_CODE (fndecl
);
15496 tree arg0
, arg1
, lhs
, temp
;
15499 size_t uns_fncode
= (size_t) fn_code
;
15500 enum insn_code icode
= rs6000_builtin_info
[uns_fncode
].icode
;
15501 const char *fn_name1
= rs6000_builtin_info
[uns_fncode
].name
;
15502 const char *fn_name2
= (icode
!= CODE_FOR_nothing
)
15503 ? get_insn_name ((int) icode
)
15506 if (TARGET_DEBUG_BUILTIN
)
15507 fprintf (stderr
, "rs6000_gimple_fold_builtin %d %s %s\n",
15508 fn_code
, fn_name1
, fn_name2
);
15510 if (!rs6000_fold_gimple
)
15513 /* Prevent gimple folding for code that does not have a LHS, unless it is
15514 allowed per the rs6000_builtin_valid_without_lhs helper function. */
15515 if (!gimple_call_lhs (stmt
) && !rs6000_builtin_valid_without_lhs (fn_code
))
15518 /* Don't fold invalid builtins, let rs6000_expand_builtin diagnose it. */
15519 HOST_WIDE_INT mask
= rs6000_builtin_info
[uns_fncode
].mask
;
15520 bool func_valid_p
= (rs6000_builtin_mask
& mask
) == mask
;
15526 /* Flavors of vec_add. We deliberately don't expand
15527 P8V_BUILTIN_VADDUQM as it gets lowered from V1TImode to
15528 TImode, resulting in much poorer code generation. */
15529 case ALTIVEC_BUILTIN_VADDUBM
:
15530 case ALTIVEC_BUILTIN_VADDUHM
:
15531 case ALTIVEC_BUILTIN_VADDUWM
:
15532 case P8V_BUILTIN_VADDUDM
:
15533 case ALTIVEC_BUILTIN_VADDFP
:
15534 case VSX_BUILTIN_XVADDDP
:
15535 arg0
= gimple_call_arg (stmt
, 0);
15536 arg1
= gimple_call_arg (stmt
, 1);
15537 lhs
= gimple_call_lhs (stmt
);
15538 g
= gimple_build_assign (lhs
, PLUS_EXPR
, arg0
, arg1
);
15539 gimple_set_location (g
, gimple_location (stmt
));
15540 gsi_replace (gsi
, g
, true);
15542 /* Flavors of vec_sub. We deliberately don't expand
15543 P8V_BUILTIN_VSUBUQM. */
15544 case ALTIVEC_BUILTIN_VSUBUBM
:
15545 case ALTIVEC_BUILTIN_VSUBUHM
:
15546 case ALTIVEC_BUILTIN_VSUBUWM
:
15547 case P8V_BUILTIN_VSUBUDM
:
15548 case ALTIVEC_BUILTIN_VSUBFP
:
15549 case VSX_BUILTIN_XVSUBDP
:
15550 arg0
= gimple_call_arg (stmt
, 0);
15551 arg1
= gimple_call_arg (stmt
, 1);
15552 lhs
= gimple_call_lhs (stmt
);
15553 g
= gimple_build_assign (lhs
, MINUS_EXPR
, arg0
, arg1
);
15554 gimple_set_location (g
, gimple_location (stmt
));
15555 gsi_replace (gsi
, g
, true);
15557 case VSX_BUILTIN_XVMULSP
:
15558 case VSX_BUILTIN_XVMULDP
:
15559 arg0
= gimple_call_arg (stmt
, 0);
15560 arg1
= gimple_call_arg (stmt
, 1);
15561 lhs
= gimple_call_lhs (stmt
);
15562 g
= gimple_build_assign (lhs
, MULT_EXPR
, arg0
, arg1
);
15563 gimple_set_location (g
, gimple_location (stmt
));
15564 gsi_replace (gsi
, g
, true);
15566 /* Even element flavors of vec_mul (signed). */
15567 case ALTIVEC_BUILTIN_VMULESB
:
15568 case ALTIVEC_BUILTIN_VMULESH
:
15569 case P8V_BUILTIN_VMULESW
:
15570 /* Even element flavors of vec_mul (unsigned). */
15571 case ALTIVEC_BUILTIN_VMULEUB
:
15572 case ALTIVEC_BUILTIN_VMULEUH
:
15573 case P8V_BUILTIN_VMULEUW
:
15574 arg0
= gimple_call_arg (stmt
, 0);
15575 arg1
= gimple_call_arg (stmt
, 1);
15576 lhs
= gimple_call_lhs (stmt
);
15577 g
= gimple_build_assign (lhs
, VEC_WIDEN_MULT_EVEN_EXPR
, arg0
, arg1
);
15578 gimple_set_location (g
, gimple_location (stmt
));
15579 gsi_replace (gsi
, g
, true);
15581 /* Odd element flavors of vec_mul (signed). */
15582 case ALTIVEC_BUILTIN_VMULOSB
:
15583 case ALTIVEC_BUILTIN_VMULOSH
:
15584 case P8V_BUILTIN_VMULOSW
:
15585 /* Odd element flavors of vec_mul (unsigned). */
15586 case ALTIVEC_BUILTIN_VMULOUB
:
15587 case ALTIVEC_BUILTIN_VMULOUH
:
15588 case P8V_BUILTIN_VMULOUW
:
15589 arg0
= gimple_call_arg (stmt
, 0);
15590 arg1
= gimple_call_arg (stmt
, 1);
15591 lhs
= gimple_call_lhs (stmt
);
15592 g
= gimple_build_assign (lhs
, VEC_WIDEN_MULT_ODD_EXPR
, arg0
, arg1
);
15593 gimple_set_location (g
, gimple_location (stmt
));
15594 gsi_replace (gsi
, g
, true);
15596 /* Flavors of vec_div (Integer). */
15597 case VSX_BUILTIN_DIV_V2DI
:
15598 case VSX_BUILTIN_UDIV_V2DI
:
15599 arg0
= gimple_call_arg (stmt
, 0);
15600 arg1
= gimple_call_arg (stmt
, 1);
15601 lhs
= gimple_call_lhs (stmt
);
15602 g
= gimple_build_assign (lhs
, TRUNC_DIV_EXPR
, arg0
, arg1
);
15603 gimple_set_location (g
, gimple_location (stmt
));
15604 gsi_replace (gsi
, g
, true);
15606 /* Flavors of vec_div (Float). */
15607 case VSX_BUILTIN_XVDIVSP
:
15608 case VSX_BUILTIN_XVDIVDP
:
15609 arg0
= gimple_call_arg (stmt
, 0);
15610 arg1
= gimple_call_arg (stmt
, 1);
15611 lhs
= gimple_call_lhs (stmt
);
15612 g
= gimple_build_assign (lhs
, RDIV_EXPR
, arg0
, arg1
);
15613 gimple_set_location (g
, gimple_location (stmt
));
15614 gsi_replace (gsi
, g
, true);
    /* Flavors of vec_and.  */
    case ALTIVEC_BUILTIN_VAND:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* Flavors of vec_andc.  */
    case ALTIVEC_BUILTIN_VANDC:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
      g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      g = gimple_build_assign (lhs, BIT_AND_EXPR, arg0, temp);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
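      /* vec_andc has no single tree code, so the fold above emits two
	 statements: temp = ~arg1 is inserted before the call, and the call
	 itself is then replaced by lhs = arg0 & temp.  The same two-insn
	 shape is used below for vec_nand, vec_orc, vec_nor and vec_eqv.  */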
    /* Flavors of vec_nand.  */
    case P8V_BUILTIN_VEC_NAND:
    case P8V_BUILTIN_NAND_V16QI:
    case P8V_BUILTIN_NAND_V8HI:
    case P8V_BUILTIN_NAND_V4SI:
    case P8V_BUILTIN_NAND_V4SF:
    case P8V_BUILTIN_NAND_V2DF:
    case P8V_BUILTIN_NAND_V2DI:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
      g = gimple_build_assign (temp, BIT_AND_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* Flavors of vec_or.  */
    case ALTIVEC_BUILTIN_VOR:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* flavors of vec_orc.  */
    case P8V_BUILTIN_ORC_V16QI:
    case P8V_BUILTIN_ORC_V8HI:
    case P8V_BUILTIN_ORC_V4SI:
    case P8V_BUILTIN_ORC_V4SF:
    case P8V_BUILTIN_ORC_V2DF:
    case P8V_BUILTIN_ORC_V2DI:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
      g = gimple_build_assign (temp, BIT_NOT_EXPR, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      g = gimple_build_assign (lhs, BIT_IOR_EXPR, arg0, temp);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* Flavors of vec_xor.  */
    case ALTIVEC_BUILTIN_VXOR:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, BIT_XOR_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* Flavors of vec_nor.  */
    case ALTIVEC_BUILTIN_VNOR:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
      g = gimple_build_assign (temp, BIT_IOR_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* flavors of vec_abs.  */
    case ALTIVEC_BUILTIN_ABS_V16QI:
    case ALTIVEC_BUILTIN_ABS_V8HI:
    case ALTIVEC_BUILTIN_ABS_V4SI:
    case ALTIVEC_BUILTIN_ABS_V4SF:
    case P8V_BUILTIN_ABS_V2DI:
    case VSX_BUILTIN_XVABSDP:
      arg0 = gimple_call_arg (stmt, 0);
      if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
	  && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
	return false;
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, ABS_EXPR, arg0);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
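      /* The TYPE_OVERFLOW_WRAPS guard above declines to fold for signed
	 integer elements without wrapping semantics: ABS_EXPR of the most
	 negative value (e.g. INT_MIN) would introduce undefined overflow
	 that the hardware vector-abs instruction itself does not have.  */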
    /* flavors of vec_min.  */
    case VSX_BUILTIN_XVMINDP:
    case P8V_BUILTIN_VMINSD:
    case P8V_BUILTIN_VMINUD:
    case ALTIVEC_BUILTIN_VMINSB:
    case ALTIVEC_BUILTIN_VMINSH:
    case ALTIVEC_BUILTIN_VMINSW:
    case ALTIVEC_BUILTIN_VMINUB:
    case ALTIVEC_BUILTIN_VMINUH:
    case ALTIVEC_BUILTIN_VMINUW:
    case ALTIVEC_BUILTIN_VMINFP:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, MIN_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* flavors of vec_max.  */
    case VSX_BUILTIN_XVMAXDP:
    case P8V_BUILTIN_VMAXSD:
    case P8V_BUILTIN_VMAXUD:
    case ALTIVEC_BUILTIN_VMAXSB:
    case ALTIVEC_BUILTIN_VMAXSH:
    case ALTIVEC_BUILTIN_VMAXSW:
    case ALTIVEC_BUILTIN_VMAXUB:
    case ALTIVEC_BUILTIN_VMAXUH:
    case ALTIVEC_BUILTIN_VMAXUW:
    case ALTIVEC_BUILTIN_VMAXFP:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, MAX_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* Flavors of vec_eqv.  */
    case P8V_BUILTIN_EQV_V16QI:
    case P8V_BUILTIN_EQV_V8HI:
    case P8V_BUILTIN_EQV_V4SI:
    case P8V_BUILTIN_EQV_V4SF:
    case P8V_BUILTIN_EQV_V2DF:
    case P8V_BUILTIN_EQV_V2DI:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      temp = create_tmp_reg_or_ssa_name (TREE_TYPE (arg1));
      g = gimple_build_assign (temp, BIT_XOR_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      g = gimple_build_assign (lhs, BIT_NOT_EXPR, temp);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* Flavors of vec_rotate_left.  */
    case ALTIVEC_BUILTIN_VRLB:
    case ALTIVEC_BUILTIN_VRLH:
    case ALTIVEC_BUILTIN_VRLW:
    case P8V_BUILTIN_VRLD:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, LROTATE_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* Flavors of vector shift right algebraic.
       vec_sra{b,h,w} -> vsra{b,h,w}.  */
    case ALTIVEC_BUILTIN_VSRAB:
    case ALTIVEC_BUILTIN_VSRAH:
    case ALTIVEC_BUILTIN_VSRAW:
    case P8V_BUILTIN_VSRAD:
      arg0 = gimple_call_arg (stmt, 0);
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, RSHIFT_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* Flavors of vector shift left.
       builtin_altivec_vsl{b,h,w} -> vsl{b,h,w}.  */
    case ALTIVEC_BUILTIN_VSLB:
    case ALTIVEC_BUILTIN_VSLH:
    case ALTIVEC_BUILTIN_VSLW:
    case P8V_BUILTIN_VSLD:
      arg0 = gimple_call_arg (stmt, 0);
      if (INTEGRAL_TYPE_P (TREE_TYPE (TREE_TYPE (arg0)))
	  && !TYPE_OVERFLOW_WRAPS (TREE_TYPE (TREE_TYPE (arg0))))
	return false;
      arg1 = gimple_call_arg (stmt, 1);
      lhs = gimple_call_lhs (stmt);
      g = gimple_build_assign (lhs, LSHIFT_EXPR, arg0, arg1);
      gimple_set_location (g, gimple_location (stmt));
      gsi_replace (gsi, g, true);
      return true;
    /* Flavors of vector shift right.  */
    case ALTIVEC_BUILTIN_VSRB:
    case ALTIVEC_BUILTIN_VSRH:
    case ALTIVEC_BUILTIN_VSRW:
    case P8V_BUILTIN_VSRD:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	lhs = gimple_call_lhs (stmt);
	gimple_seq stmts = NULL;
	/* Convert arg0 to unsigned.  */
	tree arg0_unsigned
	  = gimple_build (&stmts, VIEW_CONVERT_EXPR,
			  unsigned_type_for (TREE_TYPE (arg0)), arg0);
	tree res
	  = gimple_build (&stmts, RSHIFT_EXPR,
			  TREE_TYPE (arg0_unsigned), arg0_unsigned, arg1);
	/* Convert result back to the lhs type.  */
	res = gimple_build (&stmts, VIEW_CONVERT_EXPR, TREE_TYPE (lhs), res);
	gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
	update_call_from_tree (gsi, res);
	return true;
      }
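    /* The unsigned VIEW_CONVERT_EXPR round trip above is what makes this a
       logical (zero-filling) shift: RSHIFT_EXPR on a signed type would be
       an arithmetic shift, but vsr{b,h,w,d} shift in zeros regardless of
       the element type.  */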
    /* Vector loads.  */
    case ALTIVEC_BUILTIN_LVX_V16QI:
    case ALTIVEC_BUILTIN_LVX_V8HI:
    case ALTIVEC_BUILTIN_LVX_V4SI:
    case ALTIVEC_BUILTIN_LVX_V4SF:
    case ALTIVEC_BUILTIN_LVX_V2DI:
    case ALTIVEC_BUILTIN_LVX_V2DF:
    case ALTIVEC_BUILTIN_LVX_V1TI:
      {
	arg0 = gimple_call_arg (stmt, 0);  // offset
	arg1 = gimple_call_arg (stmt, 1);  // address
	lhs = gimple_call_lhs (stmt);
	location_t loc = gimple_location (stmt);
	/* Since arg1 may be cast to a different type, just use ptr_type_node
	   here instead of trying to enforce TBAA on pointer types.  */
	tree arg1_type = ptr_type_node;
	tree lhs_type = TREE_TYPE (lhs);
	/* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'.  Create
	   the tree using the value from arg0.  The resulting type will match
	   the type of arg1.  */
	gimple_seq stmts = NULL;
	tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg0);
	tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
				       arg1_type, arg1, temp_offset);
	/* Mask off any lower bits from the address.  */
	tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
					  arg1_type, temp_addr,
					  build_int_cst (arg1_type, -16));
	gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
	/* Use the build2 helper to set up the mem_ref.  The MEM_REF could also
	   take an offset, but since we've already incorporated the offset
	   above, here we just pass in a zero.  */
	gimple *g
	  = gimple_build_assign (lhs, build2 (MEM_REF, lhs_type, aligned_addr,
					      build_int_cst (arg1_type, 0)));
	gimple_set_location (g, loc);
	gsi_replace (gsi, g, true);
	return true;
      }
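    /* The address arithmetic emitted above is equivalent to
	   lhs = *(lhs_type *) (((uintptr_t) arg1 + arg0) & -16);
       i.e. the lvx semantics of truncating the effective address to a
       16-byte boundary before loading.  */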
    /* Vector stores.  */
    case ALTIVEC_BUILTIN_STVX_V16QI:
    case ALTIVEC_BUILTIN_STVX_V8HI:
    case ALTIVEC_BUILTIN_STVX_V4SI:
    case ALTIVEC_BUILTIN_STVX_V4SF:
    case ALTIVEC_BUILTIN_STVX_V2DI:
    case ALTIVEC_BUILTIN_STVX_V2DF:
      {
	arg0 = gimple_call_arg (stmt, 0); /* Value to be stored.  */
	arg1 = gimple_call_arg (stmt, 1); /* Offset.  */
	tree arg2 = gimple_call_arg (stmt, 2); /* Store-to address.  */
	location_t loc = gimple_location (stmt);
	tree arg0_type = TREE_TYPE (arg0);
	/* Use ptr_type_node (no TBAA) for the arg2_type.
	   FIXME: (Richard)  "A proper fix would be to transition this type as
	   seen from the frontend to GIMPLE, for example in a similar way we
	   do for MEM_REFs by piggy-backing that on an extra argument, a
	   constant zero pointer of the alias pointer type to use (which would
	   also serve as a type indicator of the store itself).  I'd use a
	   target specific internal function for this (not sure if we can have
	   those target specific, but I guess if it's folded away then that's
	   fine) and get away with the overload set."  */
	tree arg2_type = ptr_type_node;
	/* POINTER_PLUS_EXPR wants the offset to be of type 'sizetype'.  Create
	   the tree using the value from arg1.  The resulting type will match
	   the type of arg2.  */
	gimple_seq stmts = NULL;
	tree temp_offset = gimple_convert (&stmts, loc, sizetype, arg1);
	tree temp_addr = gimple_build (&stmts, loc, POINTER_PLUS_EXPR,
				       arg2_type, arg2, temp_offset);
	/* Mask off any lower bits from the address.  */
	tree aligned_addr = gimple_build (&stmts, loc, BIT_AND_EXPR,
					  arg2_type, temp_addr,
					  build_int_cst (arg2_type, -16));
	gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
	/* The desired gimple result should be similar to:
	   MEM[(__vector floatD.1407 *)_1] = vf1D.2697;  */
	gimple *g
	  = gimple_build_assign (build2 (MEM_REF, arg0_type, aligned_addr,
					 build_int_cst (arg2_type, 0)), arg0);
	gimple_set_location (g, loc);
	gsi_replace (gsi, g, true);
	return true;
      }
    /* Vector Fused multiply-add (fma).  */
    case ALTIVEC_BUILTIN_VMADDFP:
    case VSX_BUILTIN_XVMADDDP:
    case ALTIVEC_BUILTIN_VMLADDUHM:
      {
	arg0 = gimple_call_arg (stmt, 0);
	arg1 = gimple_call_arg (stmt, 1);
	tree arg2 = gimple_call_arg (stmt, 2);
	lhs = gimple_call_lhs (stmt);
	gcall *g = gimple_build_call_internal (IFN_FMA, 3, arg0, arg1, arg2);
	gimple_call_set_lhs (g, lhs);
	gimple_call_set_nothrow (g, true);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
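    /* IFN_FMA is the target-independent internal fused multiply-add
       function; rewriting the builtin call to it lets generic middle-end
       handling of fma apply to vec_madd before the machine pattern is
       selected at expand time.  */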
    /* Vector compares; EQ, NE, GE, GT, LE.  */
    case ALTIVEC_BUILTIN_VCMPEQUB:
    case ALTIVEC_BUILTIN_VCMPEQUH:
    case ALTIVEC_BUILTIN_VCMPEQUW:
    case P8V_BUILTIN_VCMPEQUD:
      fold_compare_helper (gsi, EQ_EXPR, stmt);
      return true;

    case P9V_BUILTIN_CMPNEB:
    case P9V_BUILTIN_CMPNEH:
    case P9V_BUILTIN_CMPNEW:
      fold_compare_helper (gsi, NE_EXPR, stmt);
      return true;

    case VSX_BUILTIN_CMPGE_16QI:
    case VSX_BUILTIN_CMPGE_U16QI:
    case VSX_BUILTIN_CMPGE_8HI:
    case VSX_BUILTIN_CMPGE_U8HI:
    case VSX_BUILTIN_CMPGE_4SI:
    case VSX_BUILTIN_CMPGE_U4SI:
    case VSX_BUILTIN_CMPGE_2DI:
    case VSX_BUILTIN_CMPGE_U2DI:
      fold_compare_helper (gsi, GE_EXPR, stmt);
      return true;

    case ALTIVEC_BUILTIN_VCMPGTSB:
    case ALTIVEC_BUILTIN_VCMPGTUB:
    case ALTIVEC_BUILTIN_VCMPGTSH:
    case ALTIVEC_BUILTIN_VCMPGTUH:
    case ALTIVEC_BUILTIN_VCMPGTSW:
    case ALTIVEC_BUILTIN_VCMPGTUW:
    case P8V_BUILTIN_VCMPGTUD:
    case P8V_BUILTIN_VCMPGTSD:
      fold_compare_helper (gsi, GT_EXPR, stmt);
      return true;

    case VSX_BUILTIN_CMPLE_16QI:
    case VSX_BUILTIN_CMPLE_U16QI:
    case VSX_BUILTIN_CMPLE_8HI:
    case VSX_BUILTIN_CMPLE_U8HI:
    case VSX_BUILTIN_CMPLE_4SI:
    case VSX_BUILTIN_CMPLE_U4SI:
    case VSX_BUILTIN_CMPLE_2DI:
    case VSX_BUILTIN_CMPLE_U2DI:
      fold_compare_helper (gsi, LE_EXPR, stmt);
      return true;
    /* flavors of vec_splat_[us]{8,16,32}.  */
    case ALTIVEC_BUILTIN_VSPLTISB:
    case ALTIVEC_BUILTIN_VSPLTISH:
    case ALTIVEC_BUILTIN_VSPLTISW:
      {
	int size;
	if (fn_code == ALTIVEC_BUILTIN_VSPLTISB)
	  size = 8;
	else if (fn_code == ALTIVEC_BUILTIN_VSPLTISH)
	  size = 16;
	else
	  size = 32;

	arg0 = gimple_call_arg (stmt, 0);
	lhs = gimple_call_lhs (stmt);

	/* Only fold the vec_splat_*() if the lower bits of arg 0 are a
	   5-bit signed constant in range -16 to +15.  */
	if (TREE_CODE (arg0) != INTEGER_CST
	    || !IN_RANGE (sext_hwi (TREE_INT_CST_LOW (arg0), size),
			  -16, 15))
	  return false;
	gimple_seq stmts = NULL;
	location_t loc = gimple_location (stmt);
	tree splat_value = gimple_convert (&stmts, loc,
					   TREE_TYPE (TREE_TYPE (lhs)), arg0);
	gsi_insert_seq_before (gsi, stmts, GSI_SAME_STMT);
	tree splat_tree = build_vector_from_val (TREE_TYPE (lhs), splat_value);
	g = gimple_build_assign (lhs, splat_tree);
	gimple_set_location (g, gimple_location (stmt));
	gsi_replace (gsi, g, true);
	return true;
      }
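    /* For example, vec_splat_s8 (5) folds to the VECTOR_CST
       { 5, 5, ..., 5 }; a non-constant argument, or one whose sign-extended
       low bits fall outside [-16, 15], is left as a call for the RTL
       expander to handle.  */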
    /* vec_mergel (integrals).  */
    case ALTIVEC_BUILTIN_VMRGLH:
    case ALTIVEC_BUILTIN_VMRGLW:
    case VSX_BUILTIN_XXMRGLW_4SI:
    case ALTIVEC_BUILTIN_VMRGLB:
    case VSX_BUILTIN_VEC_MERGEL_V2DI:
      fold_mergehl_helper (gsi, stmt, 1);
      return true;
    /* vec_mergeh (integrals).  */
    case ALTIVEC_BUILTIN_VMRGHH:
    case ALTIVEC_BUILTIN_VMRGHW:
    case VSX_BUILTIN_XXMRGHW_4SI:
    case ALTIVEC_BUILTIN_VMRGHB:
    case VSX_BUILTIN_VEC_MERGEH_V2DI:
      fold_mergehl_helper (gsi, stmt, 0);
      return true;
    default:
      if (TARGET_DEBUG_BUILTIN)
	fprintf (stderr, "gimple builtin intrinsic not matched:%d %s %s\n",
		 fn_code, fn_name1, fn_name2);
      break;
    }

  return false;
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		       machine_mode mode ATTRIBUTE_UNUSED,
		       int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  size_t uns_fcode = (size_t) fcode;
  const struct builtin_description *d;
  size_t i;
  rtx ret;
  bool success;
  HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
  bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
  enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
  /* We have two different modes (KFmode, TFmode) that are the IEEE 128-bit
     floating point type, depending on whether long double is the IBM extended
     double (KFmode) or long double is IEEE 128-bit (TFmode).  It is simpler if
     we only define one variant of the built-in function, and switch the code
     when defining it, rather than defining two built-ins and using the
     overload table in rs6000-c.c to switch between the two.  If we don't have
     the proper assembler, don't do this switch because CODE_FOR_*kf* and
     CODE_FOR_*tf* will be CODE_FOR_nothing.  */
#ifdef HAVE_AS_POWER9
  if (FLOAT128_IEEE_P (TFmode))
    switch (icode)
      {
      default:
	break;

      case CODE_FOR_sqrtkf2_odd:      icode = CODE_FOR_sqrttf2_odd;      break;
      case CODE_FOR_trunckfdf2_odd:   icode = CODE_FOR_trunctfdf2_odd;   break;
      case CODE_FOR_addkf3_odd:       icode = CODE_FOR_addtf3_odd;       break;
      case CODE_FOR_subkf3_odd:       icode = CODE_FOR_subtf3_odd;       break;
      case CODE_FOR_mulkf3_odd:       icode = CODE_FOR_multf3_odd;       break;
      case CODE_FOR_divkf3_odd:       icode = CODE_FOR_divtf3_odd;       break;
      case CODE_FOR_fmakf4_odd:       icode = CODE_FOR_fmatf4_odd;       break;
      case CODE_FOR_xsxexpqp_kf:      icode = CODE_FOR_xsxexpqp_tf;      break;
      case CODE_FOR_xsxsigqp_kf:      icode = CODE_FOR_xsxsigqp_tf;      break;
      case CODE_FOR_xststdcnegqp_kf:  icode = CODE_FOR_xststdcnegqp_tf;  break;
      case CODE_FOR_xsiexpqp_kf:      icode = CODE_FOR_xsiexpqp_tf;      break;
      case CODE_FOR_xsiexpqpf_kf:     icode = CODE_FOR_xsiexpqpf_tf;     break;
      case CODE_FOR_xststdcqp_kf:     icode = CODE_FOR_xststdcqp_tf;     break;
      }
#endif
  if (TARGET_DEBUG_BUILTIN)
    {
      const char *name1 = rs6000_builtin_info[uns_fcode].name;
      const char *name2 = (icode != CODE_FOR_nothing)
			   ? get_insn_name ((int) icode)
			   : "nothing";
      const char *name3;

      switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
	{
	default:		   name3 = "unknown";	break;
	case RS6000_BTC_SPECIAL:   name3 = "special";	break;
	case RS6000_BTC_UNARY:	   name3 = "unary";	break;
	case RS6000_BTC_BINARY:	   name3 = "binary";	break;
	case RS6000_BTC_TERNARY:   name3 = "ternary";	break;
	case RS6000_BTC_PREDICATE: name3 = "predicate";	break;
	case RS6000_BTC_ABS:	   name3 = "abs";	break;
	case RS6000_BTC_DST:	   name3 = "dst";	break;
	}

      fprintf (stderr,
	       "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
	       (name1) ? name1 : "---", fcode,
	       (name2) ? name2 : "---", (int) icode,
	       name3,
	       func_valid_p ? "" : ", not valid");
    }
  if (!func_valid_p)
    {
      rs6000_invalid_builtin (fcode);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, ignore);
    }
  switch (fcode)
    {
    case RS6000_BUILTIN_RECIP:
      return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);

    case RS6000_BUILTIN_RECIPF:
      return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);

    case RS6000_BUILTIN_RSQRTF:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);

    case RS6000_BUILTIN_RSQRT:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);

    case POWER7_BUILTIN_BPERMD:
      return rs6000_expand_binop_builtin (((TARGET_64BIT)
					   ? CODE_FOR_bpermd_di
					   : CODE_FOR_bpermd_si), exp, target);

    case RS6000_BUILTIN_GET_TB:
      return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
					   target);

    case RS6000_BUILTIN_MFTB:
      return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
					    ? CODE_FOR_rs6000_mftb_di
					    : CODE_FOR_rs6000_mftb_si),
					   target);

    case RS6000_BUILTIN_MFFS:
      return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);

    case RS6000_BUILTIN_MTFSF:
      return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);

    case RS6000_BUILTIN_CPU_INIT:
    case RS6000_BUILTIN_CPU_IS:
    case RS6000_BUILTIN_CPU_SUPPORTS:
      return cpu_expand_builtin (fcode, exp, target);

    case MISC_BUILTIN_SPEC_BARRIER:
      {
	emit_insn (gen_rs6000_speculation_barrier ());
	return NULL_RTX;
      }
    case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
    case ALTIVEC_BUILTIN_MASK_FOR_STORE:
      {
	int icode2 = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
		      : (int) CODE_FOR_altivec_lvsl_direct);
	machine_mode tmode = insn_data[icode2].operand[0].mode;
	machine_mode mode = insn_data[icode2].operand[1].mode;
	tree arg;
	rtx op, addr, pat;

	gcc_assert (TARGET_ALTIVEC);

	arg = CALL_EXPR_ARG (exp, 0);
	gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
	op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
	addr = memory_address (mode, op);
	if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
	  op = addr;
	else
	  {
	    /* For the load case need to negate the address.  */
	    op = gen_reg_rtx (GET_MODE (addr));
	    emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
	  }
	op = gen_rtx_MEM (mode, op);

	if (target == 0
	    || GET_MODE (target) != tmode
	    || ! (*insn_data[icode2].operand[0].predicate) (target, tmode))
	  target = gen_reg_rtx (tmode);

	pat = GEN_FCN (icode2) (target, op);
	if (!pat)
	  return 0;
	emit_insn (pat);

	return target;
      }
    case ALTIVEC_BUILTIN_VCFUX:
    case ALTIVEC_BUILTIN_VCFSX:
    case ALTIVEC_BUILTIN_VCTUXS:
    case ALTIVEC_BUILTIN_VCTSXS:
      /* FIXME: There's got to be a nicer way to handle this case than
	 constructing a new CALL_EXPR.  */
      if (call_expr_nargs (exp) == 1)
	{
	  exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
				 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
	}
      break;
      /* For the pack and unpack int128 routines, fix up the builtin so it
	 uses the correct IBM128 type.  */
    case MISC_BUILTIN_PACK_IF:
      if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
	{
	  icode = CODE_FOR_packtf;
	  fcode = MISC_BUILTIN_PACK_TF;
	  uns_fcode = (size_t) fcode;
	}
      break;

    case MISC_BUILTIN_UNPACK_IF:
      if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
	{
	  icode = CODE_FOR_unpacktf;
	  fcode = MISC_BUILTIN_UNPACK_TF;
	  uns_fcode = (size_t) fcode;
	}
      break;

    default:
      break;
    }
  if (TARGET_ALTIVEC)
    {
      ret = altivec_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }
  if (TARGET_HTM)
    {
      ret = htm_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }
  unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
  /* RS6000_BTC_SPECIAL represents no-operand operators.  */
  gcc_assert (attr == RS6000_BTC_UNARY
	      || attr == RS6000_BTC_BINARY
	      || attr == RS6000_BTC_TERNARY
	      || attr == RS6000_BTC_SPECIAL);
  /* Handle simple unary operations.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_unop_builtin (icode, exp, target);

  /* Handle simple binary operations.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (icode, exp, target);

  /* Handle simple ternary operations.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_ternop_builtin (icode, exp, target);

  /* Handle simple no-argument operations.  */
  d = bdesc_0arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_zeroop_builtin (icode, target);

  gcc_unreachable ();
}
/* Create a builtin vector type with a name.  Taking care not to give
   the canonical type a name.  */

static tree
rs6000_vector_type (const char *name, tree elt_type, unsigned num_elts)
{
  tree result = build_vector_type (elt_type, num_elts);

  /* Copy so we don't give the canonical type a name.  */
  result = build_variant_type_copy (result);

  add_builtin_type (name, result);

  return result;
}
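/* For example, rs6000_vector_type ("__vector float", float_type_node, 4)
   yields a named variant of the V4SF vector type for user-visible
   diagnostics, while the unnamed canonical type behind it continues to be
   shared for type unification.  */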
static void
rs6000_init_builtins (void)
{
  tree tdecl;
  tree ftype;
  machine_mode mode;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_init_builtins%s%s\n",
	     (TARGET_ALTIVEC) ? ", altivec" : "",
	     (TARGET_VSX)     ? ", vsx"     : "");

  V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64 ? "__vector long"
				       : "__vector long long",
				       intDI_type_node, 2);
  V2DF_type_node = rs6000_vector_type ("__vector double", double_type_node, 2);
  V4SI_type_node = rs6000_vector_type ("__vector signed int",
				       intSI_type_node, 4);
  V4SF_type_node = rs6000_vector_type ("__vector float", float_type_node, 4);
  V8HI_type_node = rs6000_vector_type ("__vector signed short",
				       intHI_type_node, 8);
  V16QI_type_node = rs6000_vector_type ("__vector signed char",
					intQI_type_node, 16);

  unsigned_V16QI_type_node = rs6000_vector_type ("__vector unsigned char",
						 unsigned_intQI_type_node, 16);
  unsigned_V8HI_type_node = rs6000_vector_type ("__vector unsigned short",
						 unsigned_intHI_type_node, 8);
  unsigned_V4SI_type_node = rs6000_vector_type ("__vector unsigned int",
						 unsigned_intSI_type_node, 4);
  unsigned_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
						? "__vector unsigned long"
						: "__vector unsigned long long",
						unsigned_intDI_type_node, 2);

  opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);

  const_str_type_node
    = build_pointer_type (build_qualified_type (char_type_node,
						TYPE_QUAL_CONST));

  /* We use V1TI mode as a special container to hold __int128_t items that
     must live in VSX registers.  */
  if (intTI_type_node)
    {
      V1TI_type_node = rs6000_vector_type ("__vector __int128",
					   intTI_type_node, 1);
      unsigned_V1TI_type_node
	= rs6000_vector_type ("__vector unsigned __int128",
			      unsigned_intTI_type_node, 1);
    }
  /* The 'vector bool ...' types must be kept distinct from 'vector
     unsigned ...' types, especially in C++ land.  Similarly, 'vector pixel'
     is distinct from 'vector unsigned short'.  */
  bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
  bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
  bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
  bool_long_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
  pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);

  long_integer_type_internal_node = long_integer_type_node;
  long_unsigned_type_internal_node = long_unsigned_type_node;
  long_long_integer_type_internal_node = long_long_integer_type_node;
  long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
  intQI_type_internal_node = intQI_type_node;
  uintQI_type_internal_node = unsigned_intQI_type_node;
  intHI_type_internal_node = intHI_type_node;
  uintHI_type_internal_node = unsigned_intHI_type_node;
  intSI_type_internal_node = intSI_type_node;
  uintSI_type_internal_node = unsigned_intSI_type_node;
  intDI_type_internal_node = intDI_type_node;
  uintDI_type_internal_node = unsigned_intDI_type_node;
  intTI_type_internal_node = intTI_type_node;
  uintTI_type_internal_node = unsigned_intTI_type_node;
  float_type_internal_node = float_type_node;
  double_type_internal_node = double_type_node;
  long_double_type_internal_node = long_double_type_node;
  dfloat64_type_internal_node = dfloat64_type_node;
  dfloat128_type_internal_node = dfloat128_type_node;
  void_type_internal_node = void_type_node;
  /* 128-bit floating point support.  KFmode is IEEE 128-bit floating point.
     IFmode is the IBM extended 128-bit format that is a pair of doubles.
     TFmode will be either IEEE 128-bit floating point or the IBM
     double-double format that uses a pair of doubles, depending on the
     switches and defaults.

     If we don't have support for either 128-bit IBM double double or IEEE
     128-bit floating point, we need to make sure the type is non-zero or
     else self-test fails during bootstrap.

     Always create __ibm128 as a separate type, even if the current long double
     format is IBM extended double.

     For IEEE 128-bit floating point, always create the type __ieee128.  If the
     user used -mfloat128, rs6000-c.c will create a define from __float128 to
     __ieee128.  */
  if (TARGET_FLOAT128_TYPE)
    {
      if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
	ibm128_float_type_node = long_double_type_node;
      else
	{
	  ibm128_float_type_node = make_node (REAL_TYPE);
	  TYPE_PRECISION (ibm128_float_type_node) = 128;
	  SET_TYPE_MODE (ibm128_float_type_node, IFmode);
	  layout_type (ibm128_float_type_node);
	}

      lang_hooks.types.register_builtin_type (ibm128_float_type_node,
					      "__ibm128");

      if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128)
	ieee128_float_type_node = long_double_type_node;
      else
	ieee128_float_type_node = float128_type_node;

      lang_hooks.types.register_builtin_type (ieee128_float_type_node,
					      "__ieee128");
    }

  else
    ieee128_float_type_node = ibm128_float_type_node = long_double_type_node;
  /* Initialize the modes for builtin_function_type, mapping a machine mode to
     tree type node.  */
  builtin_mode_to_type[QImode][0] = integer_type_node;
  builtin_mode_to_type[HImode][0] = integer_type_node;
  builtin_mode_to_type[SImode][0] = intSI_type_node;
  builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
  builtin_mode_to_type[DImode][0] = intDI_type_node;
  builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
  builtin_mode_to_type[TImode][0] = intTI_type_node;
  builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
  builtin_mode_to_type[SFmode][0] = float_type_node;
  builtin_mode_to_type[DFmode][0] = double_type_node;
  builtin_mode_to_type[IFmode][0] = ibm128_float_type_node;
  builtin_mode_to_type[KFmode][0] = ieee128_float_type_node;
  builtin_mode_to_type[TFmode][0] = long_double_type_node;
  builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
  builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
  builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
  builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
  builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
  builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
  builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
  builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
  builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
  builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
  builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
  builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
  builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
  builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
  tdecl = add_builtin_type ("__bool char", bool_char_type_node);
  TYPE_NAME (bool_char_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool short", bool_short_type_node);
  TYPE_NAME (bool_short_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool int", bool_int_type_node);
  TYPE_NAME (bool_int_type_node) = tdecl;

  tdecl = add_builtin_type ("__pixel", pixel_type_node);
  TYPE_NAME (pixel_type_node) = tdecl;

  bool_V16QI_type_node = rs6000_vector_type ("__vector __bool char",
					     bool_char_type_node, 16);
  bool_V8HI_type_node = rs6000_vector_type ("__vector __bool short",
					    bool_short_type_node, 8);
  bool_V4SI_type_node = rs6000_vector_type ("__vector __bool int",
					    bool_int_type_node, 4);
  bool_V2DI_type_node = rs6000_vector_type (TARGET_POWERPC64
					    ? "__vector __bool long"
					    : "__vector __bool long long",
					    bool_long_long_type_node, 2);
  pixel_V8HI_type_node = rs6000_vector_type ("__vector __pixel",
					     pixel_type_node, 8);
  /* Create Altivec and VSX builtins on machines with at least the
     general purpose extensions (970 and newer) to allow the use of
     the target attribute.  */
  if (TARGET_EXTRA_BUILTINS)
    altivec_init_builtins ();
  if (TARGET_HTM)
    htm_init_builtins ();

  if (TARGET_EXTRA_BUILTINS)
    rs6000_common_init_builtins ();
  ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
				 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
  def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);

  ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
				 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
  def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);

  ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
				 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
  def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);

  ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
				 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
  def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);

  mode = (TARGET_64BIT) ? DImode : SImode;
  ftype = builtin_function_type (mode, mode, mode, VOIDmode,
				 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
  def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
  ftype = build_function_type_list (unsigned_intDI_type_node,
				    NULL_TREE);
  def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);

  if (TARGET_64BIT)
    ftype = build_function_type_list (unsigned_intDI_type_node,
				      NULL_TREE);
  else
    ftype = build_function_type_list (unsigned_intSI_type_node,
				      NULL_TREE);
  def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
  ftype = build_function_type_list (double_type_node, NULL_TREE);
  def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);

  ftype = build_function_type_list (void_type_node,
				    intSI_type_node, double_type_node,
				    NULL_TREE);
  def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);

  ftype = build_function_type_list (void_type_node, NULL_TREE);
  def_builtin ("__builtin_cpu_init", ftype, RS6000_BUILTIN_CPU_INIT);
  def_builtin ("__builtin_ppc_speculation_barrier", ftype,
	       MISC_BUILTIN_SPEC_BARRIER);

  ftype = build_function_type_list (bool_int_type_node, const_ptr_type_node,
				    NULL_TREE);
  def_builtin ("__builtin_cpu_is", ftype, RS6000_BUILTIN_CPU_IS);
  def_builtin ("__builtin_cpu_supports", ftype, RS6000_BUILTIN_CPU_SUPPORTS);
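  /* Typical source-level use of the two CPU builtins defined above:
	 __builtin_cpu_init ();
	 if (__builtin_cpu_is ("power9")) ...
	 if (__builtin_cpu_supports ("vsx")) ...
     Each takes a string constant and returns a nonzero int on a match.  */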
  /* AIX libm provides clog as __clog.  */
  if (TARGET_XCOFF &&
      (tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
    set_user_assembler_name (tdecl, "__clog");

#ifdef SUBTARGET_INIT_BUILTINS
  SUBTARGET_INIT_BUILTINS;
#endif
}
/* Returns the rs6000 builtin decl for CODE.  */

static tree
rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT fnmask;

  if (code >= RS6000_BUILTIN_COUNT)
    return error_mark_node;

  fnmask = rs6000_builtin_info[code].mask;
  if ((fnmask & rs6000_builtin_mask) != fnmask)
    {
      rs6000_invalid_builtin ((enum rs6000_builtins) code);
      return error_mark_node;
    }

  return rs6000_builtin_decls[code];
}
static void
altivec_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;
  tree ftype;
  tree decl;
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;

  tree pvoid_type_node = build_pointer_type (void_type_node);

  tree pcvoid_type_node
    = build_pointer_type (build_qualified_type (void_type_node,
						TYPE_QUAL_CONST));

  tree int_ftype_opaque
    = build_function_type_list (integer_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree opaque_ftype_opaque
    = build_function_type_list (integer_type_node, NULL_TREE);
  tree opaque_ftype_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, integer_type_node,
				NULL_TREE);
  tree opaque_ftype_opaque_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, opaque_V4SI_type_node,
				integer_type_node, NULL_TREE);
  tree opaque_ftype_opaque_opaque_opaque
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, opaque_V4SI_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree opaque_ftype_opaque_opaque
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, opaque_V4SI_type_node,
				NULL_TREE);
  tree int_ftype_int_opaque_opaque
    = build_function_type_list (integer_type_node,
				integer_type_node, opaque_V4SI_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree int_ftype_int_v4si_v4si
    = build_function_type_list (integer_type_node,
				integer_type_node, V4SI_type_node,
				V4SI_type_node, NULL_TREE);
  tree int_ftype_int_v2di_v2di
    = build_function_type_list (integer_type_node,
				integer_type_node, V2DI_type_node,
				V2DI_type_node, NULL_TREE);
  tree void_ftype_v4si
    = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_void
    = build_function_type_list (V8HI_type_node, NULL_TREE);
  tree void_ftype_void
    = build_function_type_list (void_type_node, NULL_TREE);
  tree void_ftype_int
    = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);

  tree opaque_ftype_long_pcvoid
    = build_function_type_list (opaque_V4SI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v16qi_ftype_long_pcvoid
    = build_function_type_list (V16QI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v8hi_ftype_long_pcvoid
    = build_function_type_list (V8HI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v4si_ftype_long_pcvoid
    = build_function_type_list (V4SI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v4sf_ftype_long_pcvoid
    = build_function_type_list (V4SF_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v2df_ftype_long_pcvoid
    = build_function_type_list (V2DF_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v2di_ftype_long_pcvoid
    = build_function_type_list (V2DI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);
  tree v1ti_ftype_long_pcvoid
    = build_function_type_list (V1TI_type_node,
				long_integer_type_node, pcvoid_type_node,
				NULL_TREE);

  tree void_ftype_opaque_long_pvoid
    = build_function_type_list (void_type_node,
				opaque_V4SI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v4si_long_pvoid
    = build_function_type_list (void_type_node,
				V4SI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v16qi_long_pvoid
    = build_function_type_list (void_type_node,
				V16QI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);

  tree void_ftype_v16qi_pvoid_long
    = build_function_type_list (void_type_node,
				V16QI_type_node, pvoid_type_node,
				long_integer_type_node, NULL_TREE);

  tree void_ftype_v8hi_long_pvoid
    = build_function_type_list (void_type_node,
				V8HI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v4sf_long_pvoid
    = build_function_type_list (void_type_node,
				V4SF_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v2df_long_pvoid
    = build_function_type_list (void_type_node,
				V2DF_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v1ti_long_pvoid
    = build_function_type_list (void_type_node,
				V1TI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v2di_long_pvoid
    = build_function_type_list (void_type_node,
				V2DI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree int_ftype_int_v8hi_v8hi
    = build_function_type_list (integer_type_node,
				integer_type_node, V8HI_type_node,
				V8HI_type_node, NULL_TREE);
  tree int_ftype_int_v16qi_v16qi
    = build_function_type_list (integer_type_node,
				integer_type_node, V16QI_type_node,
				V16QI_type_node, NULL_TREE);
  tree int_ftype_int_v4sf_v4sf
    = build_function_type_list (integer_type_node,
				integer_type_node, V4SF_type_node,
				V4SF_type_node, NULL_TREE);
  tree int_ftype_int_v2df_v2df
    = build_function_type_list (integer_type_node,
				integer_type_node, V2DF_type_node,
				V2DF_type_node, NULL_TREE);
  tree v2di_ftype_v2di
    = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
  tree v4si_ftype_v4si
    = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi
    = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi
    = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf
    = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree v2df_ftype_v2df
    = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
  tree void_ftype_pcvoid_int_int
    = build_function_type_list (void_type_node,
				pcvoid_type_node, integer_type_node,
				integer_type_node, NULL_TREE);
  def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
  def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
  def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
  def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
  def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
  def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
  def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
  def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
  def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
  def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
  def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V2DF);
  def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V2DI);
  def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V4SF);
  def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V4SI);
  def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V8HI);
  def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVXL_V16QI);
  def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
  def_builtin ("__builtin_altivec_lvx_v1ti", v1ti_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V1TI);
  def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V2DF);
  def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V2DI);
  def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V4SF);
  def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V4SI);
  def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V8HI);
  def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
	       ALTIVEC_BUILTIN_LVX_V16QI);
  def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
  def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V2DF);
  def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V2DI);
  def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V4SF);
  def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V4SI);
  def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V8HI);
  def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
	       ALTIVEC_BUILTIN_STVX_V16QI);
  def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
  def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
  def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V2DF);
  def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V2DI);
  def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V4SF);
  def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V4SI);
  def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V8HI);
  def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
	       ALTIVEC_BUILTIN_STVXL_V16QI);
  def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
  def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
  def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
  def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
  def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
  def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
  def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
  def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
  def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
  def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
  def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
  def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
  def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
  def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
  def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
  def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVD2X_V2DF);
  def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVD2X_V2DI);
  def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V4SF);
  def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V4SI);
  def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V8HI);
  def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
	       VSX_BUILTIN_LXVW4X_V16QI);
  def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
	       VSX_BUILTIN_STXVD2X_V2DF);
  def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
	       VSX_BUILTIN_STXVD2X_V2DI);
  def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V4SF);
  def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V4SI);
  def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V8HI);
  def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
	       VSX_BUILTIN_STXVW4X_V16QI);

  def_builtin ("__builtin_vsx_ld_elemrev_v2df", v2df_ftype_long_pcvoid,
	       VSX_BUILTIN_LD_ELEMREV_V2DF);
  def_builtin ("__builtin_vsx_ld_elemrev_v2di", v2di_ftype_long_pcvoid,
	       VSX_BUILTIN_LD_ELEMREV_V2DI);
  def_builtin ("__builtin_vsx_ld_elemrev_v4sf", v4sf_ftype_long_pcvoid,
	       VSX_BUILTIN_LD_ELEMREV_V4SF);
  def_builtin ("__builtin_vsx_ld_elemrev_v4si", v4si_ftype_long_pcvoid,
	       VSX_BUILTIN_LD_ELEMREV_V4SI);
  def_builtin ("__builtin_vsx_ld_elemrev_v8hi", v8hi_ftype_long_pcvoid,
	       VSX_BUILTIN_LD_ELEMREV_V8HI);
  def_builtin ("__builtin_vsx_ld_elemrev_v16qi", v16qi_ftype_long_pcvoid,
	       VSX_BUILTIN_LD_ELEMREV_V16QI);
  def_builtin ("__builtin_vsx_st_elemrev_v2df", void_ftype_v2df_long_pvoid,
	       VSX_BUILTIN_ST_ELEMREV_V2DF);
  def_builtin ("__builtin_vsx_st_elemrev_v1ti", void_ftype_v1ti_long_pvoid,
	       VSX_BUILTIN_ST_ELEMREV_V1TI);
  def_builtin ("__builtin_vsx_st_elemrev_v2di", void_ftype_v2di_long_pvoid,
	       VSX_BUILTIN_ST_ELEMREV_V2DI);
  def_builtin ("__builtin_vsx_st_elemrev_v4sf", void_ftype_v4sf_long_pvoid,
	       VSX_BUILTIN_ST_ELEMREV_V4SF);
  def_builtin ("__builtin_vsx_st_elemrev_v4si", void_ftype_v4si_long_pvoid,
	       VSX_BUILTIN_ST_ELEMREV_V4SI);
  def_builtin ("__builtin_vsx_st_elemrev_v8hi", void_ftype_v8hi_long_pvoid,
	       VSX_BUILTIN_ST_ELEMREV_V8HI);
  def_builtin ("__builtin_vsx_st_elemrev_v16qi", void_ftype_v16qi_long_pvoid,
	       VSX_BUILTIN_ST_ELEMREV_V16QI);

  def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
	       VSX_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
	       VSX_BUILTIN_VEC_ST);
  def_builtin ("__builtin_vec_xl", opaque_ftype_long_pcvoid,
	       VSX_BUILTIN_VEC_XL);
  def_builtin ("__builtin_vec_xl_be", opaque_ftype_long_pcvoid,
	       VSX_BUILTIN_VEC_XL_BE);
  def_builtin ("__builtin_vec_xst", void_ftype_opaque_long_pvoid,
	       VSX_BUILTIN_VEC_XST);
  def_builtin ("__builtin_vec_xst_be", void_ftype_opaque_long_pvoid,
	       VSX_BUILTIN_VEC_XST_BE);
  def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
  def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
  def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);

  def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
  def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
  def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
  def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
  def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
  def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
  def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
  def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
  def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
  def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
  def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
  def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);

  def_builtin ("__builtin_vec_adde", opaque_ftype_opaque_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_ADDE);
  def_builtin ("__builtin_vec_addec", opaque_ftype_opaque_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_ADDEC);
  def_builtin ("__builtin_vec_cmpne", opaque_ftype_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_CMPNE);
  def_builtin ("__builtin_vec_mul", opaque_ftype_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_MUL);
  def_builtin ("__builtin_vec_sube", opaque_ftype_opaque_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_SUBE);
  def_builtin ("__builtin_vec_subec", opaque_ftype_opaque_opaque_opaque,
	       ALTIVEC_BUILTIN_VEC_SUBEC);
  /* Cell builtins.  */
  def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
  def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
  def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
  def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);

  def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
  def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
  def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
  def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);

  def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
  def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
  def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
  def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);

  def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
  def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
  def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
  def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
  if (TARGET_P9_VECTOR)
    {
      def_builtin ("__builtin_altivec_stxvl", void_ftype_v16qi_pvoid_long,
		   P9V_BUILTIN_STXVL);
      def_builtin ("__builtin_xst_len_r", void_ftype_v16qi_pvoid_long,
		   P9V_BUILTIN_XST_LEN_R);
    }
  /* Add the DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    {
      HOST_WIDE_INT mask = d->mask;

      /* It is expected that these dst built-in functions may have
	 d->icode equal to CODE_FOR_nothing.  */
      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "altivec_init_builtins, skip dst %s\n",
		     d->name);
	  continue;
	}
      def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
    }
  /* Initialize the predicates.  */
  d = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
    {
      machine_mode mode1;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "altivec_init_builtins, skip predicate %s\n",
		     d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	mode1 = VOIDmode;
      else
	{
	  /* Cannot define builtin if the instruction is disabled.  */
	  gcc_assert (d->icode != CODE_FOR_nothing);
	  mode1 = insn_data[d->icode].operand[1].mode;
	}

      switch (mode1)
	{
	case E_VOIDmode:
	  type = int_ftype_int_opaque_opaque;
	  break;
	case E_V2DImode:
	  type = int_ftype_int_v2di_v2di;
	  break;
	case E_V4SImode:
	  type = int_ftype_int_v4si_v4si;
	  break;
	case E_V8HImode:
	  type = int_ftype_int_v8hi_v8hi;
	  break;
	case E_V16QImode:
	  type = int_ftype_int_v16qi_v16qi;
	  break;
	case E_V4SFmode:
	  type = int_ftype_int_v4sf_v4sf;
	  break;
	case E_V2DFmode:
	  type = int_ftype_int_v2df_v2df;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
  /* Initialize the abs* operators.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    {
      machine_mode mode0;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "altivec_init_builtins, skip abs %s\n",
		     d->name);
	  continue;
	}

      /* Cannot define builtin if the instruction is disabled.  */
      gcc_assert (d->icode != CODE_FOR_nothing);
      mode0 = insn_data[d->icode].operand[0].mode;

      switch (mode0)
	{
	case E_V2DImode:
	  type = v2di_ftype_v2di;
	  break;
	case E_V4SImode:
	  type = v4si_ftype_v4si;
	  break;
	case E_V8HImode:
	  type = v8hi_ftype_v8hi;
	  break;
	case E_V16QImode:
	  type = v16qi_ftype_v16qi;
	  break;
	case E_V4SFmode:
	  type = v4sf_ftype_v4sf;
	  break;
	case E_V2DFmode:
	  type = v2df_ftype_v2df;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
  /* Initialize target builtin that implements
     targetm.vectorize.builtin_mask_for_load.  */

  decl = add_builtin_function ("__builtin_altivec_mask_for_load",
			       v16qi_ftype_long_pcvoid,
			       ALTIVEC_BUILTIN_MASK_FOR_LOAD,
			       BUILT_IN_MD, NULL, NULL_TREE);
  TREE_READONLY (decl) = 1;
  /* Record the decl.  Will be used by rs6000_builtin_mask_for_load.  */
  altivec_builtin_mask_for_load = decl;
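
  /* Illustrative sketch (not part of the original source): the vectorizer
     uses this decl when it realigns a misaligned load, conceptually

	 mask = __builtin_altivec_mask_for_load (addr);
	 lo   = vec_ld (0, addr);
	 hi   = vec_ld (15, addr);
	 val  = vec_perm (lo, hi, mask);

     The exact realignment sequence is chosen by the middle end; this is
     only a sketch of how the recorded decl is consumed.  */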
  /* Access to the vec_init patterns.  */
  ftype = build_function_type_list (V4SI_type_node, integer_type_node,
				    integer_type_node, integer_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);

  ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);

  ftype = build_function_type_list (V16QI_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v16qi", ftype,
	       ALTIVEC_BUILTIN_VEC_INIT_V16QI);

  ftype = build_function_type_list (V4SF_type_node, float_type_node,
				    float_type_node, float_type_node,
				    float_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);

  /* VSX builtins.  */
  ftype = build_function_type_list (V2DF_type_node, double_type_node,
				    double_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);

  ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
				    intDI_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);

  /* Access to the vec_set patterns.  */
  ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
				    intSI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);

  ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
				    intHI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);

  ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
				    intQI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);

  ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
				    float_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);

  ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
				    double_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);

  ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
				    intDI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
  /* Access to the vec_extract patterns.  */
  ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);

  ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);

  ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);

  ftype = build_function_type_list (float_type_node, V4SF_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);

  ftype = build_function_type_list (double_type_node, V2DF_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);

  ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
  if (V1TI_type_node)
    {
      tree v1ti_ftype_long_pcvoid
	= build_function_type_list (V1TI_type_node,
				    long_integer_type_node, pcvoid_type_node,
				    NULL_TREE);
      tree void_ftype_v1ti_long_pvoid
	= build_function_type_list (void_type_node,
				    V1TI_type_node, long_integer_type_node,
				    pvoid_type_node, NULL_TREE);
      def_builtin ("__builtin_vsx_ld_elemrev_v1ti", v1ti_ftype_long_pcvoid,
		   VSX_BUILTIN_LD_ELEMREV_V1TI);
      def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
		   VSX_BUILTIN_LXVD2X_V1TI);
      def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
		   VSX_BUILTIN_STXVD2X_V1TI);
      ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
					NULL_TREE, NULL_TREE);
      def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
      ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
					intTI_type_node,
					integer_type_node, NULL_TREE);
      def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
      ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
					integer_type_node, NULL_TREE);
      def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
    }
}
static void
htm_init_builtins (void)
{
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
  const struct builtin_description *d;
  size_t i;

  d = bdesc_htm;
  for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
    {
      tree op[MAX_HTM_OPERANDS], type;
      HOST_WIDE_INT mask = d->mask;
      unsigned attr = rs6000_builtin_info[d->code].attr;
      bool void_func = (attr & RS6000_BTC_VOID);
      int attr_args = (attr & RS6000_BTC_TYPE_MASK);
      int nopnds = 0;
      tree gpr_type_node;
      tree rettype;
      tree argtype;

      /* It is expected that these htm built-in functions may have
	 d->icode equal to CODE_FOR_nothing.  */

      if (TARGET_32BIT && TARGET_POWERPC64)
	gpr_type_node = long_long_unsigned_type_node;
      else
	gpr_type_node = long_unsigned_type_node;

      if (attr & RS6000_BTC_SPR)
	{
	  rettype = gpr_type_node;
	  argtype = gpr_type_node;
	}
      else if (d->code == HTM_BUILTIN_TABORTDC
	       || d->code == HTM_BUILTIN_TABORTDCI)
	{
	  rettype = unsigned_type_node;
	  argtype = gpr_type_node;
	}
      else
	{
	  rettype = unsigned_type_node;
	  argtype = unsigned_type_node;
	}

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
	  continue;
	}

      if (d->name == 0)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
		     (long unsigned) i);
	  continue;
	}

      op[nopnds++] = (void_func) ? void_type_node : rettype;

      if (attr_args == RS6000_BTC_UNARY)
	op[nopnds++] = argtype;
      else if (attr_args == RS6000_BTC_BINARY)
	{
	  op[nopnds++] = argtype;
	  op[nopnds++] = argtype;
	}
      else if (attr_args == RS6000_BTC_TERNARY)
	{
	  op[nopnds++] = argtype;
	  op[nopnds++] = argtype;
	  op[nopnds++] = argtype;
	}

      switch (nopnds)
	{
	case 1:
	  type = build_function_type_list (op[0], NULL_TREE);
	  break;
	case 2:
	  type = build_function_type_list (op[0], op[1], NULL_TREE);
	  break;
	case 3:
	  type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
	  break;
	case 4:
	  type = build_function_type_list (op[0], op[1], op[2], op[3],
					   NULL_TREE);
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->name, type, d->code);
    }
}
/* Hash function for builtin functions with up to 3 arguments and a return
   type.  */

hashval_t
builtin_hasher::hash (builtin_hash_struct *bh)
{
  unsigned ret = 0;
  int i;

  for (i = 0; i < 4; i++)
    {
      ret = (ret * (unsigned) MAX_MACHINE_MODE) + ((unsigned) bh->mode[i]);
      ret = (ret * 2) + bh->uns_p[i];
    }

  return ret;
}
/* Compare builtin hash entries H1 and H2 for equivalence.  */

bool
builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
{
  return ((p1->mode[0] == p2->mode[0])
	  && (p1->mode[1] == p2->mode[1])
	  && (p1->mode[2] == p2->mode[2])
	  && (p1->mode[3] == p2->mode[3])
	  && (p1->uns_p[0] == p2->uns_p[0])
	  && (p1->uns_p[1] == p2->uns_p[1])
	  && (p1->uns_p[2] == p2->uns_p[2])
	  && (p1->uns_p[3] == p2->uns_p[3]));
}
/* Map types for builtin functions with an explicit return type and up to 3
   arguments.  Functions with fewer than 3 arguments use VOIDmode as the type
   of the argument.  */

static tree
builtin_function_type (machine_mode mode_ret, machine_mode mode_arg0,
		       machine_mode mode_arg1, machine_mode mode_arg2,
		       enum rs6000_builtins builtin, const char *name)
{
  struct builtin_hash_struct h;
  struct builtin_hash_struct *h2;
  int num_args = 3;
  int i;
  tree ret_type = NULL_TREE;
  tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };

  /* Create builtin_hash_table.  */
  if (builtin_hash_table == NULL)
    builtin_hash_table = hash_table<builtin_hasher>::create_ggc (1500);

  h.type = NULL_TREE;
  h.mode[0] = mode_ret;
  h.mode[1] = mode_arg0;
  h.mode[2] = mode_arg1;
  h.mode[3] = mode_arg2;
  h.uns_p[0] = 0;
  h.uns_p[1] = 0;
  h.uns_p[2] = 0;
  h.uns_p[3] = 0;

  /* If the builtin is a type that produces unsigned results or takes unsigned
     arguments, and it is returned as a decl for the vectorizer (such as
     widening multiplies, permute), make sure the arguments and return value
     are type correct.  */
  switch (builtin)
    {
      /* unsigned 1 argument functions.  */
    case CRYPTO_BUILTIN_VSBOX:
    case P8V_BUILTIN_VGBBD:
    case MISC_BUILTIN_CDTBCD:
    case MISC_BUILTIN_CBCDTD:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      break;

      /* unsigned 2 argument functions.  */
    case ALTIVEC_BUILTIN_VMULEUB:
    case ALTIVEC_BUILTIN_VMULEUH:
    case P8V_BUILTIN_VMULEUW:
    case ALTIVEC_BUILTIN_VMULOUB:
    case ALTIVEC_BUILTIN_VMULOUH:
    case P8V_BUILTIN_VMULOUW:
    case CRYPTO_BUILTIN_VCIPHER:
    case CRYPTO_BUILTIN_VCIPHERLAST:
    case CRYPTO_BUILTIN_VNCIPHER:
    case CRYPTO_BUILTIN_VNCIPHERLAST:
    case CRYPTO_BUILTIN_VPMSUMB:
    case CRYPTO_BUILTIN_VPMSUMH:
    case CRYPTO_BUILTIN_VPMSUMW:
    case CRYPTO_BUILTIN_VPMSUMD:
    case CRYPTO_BUILTIN_VPMSUM:
    case MISC_BUILTIN_ADDG6S:
    case MISC_BUILTIN_DIVWEU:
    case MISC_BUILTIN_DIVDEU:
    case VSX_BUILTIN_UDIV_V2DI:
    case ALTIVEC_BUILTIN_VMAXUB:
    case ALTIVEC_BUILTIN_VMINUB:
    case ALTIVEC_BUILTIN_VMAXUH:
    case ALTIVEC_BUILTIN_VMINUH:
    case ALTIVEC_BUILTIN_VMAXUW:
    case ALTIVEC_BUILTIN_VMINUW:
    case P8V_BUILTIN_VMAXUD:
    case P8V_BUILTIN_VMINUD:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      break;

      /* unsigned 3 argument functions.  */
    case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
    case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
    case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
    case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
    case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
    case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
    case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
    case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
    case VSX_BUILTIN_VPERM_16QI_UNS:
    case VSX_BUILTIN_VPERM_8HI_UNS:
    case VSX_BUILTIN_VPERM_4SI_UNS:
    case VSX_BUILTIN_VPERM_2DI_UNS:
    case VSX_BUILTIN_XXSEL_16QI_UNS:
    case VSX_BUILTIN_XXSEL_8HI_UNS:
    case VSX_BUILTIN_XXSEL_4SI_UNS:
    case VSX_BUILTIN_XXSEL_2DI_UNS:
    case CRYPTO_BUILTIN_VPERMXOR:
    case CRYPTO_BUILTIN_VPERMXOR_V2DI:
    case CRYPTO_BUILTIN_VPERMXOR_V4SI:
    case CRYPTO_BUILTIN_VPERMXOR_V8HI:
    case CRYPTO_BUILTIN_VPERMXOR_V16QI:
    case CRYPTO_BUILTIN_VSHASIGMAW:
    case CRYPTO_BUILTIN_VSHASIGMAD:
    case CRYPTO_BUILTIN_VSHASIGMA:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      h.uns_p[3] = 1;
      break;

      /* signed permute functions with unsigned char mask.  */
    case ALTIVEC_BUILTIN_VPERM_16QI:
    case ALTIVEC_BUILTIN_VPERM_8HI:
    case ALTIVEC_BUILTIN_VPERM_4SI:
    case ALTIVEC_BUILTIN_VPERM_4SF:
    case ALTIVEC_BUILTIN_VPERM_2DI:
    case ALTIVEC_BUILTIN_VPERM_2DF:
    case VSX_BUILTIN_VPERM_16QI:
    case VSX_BUILTIN_VPERM_8HI:
    case VSX_BUILTIN_VPERM_4SI:
    case VSX_BUILTIN_VPERM_4SF:
    case VSX_BUILTIN_VPERM_2DI:
    case VSX_BUILTIN_VPERM_2DF:
      h.uns_p[3] = 1;
      break;

      /* unsigned args, signed return.  */
    case VSX_BUILTIN_XVCVUXDSP:
    case VSX_BUILTIN_XVCVUXDDP_UNS:
    case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
      h.uns_p[1] = 1;
      break;

      /* signed args, unsigned return.  */
    case VSX_BUILTIN_XVCVDPUXDS_UNS:
    case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
    case MISC_BUILTIN_UNPACK_TD:
    case MISC_BUILTIN_UNPACK_V1TI:
      h.uns_p[0] = 1;
      break;

      /* unsigned arguments, bool return (compares).  */
    case ALTIVEC_BUILTIN_VCMPEQUB:
    case ALTIVEC_BUILTIN_VCMPEQUH:
    case ALTIVEC_BUILTIN_VCMPEQUW:
    case P8V_BUILTIN_VCMPEQUD:
    case VSX_BUILTIN_CMPGE_U16QI:
    case VSX_BUILTIN_CMPGE_U8HI:
    case VSX_BUILTIN_CMPGE_U4SI:
    case VSX_BUILTIN_CMPGE_U2DI:
    case ALTIVEC_BUILTIN_VCMPGTUB:
    case ALTIVEC_BUILTIN_VCMPGTUH:
    case ALTIVEC_BUILTIN_VCMPGTUW:
    case P8V_BUILTIN_VCMPGTUD:
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      break;

      /* unsigned arguments for 128-bit pack instructions.  */
    case MISC_BUILTIN_PACK_TD:
    case MISC_BUILTIN_PACK_V1TI:
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      break;

      /* unsigned second arguments (vector shift right).  */
    case ALTIVEC_BUILTIN_VSRB:
    case ALTIVEC_BUILTIN_VSRH:
    case ALTIVEC_BUILTIN_VSRW:
    case P8V_BUILTIN_VSRD:
      h.uns_p[2] = 1;
      break;

    default:
      break;
    }

  /* Figure out how many args are present.  */
  while (num_args > 0 && h.mode[num_args] == VOIDmode)
    num_args--;

  ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
  if (!ret_type && h.uns_p[0])
    ret_type = builtin_mode_to_type[h.mode[0]][0];

  if (!ret_type)
    fatal_error (input_location,
		 "internal error: builtin function %qs had an unexpected "
		 "return type %qs", name, GET_MODE_NAME (h.mode[0]));

  for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
    arg_type[i] = NULL_TREE;

  for (i = 0; i < num_args; i++)
    {
      int m = (int) h.mode[i+1];
      int uns_p = h.uns_p[i+1];

      arg_type[i] = builtin_mode_to_type[m][uns_p];
      if (!arg_type[i] && uns_p)
	arg_type[i] = builtin_mode_to_type[m][0];

      if (!arg_type[i])
	fatal_error (input_location,
		     "internal error: builtin function %qs, argument %d "
		     "had unexpected argument type %qs", name, i,
		     GET_MODE_NAME (m));
    }

  builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
  if (*found == NULL)
    {
      h2 = ggc_alloc<builtin_hash_struct> ();
      *h2 = h;
      *found = h2;

      h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
					   arg_type[2], NULL_TREE);
    }

  return (*found)->type;
}
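
/* Worked example (illustrative, not from the original source): for
   ALTIVEC_BUILTIN_VMULEUB, listed above as an "unsigned 2 argument"
   function, the insn modes are a V8HImode result and two V16QImode
   inputs, and all of the uns_p flags are set, so the cached type is
   effectively

       vector unsigned short (vector unsigned char,
			      vector unsigned char);

   i.e. the vectorizer sees type-correct unsigned operands instead of
   the default signed vector types.  */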
static void
rs6000_common_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;

  tree opaque_ftype_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;

  /* Create Altivec and VSX builtins on machines with at least the
     general purpose extensions (970 and newer) to allow the use of
     the target attribute.  */
  if (TARGET_EXTRA_BUILTINS)
    builtin_mask |= RS6000_BTM_COMMON;

  /* Add the ternary operators.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    {
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque_opaque_opaque))
	    type = opaque_ftype_opaque_opaque_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
			 (long unsigned) i);
	      continue;
	    }

	  if (icode == CODE_FOR_nothing)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
			 d->name);
	      continue;
	    }

	  type = builtin_function_type (insn_data[icode].operand[0].mode,
					insn_data[icode].operand[1].mode,
					insn_data[icode].operand[2].mode,
					insn_data[icode].operand[3].mode,
					d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }

  /* Add the binary operators.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    {
      machine_mode mode0, mode1, mode2;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque_opaque))
	    type = opaque_ftype_opaque_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
			 (long unsigned) i);
	      continue;
	    }

	  if (icode == CODE_FOR_nothing)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
			 d->name);
	      continue;
	    }

	  mode0 = insn_data[icode].operand[0].mode;
	  mode1 = insn_data[icode].operand[1].mode;
	  mode2 = insn_data[icode].operand[2].mode;

	  type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
					d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }

  /* Add the simple unary operators.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    {
      machine_mode mode0, mode1;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (! (type = opaque_ftype_opaque))
	    type = opaque_ftype_opaque
	      = build_function_type_list (opaque_V4SI_type_node,
					  opaque_V4SI_type_node,
					  NULL_TREE);
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
			 (long unsigned) i);
	      continue;
	    }

	  if (icode == CODE_FOR_nothing)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
			 d->name);
	      continue;
	    }

	  mode0 = insn_data[icode].operand[0].mode;
	  mode1 = insn_data[icode].operand[1].mode;

	  type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
					d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }

  /* Add the simple no-argument operators.  */
  d = bdesc_0arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_0arg); i++, d++)
    {
      machine_mode mode0;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
	{
	  if (TARGET_DEBUG_BUILTIN)
	    fprintf (stderr, "rs6000_builtin, skip no-argument %s\n", d->name);
	  continue;
	}

      if (rs6000_overloaded_builtin_p (d->code))
	{
	  if (!opaque_ftype_opaque)
	    opaque_ftype_opaque
	      = build_function_type_list (opaque_V4SI_type_node, NULL_TREE);
	  type = opaque_ftype_opaque;
	}
      else
	{
	  enum insn_code icode = d->icode;
	  if (d->name == 0)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr, "rs6000_builtin, bdesc_0arg[%lu] no name\n",
			 (long unsigned) i);
	      continue;
	    }

	  if (icode == CODE_FOR_nothing)
	    {
	      if (TARGET_DEBUG_BUILTIN)
		fprintf (stderr,
			 "rs6000_builtin, skip no-argument %s (no code)\n",
			 d->name);
	      continue;
	    }

	  mode0 = insn_data[icode].operand[0].mode;
	  type = builtin_function_type (mode0, VOIDmode, VOIDmode, VOIDmode,
					d->code, d->name);
	}

      def_builtin (d->name, type, d->code);
    }
}
/* Set up AIX/Darwin/64-bit Linux quad floating point routines.  */
static void
init_float128_ibm (machine_mode mode)
{
  if (!TARGET_XL_COMPAT)
    {
      set_optab_libfunc (add_optab, mode, "__gcc_qadd");
      set_optab_libfunc (sub_optab, mode, "__gcc_qsub");
      set_optab_libfunc (smul_optab, mode, "__gcc_qmul");
      set_optab_libfunc (sdiv_optab, mode, "__gcc_qdiv");

      if (!TARGET_HARD_FLOAT)
	{
	  set_optab_libfunc (neg_optab, mode, "__gcc_qneg");
	  set_optab_libfunc (eq_optab, mode, "__gcc_qeq");
	  set_optab_libfunc (ne_optab, mode, "__gcc_qne");
	  set_optab_libfunc (gt_optab, mode, "__gcc_qgt");
	  set_optab_libfunc (ge_optab, mode, "__gcc_qge");
	  set_optab_libfunc (lt_optab, mode, "__gcc_qlt");
	  set_optab_libfunc (le_optab, mode, "__gcc_qle");
	  set_optab_libfunc (unord_optab, mode, "__gcc_qunord");

	  set_conv_libfunc (sext_optab, mode, SFmode, "__gcc_stoq");
	  set_conv_libfunc (sext_optab, mode, DFmode, "__gcc_dtoq");
	  set_conv_libfunc (trunc_optab, SFmode, mode, "__gcc_qtos");
	  set_conv_libfunc (trunc_optab, DFmode, mode, "__gcc_qtod");
	  set_conv_libfunc (sfix_optab, SImode, mode, "__gcc_qtoi");
	  set_conv_libfunc (ufix_optab, SImode, mode, "__gcc_qtou");
	  set_conv_libfunc (sfloat_optab, mode, SImode, "__gcc_itoq");
	  set_conv_libfunc (ufloat_optab, mode, SImode, "__gcc_utoq");
	}
    }
  else
    {
      set_optab_libfunc (add_optab, mode, "_xlqadd");
      set_optab_libfunc (sub_optab, mode, "_xlqsub");
      set_optab_libfunc (smul_optab, mode, "_xlqmul");
      set_optab_libfunc (sdiv_optab, mode, "_xlqdiv");
    }

  /* Add various conversions for IFmode to use the traditional TFmode
     names.  */
  if (mode == IFmode)
    {
      set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdtf2");
      set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddtf2");
      set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunctftd2");
      set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunctfsd2");
      set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunctfdd2");
      set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdtf2");

      if (TARGET_POWERPC64)
	{
	  set_conv_libfunc (sfix_optab, TImode, mode, "__fixtfti");
	  set_conv_libfunc (ufix_optab, TImode, mode, "__fixunstfti");
	  set_conv_libfunc (sfloat_optab, mode, TImode, "__floattitf");
	  set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntitf");
	}
    }
}
/* Create a decl for either complex long double multiply or complex long double
   divide when long double is IEEE 128-bit floating point.  We can't use
   __multc3 and __divtc3 because the original long double using IBM extended
   double used those names.  The complex multiply/divide functions are encoded
   as builtin functions with a complex result and 4 scalar inputs.  */

static void
create_complex_muldiv (const char *name, built_in_function fncode, tree fntype)
{
  tree fndecl = add_builtin_function (name, fntype, fncode, BUILT_IN_NORMAL,
				      name, NULL_TREE);

  set_builtin_decl (fncode, fndecl, true);

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "create complex %s, fncode: %d\n", name, (int) fncode);

  return;
}
/* Set up IEEE 128-bit floating point routines.  Use different names if the
   arguments can be passed in a vector register.  The historical PowerPC
   implementation of IEEE 128-bit floating point used _q_<op> for the names, so
   continue to use that if we aren't using vector registers to pass IEEE
   128-bit floating point.  */

static void
init_float128_ieee (machine_mode mode)
{
  if (FLOAT128_VECTOR_P (mode))
    {
      static bool complex_muldiv_init_p = false;

      /* Set up to call __mulkc3 and __divkc3 under -mabi=ieeelongdouble.  If
	 we have clone or target attributes, this will be called a second
	 time.  We want to create the built-in function only once.  */
      if (mode == TFmode && TARGET_IEEEQUAD && !complex_muldiv_init_p)
	{
	  complex_muldiv_init_p = true;
	  built_in_function fncode_mul =
	    (built_in_function) (BUILT_IN_COMPLEX_MUL_MIN + TCmode
				 - MIN_MODE_COMPLEX_FLOAT);
	  built_in_function fncode_div =
	    (built_in_function) (BUILT_IN_COMPLEX_DIV_MIN + TCmode
				 - MIN_MODE_COMPLEX_FLOAT);

	  tree fntype = build_function_type_list (complex_long_double_type_node,
						  long_double_type_node,
						  long_double_type_node,
						  long_double_type_node,
						  long_double_type_node,
						  NULL_TREE);

	  create_complex_muldiv ("__mulkc3", fncode_mul, fntype);
	  create_complex_muldiv ("__divkc3", fncode_div, fntype);
	}

      set_optab_libfunc (add_optab, mode, "__addkf3");
      set_optab_libfunc (sub_optab, mode, "__subkf3");
      set_optab_libfunc (neg_optab, mode, "__negkf2");
      set_optab_libfunc (smul_optab, mode, "__mulkf3");
      set_optab_libfunc (sdiv_optab, mode, "__divkf3");
      set_optab_libfunc (sqrt_optab, mode, "__sqrtkf2");
      set_optab_libfunc (abs_optab, mode, "__abskf2");
      set_optab_libfunc (powi_optab, mode, "__powikf2");

      set_optab_libfunc (eq_optab, mode, "__eqkf2");
      set_optab_libfunc (ne_optab, mode, "__nekf2");
      set_optab_libfunc (gt_optab, mode, "__gtkf2");
      set_optab_libfunc (ge_optab, mode, "__gekf2");
      set_optab_libfunc (lt_optab, mode, "__ltkf2");
      set_optab_libfunc (le_optab, mode, "__lekf2");
      set_optab_libfunc (unord_optab, mode, "__unordkf2");

      set_conv_libfunc (sext_optab, mode, SFmode, "__extendsfkf2");
      set_conv_libfunc (sext_optab, mode, DFmode, "__extenddfkf2");
      set_conv_libfunc (trunc_optab, SFmode, mode, "__trunckfsf2");
      set_conv_libfunc (trunc_optab, DFmode, mode, "__trunckfdf2");

      set_conv_libfunc (sext_optab, mode, IFmode, "__trunctfkf2");
      if (mode != TFmode && FLOAT128_IBM_P (TFmode))
	set_conv_libfunc (sext_optab, mode, TFmode, "__trunctfkf2");

      set_conv_libfunc (trunc_optab, IFmode, mode, "__extendkftf2");
      if (mode != TFmode && FLOAT128_IBM_P (TFmode))
	set_conv_libfunc (trunc_optab, TFmode, mode, "__extendkftf2");

      set_conv_libfunc (sext_optab, mode, SDmode, "__dpd_extendsdkf2");
      set_conv_libfunc (sext_optab, mode, DDmode, "__dpd_extendddkf2");
      set_conv_libfunc (trunc_optab, mode, TDmode, "__dpd_trunckftd2");
      set_conv_libfunc (trunc_optab, SDmode, mode, "__dpd_trunckfsd2");
      set_conv_libfunc (trunc_optab, DDmode, mode, "__dpd_trunckfdd2");
      set_conv_libfunc (sext_optab, TDmode, mode, "__dpd_extendtdkf2");

      set_conv_libfunc (sfix_optab, SImode, mode, "__fixkfsi");
      set_conv_libfunc (ufix_optab, SImode, mode, "__fixunskfsi");
      set_conv_libfunc (sfix_optab, DImode, mode, "__fixkfdi");
      set_conv_libfunc (ufix_optab, DImode, mode, "__fixunskfdi");

      set_conv_libfunc (sfloat_optab, mode, SImode, "__floatsikf");
      set_conv_libfunc (ufloat_optab, mode, SImode, "__floatunsikf");
      set_conv_libfunc (sfloat_optab, mode, DImode, "__floatdikf");
      set_conv_libfunc (ufloat_optab, mode, DImode, "__floatundikf");

      if (TARGET_POWERPC64)
	{
	  set_conv_libfunc (sfix_optab, TImode, mode, "__fixkfti");
	  set_conv_libfunc (ufix_optab, TImode, mode, "__fixunskfti");
	  set_conv_libfunc (sfloat_optab, mode, TImode, "__floattikf");
	  set_conv_libfunc (ufloat_optab, mode, TImode, "__floatuntikf");
	}
    }
  else
    {
      set_optab_libfunc (add_optab, mode, "_q_add");
      set_optab_libfunc (sub_optab, mode, "_q_sub");
      set_optab_libfunc (neg_optab, mode, "_q_neg");
      set_optab_libfunc (smul_optab, mode, "_q_mul");
      set_optab_libfunc (sdiv_optab, mode, "_q_div");
      if (TARGET_PPC_GPOPT)
	set_optab_libfunc (sqrt_optab, mode, "_q_sqrt");

      set_optab_libfunc (eq_optab, mode, "_q_feq");
      set_optab_libfunc (ne_optab, mode, "_q_fne");
      set_optab_libfunc (gt_optab, mode, "_q_fgt");
      set_optab_libfunc (ge_optab, mode, "_q_fge");
      set_optab_libfunc (lt_optab, mode, "_q_flt");
      set_optab_libfunc (le_optab, mode, "_q_fle");

      set_conv_libfunc (sext_optab, mode, SFmode, "_q_stoq");
      set_conv_libfunc (sext_optab, mode, DFmode, "_q_dtoq");
      set_conv_libfunc (trunc_optab, SFmode, mode, "_q_qtos");
      set_conv_libfunc (trunc_optab, DFmode, mode, "_q_qtod");
      set_conv_libfunc (sfix_optab, SImode, mode, "_q_qtoi");
      set_conv_libfunc (ufix_optab, SImode, mode, "_q_qtou");
      set_conv_libfunc (sfloat_optab, mode, SImode, "_q_itoq");
      set_conv_libfunc (ufloat_optab, mode, SImode, "_q_utoq");
    }
}
static void
rs6000_init_libfuncs (void)
{
  /* __float128 support.  */
  if (TARGET_FLOAT128_TYPE)
    {
      init_float128_ibm (IFmode);
      init_float128_ieee (KFmode);
    }

  /* AIX/Darwin/64-bit Linux quad floating point routines.  */
  if (TARGET_LONG_DOUBLE_128)
    {
      if (!TARGET_IEEEQUAD)
	init_float128_ibm (TFmode);

      /* IEEE 128-bit including 32-bit SVR4 quad floating point routines.  */
      else
	init_float128_ieee (TFmode);
    }
}
/* Emit a potentially record-form instruction, setting DST from SRC.
   If DOT is 0, that is all; otherwise, set CCREG to the result of the
   signed comparison of DST with zero.  If DOT is 1, the generated RTL
   doesn't care about the DST result; if DOT is 2, it does.  If CCREG
   is CR0 do a single dot insn (as a PARALLEL); otherwise, do a SET and
   a separate COMPARE.  */

void
rs6000_emit_dot_insn (rtx dst, rtx src, int dot, rtx ccreg)
{
  if (dot == 0)
    {
      emit_move_insn (dst, src);
      return;
    }

  if (cc_reg_not_cr0_operand (ccreg, CCmode))
    {
      emit_move_insn (dst, src);
      emit_move_insn (ccreg, gen_rtx_COMPARE (CCmode, dst, const0_rtx));
      return;
    }

  rtx ccset = gen_rtx_SET (ccreg, gen_rtx_COMPARE (CCmode, src, const0_rtx));
  if (dot == 1)
    {
      rtx clobber = gen_rtx_CLOBBER (VOIDmode, dst);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, clobber)));
    }
  else
    {
      rtx set = gen_rtx_SET (dst, src);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, ccset, set)));
    }
}
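
/* Illustrative note (not from the original source): with DOT == 1 the
   PARALLEL pairs the CR0 compare with a clobber of DST, matching
   record-form patterns such as "andi. %0,%1,..." where only the
   condition bits are wanted; with DOT == 2 the SET of DST is kept
   alongside the compare, so both results stay live.  */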
/* A validation routine: say whether CODE, a condition code, and MODE
   match.  The other alternatives either don't make sense or should
   never be generated.  */

void
validate_condition_mode (enum rtx_code code, machine_mode mode)
{
  gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
	       || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	      && GET_MODE_CLASS (mode) == MODE_CC);

  /* These don't make sense.  */
  gcc_assert ((code != GT && code != LT && code != GE && code != LE)
	      || mode != CCUNSmode);

  gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
	      || mode == CCUNSmode);

  gcc_assert (mode == CCFPmode
	      || (code != ORDERED && code != UNORDERED
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT
		  && code != UNGE && code != UNLE));

  /* These should never be generated except for
     flag_finite_math_only.  */
  gcc_assert (mode != CCFPmode
	      || flag_finite_math_only
	      || (code != LE && code != GE
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT));

  /* These are invalid; the information is not there.  */
  gcc_assert (mode != CCEQmode || code == EQ || code == NE);
}
/* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm,
   rldicl, rldicr, or rldic instruction in mode MODE.  If so, if E is
   non-zero, store there the bit offset (counted from the right) where
   the single stretch of 1 bits begins; and similarly for B, the bit
   offset where it ends.  */

bool
rs6000_is_valid_mask (rtx mask, int *b, int *e, machine_mode mode)
{
  unsigned HOST_WIDE_INT val = INTVAL (mask);
  unsigned HOST_WIDE_INT bit;
  int nb, ne;
  int n = GET_MODE_PRECISION (mode);

  if (mode != DImode && mode != SImode)
    return false;

  if (INTVAL (mask) >= 0)
    {
      bit = val & -val;
      ne = exact_log2 (bit);
      nb = exact_log2 (val + bit);
    }
  else if (val + 1 == 0)
    {
      nb = n;
      ne = 0;
    }
  else if (val & 1)
    {
      val = ~val;
      bit = val & -val;
      nb = exact_log2 (bit);
      ne = exact_log2 (val + bit);
    }
  else
    {
      bit = val & -val;
      ne = exact_log2 (bit);
      if (val + bit == 0)
	nb = n;
      else
	nb = 0;
    }

  nb--;

  if (nb < 0 || ne < 0 || nb >= n || ne >= n)
    return false;

  if (b)
    *b = nb;
  if (e)
    *e = ne;

  return true;
}
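
/* Worked example (illustrative, not from the original source): for
   MASK == 0x0ff0 in DImode the single stretch of ones runs from bit 4
   through bit 11 counted from the right, so *E is set to 4 and *B to
   11.  A wrap-around SImode mask such as 0xff0000ff is also accepted
   (*B == 7, *E == 24), while 0x0ff00ff0 has two stretches and is
   rejected.  */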
/* Return whether MASK (a CONST_INT) is a valid mask for any rlwinm, rldicl,
   or rldicr instruction, to implement an AND with it in mode MODE.  */

bool
rs6000_is_valid_and_mask (rtx mask, machine_mode mode)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
    return false;

  /* For DImode, we need a rldicl, rldicr, or a rlwinm with mask that
     does not wrap.  */
  if (mode == DImode)
    return (ne == 0 || nb == 63 || (nb < 32 && ne <= nb));

  /* For SImode, rlwinm can do everything.  */
  if (mode == SImode)
    return (nb < 32 && ne < 32);

  return false;
}
/* Return the instruction template for an AND with mask in mode MODE, with
   operands OPERANDS.  If DOT is true, make it a record-form instruction.  */

const char *
rs6000_insn_for_and_mask (machine_mode mode, rtx *operands, bool dot)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (operands[2], &nb, &ne, mode))
    gcc_unreachable ();

  if (mode == DImode && ne == 0)
    {
      operands[3] = GEN_INT (63 - nb);
      if (dot)
	return "rldicl. %0,%1,0,%3";
      return "rldicl %0,%1,0,%3";
    }

  if (mode == DImode && nb == 63)
    {
      operands[3] = GEN_INT (63 - ne);
      if (dot)
	return "rldicr. %0,%1,0,%3";
      return "rldicr %0,%1,0,%3";
    }

  if (nb < 32 && ne < 32)
    {
      operands[3] = GEN_INT (31 - nb);
      operands[4] = GEN_INT (31 - ne);
      if (dot)
	return "rlwinm. %0,%1,0,%3,%4";
      return "rlwinm %0,%1,0,%3,%4";
    }

  gcc_unreachable ();
}
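
/* Worked example (illustrative, not from the original source): an AND
   with 0x0ff0 has nb == 11 and ne == 4, so the rlwinm arm above sets
   operands[3] = 31 - 11 = 20 and operands[4] = 31 - 4 = 27, producing
   "rlwinm %0,%1,0,20,27" (the mask begins at bit MB = 20 and ends at
   bit ME = 27 in big-endian bit numbering).  */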
/* Return whether MASK (a CONST_INT) is a valid mask for any rlw[i]nm,
   rld[i]cl, rld[i]cr, or rld[i]c instruction, to implement an AND with
   shift SHIFT (a ROTATE, ASHIFT, or LSHIFTRT) in mode MODE.  */

bool
rs6000_is_valid_shift_mask (rtx mask, rtx shift, machine_mode mode)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
    return false;

  int n = GET_MODE_PRECISION (mode);
  int sh = -1;

  if (CONST_INT_P (XEXP (shift, 1)))
    {
      sh = INTVAL (XEXP (shift, 1));
      if (sh < 0 || sh >= n)
	return false;
    }

  rtx_code code = GET_CODE (shift);

  /* Convert any shift by 0 to a rotate, to simplify below code.  */
  if (sh == 0)
    code = ROTATE;

  /* Convert rotate to simple shift if we can, to make analysis simpler.  */
  if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
    code = ASHIFT;
  if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
    {
      code = LSHIFTRT;
      sh = n - sh;
    }

  /* DImode rotates need rld*.  */
  if (mode == DImode && code == ROTATE)
    return (nb == 63 || ne == 0 || ne == sh);

  /* SImode rotates need rlw*.  */
  if (mode == SImode && code == ROTATE)
    return (nb < 32 && ne < 32 && sh < 32);

  /* Wrap-around masks are only okay for rotates.  */
  if (ne > nb)
    return false;

  /* Variable shifts are only okay for rotates.  */
  if (sh < 0)
    return false;

  /* Don't allow ASHIFT if the mask is wrong for that.  */
  if (code == ASHIFT && ne < sh)
    return false;

  /* If we can do it with an rlw*, we can do it.  Don't allow LSHIFTRT
     if the mask is wrong for that.  */
  if (nb < 32 && ne < 32 && sh < 32
      && !(code == LSHIFTRT && nb >= 32 - sh))
    return true;

  /* If we can do it with an rld*, we can do it.  Don't allow LSHIFTRT
     if the mask is wrong for that.  */
  if (code == LSHIFTRT)
    sh = 64 - sh;
  if (nb == 63 || ne == 0 || ne == sh)
    return !(code == LSHIFTRT && nb >= sh);

  return false;
}
/* Return the instruction template for a shift with mask in mode MODE, with
   operands OPERANDS.  If DOT is true, make it a record-form instruction.  */

const char *
rs6000_insn_for_shift_mask (machine_mode mode, rtx *operands, bool dot)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
    gcc_unreachable ();

  if (mode == DImode && ne == 0)
    {
      if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
	operands[2] = GEN_INT (64 - INTVAL (operands[2]));
      operands[3] = GEN_INT (63 - nb);
      if (dot)
	return "rld%I2cl. %0,%1,%2,%3";
      return "rld%I2cl %0,%1,%2,%3";
    }

  if (mode == DImode && nb == 63)
    {
      operands[3] = GEN_INT (63 - ne);
      if (dot)
	return "rld%I2cr. %0,%1,%2,%3";
      return "rld%I2cr %0,%1,%2,%3";
    }

  if (mode == DImode
      && GET_CODE (operands[4]) != LSHIFTRT
      && CONST_INT_P (operands[2])
      && ne == INTVAL (operands[2]))
    {
      operands[3] = GEN_INT (63 - nb);
      if (dot)
	return "rld%I2c. %0,%1,%2,%3";
      return "rld%I2c %0,%1,%2,%3";
    }

  if (nb < 32 && ne < 32)
    {
      if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
	operands[2] = GEN_INT (32 - INTVAL (operands[2]));
      operands[3] = GEN_INT (31 - nb);
      operands[4] = GEN_INT (31 - ne);
      /* This insn can also be a 64-bit rotate with mask that really makes
	 it just a shift right (with mask); the %h below are to adjust for
	 that situation (shift count is >= 32 in that case).  */
      if (dot)
	return "rlw%I2nm. %0,%1,%h2,%3,%4";
      return "rlw%I2nm %0,%1,%h2,%3,%4";
    }

  gcc_unreachable ();
}
/* Return whether MASK (a CONST_INT) is a valid mask for any rlwimi or
   rldimi instruction, to implement an insert with shift SHIFT (a ROTATE,
   ASHIFT, or LSHIFTRT) in mode MODE.  */

bool
rs6000_is_valid_insert_mask (rtx mask, rtx shift, machine_mode mode)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (mask, &nb, &ne, mode))
    return false;

  int n = GET_MODE_PRECISION (mode);

  int sh = INTVAL (XEXP (shift, 1));
  if (sh < 0 || sh >= n)
    return false;

  rtx_code code = GET_CODE (shift);

  /* Convert any shift by 0 to a rotate, to simplify below code.  */
  if (sh == 0)
    code = ROTATE;

  /* Convert rotate to simple shift if we can, to make analysis simpler.  */
  if (code == ROTATE && sh >= 0 && nb >= ne && ne >= sh)
    code = ASHIFT;
  if (code == ROTATE && sh >= 0 && nb >= ne && nb < sh)
    {
      code = LSHIFTRT;
      sh = n - sh;
    }

  /* DImode rotates need rldimi.  */
  if (mode == DImode && code == ROTATE)
    return (ne == sh);

  /* SImode rotates need rlwimi.  */
  if (mode == SImode && code == ROTATE)
    return (nb < 32 && ne < 32 && sh < 32);

  /* Wrap-around masks are only okay for rotates.  */
  if (ne > nb)
    return false;

  /* Don't allow ASHIFT if the mask is wrong for that.  */
  if (code == ASHIFT && ne < sh)
    return false;

  /* If we can do it with an rlwimi, we can do it.  Don't allow LSHIFTRT
     if the mask is wrong for that.  */
  if (nb < 32 && ne < 32 && sh < 32
      && !(code == LSHIFTRT && nb >= 32 - sh))
    return true;

  /* If we can do it with an rldimi, we can do it.  Don't allow LSHIFTRT
     if the mask is wrong for that.  */
  if (code == LSHIFTRT)
    sh = 64 - sh;
  if (ne == sh)
    return !(code == LSHIFTRT && nb >= sh);

  return false;
}
/* Return the instruction template for an insert with mask in mode MODE, with
   operands OPERANDS.  If DOT is true, make it a record-form instruction.  */

const char *
rs6000_insn_for_insert_mask (machine_mode mode, rtx *operands, bool dot)
{
  int nb, ne;

  if (!rs6000_is_valid_mask (operands[3], &nb, &ne, mode))
    gcc_unreachable ();

  /* Prefer rldimi because rlwimi is cracked.  */
  if (TARGET_POWERPC64
      && (!dot || mode == DImode)
      && GET_CODE (operands[4]) != LSHIFTRT
      && ne == INTVAL (operands[2]))
    {
      operands[3] = GEN_INT (63 - nb);
      if (dot)
	return "rldimi. %0,%1,%2,%3";
      return "rldimi %0,%1,%2,%3";
    }

  if (nb < 32 && ne < 32)
    {
      if (GET_CODE (operands[4]) == LSHIFTRT && INTVAL (operands[2]))
	operands[2] = GEN_INT (32 - INTVAL (operands[2]));
      operands[3] = GEN_INT (31 - nb);
      operands[4] = GEN_INT (31 - ne);
      if (dot)
	return "rlwimi. %0,%1,%2,%3,%4";
      return "rlwimi %0,%1,%2,%3,%4";
    }

  gcc_unreachable ();
}
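
/* Worked example (illustrative, not from the original source): inserting
   a byte field as in (dst & ~0xff00) | ((src << 8) & 0xff00) has
   nb == 15 and ne == 8, giving MB = 31 - 15 = 16 and ME = 31 - 8 = 23,
   i.e. "rlwimi %0,%1,8,16,23".  */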
/* Return whether an AND with C (a CONST_INT) in mode MODE can be done
   using two machine instructions.  */

bool
rs6000_is_valid_2insn_and (rtx c, machine_mode mode)
{
  /* There are two kinds of AND we can handle with two insns:
     1) those we can do with two rl* insn;
     2) ori[s];xori[s].

     We do not handle that last case yet.  */

  /* If there is just one stretch of ones, we can do it.  */
  if (rs6000_is_valid_mask (c, NULL, NULL, mode))
    return true;

  /* Otherwise, fill in the lowest "hole"; if we can do the result with
     one insn, we can do the whole thing with two.  */
  unsigned HOST_WIDE_INT val = INTVAL (c);
  unsigned HOST_WIDE_INT bit1 = val & -val;
  unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
  unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
  unsigned HOST_WIDE_INT bit3 = val1 & -val1;
  return rs6000_is_valid_and_mask (GEN_INT (val + bit3 - bit2), mode);
}
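
/* Worked example (illustrative, not from the original source): for
   C == 0x00ff0ff0 the single-mask test fails (two stretches of ones).
   The hole-filling arithmetic gives bit1 == 0x10, bit2 == 0x1000,
   val1 == 0x00ff0000 and bit3 == 0x00010000, so the filled constant is
   0x00ff0ff0 + 0x00010000 - 0x1000 == 0x00fffff0, a single stretch,
   and the AND can be done in two rl* instructions.  */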
/* Emit the two insns to do an AND in mode MODE, with operands OPERANDS.
   If EXPAND is true, split rotate-and-mask instructions we generate to
   their constituent parts as well (this is used during expand); if DOT
   is 1, make the last insn a record-form instruction clobbering the
   destination GPR and setting the CC reg (from operands[3]); if 2, set
   that GPR as well as the CC reg.  */

void
rs6000_emit_2insn_and (machine_mode mode, rtx *operands, bool expand, int dot)
{
  gcc_assert (!(expand && dot));

  unsigned HOST_WIDE_INT val = INTVAL (operands[2]);

  /* If it is one stretch of ones, it is DImode; shift left, mask, then
     shift right.  This generates better code than doing the masks without
     shifts, or shifting first right and then left.  */
  int nb, ne;
  if (rs6000_is_valid_mask (operands[2], &nb, &ne, mode) && nb >= ne)
    {
      gcc_assert (mode == DImode);

      int shift = 63 - nb;
      if (expand)
	{
	  rtx tmp1 = gen_reg_rtx (DImode);
	  rtx tmp2 = gen_reg_rtx (DImode);
	  emit_insn (gen_ashldi3 (tmp1, operands[1], GEN_INT (shift)));
	  emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (val << shift)));
	  emit_insn (gen_lshrdi3 (operands[0], tmp2, GEN_INT (shift)));
	}
      else
	{
	  rtx tmp = gen_rtx_ASHIFT (mode, operands[1], GEN_INT (shift));
	  tmp = gen_rtx_AND (mode, tmp, GEN_INT (val << shift));
	  emit_move_insn (operands[0], tmp);
	  tmp = gen_rtx_LSHIFTRT (mode, operands[0], GEN_INT (shift));
	  rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
	}
      return;
    }

  /* Otherwise, make a mask2 that cuts out the lowest "hole", and a mask1
     that does the rest.  */
  unsigned HOST_WIDE_INT bit1 = val & -val;
  unsigned HOST_WIDE_INT bit2 = (val + bit1) & ~val;
  unsigned HOST_WIDE_INT val1 = (val + bit1) & val;
  unsigned HOST_WIDE_INT bit3 = val1 & -val1;

  unsigned HOST_WIDE_INT mask1 = -bit3 + bit2 - 1;
  unsigned HOST_WIDE_INT mask2 = val + bit3 - bit2;

  gcc_assert (rs6000_is_valid_and_mask (GEN_INT (mask2), mode));

  /* Two "no-rotate"-and-mask instructions, for SImode.  */
  if (rs6000_is_valid_and_mask (GEN_INT (mask1), mode))
    {
      gcc_assert (mode == SImode);

      rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
      rtx tmp = gen_rtx_AND (mode, operands[1], GEN_INT (mask1));
      emit_move_insn (reg, tmp);
      tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
      rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
      return;
    }

  gcc_assert (mode == DImode);

  /* Two "no-rotate"-and-mask instructions, for DImode: both are rlwinm
     insns; we have to do the first in SImode, because it wraps.  */
  if (mask2 <= 0xffffffff
      && rs6000_is_valid_and_mask (GEN_INT (mask1), SImode))
    {
      rtx reg = expand ? gen_reg_rtx (mode) : operands[0];
      rtx tmp = gen_rtx_AND (SImode, gen_lowpart (SImode, operands[1]),
			     GEN_INT (mask1));
      rtx reg_low = gen_lowpart (SImode, reg);
      emit_move_insn (reg_low, tmp);
      tmp = gen_rtx_AND (mode, reg, GEN_INT (mask2));
      rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
      return;
    }

  /* Two rld* insns: rotate, clear the hole in the middle (which now is
     at the top end), rotate back and clear the other hole.  */
  int right = exact_log2 (bit3);
  int left = 64 - right;

  /* Rotate the mask too.  */
  mask1 = (mask1 >> right) | ((bit2 - 1) << left);

  if (expand)
    {
      rtx tmp1 = gen_reg_rtx (DImode);
      rtx tmp2 = gen_reg_rtx (DImode);
      rtx tmp3 = gen_reg_rtx (DImode);
      emit_insn (gen_rotldi3 (tmp1, operands[1], GEN_INT (left)));
      emit_insn (gen_anddi3 (tmp2, tmp1, GEN_INT (mask1)));
      emit_insn (gen_rotldi3 (tmp3, tmp2, GEN_INT (right)));
      emit_insn (gen_anddi3 (operands[0], tmp3, GEN_INT (mask2)));
    }
  else
    {
      rtx tmp = gen_rtx_ROTATE (mode, operands[1], GEN_INT (left));
      tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask1));
      emit_move_insn (operands[0], tmp);
      tmp = gen_rtx_ROTATE (mode, operands[0], GEN_INT (right));
      tmp = gen_rtx_AND (mode, tmp, GEN_INT (mask2));
      rs6000_emit_dot_insn (operands[0], tmp, dot, dot ? operands[3] : 0);
    }
}
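
/* Summary of the strategies above (illustrative, not from the original
   source): a single DImode stretch is done as shift left / AND / shift
   right; a constant whose two halves fit rlwinm-style masks is done as
   two plain ANDs; anything else is rotated so the lowest hole moves to
   the top, masked, rotated back, and masked again, giving two rld*
   machine instructions in every case.  */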
/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
   for lfq and stfq insns iff the registers are hard registers.  */

int
registers_ok_for_quad_peep (rtx reg1, rtx reg2)
{
  /* We might have been passed a SUBREG.  */
  if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
    return 0;

  /* We might have been passed non floating point registers.  */
  if (!FP_REGNO_P (REGNO (reg1))
      || !FP_REGNO_P (REGNO (reg2)))
    return 0;

  return (REGNO (reg1) == REGNO (reg2) - 1);
}
/* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
   addr1 and addr2 must be in consecutive memory locations
   (addr2 == addr1 + 8).  */

int
mems_ok_for_quad_peep (rtx mem1, rtx mem2)
{
  rtx addr1, addr2;
  unsigned int reg1, reg2;
  int offset1, offset2;

  /* The mems cannot be volatile.  */
  if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
    return 0;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  /* Extract an offset (if used) from the first addr.  */
  if (GET_CODE (addr1) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr1, 0)) != REG)
	return 0;
      else
	{
	  reg1 = REGNO (XEXP (addr1, 0));
	  /* The offset must be constant!  */
	  if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
	    return 0;
	  offset1 = INTVAL (XEXP (addr1, 1));
	}
    }
  else if (GET_CODE (addr1) != REG)
    return 0;
  else
    {
      reg1 = REGNO (addr1);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset1 = 0;
    }

  /* And now for the second addr.  */
  if (GET_CODE (addr2) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr2, 0)) != REG)
	return 0;
      else
	{
	  reg2 = REGNO (XEXP (addr2, 0));
	  /* The offset must be constant.  */
	  if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
	    return 0;
	  offset2 = INTVAL (XEXP (addr2, 1));
	}
    }
  else if (GET_CODE (addr2) != REG)
    return 0;
  else
    {
      reg2 = REGNO (addr2);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset2 = 0;
    }

  /* Both of these must have the same base register.  */
  if (reg1 != reg2)
    return 0;

  /* The offset for the second addr must be 8 more than the first addr.  */
  if (offset2 != offset1 + 8)
    return 0;

  /* All the tests passed.  addr1 and addr2 are valid for lfq or stfq
     instructions.  */
  return 1;
}
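
/* Illustrative example (not from the original source): the address pair
   (mem:DF (reg 9)) and (mem:DF (plus (reg 9) (const_int 8))) passes all
   of the checks above (same base register, offsets 0 and 8), so the
   peephole may combine the two adjacent 8-byte accesses into a single
   lfq/stfq.  */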
/* Implement TARGET_SECONDARY_MEMORY_NEEDED_MODE.  For SDmode values we
   need to use DDmode, in all other cases we can use the same mode.  */
static machine_mode
rs6000_secondary_memory_needed_mode (machine_mode mode)
{
  if (lra_in_progress && mode == SDmode)
    return DDmode;
  return mode;
}
/* Classify a register type.  Because the FMRGOW/FMRGEW instructions only work
   on traditional floating point registers, and the VMRGOW/VMRGEW instructions
   only work on the traditional altivec registers, note if an altivec register
   was chosen.  */

static enum rs6000_reg_type
register_to_reg_type (rtx reg, bool *is_altivec)
{
  HOST_WIDE_INT regno;
  enum reg_class rclass;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (!REG_P (reg))
    return NO_REG_TYPE;

  regno = REGNO (reg);
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (!lra_in_progress && !reload_completed)
	return PSEUDO_REG_TYPE;

      regno = true_regnum (reg);
      if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
	return PSEUDO_REG_TYPE;
    }

  gcc_assert (regno >= 0);

  if (is_altivec && ALTIVEC_REGNO_P (regno))
    *is_altivec = true;

  rclass = rs6000_regno_regclass[regno];
  return reg_class_to_reg_type[(int) rclass];
}
/* Helper function to return the cost of adding a TOC entry address.  */

static inline int
rs6000_secondary_reload_toc_costs (addr_mask_type addr_mask)
{
  int ret;

  if (TARGET_CMODEL != CMODEL_SMALL)
    ret = ((addr_mask & RELOAD_REG_OFFSET) == 0) ? 1 : 2;
  else
    ret = (TARGET_MINIMAL_TOC) ? 6 : 3;

  return ret;
}
/* Helper function for rs6000_secondary_reload to determine whether the memory
   address (ADDR) with a given register class (RCLASS) and machine mode (MODE)
   needs reloading.  Return negative if the memory is not handled by the memory
   helper functions and to try a different reload method, 0 if no additional
   instructions are needed, and positive to give the extra cost for the
   memory.  */

static int
rs6000_secondary_reload_memory (rtx addr,
				enum reg_class rclass,
				machine_mode mode)
{
  int extra_cost = 0;
  rtx reg, and_arg, plus_arg0, plus_arg1;
  addr_mask_type addr_mask;
  const char *type = NULL;
  const char *fail_msg = NULL;

  if (GPR_REG_CLASS_P (rclass))
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];

  else if (rclass == FLOAT_REGS)
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];

  else if (rclass == ALTIVEC_REGS)
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];

  /* For the combined VSX_REGS, turn off Altivec AND -16.  */
  else if (rclass == VSX_REGS)
    addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_VMX]
		 & ~RELOAD_REG_AND_M16);

  /* If the register allocator hasn't made up its mind yet on the register
     class to use, settle on defaults to use.  */
  else if (rclass == NO_REGS)
    {
      addr_mask = (reg_addr[mode].addr_mask[RELOAD_REG_ANY]
		   & ~RELOAD_REG_AND_M16);

      if ((addr_mask & RELOAD_REG_MULTIPLE) != 0)
	addr_mask &= ~(RELOAD_REG_INDEXED
		       | RELOAD_REG_PRE_INCDEC
		       | RELOAD_REG_PRE_MODIFY);
    }

  else
    addr_mask = 0;

  /* If the register isn't valid in this register class, just return now.  */
  if ((addr_mask & RELOAD_REG_VALID) == 0)
    {
      if (TARGET_DEBUG_ADDR)
	{
	  fprintf (stderr,
		   "rs6000_secondary_reload_memory: mode = %s, class = %s, "
		   "not valid in class\n",
		   GET_MODE_NAME (mode), reg_class_names[rclass]);
	  debug_rtx (addr);
	}

      return -1;
    }

  switch (GET_CODE (addr))
    {
      /* Does the register class support auto update forms for this mode?  We
	 don't need a scratch register, since the powerpc only supports
	 PRE_INC, PRE_DEC, and PRE_MODIFY.  */
    case PRE_INC:
    case PRE_DEC:
      reg = XEXP (addr, 0);
      if (!base_reg_operand (addr, GET_MODE (reg)))
	{
	  fail_msg = "no base register #1";
	  extra_cost = -1;
	}

      else if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
	{
	  extra_cost = 1;
	  type = "update";
	}
      break;

    case PRE_MODIFY:
      reg = XEXP (addr, 0);
      plus_arg1 = XEXP (addr, 1);
      if (!base_reg_operand (reg, GET_MODE (reg))
	  || GET_CODE (plus_arg1) != PLUS
	  || !rtx_equal_p (reg, XEXP (plus_arg1, 0)))
	{
	  fail_msg = "bad PRE_MODIFY";
	  extra_cost = -1;
	}

      else if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
	{
	  extra_cost = 1;
	  type = "update";
	}
      break;

      /* Do we need to simulate AND -16 to clear the bottom address bits used
	 in VMX load/stores?  Only allow the AND for vector sizes.  */
    case AND:
      and_arg = XEXP (addr, 0);
      if (GET_MODE_SIZE (mode) != 16
	  || GET_CODE (XEXP (addr, 1)) != CONST_INT
	  || INTVAL (XEXP (addr, 1)) != -16)
	{
	  fail_msg = "bad Altivec AND #1";
	  extra_cost = -1;
	}

      if (rclass != ALTIVEC_REGS)
	{
	  if (legitimate_indirect_address_p (and_arg, false))
	    extra_cost = 1;

	  else if (legitimate_indexed_address_p (and_arg, false))
	    extra_cost = 2;

	  else
	    {
	      fail_msg = "bad Altivec AND #2";
	      extra_cost = -1;
	    }

	  type = "and";
	}
      break;

      /* If this is an indirect address, make sure it is a base register.  */
    case REG:
    case SUBREG:
      if (!legitimate_indirect_address_p (addr, false))
	{
	  extra_cost = 1;
	  type = "move";
	}
      break;

      /* If this is an indexed address, make sure the register class can handle
	 indexed addresses for this mode.  */
    case PLUS:
      plus_arg0 = XEXP (addr, 0);
      plus_arg1 = XEXP (addr, 1);

      /* (plus (plus (reg) (constant)) (constant)) is generated during
	 push_reload processing, so handle it now.  */
      if (GET_CODE (plus_arg0) == PLUS && CONST_INT_P (plus_arg1))
	{
	  if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	    {
	      extra_cost = 1;
	      type = "offset";
	    }
	}

      /* (plus (plus (reg) (constant)) (reg)) is also generated during
	 push_reload processing, so handle it now.  */
      else if (GET_CODE (plus_arg0) == PLUS && REG_P (plus_arg1))
	{
	  if ((addr_mask & RELOAD_REG_INDEXED) == 0)
	    {
	      extra_cost = 1;
	      type = "indexed #2";
	    }
	}

      else if (!base_reg_operand (plus_arg0, GET_MODE (plus_arg0)))
	{
	  fail_msg = "no base register #2";
	  extra_cost = -1;
	}

      else if (int_reg_operand (plus_arg1, GET_MODE (plus_arg1)))
	{
	  if ((addr_mask & RELOAD_REG_INDEXED) == 0
	      || !legitimate_indexed_address_p (addr, false))
	    {
	      extra_cost = 1;
	      type = "indexed";
	    }
	}

      else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0
	       && CONST_INT_P (plus_arg1))
	{
	  if (!quad_address_offset_p (INTVAL (plus_arg1)))
	    {
	      extra_cost = 1;
	      type = "vector d-form offset";
	    }
	}

      /* Make sure the register class can handle offset addresses.  */
      else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
	{
	  if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	    {
	      extra_cost = 1;
	      type = "offset #2";
	    }
	}

      else
	{
	  fail_msg = "bad PLUS";
	  extra_cost = -1;
	}

      break;

    case LO_SUM:
      /* Quad offsets are restricted and can't handle normal addresses.  */
      if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
	{
	  extra_cost = -1;
	  type = "vector d-form lo_sum";
	}

      else if (!legitimate_lo_sum_address_p (mode, addr, false))
	{
	  fail_msg = "bad LO_SUM";
	  extra_cost = -1;
	}

      if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	{
	  extra_cost = 1;
	  type = "lo_sum";
	}
      break;

      /* Static addresses need to create a TOC entry.  */
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
	{
	  extra_cost = -1;
	  type = "vector d-form lo_sum #2";
	}

      else
	{
	  type = "address";
	  extra_cost = rs6000_secondary_reload_toc_costs (addr_mask);
	}
      break;

      /* TOC references look like offsetable memory.  */
    case UNSPEC:
      if (TARGET_CMODEL == CMODEL_SMALL || XINT (addr, 1) != UNSPEC_TOCREL)
	{
	  fail_msg = "bad UNSPEC";
	  extra_cost = -1;
	}

      else if ((addr_mask & RELOAD_REG_QUAD_OFFSET) != 0)
	{
	  extra_cost = -1;
	  type = "vector d-form lo_sum #3";
	}

      else if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	{
	  extra_cost = 1;
	  type = "toc reference";
	}
      break;

    default:
      {
	fail_msg = "bad address";
	extra_cost = -1;
      }
    }

  if (TARGET_DEBUG_ADDR /* && extra_cost != 0 */)
    {
      if (extra_cost < 0)
	fprintf (stderr,
		 "rs6000_secondary_reload_memory error: mode = %s, "
		 "class = %s, addr_mask = '%s', %s\n",
		 GET_MODE_NAME (mode),
		 reg_class_names[rclass],
		 rs6000_debug_addr_mask (addr_mask, false),
		 (fail_msg != NULL) ? fail_msg : "<bad address>");

      else
	fprintf (stderr,
		 "rs6000_secondary_reload_memory: mode = %s, class = %s, "
		 "addr_mask = '%s', extra cost = %d, %s\n",
		 GET_MODE_NAME (mode),
		 reg_class_names[rclass],
		 rs6000_debug_addr_mask (addr_mask, false),
		 extra_cost,
		 (type) ? type : "<none>");

      debug_rtx (addr);
    }

  return extra_cost;
}
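
/* Illustrative example (not from the original source): an Altivec-style
   address such as (and (plus (reg 3) (reg 4)) (const_int -16)) is left
   alone for ALTIVEC_REGS, but for other register classes the AND -16
   must be simulated, so the function reports an extra cost (1 for an
   indirect base, 2 for an indexed one) rather than failing outright.  */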
/* Helper function for rs6000_secondary_reload to return true if a move to a
   different register class is really a simple move.  */

static bool
rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
				     enum rs6000_reg_type from_type,
				     machine_mode mode)
{
  int size = GET_MODE_SIZE (mode);

  /* Add support for various direct moves available.  In this function, we only
     look at cases where we don't need any extra registers, and one or more
     simple move insns are issued.  Originally small integers are not allowed
     in FPR/VSX registers.  Single precision binary floating is not a simple
     move because we need to convert to the single precision memory layout.
     The 4-byte SDmode can be moved.  TDmode values are disallowed since they
     need special direct move handling, which we do not support yet.  */
  if (TARGET_DIRECT_MOVE
      && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
	  || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
    {
      if (TARGET_POWERPC64)
	{
	  /* ISA 2.07: MTVSRD or MFVSRD.  */
	  if (size == 8)
	    return true;

	  /* ISA 3.0: MTVSRDD or MFVSRD + MFVSRLD.  */
	  if (size == 16 && TARGET_P9_VECTOR && mode != TDmode)
	    return true;
	}

      /* ISA 2.07: MTVSRWZ or MFVSRWZ.  */
      if (TARGET_P8_VECTOR)
	{
	  if (mode == SImode)
	    return true;

	  if (TARGET_P9_VECTOR && (mode == HImode || mode == QImode))
	    return true;
	}

      /* ISA 2.07: MTVSRWZ or MFVSRWZ.  */
      if (mode == SDmode)
	return true;
    }

  /* Power6+: MFTGPR or MFFGPR.  */
  else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
	   && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
	       || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
    return true;

  /* Move to/from SPR.  */
  else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
	   && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
	       || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
    return true;

  return false;
}
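/* Usage note (illustrative): on a 64-bit -mcpu=power8 target, moving a
   DImode value between a GPR and a VSX register is a simple move (one
   mtvsrd or mfvsrd), so the function returns true for
   (VSX_REG_TYPE, GPR_REG_TYPE, DImode).  It returns false for SFmode,
   because that move must also convert to the scalar-single register layout
   and therefore needs the multi-insn sequences handled by
   rs6000_secondary_reload_direct_move below.  */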
/* Direct move helper function for rs6000_secondary_reload, handle all of the
   special direct moves that involve allocating an extra register, return the
   insn code of the helper function if there is such a function or
   CODE_FOR_nothing if not.  */

static bool
rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
				     enum rs6000_reg_type from_type,
				     machine_mode mode,
				     secondary_reload_info *sri,
				     bool altivec_p)
{
  bool ret = false;
  enum insn_code icode = CODE_FOR_nothing;
  int cost = 0;
  int size = GET_MODE_SIZE (mode);

  if (TARGET_POWERPC64 && size == 16)
    {
      /* Handle moving 128-bit values from GPRs to VSX registers on
	 ISA 2.07 (power8, power9) when running in 64-bit mode using
	 XXPERMDI to glue the two 64-bit values back together.  */
      if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
	{
	  cost = 3;			/* 2 mtvsrd's, 1 xxpermdi.  */
	  icode = reg_addr[mode].reload_vsx_gpr;
	}

      /* Handle moving 128-bit values from VSX registers to GPRs on
	 ISA 2.07 when running in 64-bit mode using XXPERMDI to get access to
	 the bottom 64-bit value.  */
      else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
	{
	  cost = 3;			/* 2 mfvsrd's, 1 xxpermdi.  */
	  icode = reg_addr[mode].reload_gpr_vsx;
	}
    }

  else if (TARGET_POWERPC64 && mode == SFmode)
    {
      if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
	{
	  cost = 3;			/* xscvdpspn, mfvsrd, and.  */
	  icode = reg_addr[mode].reload_gpr_vsx;
	}

      else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
	{
	  cost = 2;			/* mtvsrz, xscvspdpn.  */
	  icode = reg_addr[mode].reload_vsx_gpr;
	}
    }

  else if (!TARGET_POWERPC64 && size == 8)
    {
      /* Handle moving 64-bit values from GPRs to floating point registers on
	 ISA 2.07 when running in 32-bit mode using FMRGOW to glue the two
	 32-bit values back together.  Altivec register classes must be handled
	 specially since a different instruction is used, and the secondary
	 reload support requires a single instruction class in the scratch
	 register constraint.  However, right now TFmode is not allowed in
	 Altivec registers, so the pattern will never match.  */
      if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
	{
	  cost = 3;			/* 2 mtvsrwz's, 1 fmrgow.  */
	  icode = reg_addr[mode].reload_fpr_gpr;
	}
    }

  if (icode != CODE_FOR_nothing)
    {
      ret = true;
      sri->icode = icode;
      sri->extra_cost = cost;
    }

  return ret;
}
/* Return whether a move between two register classes can be done either
   directly (simple move) or via a pattern that uses a single extra temporary
   (using ISA 2.07's direct move in this case).  */

static bool
rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
			      enum rs6000_reg_type from_type,
			      machine_mode mode,
			      secondary_reload_info *sri,
			      bool altivec_p)
{
  /* Fall back to load/store reloads if either type is not a register.  */
  if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
    return false;

  /* If we haven't allocated registers yet, assume the move can be done for the
     standard register types.  */
  if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
      || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
      || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
    return true;

  /* Moves to the same set of registers is a simple move for non-specialized
     registers.  */
  if (to_type == from_type && IS_STD_REG_TYPE (to_type))
    return true;

  /* Check whether a simple move can be done directly.  */
  if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
    {
      if (sri)
	{
	  sri->icode = CODE_FOR_nothing;
	  sri->extra_cost = 0;
	}
      return true;
    }

  /* Now check if we can do it in a few steps.  */
  return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
					      altivec_p);
}
/* Inform reload about cases where moving X with a mode MODE to a register in
   RCLASS requires an extra scratch or immediate register.  Return the class
   needed for the immediate register.

   For VSX and Altivec, we may need a register to convert sp+offset into
   reg+sp.

   For misaligned 64-bit gpr loads and stores we need a register to
   convert an offset address to indirect.  */

static reg_class_t
rs6000_secondary_reload (bool in_p,
			 rtx x,
			 reg_class_t rclass_i,
			 machine_mode mode,
			 secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  reg_class_t ret = ALL_REGS;
  enum insn_code icode;
  bool default_p = false;
  bool done_p = false;

  /* Allow subreg of memory before/during reload.  */
  bool memory_p = (MEM_P (x)
		   || (!reload_completed && GET_CODE (x) == SUBREG
		       && MEM_P (SUBREG_REG (x))));

  sri->icode = CODE_FOR_nothing;
  sri->t_icode = CODE_FOR_nothing;
  sri->extra_cost = 0;
  icode = ((in_p)
	   ? reg_addr[mode].reload_load
	   : reg_addr[mode].reload_store);

  if (REG_P (x) || register_operand (x, mode))
    {
      enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
      bool altivec_p = (rclass == ALTIVEC_REGS);
      enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);

      if (!in_p)
	std::swap (to_type, from_type);

      /* Can we do a direct move of some sort?  */
      if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
					altivec_p))
	{
	  icode = (enum insn_code)sri->icode;
	  default_p = false;
	  done_p = true;
	  ret = NO_REGS;
	}
    }

  /* Make sure 0.0 is not reloaded or forced into memory.  */
  if (x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
    {
      ret = NO_REGS;
      default_p = false;
      done_p = true;
    }

  /* If this is a scalar floating point value and we want to load it into the
     traditional Altivec registers, do it via a move via a traditional floating
     point register, unless we have D-form addressing.  Also make sure that
     non-zero constants use a FPR.  */
  if (!done_p && reg_addr[mode].scalar_in_vmx_p
      && !mode_supports_vmx_dform (mode)
      && (rclass == VSX_REGS || rclass == ALTIVEC_REGS)
      && (memory_p || (GET_CODE (x) == CONST_DOUBLE)))
    {
      ret = FLOAT_REGS;
      default_p = false;
      done_p = true;
    }

  /* Handle reload of load/stores if we have reload helper functions.  */
  if (!done_p && icode != CODE_FOR_nothing && memory_p)
    {
      int extra_cost = rs6000_secondary_reload_memory (XEXP (x, 0), rclass,
						       mode);

      if (extra_cost >= 0)
	{
	  done_p = true;
	  ret = NO_REGS;
	  if (extra_cost > 0)
	    {
	      sri->extra_cost = extra_cost;
	      sri->icode = icode;
	    }
	}
    }

  /* Handle unaligned loads and stores of integer registers.  */
  if (!done_p && TARGET_POWERPC64
      && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
      && memory_p
      && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
    {
      rtx addr = XEXP (x, 0);
      rtx off = address_offset (addr);

      if (off != NULL_RTX)
	{
	  unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
	  unsigned HOST_WIDE_INT offset = INTVAL (off);

	  /* We need a secondary reload when our legitimate_address_p
	     says the address is good (as otherwise the entire address
	     will be reloaded), and the offset is not a multiple of
	     four or we have an address wrap.  Address wrap will only
	     occur for LO_SUMs since legitimate_offset_address_p
	     rejects addresses for 16-byte mems that will wrap.  */
	  if (GET_CODE (addr) == LO_SUM
	      ? (1 /* legitimate_address_p allows any offset for lo_sum */
		 && ((offset & 3) != 0
		     || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
	      : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
		 && (offset & 3) != 0))
	    {
	      /* -m32 -mpowerpc64 needs to use a 32-bit scratch register.  */
	      if (in_p)
		sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_load
			      : CODE_FOR_reload_di_load);
	      else
		sri->icode = ((TARGET_32BIT) ? CODE_FOR_reload_si_store
			      : CODE_FOR_reload_di_store);
	      sri->extra_cost = 2;
	      ret = NO_REGS;
	      done_p = true;
	    }
	  else
	    default_p = true;
	}
      else
	default_p = true;
    }

  if (!done_p && !TARGET_POWERPC64
      && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
      && memory_p
      && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
    {
      rtx addr = XEXP (x, 0);
      rtx off = address_offset (addr);

      if (off != NULL_RTX)
	{
	  unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
	  unsigned HOST_WIDE_INT offset = INTVAL (off);

	  /* We need a secondary reload when our legitimate_address_p
	     says the address is good (as otherwise the entire address
	     will be reloaded), and we have a wrap.

	     legitimate_lo_sum_address_p allows LO_SUM addresses to
	     have any offset so test for wrap in the low 16 bits.

	     legitimate_offset_address_p checks for the range
	     [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
	     for mode size of 16.  We wrap at [0x7ffc,0x7fff] and
	     [0x7ff4,0x7fff] respectively, so test for the
	     intersection of these ranges, [0x7ffc,0x7fff] and
	     [0x7ff4,0x7ff7] respectively.

	     Note that the address we see here may have been
	     manipulated by legitimize_reload_address.  */
	  if (GET_CODE (addr) == LO_SUM
	      ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
	      : offset - (0x8000 - extra) < UNITS_PER_WORD)
	    {
	      if (in_p)
		sri->icode = CODE_FOR_reload_si_load;
	      else
		sri->icode = CODE_FOR_reload_si_store;
	      sri->extra_cost = 2;
	      ret = NO_REGS;
	      done_p = true;
	    }
	  else
	    default_p = true;
	}
      else
	default_p = true;
    }

  if (!done_p)
    default_p = true;

  if (default_p)
    ret = default_secondary_reload (in_p, x, rclass, mode, sri);

  gcc_assert (ret != ALL_REGS);

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr,
	       "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
	       "mode = %s",
	       reg_class_names[ret],
	       in_p ? "true" : "false",
	       reg_class_names[rclass],
	       GET_MODE_NAME (mode));

      if (reload_completed)
	fputs (", after reload", stderr);

      if (!done_p)
	fputs (", done_p not set", stderr);

      if (default_p)
	fputs (", default secondary reload", stderr);

      if (sri->icode != CODE_FOR_nothing)
	fprintf (stderr, ", reload func = %s, extra cost = %d",
		 insn_data[sri->icode].name, sri->extra_cost);

      else if (sri->extra_cost > 0)
	fprintf (stderr, ", extra cost = %d", sri->extra_cost);

      fputs ("\n", stderr);
      debug_rtx (x);
    }

  return ret;
}
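/* Worked example for the wrap tests above (illustrative): for a 16-byte
   access on a 32-bit target, UNITS_PER_WORD is 4, so extra = 12 and the
   LO_SUM test reduces to "low 16 bits of the offset in [0x7ff4, 0x7fff]".
   With plain unsigned arithmetic:

     unsigned extra = 12, offset = 0x7ff8;
     ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra  // 0xfff8 >= 0xfff4: wrap

   while offset 0x7ff0 gives 0xfff0 < 0xfff4, so the last word still fits
   below 0x7fff and no scratch register is needed.  The XOR re-biases the
   signed 16-bit displacement so that one unsigned compare catches both ends
   of the range.  */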
/* Better tracing for rs6000_secondary_reload_inner.  */

static void
rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
			       bool store_p)
{
  rtx set, clobber;

  gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);

  fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
	   store_p ? "store" : "load");

  if (store_p)
    set = gen_rtx_SET (mem, reg);
  else
    set = gen_rtx_SET (reg, mem);

  clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
  debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
}

static void rs6000_secondary_reload_fail (int, rtx, rtx, rtx, bool)
  ATTRIBUTE_NORETURN;

static void
rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
			      bool store_p)
{
  rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
  gcc_unreachable ();
}
/* Fixup reload addresses for values in GPR, FPR, and VMX registers that have
   reload helper functions.  These were identified in
   rs6000_secondary_reload_memory, and if reload decided to use the secondary
   reload, it calls the insns:
	reload_<RELOAD:mode>_<P:mptrsize>_store
	reload_<RELOAD:mode>_<P:mptrsize>_load

   which in turn calls this function, to do whatever is necessary to create
   valid addresses.  */

void
rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
{
  int regno = true_regnum (reg);
  machine_mode mode = GET_MODE (reg);
  addr_mask_type addr_mask;
  rtx addr;
  rtx new_addr;
  rtx op_reg, op0, op1;
  rtx and_op;
  rtx cc_clobber;
  rtvec rv;

  if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER || !MEM_P (mem)
      || !base_reg_operand (scratch, GET_MODE (scratch)))
    rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

  if (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO))
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_GPR];

  else if (IN_RANGE (regno, FIRST_FPR_REGNO, LAST_FPR_REGNO))
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_FPR];

  else if (IN_RANGE (regno, FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO))
    addr_mask = reg_addr[mode].addr_mask[RELOAD_REG_VMX];

  else
    rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

  /* Make sure the mode is valid in this register class.  */
  if ((addr_mask & RELOAD_REG_VALID) == 0)
    rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

  if (TARGET_DEBUG_ADDR)
    rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);

  new_addr = addr = XEXP (mem, 0);
  switch (GET_CODE (addr))
    {
      /* Does the register class support auto update forms for this mode?  If
	 not, do the update now.  We don't need a scratch register, since the
	 powerpc only supports PRE_INC, PRE_DEC, and PRE_MODIFY.  */
    case PRE_INC:
    case PRE_DEC:
      op_reg = XEXP (addr, 0);
      if (!base_reg_operand (op_reg, Pmode))
	rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      if ((addr_mask & RELOAD_REG_PRE_INCDEC) == 0)
	{
	  emit_insn (gen_add2_insn (op_reg, GEN_INT (GET_MODE_SIZE (mode))));
	  new_addr = op_reg;
	}
      break;

    case PRE_MODIFY:
      op0 = XEXP (addr, 0);
      op1 = XEXP (addr, 1);
      if (!base_reg_operand (op0, Pmode)
	  || GET_CODE (op1) != PLUS
	  || !rtx_equal_p (op0, XEXP (op1, 0)))
	rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      if ((addr_mask & RELOAD_REG_PRE_MODIFY) == 0)
	{
	  emit_insn (gen_rtx_SET (op0, op1));
	  new_addr = op0;
	}
      break;

      /* Do we need to simulate AND -16 to clear the bottom address bits used
	 in VMX load/stores?  */
    case AND:
      op0 = XEXP (addr, 0);
      op1 = XEXP (addr, 1);
      if ((addr_mask & RELOAD_REG_AND_M16) == 0)
	{
	  if (REG_P (op0) || GET_CODE (op0) == SUBREG)
	    op_reg = op0;

	  else if (GET_CODE (op1) == PLUS)
	    {
	      emit_insn (gen_rtx_SET (scratch, op1));
	      op_reg = scratch;
	    }

	  else
	    rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

	  and_op = gen_rtx_AND (GET_MODE (scratch), op_reg, op1);
	  cc_clobber = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));
	  rv = gen_rtvec (2, gen_rtx_SET (scratch, and_op), cc_clobber);
	  emit_insn (gen_rtx_PARALLEL (VOIDmode, rv));
	  new_addr = scratch;
	}
      break;

      /* If this is an indirect address, make sure it is a base register.  */
    case REG:
    case SUBREG:
      if (!base_reg_operand (addr, GET_MODE (addr)))
	{
	  emit_insn (gen_rtx_SET (scratch, addr));
	  new_addr = scratch;
	}
      break;

      /* If this is an indexed address, make sure the register class can handle
	 indexed addresses for this mode.  */
    case PLUS:
      op0 = XEXP (addr, 0);
      op1 = XEXP (addr, 1);
      if (!base_reg_operand (op0, Pmode))
	rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      else if (int_reg_operand (op1, Pmode))
	{
	  if ((addr_mask & RELOAD_REG_INDEXED) == 0)
	    {
	      emit_insn (gen_rtx_SET (scratch, addr));
	      new_addr = scratch;
	    }
	}

      else if (mode_supports_dq_form (mode) && CONST_INT_P (op1))
	{
	  if (((addr_mask & RELOAD_REG_QUAD_OFFSET) == 0)
	      || !quad_address_p (addr, mode, false))
	    {
	      emit_insn (gen_rtx_SET (scratch, addr));
	      new_addr = scratch;
	    }
	}

      /* Make sure the register class can handle offset addresses.  */
      else if (rs6000_legitimate_offset_address_p (mode, addr, false, true))
	{
	  if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	    {
	      emit_insn (gen_rtx_SET (scratch, addr));
	      new_addr = scratch;
	    }
	}

      else
	rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      break;

    case LO_SUM:
      op0 = XEXP (addr, 0);
      op1 = XEXP (addr, 1);
      if (!base_reg_operand (op0, Pmode))
	rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      else if (int_reg_operand (op1, Pmode))
	{
	  if ((addr_mask & RELOAD_REG_INDEXED) == 0)
	    {
	      emit_insn (gen_rtx_SET (scratch, addr));
	      new_addr = scratch;
	    }
	}

      /* Quad offsets are restricted and can't handle normal addresses.  */
      else if (mode_supports_dq_form (mode))
	{
	  emit_insn (gen_rtx_SET (scratch, addr));
	  new_addr = scratch;
	}

      /* Make sure the register class can handle offset addresses.  */
      else if (legitimate_lo_sum_address_p (mode, addr, false))
	{
	  if ((addr_mask & RELOAD_REG_OFFSET) == 0)
	    {
	      emit_insn (gen_rtx_SET (scratch, addr));
	      new_addr = scratch;
	    }
	}

      else
	rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);

      break;

    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      rs6000_emit_move (scratch, addr, Pmode);
      new_addr = scratch;
      break;

    default:
      rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
    }

  /* Adjust the address if it changed.  */
  if (addr != new_addr)
    {
      mem = replace_equiv_address_nv (mem, new_addr);
      if (TARGET_DEBUG_ADDR)
	fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
    }

  /* Now create the move.  */
  if (store_p)
    emit_insn (gen_rtx_SET (mem, reg));
  else
    emit_insn (gen_rtx_SET (reg, mem));

  return;
}
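/* Illustrative note: the AND case above simulates the VMX convention that
   lvx/stvx ignore the low four address bits.  The scratch register ends up
   holding the equivalent of

     unsigned long aligned = addr & ~15UL;   // e.g. 0x1000f -> 0x10000

   i.e. the explicitly 16-byte-aligned address, when the register class
   cannot leave the AND implicit in the load/store itself.  */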
/* Convert reloads involving 64-bit gprs and misaligned offset
   addressing, or multiple 32-bit gprs and offsets that are too large,
   to use indirect addressing.  */

void
rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
{
  int regno = true_regnum (reg);
  enum reg_class rclass;
  rtx addr;
  rtx scratch_or_premodify = scratch;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
	       store_p ? "store" : "load");
      fprintf (stderr, "reg:\n");
      debug_rtx (reg);
      fprintf (stderr, "mem:\n");
      debug_rtx (mem);
      fprintf (stderr, "scratch:\n");
      debug_rtx (scratch);
    }

  gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
  gcc_assert (GET_CODE (mem) == MEM);
  rclass = REGNO_REG_CLASS (regno);
  gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
  addr = XEXP (mem, 0);

  if (GET_CODE (addr) == PRE_MODIFY)
    {
      gcc_assert (REG_P (XEXP (addr, 0))
		  && GET_CODE (XEXP (addr, 1)) == PLUS
		  && XEXP (XEXP (addr, 1), 0) == XEXP (addr, 0));
      scratch_or_premodify = XEXP (addr, 0);
      if (!HARD_REGISTER_P (scratch_or_premodify))
	/* If we have a pseudo here then reload will have arranged
	   to have it replaced, but only in the original insn.
	   Use the replacement here too.  */
	scratch_or_premodify = find_replacement (&XEXP (addr, 0));

      /* RTL emitted by rs6000_secondary_reload_gpr uses RTL
	 expressions from the original insn, without unsharing them.
	 Any RTL that points into the original insn will of course
	 have register replacements applied.  That is why we don't
	 need to look for replacements under the PLUS.  */
      addr = XEXP (addr, 1);
    }
  gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);

  rs6000_emit_move (scratch_or_premodify, addr, Pmode);

  mem = replace_equiv_address_nv (mem, scratch_or_premodify);

  /* Now create the move.  */
  if (store_p)
    emit_insn (gen_rtx_SET (mem, reg));
  else
    emit_insn (gen_rtx_SET (reg, mem));

  return;
}
/* Given an rtx X being reloaded into a reg required to be
   in class CLASS, return the class of reg to actually use.
   In general this is just CLASS; but on some machines
   in some cases it is preferable to use a more restrictive class.

   On the RS/6000, we have to return NO_REGS when we want to reload a
   floating-point CONST_DOUBLE to force it to be copied to memory.

   We also don't want to reload integer values into floating-point
   registers if we can at all help it.  In fact, this can
   cause reload to die, if it tries to generate a reload of CTR
   into a FP register and discovers it doesn't have the memory location
   required.

   ??? Would it be a good idea to have reload do the converse, that is
   try to reload floating modes into FP registers if possible?
 */

static enum reg_class
rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
{
  machine_mode mode = GET_MODE (x);
  bool is_constant = CONSTANT_P (x);

  /* If a mode can't go in FPR/ALTIVEC/VSX registers, don't return a preferred
     reload class for it.  */
  if ((rclass == ALTIVEC_REGS || rclass == VSX_REGS)
      && (reg_addr[mode].addr_mask[RELOAD_REG_VMX] & RELOAD_REG_VALID) == 0)
    return NO_REGS;

  if ((rclass == FLOAT_REGS || rclass == VSX_REGS)
      && (reg_addr[mode].addr_mask[RELOAD_REG_FPR] & RELOAD_REG_VALID) == 0)
    return NO_REGS;

  /* For VSX, see if we should prefer FLOAT_REGS or ALTIVEC_REGS.  Do not allow
     the reloading of address expressions using PLUS into floating point
     registers.  */
  if (TARGET_VSX && VSX_REG_CLASS_P (rclass) && GET_CODE (x) != PLUS)
    {
      if (is_constant)
	{
	  /* Zero is always allowed in all VSX registers.  */
	  if (x == CONST0_RTX (mode))
	    return rclass;

	  /* If this is a vector constant that can be formed with a few Altivec
	     instructions, we want altivec registers.  */
	  if (GET_CODE (x) == CONST_VECTOR && easy_vector_constant (x, mode))
	    return ALTIVEC_REGS;

	  /* If this is an integer constant that can easily be loaded into
	     vector registers, allow it.  */
	  if (CONST_INT_P (x))
	    {
	      HOST_WIDE_INT value = INTVAL (x);

	      /* ISA 2.07 can generate -1 in all registers with XXLORC.  ISA
		 2.06 can generate it in the Altivec registers with
		 VSPLTI<x>.  */
	      if (value == -1)
		{
		  if (TARGET_P8_VECTOR)
		    return rclass;
		  else if (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
		    return ALTIVEC_REGS;
		  else
		    return NO_REGS;
		}

	      /* ISA 3.0 can load -128..127 using the XXSPLTIB instruction and
		 a sign extend in the Altivec registers.  */
	      if (IN_RANGE (value, -128, 127) && TARGET_P9_VECTOR
		  && (rclass == ALTIVEC_REGS || rclass == VSX_REGS))
		return ALTIVEC_REGS;
	    }

	  /* Force constant to memory.  */
	  return NO_REGS;
	}

      /* D-form addressing can easily reload the value.  */
      if (mode_supports_vmx_dform (mode)
	  || mode_supports_dq_form (mode))
	return rclass;

      /* If this is a scalar floating point value and we don't have D-form
	 addressing, prefer the traditional floating point registers so that we
	 can use D-form (register+offset) addressing.  */
      if (rclass == VSX_REGS
	  && (mode == SFmode || GET_MODE_SIZE (mode) == 8))
	return FLOAT_REGS;

      /* Prefer the Altivec registers if Altivec is handling the vector
	 operations (i.e. V16QI, V8HI, and V4SI), or if we prefer Altivec
	 loads.  */
      if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
	  || mode == V1TImode)
	return ALTIVEC_REGS;

      return rclass;
    }

  if (is_constant || GET_CODE (x) == PLUS)
    {
      if (reg_class_subset_p (GENERAL_REGS, rclass))
	return GENERAL_REGS;
      if (reg_class_subset_p (BASE_REGS, rclass))
	return BASE_REGS;
      return NO_REGS;
    }

  if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
    return GENERAL_REGS;

  return rclass;
}
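/* Example (illustrative): on a -mcpu=power9 target, reloading an easy
   integer splat constant such as 100 into a VSX class prefers ALTIVEC_REGS,
   since 100 is in [-128, 127] and xxspltib (plus a sign extend for wider
   elements) can materialize it; a value such as 1000 misses every special
   case above and hits "force constant to memory", returning NO_REGS.  */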
/* Debug version of rs6000_preferred_reload_class.  */
static enum reg_class
rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum reg_class ret = rs6000_preferred_reload_class (x, rclass);

  fprintf (stderr,
	   "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
	   "mode = %s, x:\n",
	   reg_class_names[ret], reg_class_names[rclass],
	   GET_MODE_NAME (GET_MODE (x)));
  debug_rtx (x);

  return ret;
}
/* If we are copying between FP or AltiVec registers and anything else, we need
   a memory location.  The exception is when we are targeting ppc64 and the
   move to/from fpr to gpr instructions are available.  Also, under VSX, you
   can copy vector registers from the FP register set to the Altivec register
   set and vice versa.  */

static bool
rs6000_secondary_memory_needed (machine_mode mode,
				reg_class_t from_class,
				reg_class_t to_class)
{
  enum rs6000_reg_type from_type, to_type;
  bool altivec_p = ((from_class == ALTIVEC_REGS)
		    || (to_class == ALTIVEC_REGS));

  /* If a simple/direct move is available, we don't need secondary memory.  */
  from_type = reg_class_to_reg_type[(int)from_class];
  to_type = reg_class_to_reg_type[(int)to_class];

  if (rs6000_secondary_reload_move (to_type, from_type, mode,
				    (secondary_reload_info *)0, altivec_p))
    return false;

  /* If we have a floating point or vector register class, we need to use
     memory to transfer the data.  */
  if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
    return true;

  return false;
}
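/* Example (illustrative): with -mcpu=power8 in 64-bit mode,
   rs6000_secondary_memory_needed (DImode, GENERAL_REGS, FLOAT_REGS) returns
   false, because mtvsrd/mfvsrd make the copy a simple move.  On a processor
   without direct moves the FP/vector test applies instead and the copy must
   bounce through a stack slot.  */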
/* Debug version of rs6000_secondary_memory_needed.  */
static bool
rs6000_debug_secondary_memory_needed (machine_mode mode,
				      reg_class_t from_class,
				      reg_class_t to_class)
{
  bool ret = rs6000_secondary_memory_needed (mode, from_class, to_class);

  fprintf (stderr,
	   "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
	   "to_class = %s, mode = %s\n",
	   ret ? "true" : "false",
	   reg_class_names[from_class],
	   reg_class_names[to_class],
	   GET_MODE_NAME (mode));

  return ret;
}
/* Return the register class of a scratch register needed to copy IN into
   or out of a register in RCLASS in MODE.  If it can be done directly,
   NO_REGS is returned.  */

static enum reg_class
rs6000_secondary_reload_class (enum reg_class rclass, machine_mode mode,
			       rtx in)
{
  int regno;

  if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
#if TARGET_MACHO
		     && MACHOPIC_INDIRECT
#endif
		     ))
    {
      /* We cannot copy a symbolic operand directly into anything
	 other than BASE_REGS for TARGET_ELF.  So indicate that a
	 register from BASE_REGS is needed as an intermediate
	 register.

	 On Darwin, pic addresses require a load from memory, which
	 needs a base register.  */
      if (rclass != BASE_REGS
	  && (GET_CODE (in) == SYMBOL_REF
	      || GET_CODE (in) == HIGH
	      || GET_CODE (in) == LABEL_REF
	      || GET_CODE (in) == CONST))
	return BASE_REGS;
    }

  if (GET_CODE (in) == REG)
    {
      regno = REGNO (in);
      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  regno = true_regnum (in);
	  if (regno >= FIRST_PSEUDO_REGISTER)
	    regno = -1;
	}
    }
  else if (GET_CODE (in) == SUBREG)
    {
      regno = true_regnum (in);
      if (regno >= FIRST_PSEUDO_REGISTER)
	regno = -1;
    }
  else
    regno = -1;

  /* If we have VSX register moves, prefer moving scalar values between
     Altivec registers and GPR by going via an FPR (and then via memory)
     instead of reloading the secondary memory address for Altivec moves.  */
  if (TARGET_VSX
      && GET_MODE_SIZE (mode) < 16
      && !mode_supports_vmx_dform (mode)
      && (((rclass == GENERAL_REGS || rclass == BASE_REGS)
	   && (regno >= 0 && ALTIVEC_REGNO_P (regno)))
	  || ((rclass == VSX_REGS || rclass == ALTIVEC_REGS)
	      && (regno >= 0 && INT_REGNO_P (regno)))))
    return FLOAT_REGS;

  /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
     into anything.  */
  if (rclass == GENERAL_REGS || rclass == BASE_REGS
      || (regno >= 0 && INT_REGNO_P (regno)))
    return NO_REGS;

  /* Constants, memory, and VSX registers can go into VSX registers (both the
     traditional floating point and the altivec registers).  */
  if (rclass == VSX_REGS
      && (regno == -1 || VSX_REGNO_P (regno)))
    return NO_REGS;

  /* Constants, memory, and FP registers can go into FP registers.  */
  if ((regno == -1 || FP_REGNO_P (regno))
      && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
    return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;

  /* Memory, and AltiVec registers can go into AltiVec registers.  */
  if ((regno == -1 || ALTIVEC_REGNO_P (regno))
      && rclass == ALTIVEC_REGS)
    return NO_REGS;

  /* We can copy among the CR registers.  */
  if ((rclass == CR_REGS || rclass == CR0_REGS)
      && regno >= 0 && CR_REGNO_P (regno))
    return NO_REGS;

  /* Otherwise, we need GENERAL_REGS.  */
  return GENERAL_REGS;
}
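/* Example (illustrative): asking for FLOAT_REGS with IN being the CTR
   register matches none of the special cases above (CTR is neither an
   integer, FP, Altivec, nor CR register), so the function returns
   GENERAL_REGS: the value must go CTR -> GPR -> memory -> FPR, the exact
   situation the header comment of rs6000_preferred_reload_class warns
   about.  */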
/* Debug version of rs6000_secondary_reload_class.  */
static enum reg_class
rs6000_debug_secondary_reload_class (enum reg_class rclass,
				     machine_mode mode, rtx in)
{
  enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
  fprintf (stderr,
	   "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
	   "mode = %s, input rtx:\n",
	   reg_class_names[ret], reg_class_names[rclass],
	   GET_MODE_NAME (mode));
  debug_rtx (in);

  return ret;
}
/* Implement TARGET_CAN_CHANGE_MODE_CLASS.  */

static bool
rs6000_can_change_mode_class (machine_mode from,
			      machine_mode to,
			      reg_class_t rclass)
{
  unsigned from_size = GET_MODE_SIZE (from);
  unsigned to_size = GET_MODE_SIZE (to);

  if (from_size != to_size)
    {
      enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;

      if (reg_classes_intersect_p (xclass, rclass))
	{
	  unsigned to_nregs = hard_regno_nregs (FIRST_FPR_REGNO, to);
	  unsigned from_nregs = hard_regno_nregs (FIRST_FPR_REGNO, from);
	  bool to_float128_vector_p = FLOAT128_VECTOR_P (to);
	  bool from_float128_vector_p = FLOAT128_VECTOR_P (from);

	  /* Don't allow 64-bit types to overlap with 128-bit types that take a
	     single register under VSX because the scalar part of the register
	     is in the upper 64-bits, and not the lower 64-bits.  Types like
	     TFmode/TDmode that take 2 scalar registers can overlap.  128-bit
	     IEEE floating point can't overlap, and neither can small
	     values.  */

	  if (to_float128_vector_p && from_float128_vector_p)
	    return true;

	  else if (to_float128_vector_p || from_float128_vector_p)
	    return false;

	  /* TDmode in floating-mode registers must always go into a register
	     pair with the most significant word in the even-numbered register
	     to match ISA requirements.  In little-endian mode, this does not
	     match subreg numbering, so we cannot allow subregs.  */
	  if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
	    return false;

	  if (from_size < 8 || to_size < 8)
	    return false;

	  if (from_size == 8 && (8 * to_nregs) != to_size)
	    return false;

	  if (to_size == 8 && (8 * from_nregs) != from_size)
	    return false;

	  return true;
	}
      else
	return true;
    }

  /* Since the VSX register set includes traditional floating point registers
     and altivec registers, just check for the size being different instead of
     trying to check whether the modes are vector modes.  Otherwise it won't
     allow say DF and DI to change classes.  For types like TFmode and TDmode
     that take 2 64-bit registers, rather than a single 128-bit register, don't
     allow subregs of those types to other 128 bit types.  */
  if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
    {
      unsigned num_regs = (from_size + 15) / 16;
      if (hard_regno_nregs (FIRST_FPR_REGNO, to) > num_regs
	  || hard_regno_nregs (FIRST_FPR_REGNO, from) > num_regs)
	return false;

      return (from_size == 8 || from_size == 16);
    }

  if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
      && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
    return false;

  return true;
}
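/* Example (illustrative): under VSX, V2DImode occupies a single 128-bit
   register (hard_regno_nregs (FIRST_FPR_REGNO, V2DImode) == 1), while a
   DImode scalar lives in the upper 64 bits of such a register, so a
   (subreg:DI (reg:V2DI ...)) is rejected by the size checks above even
   though both modes fit in the register file.  */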
/* Debug version of rs6000_can_change_mode_class.  */
static bool
rs6000_debug_can_change_mode_class (machine_mode from,
				    machine_mode to,
				    reg_class_t rclass)
{
  bool ret = rs6000_can_change_mode_class (from, to, rclass);

  fprintf (stderr,
	   "rs6000_can_change_mode_class, return %s, from = %s, "
	   "to = %s, rclass = %s\n",
	   ret ? "true" : "false",
	   GET_MODE_NAME (from), GET_MODE_NAME (to),
	   reg_class_names[rclass]);

  return ret;
}
/* Return a string to do a move operation of 128 bits of data.  */

const char *
rs6000_output_move_128bit (rtx operands[])
{
  rtx dest = operands[0];
  rtx src = operands[1];
  machine_mode mode = GET_MODE (dest);
  int dest_regno;
  int src_regno;
  bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
  bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;

  if (REG_P (dest))
    {
      dest_regno = REGNO (dest);
      dest_gpr_p = INT_REGNO_P (dest_regno);
      dest_fp_p = FP_REGNO_P (dest_regno);
      dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
      dest_vsx_p = dest_fp_p | dest_vmx_p;
    }
  else
    {
      dest_regno = -1;
      dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
    }

  if (REG_P (src))
    {
      src_regno = REGNO (src);
      src_gpr_p = INT_REGNO_P (src_regno);
      src_fp_p = FP_REGNO_P (src_regno);
      src_vmx_p = ALTIVEC_REGNO_P (src_regno);
      src_vsx_p = src_fp_p | src_vmx_p;
    }
  else
    {
      src_regno = -1;
      src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
    }

  /* Register moves.  */
  if (dest_regno >= 0 && src_regno >= 0)
    {
      if (dest_gpr_p)
	{
	  if (src_gpr_p)
	    return "#";

	  if (TARGET_DIRECT_MOVE_128 && src_vsx_p)
	    return (WORDS_BIG_ENDIAN
		    ? "mfvsrd %0,%x1\n\tmfvsrld %L0,%x1"
		    : "mfvsrd %L0,%x1\n\tmfvsrld %0,%x1");

	  else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
	    return "#";
	}

      else if (TARGET_VSX && dest_vsx_p)
	{
	  if (src_vsx_p)
	    return "xxlor %x0,%x1,%x1";

	  else if (TARGET_DIRECT_MOVE_128 && src_gpr_p)
	    return (WORDS_BIG_ENDIAN
		    ? "mtvsrdd %x0,%1,%L1"
		    : "mtvsrdd %x0,%L1,%1");

	  else if (TARGET_DIRECT_MOVE && src_gpr_p)
	    return "#";
	}

      else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
	return "vor %0,%1,%1";

      else if (dest_fp_p && src_fp_p)
	return "#";
    }

  /* Loads.  */
  else if (dest_regno >= 0 && MEM_P (src))
    {
      if (dest_gpr_p)
	{
	  if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
	    return "lq %0,%1";
	  else
	    return "#";
	}

      else if (TARGET_ALTIVEC && dest_vmx_p
	       && altivec_indexed_or_indirect_operand (src, mode))
	return "lvx %0,%y1";

      else if (TARGET_VSX && dest_vsx_p)
	{
	  if (mode_supports_dq_form (mode)
	      && quad_address_p (XEXP (src, 0), mode, true))
	    return "lxv %x0,%1";

	  else if (TARGET_P9_VECTOR)
	    return "lxvx %x0,%y1";

	  else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
	    return "lxvw4x %x0,%y1";

	  else
	    return "lxvd2x %x0,%y1";
	}

      else if (TARGET_ALTIVEC && dest_vmx_p)
	return "lvx %0,%y1";

      else if (dest_fp_p)
	return "#";
    }

  /* Stores.  */
  else if (src_regno >= 0 && MEM_P (dest))
    {
      if (src_gpr_p)
	{
	  if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
	    return "stq %1,%0";
	  else
	    return "#";
	}

      else if (TARGET_ALTIVEC && src_vmx_p
	       && altivec_indexed_or_indirect_operand (dest, mode))
	return "stvx %1,%y0";

      else if (TARGET_VSX && src_vsx_p)
	{
	  if (mode_supports_dq_form (mode)
	      && quad_address_p (XEXP (dest, 0), mode, true))
	    return "stxv %x1,%0";

	  else if (TARGET_P9_VECTOR)
	    return "stxvx %x1,%y0";

	  else if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
	    return "stxvw4x %x1,%y0";

	  else
	    return "stxvd2x %x1,%y0";
	}

      else if (TARGET_ALTIVEC && src_vmx_p)
	return "stvx %1,%y0";

      else if (src_fp_p)
	return "#";
    }

  /* Constants.  */
  else if (dest_regno >= 0
	   && (GET_CODE (src) == CONST_INT
	       || GET_CODE (src) == CONST_WIDE_INT
	       || GET_CODE (src) == CONST_DOUBLE
	       || GET_CODE (src) == CONST_VECTOR))
    {
      if (dest_gpr_p)
	return "#";

      else if ((dest_vmx_p && TARGET_ALTIVEC)
	       || (dest_vsx_p && TARGET_VSX))
	return output_vec_const_move (operands);
    }

  fatal_insn ("Bad 128-bit move", gen_rtx_SET (dest, src));
}
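/* Example (illustrative): on a 64-bit big-endian -mcpu=power9 target,
   moving a V2DImode value held in a GPR pair into a VSX register takes the
   TARGET_DIRECT_MOVE_128 branch above and emits a single
   "mtvsrdd %x0,%1,%L1", roughly "mtvsrdd 34,4,5" when the destination is
   vs34 and the source pair is r4/r5.  */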
/* Validate a 128-bit move.  */
bool
rs6000_move_128bit_ok_p (rtx operands[])
{
  machine_mode mode = GET_MODE (operands[0]);
  return (gpc_reg_operand (operands[0], mode)
	  || gpc_reg_operand (operands[1], mode));
}

/* Return true if a 128-bit move needs to be split.  */
bool
rs6000_split_128bit_ok_p (rtx operands[])
{
  if (!reload_completed)
    return false;

  if (!gpr_or_gpr_p (operands[0], operands[1]))
    return false;

  if (quad_load_store_p (operands[0], operands[1]))
    return false;

  return true;
}
/* Given a comparison operation, return the bit number in CCR to test.  We
   know this is a valid comparison.

   SCC_P is 1 if this is for an scc.  That means that %D will have been
   used instead of %C, so the bits will be in different places.

   Return -1 if OP isn't a valid comparison for some reason.  */

int
ccr_bit (rtx op, int scc_p)
{
  enum rtx_code code = GET_CODE (op);
  machine_mode cc_mode;
  int cc_regnum;
  int base_bit;
  rtx reg;

  if (!COMPARISON_P (op))
    return -1;

  reg = XEXP (op, 0);

  gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));

  cc_mode = GET_MODE (reg);
  cc_regnum = REGNO (reg);
  base_bit = 4 * (cc_regnum - CR0_REGNO);

  validate_condition_mode (code, cc_mode);

  /* When generating a sCOND operation, only positive conditions are
     allowed.  */
  gcc_assert (!scc_p
	      || code == EQ || code == GT || code == LT || code == UNORDERED
	      || code == GTU || code == LTU);

  switch (code)
    {
    case NE:
      return scc_p ? base_bit + 3 : base_bit + 2;
    case EQ:
      return base_bit + 2;
    case GT:  case GTU:  case UNLE:
      return base_bit + 1;
    case LT:  case LTU:  case UNGE:
      return base_bit;
    case ORDERED:  case UNORDERED:
      return base_bit + 3;

    case GE:  case GEU:
      /* If scc, we will have done a cror to put the bit in the
	 unordered position.  So test that bit.  For integer, this is ! LT
	 unless this is an scc insn.  */
      return scc_p ? base_bit + 3 : base_bit;

    case LE:  case LEU:
      return scc_p ? base_bit + 3 : base_bit + 1;

    default:
      gcc_unreachable ();
    }
}
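/* Worked example (illustrative): a GT comparison against field CR6 gives
   base_bit = 4 * (CR6 - CR0) = 24 and ccr_bit returns 25, the position of
   CR6's GT bit.  With SCC_P, GE/LE/NE instead select base_bit + 3, since
   the scc expansion will already have cror'ed the wanted condition into
   the unordered position of the field.  */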
/* Return the GOT register.  */

rtx
rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
{
  /* The second flow pass currently (June 1999) can't update
     regs_ever_live without disturbing other parts of the compiler, so
     update it here to make the prolog/epilogue code happy.  */
  if (!can_create_pseudo_p ()
      && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
    df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);

  crtl->uses_pic_offset_table = 1;

  return pic_offset_table_rtx;
}

static rs6000_stack_t stack_info;

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
rs6000_init_machine_status (void)
{
  stack_info.reload_completed = 0;
  return ggc_cleared_alloc<machine_function> ();
}
#define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)

/* Write out a function code label.  */

void
rs6000_output_function_entry (FILE *file, const char *fname)
{
  if (fname[0] != '.')
    {
      switch (DEFAULT_ABI)
	{
	default:
	  gcc_unreachable ();

	case ABI_AIX:
	  if (DOT_SYMBOLS)
	    putc ('.', file);
	  else
	    ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
	  break;

	case ABI_ELFv2:
	case ABI_V4:
	case ABI_DARWIN:
	  break;
	}
    }

  RS6000_OUTPUT_BASENAME (file, fname);
}
/* Print an operand.  Recognize special options, documented below.  */

#if TARGET_ELF
#define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
#define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
#else
#define SMALL_DATA_RELOC "sda21"
#define SMALL_DATA_REG 0
#endif

void
print_operand (FILE *file, rtx x, int code)
{
  int i;
  unsigned HOST_WIDE_INT uval;

  switch (code)
    {
      /* %a is output_address.  */

      /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
	 output_operand.  */

    case 'D':
      /* Like 'J' but get to the GT bit only.  */
      gcc_assert (REG_P (x));

      /* Bit 1 is GT bit.  */
      i = 4 * (REGNO (x) - CR0_REGNO) + 1;

      /* Add one for shift count in rlinm for scc.  */
      fprintf (file, "%d", i + 1);
      return;

    case 'e':
      /* If the low 16 bits are 0, but some other bit is set, write 's'.  */
      if (! INT_P (x))
	{
	  output_operand_lossage ("invalid %%e value");
	  return;
	}

      uval = INTVAL (x);
      if ((uval & 0xffff) == 0 && uval != 0)
	putc ('s', file);
      return;

    case 'E':
      /* X is a CR register.  Print the number of the EQ bit of the CR.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%E value");
      else
	fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
      return;

    case 'f':
      /* X is a CR register.  Print the shift count needed to move it
	 to the high-order four bits.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%f value");
      else
	fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
      return;

    case 'F':
      /* Similar, but print the count for the rotate in the opposite
	 direction.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%F value");
      else
	fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
      return;

    case 'G':
      /* X is a constant integer.  If it is negative, print "m",
	 otherwise print "z".  This is to make an aze or ame insn.  */
      if (GET_CODE (x) != CONST_INT)
	output_operand_lossage ("invalid %%G value");
      else if (INTVAL (x) >= 0)
	putc ('z', file);
      else
	putc ('m', file);
      return;

    case 'h':
      /* If constant, output low-order five bits.  Otherwise, write
	 normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
      else
	print_operand (file, x, 0);
      return;

    case 'H':
      /* If constant, output low-order six bits.  Otherwise, write
	 normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
      else
	print_operand (file, x, 0);
      return;

    case 'I':
      /* Print `i' if this is a constant, else nothing.  */
      if (INT_P (x))
	putc ('i', file);
      return;

    case 'j':
      /* Write the bit number in CCR for jump.  */
      i = ccr_bit (x, 0);
      if (i == -1)
	output_operand_lossage ("invalid %%j code");
      else
	fprintf (file, "%d", i);
      return;

    case 'J':
      /* Similar, but add one for shift count in rlinm for scc and pass
	 scc flag to `ccr_bit'.  */
      i = ccr_bit (x, 1);
      if (i == -1)
	output_operand_lossage ("invalid %%J code");
      else
	/* If we want bit 31, write a shift count of zero, not 32.  */
	fprintf (file, "%d", i == 31 ? 0 : i + 1);
      return;

    case 'k':
      /* X must be a constant.  Write the 1's complement of the
	 constant.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%k value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
      return;

    case 'K':
      /* X must be a symbolic constant on ELF.  Write an
	 expression suitable for an 'addi' that adds in the low 16
	 bits of the MEM.  */
      if (GET_CODE (x) == CONST)
	{
	  if (GET_CODE (XEXP (x, 0)) != PLUS
	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
		  && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
	      || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
	    output_operand_lossage ("invalid %%K value");
	}
      print_operand_address (file, x);
      fputs ("@l", file);
      return;

      /* %l is output_asm_label.  */

    case 'L':
      /* Write second word of DImode or DFmode reference.  Works on register
	 or non-indexed memory only.  */
      if (REG_P (x))
	fputs (reg_names[REGNO (x) + 1], file);
      else if (MEM_P (x))
	{
	  machine_mode mode = GET_MODE (x);
	  /* Handle possible auto-increment.  Since it is pre-increment and
	     we have already done it, we can just use an offset of word.  */
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
						 UNITS_PER_WORD));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (mode, plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
						 UNITS_PER_WORD));
	  else
	    output_address (mode, XEXP (adjust_address_nv (x, SImode,
							   UNITS_PER_WORD),
					0));

	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;

    case 'N': /* Unused */
      /* Write the number of elements in the vector times 4.  */
      if (GET_CODE (x) != PARALLEL)
	output_operand_lossage ("invalid %%N value");
      else
	fprintf (file, "%d", XVECLEN (x, 0) * 4);
      return;

    case 'O': /* Unused */
      /* Similar, but subtract 1 first.  */
      if (GET_CODE (x) != PARALLEL)
	output_operand_lossage ("invalid %%O value");
      else
	fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
      return;

    case 'p':
      /* X is a CONST_INT that is a power of two.  Output the logarithm.  */
      if (! INT_P (x)
	  || (i = exact_log2 (INTVAL (x))) < 0)
	output_operand_lossage ("invalid %%p value");
      else
	fprintf (file, "%d", i);
      return;

    case 'P':
      /* The operand must be an indirect memory reference.  The result
	 is the register name.  */
      if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
	  || REGNO (XEXP (x, 0)) >= 32)
	output_operand_lossage ("invalid %%P value");
      else
	fputs (reg_names[REGNO (XEXP (x, 0))], file);
      return;

    case 'q':
      /* This outputs the logical code corresponding to a boolean
	 expression.  The expression may have one or both operands
	 negated (if one, only the first one).  For condition register
	 logical operations, it will also treat the negated
	 CR codes as NOTs, but not handle NOTs of them.  */
      {
	const char *const *t = 0;
	const char *s;
	enum rtx_code code = GET_CODE (x);
	static const char * const tbl[3][3] = {
	  { "and", "andc", "nor" },
	  { "or", "orc", "nand" },
	  { "xor", "eqv", "xor" } };

	if (code == AND)
	  t = tbl[0];
	else if (code == IOR)
	  t = tbl[1];
	else if (code == XOR)
	  t = tbl[2];
	else
	  output_operand_lossage ("invalid %%q value");

	if (GET_CODE (XEXP (x, 0)) != NOT)
	  s = t[0];
	else
	  {
	    if (GET_CODE (XEXP (x, 1)) == NOT)
	      s = t[2];
	    else
	      s = t[1];
	  }

	fputs (s, file);
      }
      return;

    case 'Q':
      if (! TARGET_MFCRF)
	return;
      fputc (',', file);
      /* FALLTHRU */

    case 'R':
      /* X is a CR register.  Print the mask for `mtcrf'.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%R value");
      else
	fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
      return;

    case 's':
      /* Low 5 bits of 32 - value.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%s value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
      return;

    case 't':
      /* Like 'J' but get to the OVERFLOW/UNORDERED bit.  */
      gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);

      /* Bit 3 is OV bit.  */
      i = 4 * (REGNO (x) - CR0_REGNO) + 3;

      /* If we want bit 31, write a shift count of zero, not 32.  */
      fprintf (file, "%d", i == 31 ? 0 : i + 1);
      return;

    case 'T':
      /* Print the symbolic name of a branch target register.  */
      if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
				  && REGNO (x) != CTR_REGNO))
	output_operand_lossage ("invalid %%T value");
      else if (REGNO (x) == LR_REGNO)
	fputs ("lr", file);
      else
	fputs ("ctr", file);
      return;

    case 'u':
      /* High-order or low-order 16 bits of constant, whichever is non-zero,
	 for use in unsigned operand.  */
      if (! INT_P (x))
	{
	  output_operand_lossage ("invalid %%u value");
	  return;
	}

      uval = INTVAL (x);
      if ((uval & 0xffff) == 0)
	uval >>= 16;

      fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
      return;

    case 'v':
      /* High-order 16 bits of constant for use in signed operand.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%v value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_HEX,
		 (INTVAL (x) >> 16) & 0xffff);
      return;

    case 'U':
      /* Print `u' if this has an auto-increment or auto-decrement.  */
      if (MEM_P (x)
	  && (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC
	      || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
	putc ('u', file);
      return;

    case 'V':
      /* Print the trap code for this operand.  */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("eq", file);   /* 4 */
	  break;
	case NE:
	  fputs ("ne", file);   /* 24 */
	  break;
	case LT:
	  fputs ("lt", file);   /* 16 */
	  break;
	case LE:
	  fputs ("le", file);   /* 20 */
	  break;
	case GT:
	  fputs ("gt", file);   /* 8 */
	  break;
	case GE:
	  fputs ("ge", file);   /* 12 */
	  break;
	case LTU:
	  fputs ("llt", file);  /* 2 */
	  break;
	case LEU:
	  fputs ("lle", file);  /* 6 */
	  break;
	case GTU:
	  fputs ("lgt", file);  /* 1 */
	  break;
	case GEU:
	  fputs ("lge", file);  /* 5 */
	  break;
	default:
	  gcc_unreachable ();
	}
      break;

    case 'w':
      /* If constant, low-order 16 bits of constant, signed.  Otherwise, write
	 normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC,
		 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
      else
	print_operand (file, x, 0);
      return;

    case 'x':
      /* X is a FPR or Altivec register used in a VSX context.  */
      if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%x value");
      else
	{
	  int reg = REGNO (x);
	  int vsx_reg = (FP_REGNO_P (reg)
			 ? reg - 32
			 : reg - FIRST_ALTIVEC_REGNO + 32);

#ifdef TARGET_REGNAMES
	  if (TARGET_REGNAMES)
	    fprintf (file, "%%vs%d", vsx_reg);
	  else
#endif
	    fprintf (file, "%d", vsx_reg);
	}
      return;

    case 'X':
      if (MEM_P (x)
	  && (legitimate_indexed_address_p (XEXP (x, 0), 0)
	      || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
		  && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
	putc ('x', file);
      return;

    case 'Y':
      /* Like 'L', for third word of TImode/PTImode.  */
      if (REG_P (x))
	fputs (reg_names[REGNO (x) + 2], file);
      else if (MEM_P (x))
	{
	  machine_mode mode = GET_MODE (x);
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (mode, plus_constant (Pmode,
						 XEXP (XEXP (x, 0), 0), 8));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (mode, plus_constant (Pmode,
						 XEXP (XEXP (x, 0), 0), 8));
	  else
	    output_address (mode, XEXP (adjust_address_nv (x, SImode, 8), 0));
	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;

    case 'z':
      /* X is a SYMBOL_REF.  Write out the name preceded by a
	 period and without any trailing data in brackets.  Used for function
	 names.  If we are configured for System V (or the embedded ABI) on
	 the PowerPC, do not emit the period, since those systems do not use
	 TOCs and the like.  */
      gcc_assert (GET_CODE (x) == SYMBOL_REF);

      /* For macho, check to see if we need a stub.  */
      if (TARGET_MACHO)
	{
	  const char *name = XSTR (x, 0);
#if TARGET_MACHO
	  if (darwin_emit_branch_islands
	      && MACHOPIC_INDIRECT
	      && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
	    name = machopic_indirection_name (x, /*stub_p=*/true);
#endif
	  assemble_name (file, name);
	}
      else if (!DOT_SYMBOLS)
	assemble_name (file, XSTR (x, 0));
      else
	rs6000_output_function_entry (file, XSTR (x, 0));
      return;

    case 'Z':
      /* Like 'L', for last word of TImode/PTImode.  */
      if (REG_P (x))
	fputs (reg_names[REGNO (x) + 3], file);
      else if (MEM_P (x))
	{
	  machine_mode mode = GET_MODE (x);
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (mode, plus_constant (Pmode,
						 XEXP (XEXP (x, 0), 0), 12));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (mode, plus_constant (Pmode,
						 XEXP (XEXP (x, 0), 0), 12));
	  else
	    output_address (mode, XEXP (adjust_address_nv (x, SImode, 12), 0));
	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;

      /* Print AltiVec memory operand.  */
    case 'y':
      {
	rtx tmp;

	gcc_assert (MEM_P (x));

	tmp = XEXP (x, 0);

	if (VECTOR_MEM_ALTIVEC_OR_VSX_P (GET_MODE (x))
	    && GET_CODE (tmp) == AND
	    && GET_CODE (XEXP (tmp, 1)) == CONST_INT
	    && INTVAL (XEXP (tmp, 1)) == -16)
	  tmp = XEXP (tmp, 0);
	else if (VECTOR_MEM_VSX_P (GET_MODE (x))
		 && GET_CODE (tmp) == PRE_MODIFY)
	  tmp = XEXP (tmp, 1);
	if (REG_P (tmp))
	  fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
	else
	  {
	    if (GET_CODE (tmp) != PLUS
		|| !REG_P (XEXP (tmp, 0))
		|| !REG_P (XEXP (tmp, 1)))
	      {
		output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
		break;
	      }

	    if (REGNO (XEXP (tmp, 0)) == 0)
	      fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
		       reg_names[ REGNO (XEXP (tmp, 0)) ]);
	    else
	      fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
		       reg_names[ REGNO (XEXP (tmp, 1)) ]);
	  }
	break;
      }

    case 0:
      if (REG_P (x))
	fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (MEM_P (x))
	{
	  /* We need to handle PRE_INC and PRE_DEC here, since we need to
	     know the width from the mode.  */
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC)
	    fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
		     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
	  else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
		     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (GET_MODE (x), XEXP (XEXP (x, 0), 1));
	  else
	    output_address (GET_MODE (x), XEXP (x, 0));
	}
      else
	{
	  if (toc_relative_expr_p (x, false, &tocrel_base_oac,
				   &tocrel_offset_oac))
	    /* This hack along with a corresponding hack in
	       rs6000_output_addr_const_extra arranges to output addends
	       where the assembler expects to find them.  eg.
	       (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
	       without this hack would be output as "x@toc+4".  We
	       want "x+4@toc".  */
	    output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
	  else
	    output_addr_const (file, x);
	}
      return;

    case '&':
      if (const char *name = get_some_local_dynamic_name ())
	assemble_name (file, name);
      else
	output_operand_lossage ("'%%&' used without any "
				"local dynamic TLS references");
      return;

    default:
      output_operand_lossage ("invalid %%xn code");
    }
}
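/* Worked example (illustrative): for %w above, the expression
   ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000 sign-extends the low 16 bits,
   e.g.

     long long v = 0x12348001;
     ((v & 0xffff) ^ 0x8000) - 0x8000   // == -32767 (0x8001 as signed)

   so the printed number is exactly what a signed 16-bit displacement field
   will hold.  */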
/* Print the address of an operand.  */

void
print_operand_address (FILE *file, rtx x)
{
  if (REG_P (x))
    fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
	   || GET_CODE (x) == LABEL_REF)
    {
      output_addr_const (file, x);
      if (small_data_operand (x, GET_MODE (x)))
	fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		 reg_names[SMALL_DATA_REG]);
      else
	gcc_assert (!TARGET_TOC);
    }
  else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
	   && REG_P (XEXP (x, 1)))
    {
      if (REGNO (XEXP (x, 0)) == 0)
	fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
		 reg_names[ REGNO (XEXP (x, 0)) ]);
      else
	fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
		 reg_names[ REGNO (XEXP (x, 1)) ]);
    }
  else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
	   && GET_CODE (XEXP (x, 1)) == CONST_INT)
    fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
	     INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
#if TARGET_MACHO
  else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
	   && CONSTANT_P (XEXP (x, 1)))
    {
      fprintf (file, "lo16(");
      output_addr_const (file, XEXP (x, 1));
      fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
    }
#endif
#if TARGET_ELF
  else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
	   && CONSTANT_P (XEXP (x, 1)))
    {
      output_addr_const (file, XEXP (x, 1));
      fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
    }
#endif
  else if (toc_relative_expr_p (x, false, &tocrel_base_oac,
				&tocrel_offset_oac))
    {
      /* This hack along with a corresponding hack in
	 rs6000_output_addr_const_extra arranges to output addends
	 where the assembler expects to find them.  eg.
	 (lo_sum (reg 9)
	 .       (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
	 without this hack would be output as "x@toc+8@l(9)".  We
	 want "x+8@toc@l(9)".  */
      output_addr_const (file, CONST_CAST_RTX (tocrel_base_oac));
      if (GET_CODE (x) == LO_SUM)
	fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
      else
	fprintf (file, "(%s)",
		 reg_names[REGNO (XVECEXP (tocrel_base_oac, 0, 1))]);
    }
  else
    gcc_unreachable ();
}
/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.  */

static bool
rs6000_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    switch (XINT (x, 1))
      {
      case UNSPEC_TOCREL:
	gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
			     && REG_P (XVECEXP (x, 0, 1))
			     && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
	output_addr_const (file, XVECEXP (x, 0, 0));
	if (x == tocrel_base_oac && tocrel_offset_oac != const0_rtx)
	  {
	    if (INTVAL (tocrel_offset_oac) >= 0)
	      fprintf (file, "+");
	    output_addr_const (file, CONST_CAST_RTX (tocrel_offset_oac));
	  }
	if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
	  {
	    putc ('-', file);
	    assemble_name (file, toc_label_name);
	    need_toc_init = 1;
	  }
	else if (TARGET_ELF)
	  fputs ("@toc", file);
	return true;

#if TARGET_MACHO
      case UNSPEC_MACHOPIC_OFFSET:
	output_addr_const (file, XVECEXP (x, 0, 0));
	putc ('-', file);
	machopic_output_function_base_name (file);
	return true;
#endif
      }
  return false;
}
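/* Example (illustrative, restating the hack comments above): for the RTL
   (plus (unspec [(symbol_ref ("x")) (reg 2)] UNSPEC_TOCREL) (const_int 4))
   these routines emit "x+4@toc" on ELF rather than "x@toc+4", keeping the
   addend ahead of the @toc modifier where the assembler expects it.  */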
/* Target hook for assembling integer objects.  The PowerPC version has
   to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
   is defined.  It also needs to handle DI-mode objects on 64-bit
   targets.  */

static bool
rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
#ifdef RELOCATABLE_NEEDS_FIXUP
  /* Special handling for SI values.  */
  if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
    {
      static int recurse = 0;

      /* For -mrelocatable, we mark all addresses that need to be fixed up in
	 the .fixup section.  Since the TOC section is already relocated, we
	 don't need to mark it here.  We used to skip the text section, but it
	 should never be valid for relocated addresses to be placed in the text
	 section.  */
      if (DEFAULT_ABI == ABI_V4
	  && (TARGET_RELOCATABLE || flag_pic > 1)
	  && in_section != toc_section
	  && !recurse
	  && !CONST_SCALAR_INT_P (x)
	  && CONSTANT_P (x))
	{
	  char buf[256];

	  recurse = 1;
	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
	  fixuplabelno++;
	  ASM_OUTPUT_LABEL (asm_out_file, buf);
	  fprintf (asm_out_file, "\t.long\t(");
	  output_addr_const (asm_out_file, x);
	  fprintf (asm_out_file, ")@fixup\n");
	  fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
	  ASM_OUTPUT_ALIGN (asm_out_file, 2);
	  fprintf (asm_out_file, "\t.long\t");
	  assemble_name (asm_out_file, buf);
	  fprintf (asm_out_file, "\n\t.previous\n");
	  recurse = 0;
	  return true;
	}
      /* Remove initial .'s to turn a -mcall-aixdesc function
	 address into the address of the descriptor, not the function
	 itself.  */
      else if (GET_CODE (x) == SYMBOL_REF
	       && XSTR (x, 0)[0] == '.'
	       && DEFAULT_ABI == ABI_AIX)
	{
	  const char *name = XSTR (x, 0);
	  while (*name == '.')
	    name++;

	  fprintf (asm_out_file, "\t.long\t%s\n", name);
	  return true;
	}
    }
#endif /* RELOCATABLE_NEEDS_FIXUP */
  return default_assemble_integer (x, size, aligned_p);
}
#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
/* Emit an assembler directive to set symbol visibility for DECL to
   VISIBILITY_TYPE.  */

static void
rs6000_assemble_visibility (tree decl, int vis)
{
  if (TARGET_XCOFF)
    return;

  /* Functions need to have their entry point symbol visibility set as
     well as their descriptor symbol visibility.  */
  if (DEFAULT_ABI == ABI_AIX
      && DOT_SYMBOLS
      && TREE_CODE (decl) == FUNCTION_DECL)
    {
      static const char * const visibility_types[] = {
	NULL, "protected", "hidden", "internal"
      };

      const char *name, *type;

      name = ((* targetm.strip_name_encoding)
	      (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
      type = visibility_types[vis];

      fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
      fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
    }
  else
    default_assemble_visibility (decl, vis);
}
#endif
enum rtx_code
rs6000_reverse_condition (machine_mode mode, enum rtx_code code)
{
  /* Reversal of FP compares takes care -- an ordered compare
     becomes an unordered compare and vice versa.  */
  if (mode == CCFPmode
      && (!flag_finite_math_only
	  || code == UNLT || code == UNLE || code == UNGT || code == UNGE
	  || code == UNEQ || code == LTGT))
    return reverse_condition_maybe_unordered (code);
  else
    return reverse_condition (code);
}
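/* Worked example (illustrative, not part of the original source): reversing
   UNLT on a CCFPmode compare without -ffinite-math-only goes through
   reverse_condition_maybe_unordered and yields GE, the ordered complement,
   so NaN operands are still handled correctly; plain reverse_condition
   would not account for the unordered case.  */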
/* Generate a compare for CODE.  Return a brand-new rtx that
   represents the result of the compare.  */

static rtx
rs6000_generate_compare (rtx cmp, machine_mode mode)
{
  machine_mode comp_mode;
  rtx compare_result;
  enum rtx_code code = GET_CODE (cmp);
  rtx op0 = XEXP (cmp, 0);
  rtx op1 = XEXP (cmp, 1);

  if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
    comp_mode = CCmode;
  else if (FLOAT_MODE_P (mode))
    comp_mode = CCFPmode;
  else if (code == GTU || code == LTU
	   || code == GEU || code == LEU)
    comp_mode = CCUNSmode;
  else if ((code == EQ || code == NE)
	   && unsigned_reg_p (op0)
	   && (unsigned_reg_p (op1)
	       || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
    /* These are unsigned values, perhaps there will be a later
       ordering compare that can be shared with this one.  */
    comp_mode = CCUNSmode;
  else
    comp_mode = CCmode;

  /* If we have an unsigned compare, make sure we don't have a signed value as
     an immediate.  */
  if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
      && INTVAL (op1) < 0)
    {
      op0 = copy_rtx_if_shared (op0);
      op1 = force_reg (GET_MODE (op0), op1);
      cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
    }

  /* First, the compare.  */
  compare_result = gen_reg_rtx (comp_mode);

  /* IEEE 128-bit support in VSX registers when we do not have hardware
     support.  */
  if (!TARGET_FLOAT128_HW && FLOAT128_VECTOR_P (mode))
    {
      rtx libfunc = NULL_RTX;
      bool check_nan = false;
      rtx dest;

      switch (code)
	{
	case EQ:
	case NE:
	  libfunc = optab_libfunc (eq_optab, mode);
	  break;

	case GT:
	case GE:
	  libfunc = optab_libfunc (ge_optab, mode);
	  break;

	case LT:
	case LE:
	  libfunc = optab_libfunc (le_optab, mode);
	  break;

	case UNORDERED:
	case ORDERED:
	  libfunc = optab_libfunc (unord_optab, mode);
	  code = (code == UNORDERED) ? NE : EQ;
	  break;

	case UNGE:
	case UNGT:
	  check_nan = true;
	  libfunc = optab_libfunc (ge_optab, mode);
	  code = (code == UNGE) ? GE : GT;
	  break;

	case UNLE:
	case UNLT:
	  check_nan = true;
	  libfunc = optab_libfunc (le_optab, mode);
	  code = (code == UNLE) ? LE : LT;
	  break;

	case UNEQ:
	case LTGT:
	  check_nan = true;
	  libfunc = optab_libfunc (eq_optab, mode);
	  code = (code == UNEQ) ? EQ : NE;
	  break;

	default:
	  gcc_unreachable ();
	}

      gcc_assert (libfunc);

      if (!check_nan)
	dest = emit_library_call_value (libfunc, NULL_RTX, LCT_CONST,
					SImode, op0, mode, op1, mode);

      /* The library signals an exception for signalling NaNs, so we need to
	 handle isgreater, etc. by first checking isordered.  */
      else
	{
	  rtx ne_rtx, normal_dest, unord_dest;
	  rtx unord_func = optab_libfunc (unord_optab, mode);
	  rtx join_label = gen_label_rtx ();
	  rtx join_ref = gen_rtx_LABEL_REF (VOIDmode, join_label);
	  rtx unord_cmp = gen_reg_rtx (comp_mode);

	  /* Test for either value being a NaN.  */
	  gcc_assert (unord_func);
	  unord_dest = emit_library_call_value (unord_func, NULL_RTX,
						LCT_CONST, SImode,
						op0, mode, op1, mode);

	  /* Set value (0) if either value is a NaN, and jump to the join
	     label.  */
	  dest = gen_reg_rtx (SImode);
	  emit_move_insn (dest, const1_rtx);
	  emit_insn (gen_rtx_SET (unord_cmp,
				  gen_rtx_COMPARE (comp_mode, unord_dest,
						   const0_rtx)));

	  ne_rtx = gen_rtx_NE (comp_mode, unord_cmp, const0_rtx);
	  emit_jump_insn (gen_rtx_SET (pc_rtx,
				       gen_rtx_IF_THEN_ELSE (VOIDmode, ne_rtx,
							     join_ref,
							     pc_rtx)));

	  /* Do the normal comparison, knowing that the values are not
	     NaNs.  */
	  normal_dest = emit_library_call_value (libfunc, NULL_RTX,
						 LCT_CONST, SImode,
						 op0, mode, op1, mode);

	  emit_insn (gen_cstoresi4 (dest,
				    gen_rtx_fmt_ee (code, SImode, normal_dest,
						    const0_rtx),
				    normal_dest, const0_rtx));

	  /* Join NaN and non-NaN paths.  Compare dest against 0.  */
	  emit_label (join_label);
	  code = NE;
	}

      emit_insn (gen_rtx_SET (compare_result,
			      gen_rtx_COMPARE (comp_mode, dest, const0_rtx)));
    }

  else
    {
      /* Generate XLC-compatible TFmode compare as PARALLEL with extra
	 CLOBBERs to match cmptf_internal2 pattern.  */
      if (comp_mode == CCFPmode && TARGET_XL_COMPAT
	  && FLOAT128_IBM_P (GET_MODE (op0))
	  && TARGET_HARD_FLOAT)
	emit_insn (gen_rtx_PARALLEL (VOIDmode,
	  gen_rtvec (10,
		     gen_rtx_SET (compare_result,
				  gen_rtx_COMPARE (comp_mode, op0, op1)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
		     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
      else if (GET_CODE (op1) == UNSPEC
	       && XINT (op1, 1) == UNSPEC_SP_TEST)
	{
	  rtx op1b = XVECEXP (op1, 0, 0);
	  comp_mode = CCEQmode;
	  compare_result = gen_reg_rtx (CCEQmode);
	  if (TARGET_64BIT)
	    emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
	  else
	    emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
	}
      else
	emit_insn (gen_rtx_SET (compare_result,
				gen_rtx_COMPARE (comp_mode, op0, op1)));
    }

  /* Some kinds of FP comparisons need an OR operation;
     under flag_finite_math_only we don't bother.  */
  if (FLOAT_MODE_P (mode)
      && (!FLOAT128_IEEE_P (mode) || TARGET_FLOAT128_HW)
      && !flag_finite_math_only
      && (code == LE || code == GE
	  || code == UNEQ || code == LTGT
	  || code == UNGT || code == UNLT))
    {
      enum rtx_code or1, or2;
      rtx or1_rtx, or2_rtx, compare2_rtx;
      rtx or_result = gen_reg_rtx (CCEQmode);

      switch (code)
	{
	case LE: or1 = LT;  or2 = EQ;  break;
	case GE: or1 = GT;  or2 = EQ;  break;
	case UNEQ: or1 = UNORDERED;  or2 = EQ;  break;
	case LTGT: or1 = LT;  or2 = GT;  break;
	case UNGT: or1 = UNORDERED;  or2 = GT;  break;
	case UNLT: or1 = UNORDERED;  or2 = LT;  break;
	default:  gcc_unreachable ();
	}
      validate_condition_mode (or1, comp_mode);
      validate_condition_mode (or2, comp_mode);
      or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
      or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
      compare2_rtx = gen_rtx_COMPARE (CCEQmode,
				      gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
				      const_true_rtx);
      emit_insn (gen_rtx_SET (or_result, compare2_rtx));

      compare_result = or_result;
      code = EQ;
    }

  validate_condition_mode (code, GET_MODE (compare_result));

  return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
}
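/* Worked example (illustrative, not part of the original source): an LE
   compare of two DFmode values without -ffinite-math-only takes the OR path
   above: or1/or2 become LT/EQ, and the CCEQmode IOR conceptually computes
   (a < b) || (a == b) as a single condition bit.  The resulting CCEQ
   pseudo is later matched by the cceq patterns, which typically combine
   the lt and eq bits of the CR field with a cror instruction.  */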
/* Return the diagnostic message string if the binary operation OP is
   not permitted on TYPE1 and TYPE2, NULL otherwise.  */

static const char *
rs6000_invalid_binary_op (int op ATTRIBUTE_UNUSED,
			  const_tree type1,
			  const_tree type2)
{
  machine_mode mode1 = TYPE_MODE (type1);
  machine_mode mode2 = TYPE_MODE (type2);

  /* For complex modes, use the inner type.  */
  if (COMPLEX_MODE_P (mode1))
    mode1 = GET_MODE_INNER (mode1);

  if (COMPLEX_MODE_P (mode2))
    mode2 = GET_MODE_INNER (mode2);

  /* Don't allow IEEE 754R 128-bit binary floating point and IBM extended
     double to intermix unless -mfloat128-convert.  */
  if (mode1 == mode2)
    return NULL;

  if (!TARGET_FLOAT128_CVT)
    {
      if ((mode1 == KFmode && mode2 == IFmode)
	  || (mode1 == IFmode && mode2 == KFmode))
	return N_("__float128 and __ibm128 cannot be used in the same "
		  "expression");

      if (TARGET_IEEEQUAD
	  && ((mode1 == IFmode && mode2 == TFmode)
	      || (mode1 == TFmode && mode2 == IFmode)))
	return N_("__ibm128 and long double cannot be used in the same "
		  "expression");

      if (!TARGET_IEEEQUAD
	  && ((mode1 == KFmode && mode2 == TFmode)
	      || (mode1 == TFmode && mode2 == KFmode)))
	return N_("__float128 and long double cannot be used in the same "
		  "expression");
    }

  return NULL;
}
/* Expand floating point conversion to/from __float128 and __ibm128.  */

void
rs6000_expand_float128_convert (rtx dest, rtx src, bool unsigned_p)
{
  machine_mode dest_mode = GET_MODE (dest);
  machine_mode src_mode = GET_MODE (src);
  convert_optab cvt = unknown_optab;
  bool do_move = false;
  rtx libfunc = NULL_RTX;
  rtx dest2;
  typedef rtx (*rtx_2func_t) (rtx, rtx);
  rtx_2func_t hw_convert = (rtx_2func_t)0;
  size_t kf_or_tf;

  struct hw_conv_t {
    rtx_2func_t from_df;
    rtx_2func_t from_sf;
    rtx_2func_t from_si_sign;
    rtx_2func_t from_si_uns;
    rtx_2func_t from_di_sign;
    rtx_2func_t from_di_uns;
    rtx_2func_t to_df;
    rtx_2func_t to_sf;
    rtx_2func_t to_si_sign;
    rtx_2func_t to_si_uns;
    rtx_2func_t to_di_sign;
    rtx_2func_t to_di_uns;
  } hw_conversions[2] = {
    /* conversions to/from KFmode */
    {
      gen_extenddfkf2_hw,		/* KFmode <- DFmode.  */
      gen_extendsfkf2_hw,		/* KFmode <- SFmode.  */
      gen_float_kfsi2_hw,		/* KFmode <- SImode (signed).  */
      gen_floatuns_kfsi2_hw,		/* KFmode <- SImode (unsigned).  */
      gen_float_kfdi2_hw,		/* KFmode <- DImode (signed).  */
      gen_floatuns_kfdi2_hw,		/* KFmode <- DImode (unsigned).  */
      gen_trunckfdf2_hw,		/* DFmode <- KFmode.  */
      gen_trunckfsf2_hw,		/* SFmode <- KFmode.  */
      gen_fix_kfsi2_hw,			/* SImode <- KFmode (signed).  */
      gen_fixuns_kfsi2_hw,		/* SImode <- KFmode (unsigned).  */
      gen_fix_kfdi2_hw,			/* DImode <- KFmode (signed).  */
      gen_fixuns_kfdi2_hw,		/* DImode <- KFmode (unsigned).  */
    },

    /* conversions to/from TFmode */
    {
      gen_extenddftf2_hw,		/* TFmode <- DFmode.  */
      gen_extendsftf2_hw,		/* TFmode <- SFmode.  */
      gen_float_tfsi2_hw,		/* TFmode <- SImode (signed).  */
      gen_floatuns_tfsi2_hw,		/* TFmode <- SImode (unsigned).  */
      gen_float_tfdi2_hw,		/* TFmode <- DImode (signed).  */
      gen_floatuns_tfdi2_hw,		/* TFmode <- DImode (unsigned).  */
      gen_trunctfdf2_hw,		/* DFmode <- TFmode.  */
      gen_trunctfsf2_hw,		/* SFmode <- TFmode.  */
      gen_fix_tfsi2_hw,			/* SImode <- TFmode (signed).  */
      gen_fixuns_tfsi2_hw,		/* SImode <- TFmode (unsigned).  */
      gen_fix_tfdi2_hw,			/* DImode <- TFmode (signed).  */
      gen_fixuns_tfdi2_hw,		/* DImode <- TFmode (unsigned).  */
    },
  };

  if (dest_mode == src_mode)
    gcc_unreachable ();

  /* Eliminate memory operations.  */
  if (MEM_P (src))
    src = force_reg (src_mode, src);

  if (MEM_P (dest))
    {
      rtx tmp = gen_reg_rtx (dest_mode);
      rs6000_expand_float128_convert (tmp, src, unsigned_p);
      rs6000_emit_move (dest, tmp, dest_mode);
      return;
    }

  /* Convert to IEEE 128-bit floating point.  */
  if (FLOAT128_IEEE_P (dest_mode))
    {
      if (dest_mode == KFmode)
	kf_or_tf = 0;
      else if (dest_mode == TFmode)
	kf_or_tf = 1;
      else
	gcc_unreachable ();

      switch (src_mode)
	{
	case E_DFmode:
	  cvt = sext_optab;
	  hw_convert = hw_conversions[kf_or_tf].from_df;
	  break;

	case E_SFmode:
	  cvt = sext_optab;
	  hw_convert = hw_conversions[kf_or_tf].from_sf;
	  break;

	case E_KFmode:
	case E_IFmode:
	case E_TFmode:
	  if (FLOAT128_IBM_P (src_mode))
	    cvt = sext_optab;
	  else
	    do_move = true;
	  break;

	case E_SImode:
	  if (unsigned_p)
	    {
	      cvt = ufloat_optab;
	      hw_convert = hw_conversions[kf_or_tf].from_si_uns;
	    }
	  else
	    {
	      cvt = sfloat_optab;
	      hw_convert = hw_conversions[kf_or_tf].from_si_sign;
	    }
	  break;

	case E_DImode:
	  if (unsigned_p)
	    {
	      cvt = ufloat_optab;
	      hw_convert = hw_conversions[kf_or_tf].from_di_uns;
	    }
	  else
	    {
	      cvt = sfloat_optab;
	      hw_convert = hw_conversions[kf_or_tf].from_di_sign;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Convert from IEEE 128-bit floating point.  */
  else if (FLOAT128_IEEE_P (src_mode))
    {
      if (src_mode == KFmode)
	kf_or_tf = 0;
      else if (src_mode == TFmode)
	kf_or_tf = 1;
      else
	gcc_unreachable ();

      switch (dest_mode)
	{
	case E_DFmode:
	  cvt = trunc_optab;
	  hw_convert = hw_conversions[kf_or_tf].to_df;
	  break;

	case E_SFmode:
	  cvt = trunc_optab;
	  hw_convert = hw_conversions[kf_or_tf].to_sf;
	  break;

	case E_KFmode:
	case E_IFmode:
	case E_TFmode:
	  if (FLOAT128_IBM_P (dest_mode))
	    cvt = trunc_optab;
	  else
	    do_move = true;
	  break;

	case E_SImode:
	  if (unsigned_p)
	    {
	      cvt = ufix_optab;
	      hw_convert = hw_conversions[kf_or_tf].to_si_uns;
	    }
	  else
	    {
	      cvt = sfix_optab;
	      hw_convert = hw_conversions[kf_or_tf].to_si_sign;
	    }
	  break;

	case E_DImode:
	  if (unsigned_p)
	    {
	      cvt = ufix_optab;
	      hw_convert = hw_conversions[kf_or_tf].to_di_uns;
	    }
	  else
	    {
	      cvt = sfix_optab;
	      hw_convert = hw_conversions[kf_or_tf].to_di_sign;
	    }
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  /* Both IBM format.  */
  else if (FLOAT128_IBM_P (dest_mode) && FLOAT128_IBM_P (src_mode))
    do_move = true;

  else
    gcc_unreachable ();

  /* Handle conversion between TFmode/KFmode/IFmode.  */
  if (do_move)
    emit_insn (gen_rtx_SET (dest, gen_rtx_FLOAT_EXTEND (dest_mode, src)));

  /* Handle conversion if we have hardware support.  */
  else if (TARGET_FLOAT128_HW && hw_convert)
    emit_insn ((hw_convert) (dest, src));

  /* Call an external function to do the conversion.  */
  else if (cvt != unknown_optab)
    {
      libfunc = convert_optab_libfunc (cvt, dest_mode, src_mode);
      gcc_assert (libfunc != NULL_RTX);

      dest2 = emit_library_call_value (libfunc, dest, LCT_CONST, dest_mode,
				       src, src_mode);

      gcc_assert (dest2 != NULL_RTX);
      if (!rtx_equal_p (dest, dest2))
	emit_move_insn (dest, dest2);
    }

  else
    gcc_unreachable ();

  return;
}
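/* Illustrative sketch (not part of the original source): converting DFmode
   to KFmode dispatches as follows.  With -mfloat128-hardware, the insn
   generator gen_extenddfkf2_hw from the table above is used directly;
   otherwise cvt = sext_optab selects the conversion libcall looked up via
   convert_optab_libfunc (in libgcc this is believed to be __extenddfkf2,
   but the exact name is an assumption noted here only for illustration).  */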
/* Emit RTL that sets a register to zero if OP1 and OP2 are equal.  SCRATCH
   can be used as that dest register.  Return the dest register.  */

rtx
rs6000_emit_eqne (machine_mode mode, rtx op1, rtx op2, rtx scratch)
{
  if (op2 == const0_rtx)
    return op1;

  if (GET_CODE (scratch) == SCRATCH)
    scratch = gen_reg_rtx (mode);

  if (logical_operand (op2, mode))
    emit_insn (gen_rtx_SET (scratch, gen_rtx_XOR (mode, op1, op2)));
  else
    emit_insn (gen_rtx_SET (scratch,
			    gen_rtx_PLUS (mode, op1,
					  negate_rtx (mode, op2))));

  return scratch;
}
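/* Worked example (illustrative, not part of the original source): for an
   SImode test (a == 0x1000), the constant 0x1000 is a logical_operand, so
   the XOR form is used: scratch = a ^ 0x1000, which is zero exactly when
   the two operands are equal.  A constant such as -5 is not a logical
   operand and instead takes the PLUS/negate form: scratch = a + 5.  */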
void
rs6000_emit_sCOND (machine_mode mode, rtx operands[])
{
  rtx condition_rtx;
  machine_mode op_mode;
  enum rtx_code cond_code;
  rtx result = operands[0];

  condition_rtx = rs6000_generate_compare (operands[1], mode);
  cond_code = GET_CODE (condition_rtx);

  if (cond_code == NE
      || cond_code == GE || cond_code == LE
      || cond_code == GEU || cond_code == LEU
      || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
    {
      rtx not_result = gen_reg_rtx (CCEQmode);
      rtx not_op, rev_cond_rtx;
      machine_mode cc_mode;

      cc_mode = GET_MODE (XEXP (condition_rtx, 0));

      rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode,
							       cond_code),
				     SImode, XEXP (condition_rtx, 0),
				     const0_rtx);
      not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
      emit_insn (gen_rtx_SET (not_result, not_op));
      condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
    }

  op_mode = GET_MODE (XEXP (operands[1], 0));
  if (op_mode == VOIDmode)
    op_mode = GET_MODE (XEXP (operands[1], 1));

  if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
    {
      PUT_MODE (condition_rtx, DImode);
      convert_move (result, condition_rtx, 0);
    }
  else
    {
      PUT_MODE (condition_rtx, SImode);
      emit_insn (gen_rtx_SET (result, condition_rtx));
    }
}
/* Emit a branch of kind CODE to location LOC.  */

void
rs6000_emit_cbranch (machine_mode mode, rtx operands[])
{
  rtx condition_rtx, loc_ref;

  condition_rtx = rs6000_generate_compare (operands[0], mode);
  loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
  emit_jump_insn (gen_rtx_SET (pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
						     loc_ref, pc_rtx)));
}
/* Return the string to output a conditional branch to LABEL, which is
   the operand template of the label, or NULL if the branch is really a
   conditional return.

   OP is the conditional expression.  XEXP (OP, 0) is assumed to be a
   condition code register and its mode specifies what kind of
   comparison we made.

   REVERSED is nonzero if we should reverse the sense of the comparison.

   INSN is the insn.  */

char *
output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
{
  static char string[64];
  enum rtx_code code = GET_CODE (op);
  rtx cc_reg = XEXP (op, 0);
  machine_mode mode = GET_MODE (cc_reg);
  int cc_regno = REGNO (cc_reg) - CR0_REGNO;
  int need_longbranch = label != NULL && get_attr_length (insn) == 8;
  int really_reversed = reversed ^ need_longbranch;
  char *s = string;
  const char *ccode;
  const char *pred;
  rtx note;

  validate_condition_mode (code, mode);

  /* Work out which way this really branches.  We could use
     reverse_condition_maybe_unordered here always but this
     makes the resulting assembler clearer.  */
  if (really_reversed)
    {
      /* Reversal of FP compares takes care -- an ordered compare
	 becomes an unordered compare and vice versa.  */
      if (mode == CCFPmode)
	code = reverse_condition_maybe_unordered (code);
      else
	code = reverse_condition (code);
    }

  switch (code)
    {
      /* Not all of these are actually distinct opcodes, but
	 we distinguish them for clarity of the resulting assembler.  */
    case NE: case LTGT:
      ccode = "ne"; break;
    case EQ: case UNEQ:
      ccode = "eq"; break;
    case GE: case GEU:
      ccode = "ge"; break;
    case GT: case GTU: case UNGT:
      ccode = "gt"; break;
    case LE: case LEU:
      ccode = "le"; break;
    case LT: case LTU: case UNLT:
      ccode = "lt"; break;
    case UNORDERED: ccode = "un"; break;
    case ORDERED: ccode = "nu"; break;
    case UNGE: ccode = "nl"; break;
    case UNLE: ccode = "ng"; break;
    default:
      gcc_unreachable ();
    }

  /* Maybe we have a guess as to how likely the branch is.  */
  pred = "";
  note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
  if (note != NULL_RTX)
    {
      /* PROB is the difference from 50%.  */
      int prob = profile_probability::from_reg_br_prob_note (XINT (note, 0))
		   .to_reg_br_prob_base () - REG_BR_PROB_BASE / 2;

      /* Only hint for highly probable/improbable branches on newer cpus when
	 we have real profile data, as static prediction overrides processor
	 dynamic prediction.  For older cpus we may as well always hint, but
	 assume not taken for branches that are very close to 50% as a
	 mispredicted taken branch is more expensive than a
	 mispredicted not-taken branch.  */
      if (rs6000_always_hint
	  || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
	      && (profile_status_for_fn (cfun) != PROFILE_GUESSED)
	      && br_prob_note_reliable_p (note)))
	{
	  if (abs (prob) > REG_BR_PROB_BASE / 20
	      && ((prob > 0) ^ need_longbranch))
	    pred = "+";
	  else
	    pred = "-";
	}
    }

  if (label == NULL)
    s += sprintf (s, "b%slr%s ", ccode, pred);
  else
    s += sprintf (s, "b%s%s ", ccode, pred);

  /* We need to escape any '%' characters in the reg_names string.
     Assume they'd only be the first character....  */
  if (reg_names[cc_regno + CR0_REGNO][0] == '%')
    *s++ = '%';
  s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);

  if (label != NULL)
    {
      /* If the branch distance was too far, we may have to use an
	 unconditional branch to go the distance.  */
      if (need_longbranch)
	s += sprintf (s, ",$+8\n\tb %s", label);
      else
	s += sprintf (s, ",%s", label);
    }

  return string;
}
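/* Illustrative outputs (not part of the original source), assuming the
   condition register prints as "cr0" and using a hypothetical label "L42":

     normal:       beq cr0,L42
     hinted:       beq+ cr0,L42        (predicted taken)
     long branch:  bne cr0,$+8
		   b L42               (condition reversed, used when the
				       target is out of conditional range)  */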
/* Return insn for VSX or Altivec comparisons.  */

static rtx
rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
{
  rtx mask;
  machine_mode mode = GET_MODE (op0);

  switch (code)
    {
    default:
      break;

    case GE:
      if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
	return NULL_RTX;
      /* FALLTHRU */

    case EQ:
    case GT:
    case GTU:
    case ORDERED:
    case UNORDERED:
    case UNEQ:
    case LTGT:
      mask = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (mask, gen_rtx_fmt_ee (code, mode, op0, op1)));
      return mask;
    }

  return NULL_RTX;
}
/* Emit vector compare for operands OP0 and OP1 using code RCODE.
   DMODE is expected destination mode.  This is a recursive function.  */

static rtx
rs6000_emit_vector_compare (enum rtx_code rcode,
			    rtx op0, rtx op1,
			    machine_mode dmode)
{
  rtx mask;
  bool swap_operands = false;
  bool try_again = false;

  gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
  gcc_assert (GET_MODE (op0) == GET_MODE (op1));

  /* See if the comparison works as is.  */
  mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
  if (mask)
    return mask;

  switch (rcode)
    {
    case LT:
      rcode = GT;
      swap_operands = true;
      try_again = true;
      break;
    case LTU:
      rcode = GTU;
      swap_operands = true;
      try_again = true;
      break;
    case NE:
    case UNLE:
    case UNLT:
    case UNGE:
    case UNGT:
      /* Invert condition and try again.
	 e.g., A != B becomes ~(A==B).  */
      {
	enum rtx_code rev_code;
	enum insn_code nor_code;
	rtx mask2;

	rev_code = reverse_condition_maybe_unordered (rcode);
	if (rev_code == UNKNOWN)
	  return NULL_RTX;

	nor_code = optab_handler (one_cmpl_optab, dmode);
	if (nor_code == CODE_FOR_nothing)
	  return NULL_RTX;

	mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
	if (!mask2)
	  return NULL_RTX;

	mask = gen_reg_rtx (dmode);
	emit_insn (GEN_FCN (nor_code) (mask, mask2));
	return mask;
      }
      break;
    case GE:
    case GEU:
    case LE:
    case LEU:
      /* Try GT/GTU/LT/LTU OR EQ */
      {
	rtx c_rtx, eq_rtx;
	enum insn_code ior_code;
	enum rtx_code new_code;

	switch (rcode)
	  {
	  case GE:
	    new_code = GT;
	    break;

	  case GEU:
	    new_code = GTU;
	    break;

	  case LE:
	    new_code = LT;
	    break;

	  case LEU:
	    new_code = LTU;
	    break;

	  default:
	    gcc_unreachable ();
	  }

	ior_code = optab_handler (ior_optab, dmode);
	if (ior_code == CODE_FOR_nothing)
	  return NULL_RTX;

	c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
	if (!c_rtx)
	  return NULL_RTX;

	eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
	if (!eq_rtx)
	  return NULL_RTX;

	mask = gen_reg_rtx (dmode);
	emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
	return mask;
      }
      break;
    default:
      return NULL_RTX;
    }

  if (try_again)
    {
      if (swap_operands)
	std::swap (op0, op1);

      mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
      if (mask)
	return mask;
    }

  /* You only get two chances.  */
  return NULL_RTX;
}
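/* Worked example (illustrative, not part of the original source): there is
   no direct vector LE comparison, so rs6000_emit_vector_compare (LE, a, b,
   mode) goes through the GT/GTU/LT/LTU-OR-EQ arm above: it computes an LT
   mask and an EQ mask recursively and IORs them together.  The LT
   sub-compare itself is handled by swapping operands and using GT.  */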
/* Emit vector conditional expression.  DEST is destination.  OP_TRUE and
   OP_FALSE are two VEC_COND_EXPR operands.  CC_OP0 and CC_OP1 are the two
   operands for the relation operation COND.  */

int
rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
			      rtx cond, rtx cc_op0, rtx cc_op1)
{
  machine_mode dest_mode = GET_MODE (dest);
  machine_mode mask_mode = GET_MODE (cc_op0);
  enum rtx_code rcode = GET_CODE (cond);
  machine_mode cc_mode = CCmode;
  rtx mask;
  rtx cond2;
  bool invert_move = false;

  if (VECTOR_UNIT_NONE_P (dest_mode))
    return 0;

  gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
	      && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));

  switch (rcode)
    {
      /* Swap operands if we can, and fall back to doing the operation as
	 specified, and doing a NOR to invert the test.  */
    case NE:
    case UNLE:
    case UNLT:
    case UNGE:
    case UNGT:
      /* Invert condition and try again.
	 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D.  */
      invert_move = true;
      rcode = reverse_condition_maybe_unordered (rcode);
      if (rcode == UNKNOWN)
	return 0;
      break;

    case GE:
    case LE:
      if (GET_MODE_CLASS (mask_mode) == MODE_VECTOR_INT)
	{
	  /* Invert condition to avoid compound test.  */
	  invert_move = true;
	  rcode = reverse_condition (rcode);
	}
      break;

    case GTU:
    case GEU:
    case LTU:
    case LEU:
      /* Mark unsigned tests with CCUNSmode.  */
      cc_mode = CCUNSmode;

      /* Invert condition to avoid compound test if necessary.  */
      if (rcode == GEU || rcode == LEU)
	{
	  invert_move = true;
	  rcode = reverse_condition (rcode);
	}
      break;

    default:
      break;
    }

  /* Get the vector mask for the given relational operations.  */
  mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);

  if (!mask)
    return 0;

  if (invert_move)
    std::swap (op_true, op_false);

  /* Optimize vec1 == vec2, to know the mask generates -1/0.  */
  if (GET_MODE_CLASS (dest_mode) == MODE_VECTOR_INT
      && (GET_CODE (op_true) == CONST_VECTOR
	  || GET_CODE (op_false) == CONST_VECTOR))
    {
      rtx constant_0 = CONST0_RTX (dest_mode);
      rtx constant_m1 = CONSTM1_RTX (dest_mode);

      if (op_true == constant_m1 && op_false == constant_0)
	{
	  emit_move_insn (dest, mask);
	  return 1;
	}

      else if (op_true == constant_0 && op_false == constant_m1)
	{
	  emit_insn (gen_rtx_SET (dest, gen_rtx_NOT (dest_mode, mask)));
	  return 1;
	}

      /* If we can't use the vector comparison directly, perhaps we can use
	 the mask for the true or false fields, instead of loading up a
	 constant.  */
      if (op_true == constant_m1)
	op_true = mask;

      if (op_false == constant_0)
	op_false = mask;
    }

  if (!REG_P (op_true) && !SUBREG_P (op_true))
    op_true = force_reg (dest_mode, op_true);

  if (!REG_P (op_false) && !SUBREG_P (op_false))
    op_false = force_reg (dest_mode, op_false);

  cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
			  CONST0_RTX (dest_mode));
  emit_insn (gen_rtx_SET (dest,
			  gen_rtx_IF_THEN_ELSE (dest_mode,
						cond2,
						op_true,
						op_false)));
  return 1;
}
/* ISA 3.0 (power9) minmax subcase to emit a XSMAXCDP or XSMINCDP instruction
   for SF/DF scalars.  Move TRUE_COND to DEST if OP of the operands of the
   last comparison is nonzero/true, FALSE_COND if it is zero/false.  Return 0
   if the hardware has no such operation.  */

static int
rs6000_emit_p9_fp_minmax (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  enum rtx_code code = GET_CODE (op);
  rtx op0 = XEXP (op, 0);
  rtx op1 = XEXP (op, 1);
  machine_mode compare_mode = GET_MODE (op0);
  machine_mode result_mode = GET_MODE (dest);
  bool max_p = false;

  if (result_mode != compare_mode)
    return 0;

  if (code == GE || code == GT)
    max_p = true;
  else if (code == LE || code == LT)
    max_p = false;
  else
    return 0;

  if (rtx_equal_p (op0, true_cond) && rtx_equal_p (op1, false_cond))
    ;

  else if (rtx_equal_p (op1, true_cond) && rtx_equal_p (op0, false_cond))
    max_p = !max_p;

  else
    return 0;

  rs6000_emit_minmax (dest, max_p ? SMAX : SMIN, op0, op1);
  return 1;
}
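/* Illustrative example (not part of the original source): for a DFmode
   expression (a >= b ? a : b), op0/op1 match true_cond/false_cond, so
   max_p stays true and rs6000_emit_minmax emits SMAX, which on power9
   should map to the xsmaxcdp instruction named in the comment above.  If
   the arms are swapped, (a >= b ? b : a), max_p is inverted and SMIN is
   emitted instead.  */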
/* ISA 3.0 (power9) conditional move subcase to emit XSCMP{EQ,GE,GT,NE}DP and
   XXSEL instructions for SF/DF scalars.  Move TRUE_COND to DEST if OP of the
   operands of the last comparison is nonzero/true, FALSE_COND if it is
   zero/false.  Return 0 if the hardware has no such operation.  */

static int
rs6000_emit_p9_fp_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  enum rtx_code code = GET_CODE (op);
  rtx op0 = XEXP (op, 0);
  rtx op1 = XEXP (op, 1);
  machine_mode result_mode = GET_MODE (dest);
  rtx compare_rtx;
  rtx cmove_rtx;
  rtx clobber_rtx;

  if (!can_create_pseudo_p ())
    return 0;

  switch (code)
    {
    case EQ:
    case GE:
    case GT:
      break;

    case NE:
    case LT:
    case LE:
      code = swap_condition (code);
      std::swap (op0, op1);
      break;

    default:
      return 0;
    }

  /* Generate:	[(parallel [(set (dest)
				 (if_then_else (op (cmp1) (cmp2))
					       (true)
					       (false)))
			    (clobber (scratch))])].  */

  compare_rtx = gen_rtx_fmt_ee (code, CCFPmode, op0, op1);
  cmove_rtx = gen_rtx_SET (dest,
			   gen_rtx_IF_THEN_ELSE (result_mode,
						 compare_rtx,
						 true_cond,
						 false_cond));

  clobber_rtx = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (V2DImode));
  emit_insn (gen_rtx_PARALLEL (VOIDmode,
			       gen_rtvec (2, cmove_rtx, clobber_rtx)));

  return 1;
}
/* Emit a conditional move: move TRUE_COND to DEST if OP of the
   operands of the last comparison is nonzero/true, FALSE_COND if it
   is zero/false.  Return 0 if the hardware has no such operation.  */

int
rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  enum rtx_code code = GET_CODE (op);
  rtx op0 = XEXP (op, 0);
  rtx op1 = XEXP (op, 1);
  machine_mode compare_mode = GET_MODE (op0);
  machine_mode result_mode = GET_MODE (dest);
  rtx temp;
  bool is_against_zero;

  /* These modes should always match.  */
  if (GET_MODE (op1) != compare_mode
      /* In the isel case however, we can use a compare immediate, so
	 op1 may be a small constant.  */
      && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
    return 0;
  if (GET_MODE (true_cond) != result_mode)
    return 0;
  if (GET_MODE (false_cond) != result_mode)
    return 0;

  /* See if we can use the ISA 3.0 (power9) min/max/compare functions.  */
  if (TARGET_P9_MINMAX
      && (compare_mode == SFmode || compare_mode == DFmode)
      && (result_mode == SFmode || result_mode == DFmode))
    {
      if (rs6000_emit_p9_fp_minmax (dest, op, true_cond, false_cond))
	return 1;

      if (rs6000_emit_p9_fp_cmove (dest, op, true_cond, false_cond))
	return 1;
    }

  /* Don't allow using floating point comparisons for integer results for
     now.  */
  if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
    return 0;

  /* First, work out if the hardware can do this at all, or
     if it's too slow....  */
  if (!FLOAT_MODE_P (compare_mode))
    {
      if (TARGET_ISEL)
	return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
      return 0;
    }

  is_against_zero = op1 == CONST0_RTX (compare_mode);

  /* A floating-point subtract might overflow, underflow, or produce
     an inexact result, thus changing the floating-point flags, so it
     can't be generated if we care about that.  It's safe if one side
     of the construct is zero, since then no subtract will be
     generated.  */
  if (SCALAR_FLOAT_MODE_P (compare_mode)
      && flag_trapping_math && ! is_against_zero)
    return 0;

  /* Eliminate half of the comparisons by switching operands, this
     makes the remaining code simpler.  */
  if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
      || code == LTGT || code == LT || code == UNLE)
    {
      code = reverse_condition_maybe_unordered (code);
      temp = true_cond;
      true_cond = false_cond;
      false_cond = temp;
    }

  /* UNEQ and LTGT take four instructions for a comparison with zero,
     it'll probably be faster to use a branch here too.  */
  if (code == UNEQ && HONOR_NANS (compare_mode))
    return 0;

  /* We're going to try to implement comparisons by performing
     a subtract, then comparing against zero.  Unfortunately,
     Inf - Inf is NaN which is not zero, and so if we don't
     know that the operand is finite and the comparison
     would treat EQ different to UNORDERED, we can't do it.  */
  if (HONOR_INFINITIES (compare_mode)
      && code != GT && code != UNGE
      && (GET_CODE (op1) != CONST_DOUBLE
	  || real_isinf (CONST_DOUBLE_REAL_VALUE (op1)))
      /* Constructs of the form (a OP b ? a : b) are safe.  */
      && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
	  || (! rtx_equal_p (op0, true_cond)
	      && ! rtx_equal_p (op1, true_cond))))
    return 0;

  /* At this point we know we can use fsel.  */

  /* Reduce the comparison to a comparison against zero.  */
  if (! is_against_zero)
    {
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp, gen_rtx_MINUS (compare_mode, op0, op1)));
      op0 = temp;
      op1 = CONST0_RTX (compare_mode);
    }

  /* If we don't care about NaNs we can reduce some of the comparisons
     down to faster ones.  */
  if (! HONOR_NANS (compare_mode))
    switch (code)
      {
      case GT:
	code = LE;
	temp = true_cond;
	true_cond = false_cond;
	false_cond = temp;
	break;
      case UNGE:
	code = GE;
	break;
      case UNEQ:
	code = EQ;
	break;
      default:
	break;
      }

  /* Now, reduce everything down to a GE.  */
  switch (code)
    {
    case GE:
      break;

    case LE:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    case ORDERED:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp, gen_rtx_ABS (compare_mode, op0)));
      op0 = temp;
      break;

    case EQ:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp,
			      gen_rtx_NEG (compare_mode,
					   gen_rtx_ABS (compare_mode, op0))));
      op0 = temp;
      break;

    case UNGE:
      /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
      temp = gen_reg_rtx (result_mode);
      emit_insn (gen_rtx_SET (temp,
			      gen_rtx_IF_THEN_ELSE (result_mode,
						    gen_rtx_GE (VOIDmode,
								op0, op1),
						    true_cond, false_cond)));
      false_cond = true_cond;
      true_cond = temp;

      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    case GT:
      /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
      temp = gen_reg_rtx (result_mode);
      emit_insn (gen_rtx_SET (temp,
			      gen_rtx_IF_THEN_ELSE (result_mode,
						    gen_rtx_GE (VOIDmode,
								op0, op1),
						    true_cond, false_cond)));
      true_cond = false_cond;
      false_cond = temp;

      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    default:
      gcc_unreachable ();
    }

  emit_insn (gen_rtx_SET (dest,
			  gen_rtx_IF_THEN_ELSE (result_mode,
						gen_rtx_GE (VOIDmode,
							    op0, op1),
						true_cond, false_cond)));
  return 1;
}
/* Same as above, but for ints (isel).  */

int
rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  rtx condition_rtx, cr;
  machine_mode mode = GET_MODE (dest);
  enum rtx_code cond_code;
  rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
  bool signedp;

  if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
    return 0;

  /* We still have to do the compare, because isel doesn't do a
     compare, it just looks at the CRx bits set by a previous compare
     instruction.  */
  condition_rtx = rs6000_generate_compare (op, mode);
  cond_code = GET_CODE (condition_rtx);
  cr = XEXP (condition_rtx, 0);
  signedp = GET_MODE (cr) == CCmode;

  isel_func = (mode == SImode
	       ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
	       : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));

  switch (cond_code)
    {
    case LT: case GT: case LTU: case GTU: case EQ:
      /* isel handles these directly.  */
      break;

    default:
      /* We need to swap the sense of the comparison.  */
      {
	std::swap (false_cond, true_cond);
	PUT_CODE (condition_rtx, reverse_condition (cond_code));
      }
      break;
    }

  false_cond = force_reg (mode, false_cond);
  if (true_cond != const0_rtx)
    true_cond = force_reg (mode, true_cond);

  emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));

  return 1;
}
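/* Illustrative example (not part of the original source): for SImode
   (a < b ? x : y) with a signed compare, cond_code is LT, which isel
   handles directly, giving a sequence roughly like

	cmpw	cr0,rA,rB
	isel	rD,rX,rY,0	# select on the CR0 "lt" bit

   Register numbers shown are hypothetical, for illustration only.  */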
void
rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
{
  machine_mode mode = GET_MODE (op0);
  enum rtx_code c;
  rtx target;

  /* VSX/altivec have direct min/max insns.  */
  if ((code == SMAX || code == SMIN)
      && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
	  || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
    {
      emit_insn (gen_rtx_SET (dest, gen_rtx_fmt_ee (code, mode, op0, op1)));
      return;
    }

  if (code == SMAX || code == SMIN)
    c = GE;
  else
    c = GEU;

  if (code == SMAX || code == UMAX)
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op0, op1, mode, 0);
  else
    target = emit_conditional_move (dest, c, op0, op1, mode,
				    op1, op0, mode, 0);
  gcc_assert (target);
  if (target != dest)
    emit_move_insn (dest, target);
}
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  rtx x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  rtx_insn *insn = emit_jump_insn (gen_rtx_SET (pc_rtx, x));
  add_reg_br_prob_note (insn, profile_probability::very_unlikely ());
}
/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  For QI/HImode, possibly use a pattern that includes
   the zero_extend operation.  */

static void
emit_load_locked (machine_mode mode, rtx reg, rtx mem)
{
  rtx (*fn) (rtx, rtx) = NULL;

  switch (mode)
    {
    case E_QImode:
      fn = gen_load_lockedqi;
      break;
    case E_HImode:
      fn = gen_load_lockedhi;
      break;
    case E_SImode:
      if (GET_MODE (mem) == QImode)
	fn = gen_load_lockedqi_si;
      else if (GET_MODE (mem) == HImode)
	fn = gen_load_lockedhi_si;
      else
	fn = gen_load_lockedsi;
      break;
    case E_DImode:
      fn = gen_load_lockeddi;
      break;
    case E_TImode:
      fn = gen_load_lockedti;
      break;
    default:
      gcc_unreachable ();
    }
  emit_insn (fn (reg, mem));
}
/* A subroutine of the atomic operation splitters.  Emit a store-conditional
   instruction in MODE.  */

static void
emit_store_conditional (machine_mode mode, rtx res, rtx mem, rtx val)
{
  rtx (*fn) (rtx, rtx, rtx) = NULL;

  switch (mode)
    {
    case E_QImode:
      fn = gen_store_conditionalqi;
      break;
    case E_HImode:
      fn = gen_store_conditionalhi;
      break;
    case E_SImode:
      fn = gen_store_conditionalsi;
      break;
    case E_DImode:
      fn = gen_store_conditionaldi;
      break;
    case E_TImode:
      fn = gen_store_conditionalti;
      break;
    default:
      gcc_unreachable ();
    }

  /* Emit sync before stwcx. to address PPC405 Erratum.  */
  if (PPC405_ERRATUM77)
    emit_insn (gen_hwsync ());

  emit_insn (fn (res, mem, val));
}
/* Expand barriers before and after a load_locked/store_cond sequence.  */

static rtx
rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
{
  rtx addr = XEXP (mem, 0);

  if (!legitimate_indirect_address_p (addr, reload_completed)
      && !legitimate_indexed_address_p (addr, reload_completed))
    {
      addr = force_reg (Pmode, addr);
      mem = replace_equiv_address_nv (mem, addr);
    }

  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_ACQUIRE:
      break;
    case MEMMODEL_RELEASE:
    case MEMMODEL_ACQ_REL:
      emit_insn (gen_lwsync ());
      break;
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_hwsync ());
      break;
    default:
      gcc_unreachable ();
    }
  return mem;
}

static void
rs6000_post_atomic_barrier (enum memmodel model)
{
  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_RELEASE:
      break;
    case MEMMODEL_ACQUIRE:
    case MEMMODEL_ACQ_REL:
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_isync ());
      break;
    default:
      gcc_unreachable ();
    }
}
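/* Summary of the barrier mapping implemented above (restating the code for
   reference): release and acq_rel orderings get a leading lwsync, seq_cst
   a leading hwsync; acquire, acq_rel and seq_cst get a trailing isync;
   relaxed and consume get no fences at all.  */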
/* A subroutine of the various atomic expanders.  For sub-word operations,
   we must adjust things to operate on SImode.  Given the original MEM,
   return a new aligned memory.  Also build and return the quantities by
   which to shift and mask.  */

static rtx
rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
{
  rtx addr, align, shift, mask, mem;
  HOST_WIDE_INT shift_mask;
  machine_mode mode = GET_MODE (orig_mem);

  /* For smaller modes, we have to implement this via SImode.  */
  shift_mask = (mode == QImode ? 0x18 : 0x10);

  addr = XEXP (orig_mem, 0);
  addr = force_reg (GET_MODE (addr), addr);

  /* Aligned memory containing subword.  Generate a new memory.  We
     do not want any of the existing MEM_ATTR data, as we're now
     accessing memory outside the original object.  */
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
  mem = gen_rtx_MEM (SImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  /* Shift amount for subword relative to aligned word.  */
  shift = gen_reg_rtx (SImode);
  addr = gen_lowpart (SImode, addr);
  rtx tmp = gen_reg_rtx (SImode);
  emit_insn (gen_ashlsi3 (tmp, addr, GEN_INT (3)));
  emit_insn (gen_andsi3 (shift, tmp, GEN_INT (shift_mask)));
  if (BYTES_BIG_ENDIAN)
    shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
				 shift, 1, OPTAB_LIB_WIDEN);
  *pshift = shift;

  /* Mask for insertion.  */
  mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
			      shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
  *pmask = mask;

  return mem;
}
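/* Worked example (illustrative, not part of the original source): for a
   QImode access at address A on a big-endian target, the code above
   computes align = A & -4, shift = ((A << 3) & 0x18) ^ 0x18, and
   mask = 0xff << shift, so the single byte can be operated on inside the
   aligned SImode word loaded from align.  For A == align + 1, shift is
   16 and mask selects bits 16..23 of the word.  */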
/* A subroutine of the various atomic expanders.  For sub-word operands,
   combine OLDVAL and NEWVAL via MASK.  Returns a new pseudo.  */

static rtx
rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
{
  rtx x;

  x = gen_reg_rtx (SImode);
  emit_insn (gen_rtx_SET (x, gen_rtx_AND (SImode,
					  gen_rtx_NOT (SImode, mask),
					  oldval)));

  x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);

  return x;
}
/* A subroutine of the various atomic expanders.  For sub-word operands,
   extract WIDE to NARROW via SHIFT.  */

static void
rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
{
  wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
			      wide, 1, OPTAB_LIB_WIDEN);
  emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
}
/* Expand an atomic compare and swap operation.  */

void
rs6000_expand_atomic_compare_and_swap (rtx operands[])
{
  rtx boolval, retval, mem, oldval, newval, cond;
  rtx label1, label2, x, mask, shift;
  machine_mode mode, orig_mode;
  enum memmodel mod_s, mod_f;
  bool is_weak;

  boolval = operands[0];
  retval = operands[1];
  mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  is_weak = (INTVAL (operands[5]) != 0);
  mod_s = memmodel_base (INTVAL (operands[6]));
  mod_f = memmodel_base (INTVAL (operands[7]));
  orig_mode = mode = GET_MODE (mem);

  mask = shift = NULL_RTX;
  if (mode == QImode || mode == HImode)
    {
      /* Before power8, we didn't have access to lbarx/lharx, so generate a
	 lwarx and shift/mask operations.  With power8, we need to do the
	 comparison in SImode, but the store is still done in QI/HImode.  */
      oldval = convert_modes (SImode, mode, oldval, 1);

      if (!TARGET_SYNC_HI_QI)
	{
	  mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

	  /* Shift and mask OLDVAL into position with the word.  */
	  oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
					NULL_RTX, 1, OPTAB_LIB_WIDEN);

	  /* Shift and mask NEWVAL into position within the word.  */
	  newval = convert_modes (SImode, mode, newval, 1);
	  newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
					NULL_RTX, 1, OPTAB_LIB_WIDEN);
	}

      /* Prepare to adjust the return value.  */
      retval = gen_reg_rtx (SImode);
      mode = SImode;
    }
  else if (reg_overlap_mentioned_p (retval, oldval))
    oldval = copy_to_reg (oldval);

  if (mode != TImode && !reg_or_short_operand (oldval, mode))
    oldval = copy_to_mode_reg (mode, oldval);

  if (reg_overlap_mentioned_p (retval, newval))
    newval = copy_to_reg (newval);

  mem = rs6000_pre_atomic_barrier (mem, mod_s);

  label1 = NULL_RTX;
  if (!is_weak)
    {
      label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
      emit_label (XEXP (label1, 0));
    }
  label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());

  emit_load_locked (mode, retval, mem);

  x = retval;
  if (mask)
    x = expand_simple_binop (SImode, AND, retval, mask,
			     NULL_RTX, 1, OPTAB_LIB_WIDEN);

  cond = gen_reg_rtx (CCmode);
  /* If we have TImode, synthesize a comparison.  */
  if (mode != TImode)
    x = gen_rtx_COMPARE (CCmode, x, oldval);
  else
    {
      rtx xor1_result = gen_reg_rtx (DImode);
      rtx xor2_result = gen_reg_rtx (DImode);
      rtx or_result = gen_reg_rtx (DImode);
      rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
      rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
      rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
      rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);

      emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
      emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
      emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
      x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
    }

  emit_insn (gen_rtx_SET (cond, x));

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label2);

  x = newval;
  if (mask)
    x = rs6000_mask_atomic_subword (retval, newval, mask);

  emit_store_conditional (orig_mode, cond, mem, x);

  if (!is_weak)
    {
      x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
      emit_unlikely_jump (x, label1);
    }

  if (!is_mm_relaxed (mod_f))
    emit_label (XEXP (label2, 0));

  rs6000_post_atomic_barrier (mod_s);

  if (is_mm_relaxed (mod_f))
    emit_label (XEXP (label2, 0));

  if (shift)
    rs6000_finish_atomic_subword (operands[1], retval, shift);
  else if (mode != GET_MODE (operands[1]))
    convert_move (operands[1], retval, 1);

  /* In all cases, CR0 contains EQ on success, and NE on failure.  */
  x = gen_rtx_EQ (SImode, cond, const0_rtx);
  emit_insn (gen_rtx_SET (boolval, x));
}
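/* Illustrative sketch (not part of the original source): for a strong
   SImode compare-and-swap with seq_cst ordering, the expansion above
   corresponds roughly to the classic load-reserved loop

	hwsync
     1:	lwarx	rR,0,rM
	cmpw	cr0,rR,rOLD
	bne-	cr0,2f
	stwcx.	rNEW,0,rM
	bne-	cr0,1b
     2:	isync

   with CR0 left holding EQ on success.  Register names and labels are
   hypothetical, for illustration only.  */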
/* Expand an atomic exchange operation.  */

void
rs6000_expand_atomic_exchange (rtx operands[])
{
  rtx retval, mem, val, cond;
  machine_mode mode;
  enum memmodel model;
  rtx label, x, mask, shift;

  retval = operands[0];
  mem = operands[1];
  val = operands[2];
  model = memmodel_base (INTVAL (operands[3]));
  mode = GET_MODE (mem);

  mask = shift = NULL_RTX;
  if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
    {
      mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

      /* Shift and mask VAL into position with the word.  */
      val = convert_modes (SImode, mode, val, 1);
      val = expand_simple_binop (SImode, ASHIFT, val, shift,
				 NULL_RTX, 1, OPTAB_LIB_WIDEN);

      /* Prepare to adjust the return value.  */
      retval = gen_reg_rtx (SImode);
      mode = SImode;
    }

  mem = rs6000_pre_atomic_barrier (mem, model);

  label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (mode, retval, mem);

  x = val;
  if (mask)
    x = rs6000_mask_atomic_subword (retval, val, mask);

  cond = gen_reg_rtx (CCmode);
  emit_store_conditional (mode, cond, mem, x);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  rs6000_post_atomic_barrier (model);

  if (shift)
    rs6000_finish_atomic_subword (operands[0], retval, shift);
}
/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
   to perform.  MEM is the memory on which to operate.  VAL is the second
   operand of the binary operator.  BEFORE and AFTER are optional locations to
   return the value of MEM either before or after the operation.  MODEL_RTX
   is a CONST_INT containing the memory model to use.  */

void
rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
			 rtx orig_before, rtx orig_after, rtx model_rtx)
{
  enum memmodel model = memmodel_base (INTVAL (model_rtx));
  machine_mode mode = GET_MODE (mem);
  machine_mode store_mode = mode;
  rtx label, x, cond, mask, shift;
  rtx before = orig_before, after = orig_after;

  mask = shift = NULL_RTX;
  /* On power8, we want to use SImode for the operation.  On previous systems,
     use the operation in a subword and shift/mask to get the proper byte or
     halfword.  */
  if (mode == QImode || mode == HImode)
    {
      if (TARGET_SYNC_HI_QI)
	{
	  val = convert_modes (SImode, mode, val, 1);

	  /* Prepare to adjust the return value.  */
	  before = gen_reg_rtx (SImode);
	  if (after)
	    after = gen_reg_rtx (SImode);
	  mode = SImode;
	}
      else
	{
	  mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

	  /* Shift and mask VAL into position with the word.  */
	  val = convert_modes (SImode, mode, val, 1);
	  val = expand_simple_binop (SImode, ASHIFT, val, shift,
				     NULL_RTX, 1, OPTAB_LIB_WIDEN);

	  switch (code)
	    {
	    case IOR:
	    case XOR:
	      /* We've already zero-extended VAL.  That is sufficient to
		 make certain that it does not affect other bits.  */
	      mask = NULL;
	      break;

	    case AND:
	      /* If we make certain that all of the other bits in VAL are
		 set, that will be sufficient to not affect other bits.  */
	      x = gen_rtx_NOT (SImode, mask);
	      x = gen_rtx_IOR (SImode, x, val);
	      emit_insn (gen_rtx_SET (val, x));
	      mask = NULL;
	      break;

	    case NOT:
	    case PLUS:
	    case MINUS:
	      /* These will all affect bits outside the field and need
		 adjustment via MASK within the loop.  */
	      break;

	    default:
	      gcc_unreachable ();
	    }

	  /* Prepare to adjust the return value.  */
	  before = gen_reg_rtx (SImode);
	  if (after)
	    after = gen_reg_rtx (SImode);
	  store_mode = mode = SImode;
	}
    }

  mem = rs6000_pre_atomic_barrier (mem, model);

  label = gen_label_rtx ();
  emit_label (label);
  label = gen_rtx_LABEL_REF (VOIDmode, label);

  if (before == NULL_RTX)
    before = gen_reg_rtx (mode);

  emit_load_locked (mode, before, mem);

  if (code == NOT)
    {
      x = expand_simple_binop (mode, AND, before, val,
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      after = expand_simple_unop (mode, NOT, x, after, 1);
    }
  else
    {
      after = expand_simple_binop (mode, code, before, val,
				   after, 1, OPTAB_LIB_WIDEN);
    }

  x = after;
  if (mask)
    {
      x = expand_simple_binop (SImode, AND, after, mask,
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
      x = rs6000_mask_atomic_subword (before, x, mask);
    }
  else if (store_mode != mode)
    x = convert_modes (store_mode, mode, x, 1);

  cond = gen_reg_rtx (CCmode);
  emit_store_conditional (store_mode, cond, mem, x);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  rs6000_post_atomic_barrier (model);

  if (shift)
    {
      /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
	 then do the calculations in a SImode register.  */
      if (orig_before)
	rs6000_finish_atomic_subword (orig_before, before, shift);
      if (orig_after)
	rs6000_finish_atomic_subword (orig_after, after, shift);
    }
  else if (store_mode != mode)
    {
      /* QImode/HImode on machines with lbarx/lharx where we do the native
	 operation and then do the calculations in a SImode register.  */
      if (orig_before)
	convert_move (orig_before, before, 1);
      if (orig_after)
	convert_move (orig_after, after, 1);
    }
  else if (orig_after && after != orig_after)
    emit_move_insn (orig_after, after);
}
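/* Worked example (illustrative, not part of the original source): an atomic
   AND on a QImode field without lbarx support takes the subword branch
   above.  VAL is first rewritten as val |= ~mask, so the AND performed
   inside the lwarx/stwcx. loop leaves the other three bytes of the
   containing word unchanged, and mask is then cleared because no
   per-iteration masking is needed.  PLUS/MINUS/NOT, by contrast, can carry
   into neighboring bytes and therefore keep mask for in-loop adjustment.  */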
/* Emit instructions to move SRC to DST.  Called by splitters for
   multi-register moves.  It will emit at most one instruction for
   each register that is accessed; that is, it won't emit li/lis pairs
   (or equivalent for 64-bit code).  One of SRC or DST must be a hard
   register.  */

void
rs6000_split_multireg_move (rtx dst, rtx src)
{
  /* The register number of the first register being moved.  */
  int reg;
  /* The mode that is to be moved.  */
  machine_mode mode;
  /* The mode that the move is being done in, and its size.  */
  machine_mode reg_mode;
  int reg_mode_size;
  /* The number of registers that will be moved.  */
  int nregs;

  reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
  mode = GET_MODE (dst);
  nregs = hard_regno_nregs (reg, mode);
  if (FP_REGNO_P (reg))
    reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
	(TARGET_HARD_FLOAT ? DFmode : SFmode);
  else if (ALTIVEC_REGNO_P (reg))
    reg_mode = V16QImode;
  else
    reg_mode = word_mode;
  reg_mode_size = GET_MODE_SIZE (reg_mode);

  gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));

  /* TDmode residing in FP registers is special, since the ISA requires that
     the lower-numbered word of a register pair is always the most significant
     word, even in little-endian mode.  This does not match the usual subreg
     semantics, so we cannot use simplify_gen_subreg in those cases.  Access
     the appropriate constituent registers "by hand" in little-endian mode.

     Note we do not need to check for destructive overlap here since TDmode
     can only reside in even/odd register pairs.  */
  if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
    {
      rtx p_src, p_dst;
      int i;

      for (i = 0; i < nregs; i++)
	{
	  if (REG_P (src) && FP_REGNO_P (REGNO (src)))
	    p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
	  else
	    p_src = simplify_gen_subreg (reg_mode, src, mode,
					 i * reg_mode_size);

	  if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
	    p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
	  else
	    p_dst = simplify_gen_subreg (reg_mode, dst, mode,
					 i * reg_mode_size);

	  emit_insn (gen_rtx_SET (p_dst, p_src));
	}

      return;
    }

  if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
    {
      /* Move register range backwards, if we might have destructive
	 overlap.  */
      int i;
      for (i = nregs - 1; i >= 0; i--)
	emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
						     i * reg_mode_size),
				simplify_gen_subreg (reg_mode, src, mode,
						     i * reg_mode_size)));
    }
  else
    {
      int i;
      int j = -1;
      bool used_update = false;
      rtx restore_basereg = NULL_RTX;

      if (MEM_P (src) && INT_REGNO_P (reg))
	{
	  rtx breg;

	  if (GET_CODE (XEXP (src, 0)) == PRE_INC
	      || GET_CODE (XEXP (src, 0)) == PRE_DEC)
	    {
	      rtx delta_rtx;
	      breg = XEXP (XEXP (src, 0), 0);
	      delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
			   ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
			   : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
	      emit_insn (gen_add3_insn (breg, breg, delta_rtx));
	      src = replace_equiv_address (src, breg);
	    }
	  else if (! rs6000_offsettable_memref_p (src, reg_mode, true))
	    {
	      if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
		{
		  rtx basereg = XEXP (XEXP (src, 0), 0);
		  if (TARGET_UPDATE)
		    {
		      rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
		      emit_insn (gen_rtx_SET (ndst,
					      gen_rtx_MEM (reg_mode,
							   XEXP (src, 0))));
		      used_update = true;
		    }
		  else
		    emit_insn (gen_rtx_SET (basereg,
					    XEXP (XEXP (src, 0), 1)));
		  src = replace_equiv_address (src, basereg);
		}
	      else
		{
		  rtx basereg = gen_rtx_REG (Pmode, reg);
		  emit_insn (gen_rtx_SET (basereg, XEXP (src, 0)));
		  src = replace_equiv_address (src, basereg);
		}
	    }

	  breg = XEXP (src, 0);
	  if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
	    breg = XEXP (breg, 0);

	  /* If the base register we are using to address memory is
	     also a destination reg, then change that register last.  */
	  if (REG_P (breg)
	      && REGNO (breg) >= REGNO (dst)
	      && REGNO (breg) < REGNO (dst) + nregs)
	    j = REGNO (breg) - REGNO (dst);
	}
      else if (MEM_P (dst) && INT_REGNO_P (reg))
	{
	  rtx breg;

	  if (GET_CODE (XEXP (dst, 0)) == PRE_INC
	      || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
	    {
	      rtx delta_rtx;
	      breg = XEXP (XEXP (dst, 0), 0);
	      delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
			   ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
			   : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));

	      /* We have to update the breg before doing the store.
		 Use store with update, if available.  */

	      if (TARGET_UPDATE)
		{
		  rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
		  emit_insn (TARGET_32BIT
			     ? (TARGET_POWERPC64
				? gen_movdi_si_update (breg, breg,
						       delta_rtx, nsrc)
				: gen_movsi_update (breg, breg,
						    delta_rtx, nsrc))
			     : gen_movdi_di_update (breg, breg,
						    delta_rtx, nsrc));
		  used_update = true;
		}
	      else
		emit_insn (gen_add3_insn (breg, breg, delta_rtx));
	      dst = replace_equiv_address (dst, breg);
	    }
	  else if (!rs6000_offsettable_memref_p (dst, reg_mode, true)
		   && GET_CODE (XEXP (dst, 0)) != LO_SUM)
	    {
	      if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
		{
		  rtx basereg = XEXP (XEXP (dst, 0), 0);
		  if (TARGET_UPDATE)
		    {
		      rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
		      emit_insn (gen_rtx_SET (gen_rtx_MEM (reg_mode,
							   XEXP (dst, 0)),
					      nsrc));
		      used_update = true;
		    }
		  else
		    emit_insn (gen_rtx_SET (basereg,
					    XEXP (XEXP (dst, 0), 1)));
		  dst = replace_equiv_address (dst, basereg);
		}
	      else
		{
		  rtx basereg = XEXP (XEXP (dst, 0), 0);
		  rtx offsetreg = XEXP (XEXP (dst, 0), 1);
		  gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
			      && REG_P (basereg)
			      && REG_P (offsetreg)
			      && REGNO (basereg) != REGNO (offsetreg));
		  if (REGNO (basereg) == 0)
		    {
		      rtx tmp = offsetreg;
		      offsetreg = basereg;
		      basereg = tmp;
		    }
		  emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
		  restore_basereg = gen_sub3_insn (basereg, basereg,
						   offsetreg);
		  dst = replace_equiv_address (dst, basereg);
		}
	    }
	  else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
	    gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode, true));
	}

      for (i = 0; i < nregs; i++)
	{
	  /* Calculate index to next subword.  */
	  j++;
	  if (j == nregs)
	    j = 0;

	  /* If compiler already emitted move of first word by
	     store with update, no need to do anything.  */
	  if (j == 0 && used_update)
	    continue;

	  emit_insn (gen_rtx_SET (simplify_gen_subreg (reg_mode, dst, mode,
						       j * reg_mode_size),
				  simplify_gen_subreg (reg_mode, src, mode,
						       j * reg_mode_size)));
	}
      if (restore_basereg != NULL_RTX)
	emit_insn (restore_basereg);
    }
}
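/* Illustrative example (not part of the original source): splitting a
   TImode GPR-to-GPR move on a 64-bit target emits one DImode move per
   constituent register.  When source and destination ranges overlap
   destructively (REGNO (src) < REGNO (dst)), the first branch above moves
   the words in reverse order so no input register is clobbered before it
   has been read.  */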
23431 /* This page contains routines that are used to determine what the
23432 function prologue and epilogue code will do and write them out. */
23434 /* Determine whether the REG is really used. */
23437 save_reg_p (int reg
)
23439 /* We need to mark the PIC offset register live for the same conditions
23440 as it is set up, or otherwise it won't be saved before we clobber it. */
23442 if (reg
== RS6000_PIC_OFFSET_TABLE_REGNUM
&& !TARGET_SINGLE_PIC_BASE
)
23444 /* When calling eh_return, we must return true for all the cases
23445 where conditional_register_usage marks the PIC offset reg
23447 if (TARGET_TOC
&& TARGET_MINIMAL_TOC
23448 && (crtl
->calls_eh_return
23449 || df_regs_ever_live_p (reg
)
23450 || !constant_pool_empty_p ()))
23453 if ((DEFAULT_ABI
== ABI_V4
|| DEFAULT_ABI
== ABI_DARWIN
)
23458 return !call_used_regs
[reg
] && df_regs_ever_live_p (reg
);
23461 /* Return the first fixed-point register that is required to be
23462 saved. 32 if none. */
23465 first_reg_to_save (void)
23469 /* Find lowest numbered live register. */
23470 for (first_reg
= 13; first_reg
<= 31; first_reg
++)
23471 if (save_reg_p (first_reg
))
23476 && crtl
->uses_pic_offset_table
23477 && first_reg
> RS6000_PIC_OFFSET_TABLE_REGNUM
)
23478 return RS6000_PIC_OFFSET_TABLE_REGNUM
;
23484 /* Similar, for FP regs. */
23487 first_fp_reg_to_save (void)
23491 /* Find lowest numbered live register. */
23492 for (first_reg
= 14 + 32; first_reg
<= 63; first_reg
++)
23493 if (save_reg_p (first_reg
))
23499 /* Similar, for AltiVec regs. */
23502 first_altivec_reg_to_save (void)
23506 /* Stack frame remains as is unless we are in AltiVec ABI. */
23507 if (! TARGET_ALTIVEC_ABI
)
23508 return LAST_ALTIVEC_REGNO
+ 1;
23510 /* On Darwin, the unwind routines are compiled without
23511 TARGET_ALTIVEC, and use save_world to save/restore the
23512 altivec registers when necessary. */
23513 if (DEFAULT_ABI
== ABI_DARWIN
&& crtl
->calls_eh_return
23514 && ! TARGET_ALTIVEC
)
23515 return FIRST_ALTIVEC_REGNO
+ 20;
23517 /* Find lowest numbered live register. */
23518 for (i
= FIRST_ALTIVEC_REGNO
+ 20; i
<= LAST_ALTIVEC_REGNO
; ++i
)
23519 if (save_reg_p (i
))

/* Return a 32-bit mask of the AltiVec registers we need to set in
   VRSAVE.  Bit n of the return value is 1 if Vn is live.  The MSB in
   the 32-bit word is 0.  */

static unsigned int
compute_vrsave_mask (void)
{
  unsigned int i, mask = 0;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     call-saved altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    mask |= 0xFFF;

  /* First, find out if we use _any_ altivec registers.  */
  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (df_regs_ever_live_p (i))
      mask |= ALTIVEC_REG_BIT (i);

  if (mask == 0)
    return mask;

  /* Next, remove the argument registers from the set.  These must
     be in the VRSAVE mask set by the caller, so we don't need to add
     them in again.  More importantly, the mask we compute here is
     used to generate CLOBBERs in the set_vrsave insn, and we do not
     wish the argument registers to die.  */
  for (i = ALTIVEC_ARG_MIN_REG; i < (unsigned) crtl->args.info.vregno; i++)
    mask &= ~ALTIVEC_REG_BIT (i);

  /* Similarly, remove the return value from the set.  */
  {
    bool yes = false;
    diddle_return_value (is_altivec_return_reg, &yes);
    if (yes)
      mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
  }

  return mask;
}

/* For a very restricted set of circumstances, we can cut down the
   size of prologues/epilogues by calling our own save/restore-the-world
   routines.  */

static void
compute_save_world_info (rs6000_stack_t *info)
{
  info->world_save_p = 1;
  info->world_save_p
    = (WORLD_SAVE_P (info)
       && DEFAULT_ABI == ABI_DARWIN
       && !cfun->has_nonlocal_label
       && info->first_fp_reg_save == FIRST_SAVED_FP_REGNO
       && info->first_gp_reg_save == FIRST_SAVED_GP_REGNO
       && info->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
       && info->cr_save_p);

  /* This will not work in conjunction with sibcalls.  Make sure there
     are none.  (This check is expensive, but seldom executed.)  */
  if (WORLD_SAVE_P (info))
    {
      rtx_insn *insn;
      for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
	if (CALL_P (insn) && SIBLING_CALL_P (insn))
	  {
	    info->world_save_p = 0;
	    break;
	  }
    }

  if (WORLD_SAVE_P (info))
    {
      /* Even if we're not touching VRsave, make sure there's room on the
	 stack for it, if it looks like we're calling SAVE_WORLD, which
	 will attempt to save it.  */
      info->vrsave_size = 4;

      /* If we are going to save the world, we need to save the link
	 register too.  */
      info->lr_save_p = 1;

      /* "Save" the VRsave register too if we're saving the world.  */
      if (info->vrsave_mask == 0)
	info->vrsave_mask = compute_vrsave_mask ();

      /* Because the Darwin register save/restore routines only handle
	 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
	 check.  */
      gcc_assert (info->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
		  && (info->first_altivec_reg_save
		      >= FIRST_SAVED_ALTIVEC_REGNO));
    }

  return;
}

static void
is_altivec_return_reg (rtx reg, void *xyes)
{
  bool *yes = (bool *) xyes;
  if (REGNO (reg) == ALTIVEC_ARG_RETURN)
    *yes = true;
}

/* Return whether REG is a global user reg or has been specified by
   -ffixed-REG.  We should not restore these, and so cannot use
   lmw or out-of-line restore functions if there are any.  We also
   can't save them (well, emit frame notes for them), because frame
   unwinding during exception handling will restore saved registers.  */

static bool
fixed_reg_p (int reg)
{
  /* Ignore fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] when the
     backend sets it, overriding anything the user might have given.  */
  if (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
      && ((DEFAULT_ABI == ABI_V4 && flag_pic)
	  || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
	  || (TARGET_TOC && TARGET_MINIMAL_TOC)))
    return false;

  return fixed_regs[reg];
}

/* Determine the strategy for saving/restoring registers.  */

enum {
  SAVE_MULTIPLE = 0x1,
  SAVE_INLINE_GPRS = 0x2,
  SAVE_INLINE_FPRS = 0x4,
  SAVE_NOINLINE_GPRS_SAVES_LR = 0x8,
  SAVE_NOINLINE_FPRS_SAVES_LR = 0x10,
  SAVE_INLINE_VRS = 0x20,
  REST_MULTIPLE = 0x100,
  REST_INLINE_GPRS = 0x200,
  REST_INLINE_FPRS = 0x400,
  REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x800,
  REST_INLINE_VRS = 0x1000
};
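
/* Minimal sketch of how the flag bits above combine: the values are
   disjoint powers of two, so a single int carries all the independent
   save/restore decisions and each one is tested with a simple mask.  */
#if 0
static void
example_strategy_use (void)
{
  int strategy = 0;
  strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
  strategy |= SAVE_MULTIPLE;
  if (!(strategy & SAVE_INLINE_GPRS))
    ; /* GPR saves would go through an out-of-line routine.  */
}
#endif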

static int
rs6000_savres_strategy (rs6000_stack_t *info,
			bool using_static_chain_p)
{
  int strategy = 0;

  /* Select between in-line and out-of-line save and restore of regs.
     First, all the obvious cases where we don't use out-of-line.  */
  if (crtl->calls_eh_return
      || cfun->machine->ra_need_lr)
    strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
		 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
		 | SAVE_INLINE_VRS | REST_INLINE_VRS);

  if (info->first_gp_reg_save == 32)
    strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;

  if (info->first_fp_reg_save == 64)
    strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;

  if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1)
    strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;

  /* Define cutoff for using out-of-line functions to save registers.  */
  if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
    {
      if (!optimize_size)
	{
	  strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
	  strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
	  strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
	}
      else
	{
	  /* Prefer out-of-line restore if it will exit.  */
	  if (info->first_fp_reg_save > 61)
	    strategy |= SAVE_INLINE_FPRS;
	  if (info->first_gp_reg_save > 29)
	    {
	      if (info->first_fp_reg_save == 64)
		strategy |= SAVE_INLINE_GPRS;
	      else
		strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
	    }
	  if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
	    strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
	}
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      if (info->first_fp_reg_save > 60)
	strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
      if (info->first_gp_reg_save > 29)
	strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
    }
  else
    {
      gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
      if ((flag_shrink_wrap_separate && optimize_function_for_speed_p (cfun))
	  || info->first_fp_reg_save > 61)
	strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
      strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
    }

  /* Don't bother to try to save things out-of-line if r11 is occupied
     by the static chain.  It would require too much fiddling and the
     static chain is rarely used anyway.  FPRs are saved w.r.t the stack
     pointer on Darwin, and AIX uses r1 or r12.  */
  if (using_static_chain_p
      && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
    strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
		 | SAVE_INLINE_GPRS
		 | SAVE_INLINE_VRS);

  /* Don't ever restore fixed regs.  That means we can't use the
     out-of-line register restore functions if a fixed reg is in the
     range of regs restored.  */
  if (!(strategy & REST_INLINE_FPRS))
    for (int i = info->first_fp_reg_save; i < 64; i++)
      if (fixed_regs[i])
	{
	  strategy |= REST_INLINE_FPRS;
	  break;
	}

  /* We can only use the out-of-line routines to restore fprs if we've
     saved all the registers from first_fp_reg_save in the prologue.
     Otherwise, we risk loading garbage.  Of course, if we have saved
     out-of-line then we know we haven't skipped any fprs.  */
  if ((strategy & SAVE_INLINE_FPRS)
      && !(strategy & REST_INLINE_FPRS))
    for (int i = info->first_fp_reg_save; i < 64; i++)
      if (!save_reg_p (i))
	{
	  strategy |= REST_INLINE_FPRS;
	  break;
	}

  /* Similarly, for altivec regs.  */
  if (!(strategy & REST_INLINE_VRS))
    for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
      if (fixed_regs[i])
	{
	  strategy |= REST_INLINE_VRS;
	  break;
	}

  if ((strategy & SAVE_INLINE_VRS)
      && !(strategy & REST_INLINE_VRS))
    for (int i = info->first_altivec_reg_save; i < LAST_ALTIVEC_REGNO + 1; i++)
      if (!save_reg_p (i))
	{
	  strategy |= REST_INLINE_VRS;
	  break;
	}

  /* info->lr_save_p isn't yet set if the only reason lr needs to be
     saved is an out-of-line save or restore.  Set up the value for
     the next test (excluding out-of-line gprs).  */
  bool lr_save_p = (info->lr_save_p
		    || !(strategy & SAVE_INLINE_FPRS)
		    || !(strategy & SAVE_INLINE_VRS)
		    || !(strategy & REST_INLINE_FPRS)
		    || !(strategy & REST_INLINE_VRS));

  if (TARGET_MULTIPLE
      && !TARGET_POWERPC64
      && info->first_gp_reg_save < 31
      && !(flag_shrink_wrap
	   && flag_shrink_wrap_separate
	   && optimize_function_for_speed_p (cfun)))
    {
      int count = 0;
      for (int i = info->first_gp_reg_save; i < 32; i++)
	if (save_reg_p (i))
	  count++;

      if (count <= 1)
	/* Don't use store multiple if only one reg needs to be
	   saved.  This can occur for example when the ABI_V4 pic reg
	   (r30) needs to be saved to make calls, but r31 is not
	   used.  */
	strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      else
	{
	  /* Prefer store multiple for saves over out-of-line
	     routines, since the store-multiple instruction will
	     always be smaller.  */
	  strategy |= SAVE_INLINE_GPRS | SAVE_MULTIPLE;

	  /* The situation is more complicated with load multiple.
	     We'd prefer to use the out-of-line routines for restores,
	     since the "exit" out-of-line routines can handle the
	     restore of LR and the frame teardown.  However it doesn't
	     make sense to use the out-of-line routine if that is the
	     only reason we'd need to save LR, and we can't use the
	     "exit" out-of-line gpr restore if we have saved some
	     fprs; in those cases it is advantageous to use load
	     multiple when available.  */
	  if (info->first_fp_reg_save != 64 || !lr_save_p)
	    strategy |= REST_INLINE_GPRS | REST_MULTIPLE;
	}
    }

  /* Using the "exit" out-of-line routine does not improve code size
     if using it would require lr to be saved and if only saving one
     or two gprs.  */
  else if (!lr_save_p && info->first_gp_reg_save > 29)
    strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;

  /* Don't ever restore fixed regs.  */
  if ((strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
    for (int i = info->first_gp_reg_save; i < 32; i++)
      if (fixed_reg_p (i))
	{
	  strategy |= REST_INLINE_GPRS;
	  strategy &= ~REST_MULTIPLE;
	  break;
	}

  /* We can only use load multiple or the out-of-line routines to
     restore gprs if we've saved all the registers from
     first_gp_reg_save.  Otherwise, we risk loading garbage.
     Of course, if we have saved out-of-line or used stmw then we know
     we haven't skipped any gprs.  */
  if ((strategy & (SAVE_INLINE_GPRS | SAVE_MULTIPLE)) == SAVE_INLINE_GPRS
      && (strategy & (REST_INLINE_GPRS | REST_MULTIPLE)) != REST_INLINE_GPRS)
    for (int i = info->first_gp_reg_save; i < 32; i++)
      if (!save_reg_p (i))
	{
	  strategy |= REST_INLINE_GPRS;
	  strategy &= ~REST_MULTIPLE;
	  break;
	}

  if (TARGET_ELF && TARGET_64BIT)
    {
      if (!(strategy & SAVE_INLINE_FPRS))
	strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
      else if (!(strategy & SAVE_INLINE_GPRS)
	       && info->first_fp_reg_save == 64)
	strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
    }
  else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
    strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;

  if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
    strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;

  return strategy;
}
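
/* One worked example of the logic above (hypothetical function, 32-bit
   SVR4, -Os, -mmultiple): with first_gp_reg_save == 28 and no FPRs or
   VRs to save, the FPR/VR bits all become *_INLINE_*, the GPR count of
   four selects SAVE_MULTIPLE (stmw), and since LR need not be saved
   the restore side gets REST_MULTIPLE (lmw) as well.  */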

/* Calculate the stack information for the current function.  This is
   complicated by having two separate calling sequences, the AIX calling
   sequence and the V.4 calling sequence.

   AIX (and Darwin/Mac OS X) stack frames look like:
							  32-bit  64-bit
	SP---->	+---------------------------------------+
		| back chain to caller			| 0	  0
		+---------------------------------------+
		| saved CR				| 4	  8 (8-11)
		+---------------------------------------+
		| saved LR				| 8	  16
		+---------------------------------------+
		| reserved for compilers		| 12	  24
		+---------------------------------------+
		| reserved for binders			| 16	  32
		+---------------------------------------+
		| saved TOC pointer			| 20	  40
		+---------------------------------------+
		| Parameter save area (+padding*) (P)	| 24	  48
		+---------------------------------------+
		| Alloca space (A)			| 24+P	  etc.
		+---------------------------------------+
		| Local variable space (L)		| 24+P+A
		+---------------------------------------+
		| Float/int conversion temporary (X)	| 24+P+A+L
		+---------------------------------------+
		| Save area for AltiVec registers (W)	| 24+P+A+L+X
		+---------------------------------------+
		| AltiVec alignment padding (Y)		| 24+P+A+L+X+W
		+---------------------------------------+
		| Save area for VRSAVE register (Z)	| 24+P+A+L+X+W+Y
		+---------------------------------------+
		| Save area for GP registers (G)	| 24+P+A+L+X+W+Y+Z
		+---------------------------------------+
		| Save area for FP registers (F)	| 24+P+A+L+X+W+Y+Z+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|
		+---------------------------------------+

     * If the alloca area is present, the parameter save area is
       padded so that the former starts 16-byte aligned.

   The required alignment for AIX configurations is two words (i.e., 8
   or 16 bytes).

   The ELFv2 ABI is a variant of the AIX ABI.  Stack frames look like:

	SP---->	+---------------------------------------+
		| Back chain to caller			| 0
		+---------------------------------------+
		| Save area for CR			| 8
		+---------------------------------------+
		| Saved LR				| 16
		+---------------------------------------+
		| Saved TOC pointer			| 24
		+---------------------------------------+
		| Parameter save area (+padding*) (P)	| 32
		+---------------------------------------+
		| Alloca space (A)			| 32+P
		+---------------------------------------+
		| Local variable space (L)		| 32+P+A
		+---------------------------------------+
		| Save area for AltiVec registers (W)	| 32+P+A+L
		+---------------------------------------+
		| AltiVec alignment padding (Y)		| 32+P+A+L+W
		+---------------------------------------+
		| Save area for GP registers (G)	| 32+P+A+L+W+Y
		+---------------------------------------+
		| Save area for FP registers (F)	| 32+P+A+L+W+Y+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		| 32+P+A+L+W+Y+G+F
		+---------------------------------------+

     * If the alloca area is present, the parameter save area is
       padded so that the former starts 16-byte aligned.

   V.4 stack frames look like:

	SP---->	+---------------------------------------+
		| back chain to caller			| 0
		+---------------------------------------+
		| caller's saved LR			| 4
		+---------------------------------------+
		| Parameter save area (+padding*) (P)	| 8
		+---------------------------------------+
		| Alloca space (A)			| 8+P
		+---------------------------------------+
		| Varargs save area (V)			| 8+P+A
		+---------------------------------------+
		| Local variable space (L)		| 8+P+A+V
		+---------------------------------------+
		| Float/int conversion temporary (X)	| 8+P+A+V+L
		+---------------------------------------+
		| Save area for AltiVec registers (W)	| 8+P+A+V+L+X
		+---------------------------------------+
		| AltiVec alignment padding (Y)		| 8+P+A+V+L+X+W
		+---------------------------------------+
		| Save area for VRSAVE register (Z)	| 8+P+A+V+L+X+W+Y
		+---------------------------------------+
		| saved CR (C)				| 8+P+A+V+L+X+W+Y+Z
		+---------------------------------------+
		| Save area for GP registers (G)	| 8+P+A+V+L+X+W+Y+Z+C
		+---------------------------------------+
		| Save area for FP registers (F)	| 8+P+A+V+L+X+W+Y+Z+C+G
		+---------------------------------------+
	old SP->| back chain to caller's caller		|
		+---------------------------------------+

     * If the alloca area is present and the required alignment is
       16 bytes, the parameter save area is padded so that the
       alloca area starts 16-byte aligned.

   The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
   given.  (But note below and in sysv4.h that we require only 8 and
   may round up the size of our stack frame anyway.  The historical
   reason is early versions of powerpc-linux which didn't properly
   align the stack at program startup.  A happy side-effect is that
   -mno-eabi libraries can be used with -meabi programs.)

   The EABI configuration defaults to the V.4 layout.  However,
   the stack alignment requirements may differ.  If -mno-eabi is not
   given, the required stack alignment is 8 bytes; if -mno-eabi is
   given, the required alignment is 16 bytes.  (But see V.4 comment
   above.)  */

#ifndef ABI_STACK_BOUNDARY
#define ABI_STACK_BOUNDARY STACK_BOUNDARY
#endif
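
/* The offset arithmetic below leans on power-of-two rounding.  A small
   sketch, assuming RS6000_ALIGN rounds up the way ROUND_UP does:  */
#if 0
static int
example_round_up (int n, int align)	/* align must be a power of two */
{
  return (n + align - 1) & -align;	/* example_round_up (40, 16) == 48 */
}
#endif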

static rs6000_stack_t *
rs6000_stack_info (void)
{
  /* We should never be called for thunks, we are not set up for that.  */
  gcc_assert (!cfun->is_thunk);

  rs6000_stack_t *info = &stack_info;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int ehrd_size;
  int ehcr_size;
  int save_align;
  int first_gp;
  HOST_WIDE_INT non_fixed_size;
  bool using_static_chain_p;

  if (reload_completed && info->reload_completed)
    return info;

  memset (info, 0, sizeof (*info));
  info->reload_completed = reload_completed;

  /* Select which calling sequence.  */
  info->abi = DEFAULT_ABI;

  /* Calculate which registers need to be saved & save area size.  */
  info->first_gp_reg_save = first_reg_to_save ();
  /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
     even if it currently looks like we won't.  Reload may need it to
     get at a constant; if so, it will have already created a constant
     pool entry for it.  */
  if (((TARGET_TOC && TARGET_MINIMAL_TOC)
       || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
       || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
      && crtl->uses_const_pool
      && info->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
    first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
  else
    first_gp = info->first_gp_reg_save;

  info->gp_size = reg_size * (32 - first_gp);

  info->first_fp_reg_save = first_fp_reg_to_save ();
  info->fp_size = 8 * (64 - info->first_fp_reg_save);

  info->first_altivec_reg_save = first_altivec_reg_to_save ();
  info->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
			     - info->first_altivec_reg_save);

  /* Does this function call anything?  */
  info->calls_p = (!crtl->is_leaf || cfun->machine->ra_needs_full_frame);

  /* Determine if we need to save the condition code registers.  */
  if (save_reg_p (CR2_REGNO)
      || save_reg_p (CR3_REGNO)
      || save_reg_p (CR4_REGNO))
    {
      info->cr_save_p = 1;
      if (DEFAULT_ABI == ABI_V4)
	info->cr_size = reg_size;
    }

  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
	continue;

      ehrd_size = i * UNITS_PER_WORD;
    }
  else
    ehrd_size = 0;

  /* In the ELFv2 ABI, we also need to allocate space for separate
     CR field save areas if the function calls __builtin_eh_return.  */
  if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
    {
      /* This hard-codes that we have three call-saved CR fields.  */
      ehcr_size = 3 * reg_size;
      /* We do *not* use the regular CR save mechanism.  */
      info->cr_save_p = 0;
    }
  else
    ehcr_size = 0;

  /* Determine various sizes.  */
  info->reg_size = reg_size;
  info->fixed_size = RS6000_SAVE_AREA;
  info->vars_size = RS6000_ALIGN (get_frame_size (), 8);
  if (cfun->calls_alloca)
    info->parm_size =
      RS6000_ALIGN (crtl->outgoing_args_size + info->fixed_size,
		    STACK_BOUNDARY / BITS_PER_UNIT) - info->fixed_size;
  else
    info->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
				    TARGET_ALTIVEC ? 16 : 8);
  if (FRAME_GROWS_DOWNWARD)
    info->vars_size
      += RS6000_ALIGN (info->fixed_size + info->vars_size + info->parm_size,
		       ABI_STACK_BOUNDARY / BITS_PER_UNIT)
	 - (info->fixed_size + info->vars_size + info->parm_size);

  if (TARGET_ALTIVEC_ABI)
    info->vrsave_mask = compute_vrsave_mask ();

  if (TARGET_ALTIVEC_VRSAVE && info->vrsave_mask)
    info->vrsave_size = 4;

  compute_save_world_info (info);

  /* Calculate the offsets.  */
  switch (DEFAULT_ABI)
    {
    case ABI_NONE:
    default:
      gcc_unreachable ();

    case ABI_AIX:
    case ABI_ELFv2:
    case ABI_DARWIN:
      info->fp_save_offset = -info->fp_size;
      info->gp_save_offset = info->fp_save_offset - info->gp_size;

      if (TARGET_ALTIVEC_ABI)
	{
	  info->vrsave_save_offset = info->gp_save_offset - info->vrsave_size;

	  /* Align stack so vector save area is on a quadword boundary.
	     The padding goes above the vectors.  */
	  if (info->altivec_size != 0)
	    info->altivec_padding_size = info->vrsave_save_offset & 0xF;

	  info->altivec_save_offset = info->vrsave_save_offset
				      - info->altivec_padding_size
				      - info->altivec_size;
	  gcc_assert (info->altivec_size == 0
		      || info->altivec_save_offset % 16 == 0);

	  /* Adjust for AltiVec case.  */
	  info->ehrd_offset = info->altivec_save_offset - ehrd_size;
	}
      else
	info->ehrd_offset = info->gp_save_offset - ehrd_size;

      info->ehcr_offset = info->ehrd_offset - ehcr_size;
      info->cr_save_offset = reg_size; /* first word when 64-bit.  */
      info->lr_save_offset = 2*reg_size;
      break;

    case ABI_V4:
      info->fp_save_offset = -info->fp_size;
      info->gp_save_offset = info->fp_save_offset - info->gp_size;
      info->cr_save_offset = info->gp_save_offset - info->cr_size;

      if (TARGET_ALTIVEC_ABI)
	{
	  info->vrsave_save_offset = info->cr_save_offset - info->vrsave_size;

	  /* Align stack so vector save area is on a quadword boundary.  */
	  if (info->altivec_size != 0)
	    info->altivec_padding_size = 16 - (-info->vrsave_save_offset % 16);

	  info->altivec_save_offset = info->vrsave_save_offset
				      - info->altivec_padding_size
				      - info->altivec_size;

	  /* Adjust for AltiVec case.  */
	  info->ehrd_offset = info->altivec_save_offset;
	}
      else
	info->ehrd_offset = info->cr_save_offset;

      info->ehrd_offset -= ehrd_size;
      info->lr_save_offset = reg_size;
      break;
    }

  save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
  info->save_size = RS6000_ALIGN (info->fp_size
				  + info->gp_size
				  + info->altivec_size
				  + info->altivec_padding_size
				  + ehrd_size
				  + ehcr_size
				  + info->cr_size
				  + info->vrsave_size,
				  save_align);

  non_fixed_size = info->vars_size + info->parm_size + info->save_size;

  info->total_size = RS6000_ALIGN (non_fixed_size + info->fixed_size,
				   ABI_STACK_BOUNDARY / BITS_PER_UNIT);

  /* Determine if we need to save the link register.  */
  if (info->calls_p
      || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
	  && crtl->profile
	  && !TARGET_PROFILE_KERNEL)
      || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
#ifdef TARGET_RELOCATABLE
      || (DEFAULT_ABI == ABI_V4
	  && (TARGET_RELOCATABLE || flag_pic > 1)
	  && !constant_pool_empty_p ())
#endif
      || rs6000_ra_ever_killed ())
    info->lr_save_p = 1;

  using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
			  && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
			  && call_used_regs[STATIC_CHAIN_REGNUM]);
  info->savres_strategy = rs6000_savres_strategy (info, using_static_chain_p);

  if (!(info->savres_strategy & SAVE_INLINE_GPRS)
      || !(info->savres_strategy & SAVE_INLINE_FPRS)
      || !(info->savres_strategy & SAVE_INLINE_VRS)
      || !(info->savres_strategy & REST_INLINE_GPRS)
      || !(info->savres_strategy & REST_INLINE_FPRS)
      || !(info->savres_strategy & REST_INLINE_VRS))
    info->lr_save_p = 1;

  if (info->lr_save_p)
    df_set_regs_ever_live (LR_REGNO, true);

  /* Determine if we need to allocate any stack frame:

     For AIX we need to push the stack if a frame pointer is needed
     (because the stack might be dynamically adjusted), if we are
     debugging, if we make calls, or if the sum of fp_save, gp_save,
     and local variables are more than the space needed to save all
     non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
     + 18*8 = 288 (GPR13 reserved).

     For V.4 we don't have the stack cushion that AIX uses, but assume
     that the debugger can handle stackless frames.  */

  if (info->calls_p)
    info->push_p = 1;

  else if (DEFAULT_ABI == ABI_V4)
    info->push_p = non_fixed_size != 0;

  else if (frame_pointer_needed)
    info->push_p = 1;

  else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
    info->push_p = 1;

  else
    info->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);

  return info;
}
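
/* A worked example of the sizing above (hypothetical 64-bit ELFv2
   function, assuming RS6000_SAVE_AREA == 32 there): saving only r30
   and r31 gives gp_size == 16 and gp_save_offset == -16; with 64 bytes
   of locals and no outgoing parameters, save_size rounds to 16,
   non_fixed_size == 80, and total_size == RS6000_ALIGN (80 + 32, 16)
   == 112.  */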

void
debug_stack_info (rs6000_stack_t *info)
{
  const char *abi_string;

  if (! info)
    info = rs6000_stack_info ();

  fprintf (stderr, "\nStack information for function %s:\n",
	   ((current_function_decl && DECL_NAME (current_function_decl))
	    ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
	    : "<unknown>"));

  switch (info->abi)
    {
    default:		 abi_string = "Unknown";  break;
    case ABI_NONE:	 abi_string = "NONE";	  break;
    case ABI_AIX:	 abi_string = "AIX";	  break;
    case ABI_ELFv2:	 abi_string = "ELFv2";	  break;
    case ABI_DARWIN:	 abi_string = "Darwin";	  break;
    case ABI_V4:	 abi_string = "V.4";	  break;
    }

  fprintf (stderr, "\tABI                 = %5s\n", abi_string);

  if (TARGET_ALTIVEC_ABI)
    fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");

  if (info->first_gp_reg_save != 32)
    fprintf (stderr, "\tfirst_gp_reg_save   = %5d\n", info->first_gp_reg_save);

  if (info->first_fp_reg_save != 64)
    fprintf (stderr, "\tfirst_fp_reg_save   = %5d\n", info->first_fp_reg_save);

  if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
    fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
	     info->first_altivec_reg_save);

  if (info->lr_save_p)
    fprintf (stderr, "\tlr_save_p           = %5d\n", info->lr_save_p);

  if (info->cr_save_p)
    fprintf (stderr, "\tcr_save_p           = %5d\n", info->cr_save_p);

  if (info->vrsave_mask)
    fprintf (stderr, "\tvrsave_mask         = 0x%x\n", info->vrsave_mask);

  if (info->push_p)
    fprintf (stderr, "\tpush_p              = %5d\n", info->push_p);

  if (info->calls_p)
    fprintf (stderr, "\tcalls_p             = %5d\n", info->calls_p);

  if (info->gp_save_offset)
    fprintf (stderr, "\tgp_save_offset      = %5d\n", info->gp_save_offset);

  if (info->fp_save_offset)
    fprintf (stderr, "\tfp_save_offset      = %5d\n", info->fp_save_offset);

  if (info->altivec_size)
    fprintf (stderr, "\taltivec_save_offset = %5d\n",
	     info->altivec_save_offset);

  if (info->vrsave_size)
    fprintf (stderr, "\tvrsave_save_offset  = %5d\n",
	     info->vrsave_save_offset);

  if (info->lr_save_p)
    fprintf (stderr, "\tlr_save_offset      = %5d\n", info->lr_save_offset);

  if (info->cr_save_p)
    fprintf (stderr, "\tcr_save_offset      = %5d\n", info->cr_save_offset);

  if (info->varargs_save_offset)
    fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);

  if (info->total_size)
    fprintf (stderr, "\ttotal_size          = " HOST_WIDE_INT_PRINT_DEC "\n",
	     info->total_size);

  if (info->vars_size)
    fprintf (stderr, "\tvars_size           = " HOST_WIDE_INT_PRINT_DEC "\n",
	     info->vars_size);

  if (info->parm_size)
    fprintf (stderr, "\tparm_size           = %5d\n", info->parm_size);

  if (info->fixed_size)
    fprintf (stderr, "\tfixed_size          = %5d\n", info->fixed_size);

  if (info->gp_size)
    fprintf (stderr, "\tgp_size             = %5d\n", info->gp_size);

  if (info->fp_size)
    fprintf (stderr, "\tfp_size             = %5d\n", info->fp_size);

  if (info->altivec_size)
    fprintf (stderr, "\taltivec_size        = %5d\n", info->altivec_size);

  if (info->vrsave_size)
    fprintf (stderr, "\tvrsave_size         = %5d\n", info->vrsave_size);

  if (info->altivec_padding_size)
    fprintf (stderr, "\taltivec_padding_size= %5d\n",
	     info->altivec_padding_size);

  if (info->cr_size)
    fprintf (stderr, "\tcr_size             = %5d\n", info->cr_size);

  if (info->save_size)
    fprintf (stderr, "\tsave_size           = %5d\n", info->save_size);

  if (info->reg_size != 4)
    fprintf (stderr, "\treg_size            = %5d\n", info->reg_size);

  fprintf (stderr, "\tsave-strategy       =  %04x\n", info->savres_strategy);

  fprintf (stderr, "\n");
}

rtx
rs6000_return_addr (int count, rtx frame)
{
  /* We can't use get_hard_reg_initial_val for LR when count == 0 if LR
     is trashed by the prologue, as it is for PIC on ABI_V4 and Darwin.  */
  if (count != 0
      || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
    {
      cfun->machine->ra_needs_full_frame = 1;

      if (count == 0)
	/* FRAME is set to frame_pointer_rtx by the generic code, but that
	   is good for loading 0(r1) only when !FRAME_GROWS_DOWNWARD.  */
	frame = stack_pointer_rtx;
      rtx prev_frame_addr = memory_address (Pmode, frame);
      rtx prev_frame = copy_to_reg (gen_rtx_MEM (Pmode, prev_frame_addr));
      rtx lr_save_off = plus_constant (Pmode,
				       prev_frame, RETURN_ADDRESS_OFFSET);
      rtx lr_save_addr = memory_address (Pmode, lr_save_off);
      return gen_rtx_MEM (Pmode, lr_save_addr);
    }

  cfun->machine->ra_need_lr = 1;
  return get_hard_reg_initial_val (Pmode, LR_REGNO);
}
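
/* For reference, the path above that reads a saved LR is the classic
   back-chain walk, roughly (pseudocode, one level):

     prev_frame = *frame;				back chain
     ra = *(prev_frame + RETURN_ADDRESS_OFFSET);	saved LR slot  */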

/* Say whether a function is a candidate for sibcall handling or not.  */

static bool
rs6000_function_ok_for_sibcall (tree decl, tree exp)
{
  tree fntype;

  if (decl)
    fntype = TREE_TYPE (decl);
  else
    fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));

  /* We can't do it if the called function has more vector parameters
     than the current function; there's nowhere to put the VRsave code.  */
  if (TARGET_ALTIVEC_ABI
      && TARGET_ALTIVEC_VRSAVE
      && !(decl && decl == current_function_decl))
    {
      function_args_iterator args_iter;
      tree type;
      int nvreg = 0;

      /* Functions with vector parameters are required to have a
	 prototype, so the argument type info must be available
	 here.  */
      FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
	if (TREE_CODE (type) == VECTOR_TYPE
	    && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
	  nvreg++;

      FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
	if (TREE_CODE (type) == VECTOR_TYPE
	    && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
	  nvreg--;

      if (nvreg > 0)
	return false;
    }

  /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
     functions, because the callee may have a different TOC pointer to
     the caller and there's no way to ensure we restore the TOC when
     we return.  With the secure-plt SYSV ABI we can't make non-local
     calls when -fpic/PIC because the plt call stubs use r30.  */
  if (DEFAULT_ABI == ABI_DARWIN
      || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
	  && decl
	  && !DECL_EXTERNAL (decl)
	  && !DECL_WEAK (decl)
	  && (*targetm.binds_local_p) (decl))
      || (DEFAULT_ABI == ABI_V4
	  && (!TARGET_SECURE_PLT
	      || !flag_pic
	      || (decl
		  && (*targetm.binds_local_p) (decl)))))
    {
      tree attr_list = TYPE_ATTRIBUTES (fntype);

      if (!lookup_attribute ("longcall", attr_list)
	  || lookup_attribute ("shortcall", attr_list))
	return true;
    }

  return false;
}

static int
rs6000_ra_ever_killed (void)
{
  rtx_insn *top;
  rtx reg;
  rtx_insn *insn;

  if (cfun->is_thunk)
    return 0;

  if (cfun->machine->lr_save_state)
    return cfun->machine->lr_save_state - 1;

  /* regs_ever_live has LR marked as used if any sibcalls are present,
     but this should not force saving and restoring in the
     pro/epilogue.  Likewise, reg_set_between_p thinks a sibcall
     clobbers LR, so that is inappropriate.  */

  /* Also, the prologue can generate a store into LR that
     doesn't really count, like this:

	move LR->R0
	bcl to set PIC register
	move LR->R31
	move R0->LR

     When we're called from the epilogue, we need to avoid counting
     this as a store.  */

  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();
  reg = gen_rtx_REG (Pmode, LR_REGNO);

  for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  if (CALL_P (insn))
	    {
	      if (!SIBLING_CALL_P (insn))
		return 1;
	    }
	  else if (find_regno_note (insn, REG_INC, LR_REGNO))
	    return 1;
	  else if (set_of (reg, insn) != NULL_RTX
		   && !prologue_epilogue_contains (insn))
	    return 1;
	}
    }
  return 0;
}

/* Emit instructions needed to load the TOC register.
   This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
   a constant pool; or for SVR4 -fpic.  */

void
rs6000_emit_load_toc_table (int fromprolog)
{
  rtx dest;
  dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);

  if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
    {
      char buf[30];
      rtx lab, tmp1, tmp2, got;

      lab = gen_label_rtx ();
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
      lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
      if (flag_pic == 2)
	{
	  got = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
	  need_toc_init = 1;
	}
      else
	got = rs6000_got_sym ();
      tmp1 = tmp2 = dest;
      if (!fromprolog)
	{
	  tmp1 = gen_reg_rtx (Pmode);
	  tmp2 = gen_reg_rtx (Pmode);
	}
      emit_insn (gen_load_toc_v4_PIC_1 (lab));
      emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
      emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
      emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
    }
  else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
    {
      emit_insn (gen_load_toc_v4_pic_si ());
      emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
    }
  else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
    {
      char buf[30];
      rtx temp0 = (fromprolog
		   ? gen_rtx_REG (Pmode, 0)
		   : gen_reg_rtx (Pmode));

      if (fromprolog)
	{
	  rtx symF, symL;

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
	  symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
	  symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

	  emit_insn (gen_load_toc_v4_PIC_1 (symF));
	  emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
	  emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
	}
      else
	{
	  rtx tocsym, lab;

	  tocsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));
	  need_toc_init = 1;
	  lab = gen_label_rtx ();
	  emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
	  emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
	  if (TARGET_LINK_STACK)
	    emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
	  emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
	}
      emit_insn (gen_addsi3 (dest, temp0, dest));
    }
  else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
    {
      /* This is for AIX code running in non-PIC ELF32.  */
      rtx realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (toc_label_name));

      need_toc_init = 1;
      emit_insn (gen_elf_high (dest, realsym));
      emit_insn (gen_elf_low (dest, dest, realsym));
    }
  else
    {
      gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);

      if (TARGET_32BIT)
	emit_insn (gen_load_toc_aix_si (dest));
      else
	emit_insn (gen_load_toc_aix_di (dest));
    }
}

/* Emit instructions to restore the link register after determining where
   its value has been stored.  */

void
rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  rtx operands[2];

  operands[0] = source;
  operands[1] = scratch;

  if (info->lr_save_p)
    {
      rtx frame_rtx = stack_pointer_rtx;
      HOST_WIDE_INT sp_offset = 0;
      rtx tmp;

      if (frame_pointer_needed
	  || cfun->calls_alloca
	  || info->total_size > 32767)
	{
	  tmp = gen_frame_mem (Pmode, frame_rtx);
	  emit_move_insn (operands[1], tmp);
	  frame_rtx = operands[1];
	}
      else if (info->push_p)
	sp_offset = info->total_size;

      tmp = plus_constant (Pmode, frame_rtx,
			   info->lr_save_offset + sp_offset);
      tmp = gen_frame_mem (Pmode, tmp);
      emit_move_insn (tmp, operands[0]);
    }
  else
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);

  /* Freeze lr_save_p.  We've just emitted rtl that depends on the
     state of lr_save_p so any change from here on would be a bug.  In
     particular, stop rs6000_ra_ever_killed from considering the SET
     of lr we may have added just above.  */
  cfun->machine->lr_save_state = info->lr_save_p + 1;
}

static GTY(()) alias_set_type set = -1;

alias_set_type
get_TOC_alias_set (void)
{
  if (set == -1)
    set = new_alias_set ();
  return set;
}

/* This returns nonzero if the current function uses the TOC.  This is
   determined by the presence of (use (unspec ... UNSPEC_TOC)), which
   is generated by the ABI_V4 load_toc_* patterns.
   Return 2 instead of 1 if the load_toc_* pattern is in the function
   partition that doesn't start the function.  */
#if TARGET_ELF
static int
uses_TOC (void)
{
  rtx_insn *insn;
  int ret = 1;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  rtx pat = PATTERN (insn);
	  int i;

	  if (GET_CODE (pat) == PARALLEL)
	    for (i = 0; i < XVECLEN (pat, 0); i++)
	      {
		rtx sub = XVECEXP (pat, 0, i);
		if (GET_CODE (sub) == USE)
		  {
		    sub = XEXP (sub, 0);
		    if (GET_CODE (sub) == UNSPEC
			&& XINT (sub, 1) == UNSPEC_TOC)
		      return ret;
		  }
	      }
	}
      else if (crtl->has_bb_partition
	       && NOTE_P (insn)
	       && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS)
	ret = 2;
    }
  return 0;
}
#endif

rtx
create_TOC_reference (rtx symbol, rtx largetoc_reg)
{
  rtx tocrel, tocreg, hi;

  if (TARGET_DEBUG_ADDR)
    {
      if (GET_CODE (symbol) == SYMBOL_REF)
	fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
		 XSTR (symbol, 0));
      else
	{
	  fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
		   GET_RTX_NAME (GET_CODE (symbol)));
	  debug_rtx (symbol);
	}
    }

  if (!can_create_pseudo_p ())
    df_set_regs_ever_live (TOC_REGISTER, true);

  tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
  tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
  if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
    return tocrel;

  hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
  if (largetoc_reg != NULL)
    {
      emit_move_insn (largetoc_reg, hi);
      hi = largetoc_reg;
    }
  return gen_rtx_LO_SUM (Pmode, hi, tocrel);
}
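
/* For the medium/large code models the value built above has roughly
   this shape (sketch, not a literal dump):

     (lo_sum (high (unspec [sym (reg 2)] UNSPEC_TOCREL))
	     (unspec [sym (reg 2)] UNSPEC_TOCREL))

   which later matches the md patterns that emit an addis off r2
   followed by a d-form access of the TOC entry.  */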

/* Issue assembly directives that create a reference to the given DWARF
   FRAME_TABLE_LABEL from the current function section.  */
void
rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
{
  fprintf (asm_out_file, "\t.ref %s\n",
	   (* targetm.strip_name_encoding) (frame_table_label));
}

/* This ties together stack memory (MEM with an alias set of frame_alias_set)
   and the change to the stack pointer.  */

static void
rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
{
  rtvec p;
  int i;
  rtx regs[3];

  i = 0;
  regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  if (hard_frame_needed)
    regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
  if (!(REGNO (fp) == STACK_POINTER_REGNUM
	|| (hard_frame_needed
	    && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
    regs[i++] = fp;

  p = rtvec_alloc (i);
  while (--i >= 0)
    {
      rtx mem = gen_frame_mem (BLKmode, regs[i]);
      RTVEC_ELT (p, i) = gen_rtx_SET (mem, const0_rtx);
    }

  emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
}
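
/* The tie emitted above costs no machine instructions: the PARALLEL of
   (set (mem:BLK (reg)) (const_int 0)) sets merely tells the scheduler
   and alias analysis that frame memory depends on the new stack and
   frame pointer values, so register saves cannot be moved across the
   pointer update.  */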

/* Allocate SIZE_INT bytes on the stack using a store with update style insn
   and set the appropriate attributes for the generated insn.  Return the
   first insn which adjusts the stack pointer or the last insn before
   the stack adjustment loop.

   SIZE_INT is used to create the CFI note for the allocation.

   SIZE_RTX is an rtx containing the size of the adjustment.  Note that
   since stacks grow to lower addresses its runtime value is -SIZE_INT.

   ORIG_SP contains the backchain value that must be stored at *sp.  */

static rtx_insn *
rs6000_emit_allocate_stack_1 (HOST_WIDE_INT size_int, rtx orig_sp)
{
  rtx_insn *insn;

  rtx size_rtx = GEN_INT (-size_int);
  if (size_int > 32767)
    {
      rtx tmp_reg = gen_rtx_REG (Pmode, 0);
      /* Need a note here so that try_split doesn't get confused.  */
      if (get_last_insn () == NULL_RTX)
	emit_note (NOTE_INSN_DELETED);
      insn = emit_move_insn (tmp_reg, size_rtx);
      try_split (PATTERN (insn), insn, 0);
      size_rtx = tmp_reg;
    }

  if (Pmode == SImode)
    insn = emit_insn (gen_movsi_update_stack (stack_pointer_rtx,
					      stack_pointer_rtx,
					      size_rtx,
					      orig_sp));
  else
    insn = emit_insn (gen_movdi_di_update_stack (stack_pointer_rtx,
						 stack_pointer_rtx,
						 size_rtx,
						 orig_sp));

  rtx par = PATTERN (insn);
  gcc_assert (GET_CODE (par) == PARALLEL);
  rtx set = XVECEXP (par, 0, 0);
  gcc_assert (GET_CODE (set) == SET);
  rtx mem = SET_DEST (set);
  gcc_assert (MEM_P (mem));
  MEM_NOTRAP_P (mem) = 1;
  set_mem_alias_set (mem, get_frame_alias_set ());

  RTX_FRAME_RELATED_P (insn) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
		gen_rtx_SET (stack_pointer_rtx,
			     gen_rtx_PLUS (Pmode,
					   stack_pointer_rtx,
					   GEN_INT (-size_int))));

  /* Emit a blockage to ensure the allocation/probing insns are
     not optimized, combined, removed, etc.  Add REG_STACK_CHECK
     note for similar reasons.  */
  if (flag_stack_clash_protection)
    {
      add_reg_note (insn, REG_STACK_CHECK, const0_rtx);
      emit_insn (gen_blockage ());
    }

  return insn;
}

static HOST_WIDE_INT
get_stack_clash_protection_probe_interval (void)
{
  return (HOST_WIDE_INT_1U
	  << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_PROBE_INTERVAL));
}

static HOST_WIDE_INT
get_stack_clash_protection_guard_size (void)
{
  return (HOST_WIDE_INT_1U
	  << PARAM_VALUE (PARAM_STACK_CLASH_PROTECTION_GUARD_SIZE));
}
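
/* Worked example: both parameters are base-two exponents, so with the
   common default of 12 each helper yields 1 << 12 == 4096, i.e. 4 KiB
   probing granularity and a 4 KiB guard (targets and users may
   configure other values).  A frame smaller than the guard needs no
   explicit probes at all.  */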

/* Allocate ORIG_SIZE bytes on the stack and probe the newly
   allocated space every STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes.

   COPY_REG, if non-null, should contain a copy of the original
   stack pointer at exit from this function.

   This is subtly different from the Ada probing in that it tries hard to
   prevent attacks that jump the stack guard.  Thus it is never allowed
   to allocate more than STACK_CLASH_PROTECTION_PROBE_INTERVAL bytes of stack
   space without a suitable probe.  */
static rtx_insn *
rs6000_emit_probe_stack_range_stack_clash (HOST_WIDE_INT orig_size,
					   rtx copy_reg)
{
  rtx orig_sp = copy_reg;

  HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();

  /* Round the size down to a multiple of PROBE_INTERVAL.  */
  HOST_WIDE_INT rounded_size = ROUND_DOWN (orig_size, probe_interval);

  /* If explicitly requested,
       or the rounded size is not the same as the original size
       or the rounded size is greater than a page,
     then we will need a copy of the original stack pointer.  */
  if (rounded_size != orig_size
      || rounded_size > probe_interval
      || copy_reg)
    {
      /* If the caller did not request a copy of the incoming stack
	 pointer, then we use r0 to hold the copy.  */
      if (!copy_reg)
	orig_sp = gen_rtx_REG (Pmode, 0);
      emit_move_insn (orig_sp, stack_pointer_rtx);
    }

  /* There are three cases here.

     One is a single probe which is the most common and most efficiently
     implemented as it does not have to have a copy of the original
     stack pointer if there are no residuals.

     Second is unrolled allocation/probes which we use if there's just
     a few of them.  It needs to save the original stack pointer into a
     temporary for use as a source register in the allocation/probe.

     Last is a loop.  This is the most uncommon case and least efficient.  */
  rtx_insn *retval = NULL;
  if (rounded_size == probe_interval)
    {
      retval = rs6000_emit_allocate_stack_1 (probe_interval, stack_pointer_rtx);

      dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
    }
  else if (rounded_size <= 8 * probe_interval)
    {
      /* The ABI requires using the store with update insns to allocate
	 space and store the backchain into the stack.

	 So we save the current stack pointer into a temporary, then
	 emit the store-with-update insns to store the saved stack pointer
	 into the right location in each new page.  */
      for (int i = 0; i < rounded_size; i += probe_interval)
	{
	  rtx_insn *insn
	    = rs6000_emit_allocate_stack_1 (probe_interval, orig_sp);

	  /* Save the first stack adjustment in RETVAL.  */
	  if (i == 0)
	    retval = insn;
	}

      dump_stack_clash_frame_info (PROBE_INLINE, rounded_size != orig_size);
    }
  else
    {
      /* Compute the ending address.  */
      rtx end_addr
	= copy_reg ? gen_rtx_REG (Pmode, 0) : gen_rtx_REG (Pmode, 12);
      rtx rs = GEN_INT (-rounded_size);
      rtx_insn *insn;
      if (add_operand (rs, Pmode))
	insn = emit_insn (gen_add3_insn (end_addr, stack_pointer_rtx, rs));
      else
	{
	  emit_move_insn (end_addr, GEN_INT (-rounded_size));
	  insn = emit_insn (gen_add3_insn (end_addr, end_addr,
					   stack_pointer_rtx));
	  /* Describe the effect of INSN to the CFI engine.  */
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			gen_rtx_SET (end_addr,
				     gen_rtx_PLUS (Pmode, stack_pointer_rtx,
						   rs)));
	}
      RTX_FRAME_RELATED_P (insn) = 1;

      /* Emit the loop.  */
      if (TARGET_64BIT)
	retval = emit_insn (gen_probe_stack_rangedi (stack_pointer_rtx,
						     stack_pointer_rtx, orig_sp,
						     end_addr));
      else
	retval = emit_insn (gen_probe_stack_rangesi (stack_pointer_rtx,
						     stack_pointer_rtx, orig_sp,
						     end_addr));
      RTX_FRAME_RELATED_P (retval) = 1;
      /* Describe the effect of INSN to the CFI engine.  */
      add_reg_note (retval, REG_FRAME_RELATED_EXPR,
		    gen_rtx_SET (stack_pointer_rtx, end_addr));

      /* Emit a blockage to ensure the allocation/probing insns are
	 not optimized, combined, removed, etc.  Other cases handle this
	 within their call to rs6000_emit_allocate_stack_1.  */
      emit_insn (gen_blockage ());

      dump_stack_clash_frame_info (PROBE_LOOP, rounded_size != orig_size);
    }

  if (orig_size != rounded_size)
    {
      /* Allocate (and implicitly probe) any residual space.  */
      HOST_WIDE_INT residual = orig_size - rounded_size;

      rtx_insn *insn = rs6000_emit_allocate_stack_1 (residual, orig_sp);

      /* If the residual was the only allocation, then we can return the
	 allocating insn.  */
      if (!retval)
	retval = insn;
    }

  return retval;
}
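
/* Worked example of the three cases above, assuming a 4 KiB probe
   interval:

     orig_size ==   4096	single allocate/probe, no residual
     orig_size ==  20000	four unrolled 4 KiB probes + 3616 residual
     orig_size == 200000	loop over 48 pages + 3392 residual  */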

/* Emit the correct code for allocating stack space, as insns.
   If COPY_REG, make sure a copy of the old frame is left there.
   The generated code may use hard register 0 as a temporary.  */

static rtx_insn *
rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
{
  rtx_insn *insn;
  rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx tmp_reg = gen_rtx_REG (Pmode, 0);
  rtx todec = gen_int_mode (-size, Pmode);

  if (INTVAL (todec) != -size)
    {
      warning (0, "stack frame too large");
      emit_insn (gen_trap ());
      return 0;
    }

  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx)
	  && REGNO (stack_limit_rtx) > 1
	  && REGNO (stack_limit_rtx) <= 31)
	{
	  rtx_insn *insn
	    = gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size));
	  gcc_assert (insn);
	  emit_insn (insn);
	  emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg, const0_rtx));
	}
      else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
	       && TARGET_32BIT
	       && DEFAULT_ABI == ABI_V4
	       && !flag_pic)
	{
	  rtx toload = gen_rtx_CONST (VOIDmode,
				      gen_rtx_PLUS (Pmode,
						    stack_limit_rtx,
						    GEN_INT (size)));

	  emit_insn (gen_elf_high (tmp_reg, toload));
	  emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
	  emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
				    const0_rtx));
	}
      else
	warning (0, "stack limit expression is not supported");
    }

  if (flag_stack_clash_protection)
    {
      if (size < get_stack_clash_protection_guard_size ())
	dump_stack_clash_frame_info (NO_PROBE_SMALL_FRAME, true);
      else
	{
	  rtx_insn *insn = rs6000_emit_probe_stack_range_stack_clash (size,
								      copy_reg);

	  /* If we asked for a copy with an offset, then we still need to add
	     in the offset.  */
	  if (copy_reg && copy_off)
	    emit_insn (gen_add3_insn (copy_reg, copy_reg, GEN_INT (copy_off)));
	  return insn;
	}
    }

  if (copy_reg)
    {
      if (copy_off != 0)
	emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
      else
	emit_move_insn (copy_reg, stack_reg);
    }

  /* Since we didn't use gen_frame_mem to generate the MEM, grab
     it now and set the alias set/attributes.  The above gen_*_update
     calls will generate a PARALLEL with the MEM set being the first
     operation.  */
  insn = rs6000_emit_allocate_stack_1 (size, stack_reg);
  return insn;
}

#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)

#if PROBE_INTERVAL > 32768
#error Cannot use indexed addressing mode for stack probing
#endif

/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
   inclusive.  These are offsets from the current stack pointer.  */

static void
rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
{
  /* See if we have a constant small number of probes to generate.  If so,
     that's the easy case.  */
  if (first + size <= 32768)
    {
      HOST_WIDE_INT i;

      /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
	 it exceeds SIZE.  If only one probe is needed, this will not
	 generate any code.  Then probe at FIRST + SIZE.  */
      for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
	emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
					 -(first + i)));

      emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
				       -(first + size)));
    }

  /* Otherwise, do the same as above, but in a loop.  Note that we must be
     extra careful with variables wrapping around because we might be at
     the very top (or the very bottom) of the address space and we have
     to be able to handle this case properly; in particular, we use an
     equality test for the loop condition.  */
  else
    {
      HOST_WIDE_INT rounded_size;
      rtx r12 = gen_rtx_REG (Pmode, 12);
      rtx r0 = gen_rtx_REG (Pmode, 0);

      /* Sanity check for the addressing mode we're going to use.  */
      gcc_assert (first <= 32768);

      /* Step 1: round SIZE to the previous multiple of the interval.  */

      rounded_size = ROUND_DOWN (size, PROBE_INTERVAL);

      /* Step 2: compute initial and final value of the loop counter.  */

      /* TEST_ADDR = SP + FIRST.  */
      emit_insn (gen_rtx_SET (r12, plus_constant (Pmode, stack_pointer_rtx,
						  -first)));

      /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE.  */
      if (rounded_size > 32768)
	{
	  emit_move_insn (r0, GEN_INT (-rounded_size));
	  emit_insn (gen_rtx_SET (r0, gen_rtx_PLUS (Pmode, r12, r0)));
	}
      else
	emit_insn (gen_rtx_SET (r0, plus_constant (Pmode, r12,
						   -rounded_size)));

      /* Step 3: the loop

	 do
	   {
	     TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
	     probe at TEST_ADDR
	   }
	 while (TEST_ADDR != LAST_ADDR)

	 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
	 until it is equal to ROUNDED_SIZE.  */

      if (TARGET_64BIT)
	emit_insn (gen_probe_stack_rangedi (r12, r12, stack_pointer_rtx, r0));
      else
	emit_insn (gen_probe_stack_rangesi (r12, r12, stack_pointer_rtx, r0));

      /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
	 that SIZE is equal to ROUNDED_SIZE.  */

      if (size != rounded_size)
	emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
    }
}

/* Probe a range of stack addresses from REG1 to REG2 inclusive.  These are
   addresses, not offsets.  */

static const char *
output_probe_stack_range_1 (rtx reg1, rtx reg2)
{
  static int labelno = 0;
  char loop_lab[32];
  rtx xops[2];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);

  /* Loop.  */
  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
  xops[0] = reg1;
  xops[1] = GEN_INT (-PROBE_INTERVAL);
  output_asm_insn ("addi %0,%0,%1", xops);

  /* Probe at TEST_ADDR.  */
  xops[1] = gen_rtx_REG (Pmode, 0);
  output_asm_insn ("stw %1,0(%0)", xops);

  /* Test if TEST_ADDR == LAST_ADDR.  */
  xops[1] = reg2;
  if (TARGET_64BIT)
    output_asm_insn ("cmpd 0,%0,%1", xops);
  else
    output_asm_insn ("cmpw 0,%0,%1", xops);

  /* Branch.  */
  fputs ("\tbne 0,", asm_out_file);
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  return "";
}

/* This function is called when rs6000_frame_related is processing
   SETs within a PARALLEL, and returns whether the REGNO save ought to
   be marked RTX_FRAME_RELATED_P.  The PARALLELs involved are those
   for out-of-line register save functions, store multiple, and the
   Darwin world_save.  They may contain registers that don't really
   need saving.  */

static bool
interesting_frame_related_regno (unsigned int regno)
{
  /* Saves apparently of r0 are actually saving LR.  It doesn't make
     sense to substitute the regno here to test save_reg_p (LR_REGNO).
     We *know* LR needs saving, and dwarf2cfi.c is able to deduce that
     (set (mem) (r0)) is saving LR from a prior (set (r0) (lr)) marked
     as frame related.  */
  if (regno == 0)
    return true;

  /* If we see CR2 then we are here on a Darwin world save.  Saves of
     CR2 signify the whole CR is being saved.  This is a long-standing
     ABI wart fixed by ELFv2.  As for r0/lr there is no need to check
     that CR needs to be saved.  */
  if (regno == CR2_REGNO)
    return true;

  /* Omit frame info for any user-defined global regs.  If frame info
     is supplied for them, frame unwinding will restore a user reg.
     Also omit frame info for any reg we don't need to save, as that
     bloats frame info and can cause problems with shrink wrapping.
     Since global regs won't be seen as needing to be saved, both of
     these conditions are covered by save_reg_p.  */
  return save_reg_p (regno);
}

/* Probe a range of stack addresses from REG1 to REG3 inclusive.  These are
   addresses, not offsets.

   REG2 contains the backchain that must be stored into *sp at each allocation.

   This is subtly different from the Ada probing above in that it tries hard
   to prevent attacks that jump the stack guard.  Thus, it is never allowed
   to allocate more than PROBE_INTERVAL bytes of stack space without a
   suitable probe.  */

static const char *
output_probe_stack_range_stack_clash (rtx reg1, rtx reg2, rtx reg3)
{
  static int labelno = 0;
  char loop_lab[32];
  rtx xops[3];

  HOST_WIDE_INT probe_interval = get_stack_clash_protection_probe_interval ();

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno++);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* This allocates and probes.  */
  xops[0] = reg1;
  xops[1] = reg2;
  xops[2] = GEN_INT (-probe_interval);
  if (TARGET_64BIT)
    output_asm_insn ("stdu %1,%2(%0)", xops);
  else
    output_asm_insn ("stwu %1,%2(%0)", xops);

  /* Jump to LOOP_LAB if TEST_ADDR != LAST_ADDR.  */
  xops[0] = reg1;
  xops[1] = reg3;
  if (TARGET_64BIT)
    output_asm_insn ("cmpd 0,%0,%1", xops);
  else
    output_asm_insn ("cmpw 0,%0,%1", xops);

  fputs ("\tbne 0,", asm_out_file);
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  return "";
}

/* Wrapper around the output_probe_stack_range routines.  */
const char *
output_probe_stack_range (rtx reg1, rtx reg2, rtx reg3)
{
  if (flag_stack_clash_protection)
    return output_probe_stack_range_stack_clash (reg1, reg2, reg3);
  else
    return output_probe_stack_range_1 (reg1, reg3);
}

/* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
   with (plus:P (reg 1) VAL), and with REG2 replaced with REPL2 if REG2
   is not NULL.  It would be nice if dwarf2out_frame_debug_expr could
   deduce these equivalences by itself so it wasn't necessary to hold
   its hand so much.  Don't be tempted to always supply d2_f_d_e with
   the actual cfa register, ie. r31 when we are using a hard frame
   pointer.  That fails when saving regs off r1, and sched moves the
   r31 setup past the reg saves.  */

static rtx_insn *
rs6000_frame_related (rtx_insn *insn, rtx reg, HOST_WIDE_INT val,
		      rtx reg2, rtx repl2)
{
  rtx repl;

  if (REGNO (reg) == STACK_POINTER_REGNUM)
    {
      gcc_checking_assert (val == 0);
      repl = NULL_RTX;
    }
  else
    repl = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
			 GEN_INT (val));

  rtx pat = PATTERN (insn);
  if (!repl && !reg2)
    {
      /* No need for any replacement.  Just set RTX_FRAME_RELATED_P.  */
      if (GET_CODE (pat) == PARALLEL)
	for (int i = 0; i < XVECLEN (pat, 0); i++)
	  if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
	    {
	      rtx set = XVECEXP (pat, 0, i);

	      if (!REG_P (SET_SRC (set))
		  || interesting_frame_related_regno (REGNO (SET_SRC (set))))
		RTX_FRAME_RELATED_P (set) = 1;
	    }
      RTX_FRAME_RELATED_P (insn) = 1;
      return insn;
    }

  /* We expect that 'pat' is either a SET or a PARALLEL containing
     SETs (and possibly other stuff).  In a PARALLEL, all the SETs
     are important so they all have to be marked RTX_FRAME_RELATED_P.
     Call simplify_replace_rtx on the SETs rather than the whole insn
     so as to leave the other stuff alone (for example USE of r12).  */

  set_used_flags (pat);
  if (GET_CODE (pat) == SET)
    {
      if (repl)
	pat = simplify_replace_rtx (pat, reg, repl);
      if (reg2)
	pat = simplify_replace_rtx (pat, reg2, repl2);
    }
  else if (GET_CODE (pat) == PARALLEL)
    {
      pat = shallow_copy_rtx (pat);
      XVEC (pat, 0) = shallow_copy_rtvec (XVEC (pat, 0));

      for (int i = 0; i < XVECLEN (pat, 0); i++)
	if (GET_CODE (XVECEXP (pat, 0, i)) == SET)
	  {
	    rtx set = XVECEXP (pat, 0, i);

	    if (repl)
	      set = simplify_replace_rtx (set, reg, repl);
	    if (reg2)
	      set = simplify_replace_rtx (set, reg2, repl2);
	    XVECEXP (pat, 0, i) = set;

	    if (!REG_P (SET_SRC (set))
		|| interesting_frame_related_regno (REGNO (SET_SRC (set))))
	      RTX_FRAME_RELATED_P (set) = 1;
	  }
    }
  else
    gcc_unreachable ();

  RTX_FRAME_RELATED_P (insn) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR, copy_rtx_if_shared (pat));

  return insn;
}

/* Returns an insn that has a vrsave set operation with the
   appropriate CLOBBERs.  */

static rtx
generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
{
  int nclobs, i;
  rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
  rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);

  clobs[0]
    = gen_rtx_SET (vrsave,
		   gen_rtx_UNSPEC_VOLATILE (SImode,
					    gen_rtvec (2, reg, vrsave),
					    UNSPECV_SET_VRSAVE));

  nclobs = 1;

  /* We need to clobber the registers in the mask so the scheduler
     does not move sets to VRSAVE before sets of AltiVec registers.

     However, if the function receives nonlocal gotos, reload will set
     all call saved registers live.  We will end up with:

	(set (reg 999) (mem))
	(parallel [ (set (reg vrsave) (unspec blah))
		    (clobber (reg 999))])

     The clobber will cause the store into reg 999 to be dead, and
     flow will attempt to delete an epilogue insn.  In this case, we
     need an unspec use/set of the register.  */

  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
      {
	if (!epiloguep || call_used_regs[i])
	  clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
					     gen_rtx_REG (V4SImode, i));
	else
	  {
	    rtx reg = gen_rtx_REG (V4SImode, i);

	    clobs[nclobs++]
	      = gen_rtx_SET (reg,
			     gen_rtx_UNSPEC (V4SImode,
					     gen_rtvec (1, reg), 27));
	  }
      }

  insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));

  for (i = 0; i < nclobs; ++i)
    XVECEXP (insn, 0, i) = clobs[i];

  return insn;
}
static rtx
gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
{
  rtx addr, mem;

  addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
  mem = gen_frame_mem (GET_MODE (reg), addr);
  return gen_rtx_SET (store ? mem : reg, store ? reg : mem);
}

static rtx
gen_frame_load (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, false);
}

static rtx
gen_frame_store (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, true);
}
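/* For illustration (a sketch, not taken verbatim from any dump): with a
   64-bit frame register r1, gen_frame_store (r28, r1, -32) builds

     (set (mem/c:DI (plus:DI (reg:DI 1) (const_int -32)))
	  (reg:DI 28))

   while gen_frame_load simply swaps the SET operands; the MEM comes from
   gen_frame_mem and so is marked as frame-related memory.  */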
/* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
   Save REGNO into [FRAME_REG + OFFSET] in mode MODE.  */

static rtx_insn *
emit_frame_save (rtx frame_reg, machine_mode mode,
		 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
{
  rtx reg;

  /* Some cases that need register indexed addressing.  */
  gcc_checking_assert (!(TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
		       || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode)));

  reg = gen_rtx_REG (mode, regno);
  rtx_insn *insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
  return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
			       NULL_RTX, NULL_RTX);
}
/* Emit an offset memory reference suitable for a frame store, while
   converting to a valid addressing mode.  */

static rtx
gen_frame_mem_offset (machine_mode mode, rtx reg, int offset)
{
  return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, GEN_INT (offset)));
}
#ifndef TARGET_FIX_AND_CONTINUE
#define TARGET_FIX_AND_CONTINUE 0
#endif

/* It's really GPR 13 or 14, FPR 14 and VR 20.  We need the smallest.  */
#define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
#define LAST_SAVRES_REGISTER 31
#define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)

enum {
  SAVRES_LR = 0x1,
  SAVRES_SAVE = 0x2,
  SAVRES_REG = 0x0c,
  SAVRES_GPR = 0,
  SAVRES_FPR = 0x4,
  SAVRES_VR  = 0x8
};

static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];

/* Temporary holding space for an out-of-line register save/restore
   routine name.  */
static char savres_routine_name[30];
/* Return the name for an out-of-line register save/restore routine.
   We are saving/restoring GPRs if GPR is true.  */

static char *
rs6000_savres_routine_name (int regno, int sel)
{
  const char *prefix = "";
  const char *suffix = "";

  /* Different targets are supposed to define
     {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
     routine name could be defined with:

     sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)

     This is a nice idea in practice, but in reality, things are
     complicated in several ways:

     - ELF targets have save/restore routines for GPRs.

     - PPC64 ELF targets have routines for save/restore of GPRs that
       differ in what they do with the link register, so having a set
       prefix doesn't work.  (We only use one of the save routines at
       the moment, though.)

     - PPC32 elf targets have "exit" versions of the restore routines
       that restore the link register and can save some extra space.
       These require an extra suffix.  (There are also "tail" versions
       of the restore routines and "GOT" versions of the save routines,
       but we don't generate those at present.  Same problems apply,
       though.)

     We deal with all this by synthesizing our own prefix/suffix and
     using that for the simple sprintf call shown above.  */
  if (DEFAULT_ABI == ABI_V4)
    {
      if (TARGET_64BIT)
	goto aix_names;

      if ((sel & SAVRES_REG) == SAVRES_GPR)
	prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
	prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
      else if ((sel & SAVRES_REG) == SAVRES_VR)
	prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
      else
	abort ();

      if ((sel & SAVRES_LR))
	suffix = "_x";
    }
  else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    {
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
      /* No out-of-line save/restore routines for GPRs on AIX.  */
      gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
#endif

    aix_names:
      if ((sel & SAVRES_REG) == SAVRES_GPR)
	prefix = ((sel & SAVRES_SAVE)
		  ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
		  : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
	{
#if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
	  if ((sel & SAVRES_LR))
	    prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
	  else
#endif
	    {
	      prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
	      suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
	    }
	}
      else if ((sel & SAVRES_REG) == SAVRES_VR)
	prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
      else
	abort ();
    }

  if (DEFAULT_ABI == ABI_DARWIN)
    {
      /* The Darwin approach is (slightly) different, in order to be
	 compatible with code generated by the system toolchain.  There is a
	 single symbol for the start of save sequence, and the code here
	 embeds an offset into that code on the basis of the first register
	 to be saved.  */
      prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
      if ((sel & SAVRES_REG) == SAVRES_GPR)
	sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
		 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
		 (regno - 13) * 4, prefix, regno);
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
	sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
		 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
      else if ((sel & SAVRES_REG) == SAVRES_VR)
	sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
		 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
      else
	abort ();
    }
  else
    sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);

  return savres_routine_name;
}
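/* Some example results, for concreteness: on PPC32 SVR4, saving GPRs
   from r29 up yields "_savegpr_29", and the "exit" restore that also
   reloads LR yields "_restgpr_29_x"; on PPC64 ELF the equivalents are
   "_savegpr1_29" (no LR) and "_restgpr0_29" (with LR).  */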
/* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
   We are saving/restoring GPRs if GPR is true.  */

static rtx
rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
{
  int regno = ((sel & SAVRES_REG) == SAVRES_GPR
	       ? info->first_gp_reg_save
	       : (sel & SAVRES_REG) == SAVRES_FPR
	       ? info->first_fp_reg_save - 32
	       : (sel & SAVRES_REG) == SAVRES_VR
	       ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
	       : -1);
  rtx sym;
  int select = sel;

  /* Don't generate bogus routine names.  */
  gcc_assert (FIRST_SAVRES_REGISTER <= regno
	      && regno <= LAST_SAVRES_REGISTER
	      && select >= 0 && select <= 12);

  sym = savres_routine_syms[regno - FIRST_SAVRES_REGISTER][select];
  if (sym == NULL)
    {
      char *name;

      name = rs6000_savres_routine_name (regno, sel);

      sym = savres_routine_syms[regno - FIRST_SAVRES_REGISTER][select]
	= gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
      SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
    }

  return sym;
}
/* Emit a sequence of insns, including a stack tie if needed, for
   resetting the stack pointer.  If UPDT_REGNO is not 1, then don't
   reset the stack pointer, but move the base of the frame into
   reg UPDT_REGNO for use by out-of-line register restore routines.  */

static rtx_insn *
rs6000_emit_stack_reset (rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
			 unsigned updt_regno)
{
  /* If there is nothing to do, don't do anything.  */
  if (frame_off == 0 && REGNO (frame_reg_rtx) == updt_regno)
    return NULL;

  rtx updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);

  /* This blockage is needed so that sched doesn't decide to move
     the sp change before the register restores.  */
  if (DEFAULT_ABI == ABI_V4)
    return emit_insn (gen_stack_restore_tie (updt_reg_rtx, frame_reg_rtx,
					     GEN_INT (frame_off)));

  /* If we are restoring registers out-of-line, we will be using the
     "exit" variants of the restore routines, which will reset the
     stack for us.  But we do need to point updt_reg into the
     right place for those routines.  */
  if (frame_off != 0)
    return emit_insn (gen_add3_insn (updt_reg_rtx,
				     frame_reg_rtx, GEN_INT (frame_off)));

  return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
}
/* Return the register number used as a pointer by out-of-line
   save/restore functions.  */

static inline unsigned
ptr_regno_for_savres (int sel)
{
  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
  return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
}
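/* In other words: AIX and ELFv2 hand the out-of-line routines r1 for
   FPR saves and for any LR variant, r12 otherwise; Darwin uses r1 for
   its FPR routines; every remaining case uses r11.  */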
/* Construct a parallel rtx describing the effect of a call to an
   out-of-line register save/restore routine, and emit the insn
   or jump_insn as appropriate.  */

static rtx_insn *
rs6000_emit_savres_rtx (rs6000_stack_t *info,
			rtx frame_reg_rtx, int save_area_offset, int lr_offset,
			machine_mode reg_mode, int sel)
{
  int i;
  int offset, start_reg, end_reg, n_regs, use_reg;
  int reg_size = GET_MODE_SIZE (reg_mode);
  rtx sym;
  rtvec p;
  rtx par;
  rtx_insn *insn;

  offset = 0;
  start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
	       ? info->first_gp_reg_save
	       : (sel & SAVRES_REG) == SAVRES_FPR
	       ? info->first_fp_reg_save
	       : (sel & SAVRES_REG) == SAVRES_VR
	       ? info->first_altivec_reg_save
	       : -1);
  end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
	     ? 32
	     : (sel & SAVRES_REG) == SAVRES_FPR
	     ? 64
	     : (sel & SAVRES_REG) == SAVRES_VR
	     ? LAST_ALTIVEC_REGNO + 1
	     : -1);
  n_regs = end_reg - start_reg;
  p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
		   + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
		   + n_regs);

  if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    RTVEC_ELT (p, offset++) = ret_rtx;

  RTVEC_ELT (p, offset++)
    = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));

  sym = rs6000_savres_routine_sym (info, sel);
  RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);

  use_reg = ptr_regno_for_savres (sel);
  if ((sel & SAVRES_REG) == SAVRES_VR)
    {
      /* Vector regs are saved/restored using [reg+reg] addressing.  */
      RTVEC_ELT (p, offset++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
      RTVEC_ELT (p, offset++)
	= gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
    }
  else
    RTVEC_ELT (p, offset++)
      = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));

  for (i = 0; i < end_reg - start_reg; i++)
    RTVEC_ELT (p, i + offset)
      = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
		       frame_reg_rtx, save_area_offset + reg_size * i,
		       (sel & SAVRES_SAVE) != 0);

  if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    RTVEC_ELT (p, i + offset)
      = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);

  par = gen_rtx_PARALLEL (VOIDmode, p);

  if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    {
      insn = emit_jump_insn (par);
      JUMP_LABEL (insn) = ret_rtx;
    }
  else
    insn = emit_insn (par);
  return insn;
}
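/* A rough sketch of the PARALLEL built above for an out-of-line GPR
   save (element order and count depend on SEL):

     (parallel [(clobber (reg:DI LR_REGNO))
		(use (symbol_ref:DI ("_savegpr0_29")))
		(use (reg:DI 11))
		(set (mem:DI ...) (reg:DI 29))
		...])

   so the rest of the compiler can reason about the routine call as if
   the stores had been emitted inline.  */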
/* Emit prologue code to store CR fields that need to be saved into REG.  This
   function should only be called when moving the non-volatile CRs to REG, it
   is not a general purpose routine to move the entire set of CRs to REG.
   Specifically, gen_prologue_movesi_from_cr() does not contain uses of the
   volatile CRs.  */

static void
rs6000_emit_prologue_move_from_cr (rtx reg)
{
  /* Only the ELFv2 ABI allows storing only selected fields.  */
  if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
    {
      int i, cr_reg[8], count = 0;

      /* Collect CR fields that must be saved.  */
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  cr_reg[count++] = i;

      /* If it's just a single one, use mfcrf.  */
      if (count == 1)
	{
	  rtvec p = rtvec_alloc (1);
	  rtvec r = rtvec_alloc (2);
	  RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
	  RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
	  RTVEC_ELT (p, 0)
	    = gen_rtx_SET (reg,
			   gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));

	  emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
	  return;
	}

      /* ??? It might be better to handle count == 2 / 3 cases here
	 as well, using logical operations to combine the values.  */
    }

  emit_insn (gen_prologue_movesi_from_cr (reg));
}
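/* With a single saved field this boils down to one field move, e.g.
   "mfocrf 12,32" to copy just CR2 into r12 (mask 32 == 1 << (7 - 2));
   otherwise, and on the other ABIs, the fallback mfcr copies all eight
   fields at once.  (Example register and mask for illustration.)  */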
/* Return whether the split-stack arg pointer (r12) is used.  */

static bool
split_stack_arg_pointer_used_p (void)
{
  /* If the pseudo holding the arg pointer is no longer a pseudo,
     then the arg pointer is used.  */
  if (cfun->machine->split_stack_arg_pointer != NULL_RTX
      && (!REG_P (cfun->machine->split_stack_arg_pointer)
	  || (REGNO (cfun->machine->split_stack_arg_pointer)
	      < FIRST_PSEUDO_REGISTER)))
    return true;

  /* Unfortunately we also need to do some code scanning, since
     r12 may have been substituted for the pseudo.  */
  rtx_insn *insn;
  basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
  FOR_BB_INSNS (bb, insn)
    if (NONDEBUG_INSN_P (insn))
      {
	/* A call destroys r12.  */
	if (CALL_P (insn))
	  return false;

	df_ref use;
	FOR_EACH_INSN_USE (use, insn)
	  {
	    rtx x = DF_REF_REG (use);
	    if (REG_P (x) && REGNO (x) == 12)
	      return true;
	  }

	df_ref def;
	FOR_EACH_INSN_DEF (def, insn)
	  {
	    rtx x = DF_REF_REG (def);
	    if (REG_P (x) && REGNO (x) == 12)
	      return false;
	  }
      }
  return bitmap_bit_p (DF_LR_OUT (bb), 12);
}
/* Return whether we need to emit an ELFv2 global entry point prologue.  */

static bool
rs6000_global_entry_point_needed_p (void)
{
  /* Only needed for the ELFv2 ABI.  */
  if (DEFAULT_ABI != ABI_ELFv2)
    return false;

  /* With -msingle-pic-base, we assume the whole program shares the same
     TOC, so no global entry point prologues are needed anywhere.  */
  if (TARGET_SINGLE_PIC_BASE)
    return false;

  /* Ensure we have a global entry point for thunks.  ??? We could
     avoid that if the target routine doesn't need a global entry point,
     but we do not know whether this is the case at this point.  */
  if (cfun->is_thunk)
    return true;

  /* For regular functions, rs6000_emit_prologue sets this flag if the
     routine ever uses the TOC pointer.  */
  return cfun->machine->r2_setup_needed;
}
/* Implement TARGET_SHRINK_WRAP_GET_SEPARATE_COMPONENTS.  */
static sbitmap
rs6000_get_separate_components (void)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  if (WORLD_SAVE_P (info))
    return NULL;

  gcc_assert (!(info->savres_strategy & SAVE_MULTIPLE)
	      && !(info->savres_strategy & REST_MULTIPLE));

  /* Component 0 is the save/restore of LR (done via GPR0).
     Component 2 is the save of the TOC (GPR2).
     Components 13..31 are the save/restore of GPR13..GPR31.
     Components 46..63 are the save/restore of FPR14..FPR31.  */

  cfun->machine->n_components = 64;

  sbitmap components = sbitmap_alloc (cfun->machine->n_components);
  bitmap_clear (components);

  int reg_size = TARGET_32BIT ? 4 : 8;
  int fp_reg_size = 8;

  /* The GPRs we need saved to the frame.  */
  if ((info->savres_strategy & SAVE_INLINE_GPRS)
      && (info->savres_strategy & REST_INLINE_GPRS))
    {
      int offset = info->gp_save_offset;
      if (info->push_p)
	offset += info->total_size;

      for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
	{
	  if (IN_RANGE (offset, -0x8000, 0x7fff)
	      && save_reg_p (regno))
	    bitmap_set_bit (components, regno);

	  offset += reg_size;
	}
    }

  /* Don't mess with the hard frame pointer.  */
  if (frame_pointer_needed)
    bitmap_clear_bit (components, HARD_FRAME_POINTER_REGNUM);

  /* Don't mess with the fixed TOC register.  */
  if ((TARGET_TOC && TARGET_MINIMAL_TOC)
      || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
      || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
    bitmap_clear_bit (components, RS6000_PIC_OFFSET_TABLE_REGNUM);

  /* The FPRs we need saved to the frame.  */
  if ((info->savres_strategy & SAVE_INLINE_FPRS)
      && (info->savres_strategy & REST_INLINE_FPRS))
    {
      int offset = info->fp_save_offset;
      if (info->push_p)
	offset += info->total_size;

      for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
	{
	  if (IN_RANGE (offset, -0x8000, 0x7fff) && save_reg_p (regno))
	    bitmap_set_bit (components, regno);

	  offset += fp_reg_size;
	}
    }

  /* Optimize LR save and restore if we can.  This is component 0.  Any
     out-of-line register save/restore routines need LR.  */
  if (info->lr_save_p
      && !(flag_pic && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
      && (info->savres_strategy & SAVE_INLINE_GPRS)
      && (info->savres_strategy & REST_INLINE_GPRS)
      && (info->savres_strategy & SAVE_INLINE_FPRS)
      && (info->savres_strategy & REST_INLINE_FPRS)
      && (info->savres_strategy & SAVE_INLINE_VRS)
      && (info->savres_strategy & REST_INLINE_VRS))
    {
      int offset = info->lr_save_offset;
      if (info->push_p)
	offset += info->total_size;
      if (IN_RANGE (offset, -0x8000, 0x7fff))
	bitmap_set_bit (components, 0);
    }

  /* Optimize saving the TOC.  This is component 2.  */
  if (cfun->machine->save_toc_in_prologue)
    bitmap_set_bit (components, 2);

  return components;
}
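/* As a worked example (register numbers illustrative): a 64-bit function
   with a small frame that saves only r30, r31 and LR inline gets the
   component set {0, 30, 31}, so each of those saves and restores can be
   shrink-wrapped to just the paths that need it.  */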
/* Implement TARGET_SHRINK_WRAP_COMPONENTS_FOR_BB.  */
static sbitmap
rs6000_components_for_bb (basic_block bb)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  bitmap in = DF_LIVE_IN (bb);
  bitmap gen = &DF_LIVE_BB_INFO (bb)->gen;
  bitmap kill = &DF_LIVE_BB_INFO (bb)->kill;

  sbitmap components = sbitmap_alloc (cfun->machine->n_components);
  bitmap_clear (components);

  /* A register is used in a bb if it is in the IN, GEN, or KILL sets.  */

  /* GPRs.  */
  for (unsigned regno = info->first_gp_reg_save; regno < 32; regno++)
    if (bitmap_bit_p (in, regno)
	|| bitmap_bit_p (gen, regno)
	|| bitmap_bit_p (kill, regno))
      bitmap_set_bit (components, regno);

  /* FPRs.  */
  for (unsigned regno = info->first_fp_reg_save; regno < 64; regno++)
    if (bitmap_bit_p (in, regno)
	|| bitmap_bit_p (gen, regno)
	|| bitmap_bit_p (kill, regno))
      bitmap_set_bit (components, regno);

  /* The link register.  */
  if (bitmap_bit_p (in, LR_REGNO)
      || bitmap_bit_p (gen, LR_REGNO)
      || bitmap_bit_p (kill, LR_REGNO))
    bitmap_set_bit (components, 0);

  /* The TOC save.  */
  if (bitmap_bit_p (in, TOC_REGNUM)
      || bitmap_bit_p (gen, TOC_REGNUM)
      || bitmap_bit_p (kill, TOC_REGNUM))
    bitmap_set_bit (components, 2);

  return components;
}
/* Implement TARGET_SHRINK_WRAP_DISQUALIFY_COMPONENTS.  */
static void
rs6000_disqualify_components (sbitmap components, edge e,
			      sbitmap edge_components, bool /*is_prologue*/)
{
  /* Our LR pro/epilogue code moves LR via R0, so R0 had better not be
     live where we want to place that code.  */
  if (bitmap_bit_p (edge_components, 0)
      && bitmap_bit_p (DF_LIVE_IN (e->dest), 0))
    {
      if (dump_file)
	fprintf (dump_file, "Disqualifying LR because GPR0 is live "
		 "on entry to bb %d\n", e->dest->index);
      bitmap_clear_bit (components, 0);
    }
}
/* Implement TARGET_SHRINK_WRAP_EMIT_PROLOGUE_COMPONENTS.  */
static void
rs6000_emit_prologue_components (sbitmap components)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
			     ? HARD_FRAME_POINTER_REGNUM
			     : STACK_POINTER_REGNUM);

  machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
  int fp_reg_size = 8;

  /* Prologue for LR.  */
  if (bitmap_bit_p (components, 0))
    {
      rtx lr = gen_rtx_REG (reg_mode, LR_REGNO);
      rtx reg = gen_rtx_REG (reg_mode, 0);
      rtx_insn *insn = emit_move_insn (reg, lr);
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_CFA_REGISTER, gen_rtx_SET (reg, lr));

      int offset = info->lr_save_offset;
      if (info->push_p)
	offset += info->total_size;

      insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
      RTX_FRAME_RELATED_P (insn) = 1;
      rtx mem = copy_rtx (SET_DEST (single_set (insn)));
      add_reg_note (insn, REG_CFA_OFFSET, gen_rtx_SET (mem, lr));
    }

  /* Prologue for TOC.  */
  if (bitmap_bit_p (components, 2))
    {
      rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
      rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
      emit_insn (gen_frame_store (reg, sp_reg, RS6000_TOC_SAVE_SLOT));
    }

  /* Prologue for the GPRs.  */
  int offset = info->gp_save_offset;
  if (info->push_p)
    offset += info->total_size;

  for (int i = info->first_gp_reg_save; i < 32; i++)
    {
      if (bitmap_bit_p (components, i))
	{
	  rtx reg = gen_rtx_REG (reg_mode, i);
	  rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  rtx set = copy_rtx (single_set (insn));
	  add_reg_note (insn, REG_CFA_OFFSET, set);
	}

      offset += reg_size;
    }

  /* Prologue for the FPRs.  */
  offset = info->fp_save_offset;
  if (info->push_p)
    offset += info->total_size;

  for (int i = info->first_fp_reg_save; i < 64; i++)
    {
      if (bitmap_bit_p (components, i))
	{
	  rtx reg = gen_rtx_REG (fp_reg_mode, i);
	  rtx_insn *insn = emit_insn (gen_frame_store (reg, ptr_reg, offset));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  rtx set = copy_rtx (single_set (insn));
	  add_reg_note (insn, REG_CFA_OFFSET, set);
	}

      offset += fp_reg_size;
    }
}
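/* Each separately wrapped save above carries its own CFI: the LR-to-r0
   copy gets a REG_CFA_REGISTER note and every store a REG_CFA_OFFSET
   note, so the unwinder sees exactly the saves emitted on this path,
   e.g. a lone "std 30,-16(1)" annotated as the save of r30 (offset
   illustrative).  */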
/* Implement TARGET_SHRINK_WRAP_EMIT_EPILOGUE_COMPONENTS.  */
static void
rs6000_emit_epilogue_components (sbitmap components)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  rtx ptr_reg = gen_rtx_REG (Pmode, frame_pointer_needed
			     ? HARD_FRAME_POINTER_REGNUM
			     : STACK_POINTER_REGNUM);

  machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;

  machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
  int fp_reg_size = 8;

  /* Epilogue for the FPRs.  */
  int offset = info->fp_save_offset;
  if (info->push_p)
    offset += info->total_size;

  for (int i = info->first_fp_reg_save; i < 64; i++)
    {
      if (bitmap_bit_p (components, i))
	{
	  rtx reg = gen_rtx_REG (fp_reg_mode, i);
	  rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_CFA_RESTORE, reg);
	}

      offset += fp_reg_size;
    }

  /* Epilogue for the GPRs.  */
  offset = info->gp_save_offset;
  if (info->push_p)
    offset += info->total_size;

  for (int i = info->first_gp_reg_save; i < 32; i++)
    {
      if (bitmap_bit_p (components, i))
	{
	  rtx reg = gen_rtx_REG (reg_mode, i);
	  rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));
	  RTX_FRAME_RELATED_P (insn) = 1;
	  add_reg_note (insn, REG_CFA_RESTORE, reg);
	}

      offset += reg_size;
    }

  /* Epilogue for LR.  */
  if (bitmap_bit_p (components, 0))
    {
      int offset = info->lr_save_offset;
      if (info->push_p)
	offset += info->total_size;

      rtx reg = gen_rtx_REG (reg_mode, 0);
      rtx_insn *insn = emit_insn (gen_frame_load (reg, ptr_reg, offset));

      rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
      insn = emit_move_insn (lr, reg);
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_CFA_RESTORE, lr);
    }
}
/* Implement TARGET_SHRINK_WRAP_SET_HANDLED_COMPONENTS.  */
static void
rs6000_set_handled_components (sbitmap components)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  for (int i = info->first_gp_reg_save; i < 32; i++)
    if (bitmap_bit_p (components, i))
      cfun->machine->gpr_is_wrapped_separately[i] = true;

  for (int i = info->first_fp_reg_save; i < 64; i++)
    if (bitmap_bit_p (components, i))
      cfun->machine->fpr_is_wrapped_separately[i - 32] = true;

  if (bitmap_bit_p (components, 0))
    cfun->machine->lr_is_wrapped_separately = true;

  if (bitmap_bit_p (components, 2))
    cfun->machine->toc_is_wrapped_separately = true;
}
/* VRSAVE is a bit vector representing which AltiVec registers
   are used.  The OS uses this to determine which vector
   registers to save on a context switch.  We need to save
   VRSAVE on the stack frame, add whatever AltiVec registers we
   used in this function, and do the corresponding magic in the
   epilogue.  */
static void
emit_vrsave_prologue (rs6000_stack_t *info, int save_regno,
		      HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
{
  /* Get VRSAVE into a GPR.  */
  rtx reg = gen_rtx_REG (SImode, save_regno);
  rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
  if (TARGET_MACHO)
    emit_insn (gen_get_vrsave_internal (reg));
  else
    emit_insn (gen_rtx_SET (reg, vrsave));

  /* Save VRSAVE.  */
  int offset = info->vrsave_save_offset + frame_off;
  emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));

  /* Include the registers in the mask.  */
  emit_insn (gen_iorsi3 (reg, reg, GEN_INT (info->vrsave_mask)));

  emit_insn (generate_set_vrsave (reg, info, 0));
}
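/* In outline the emitted sequence is: mfvrsave into the chosen GPR, a
   store of that GPR into the VRSAVE slot, an OR with this function's
   vrsave_mask (an oris/ori pair for a wide mask), and an mtvrsave of
   the result.  (Outline only; the exact insns depend on the mask.)  */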
/* Set up the arg pointer (r12) for -fsplit-stack code.  If __morestack was
   called, it left the arg pointer to the old stack in r29.  Otherwise, the
   arg pointer is the top of the current frame.  */
static void
emit_split_stack_prologue (rs6000_stack_t *info, rtx_insn *sp_adjust,
			   HOST_WIDE_INT frame_off, rtx frame_reg_rtx)
{
  cfun->machine->split_stack_argp_used = true;

  if (sp_adjust)
    {
      rtx r12 = gen_rtx_REG (Pmode, 12);
      rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
      rtx set_r12 = gen_rtx_SET (r12, sp_reg_rtx);
      emit_insn_before (set_r12, sp_adjust);
    }
  else if (frame_off != 0 || REGNO (frame_reg_rtx) != 12)
    {
      rtx r12 = gen_rtx_REG (Pmode, 12);
      if (frame_off == 0)
	emit_move_insn (r12, frame_reg_rtx);
      else
	emit_insn (gen_add3_insn (r12, frame_reg_rtx, GEN_INT (frame_off)));
    }

  if (info->push_p)
    {
      rtx r12 = gen_rtx_REG (Pmode, 12);
      rtx r29 = gen_rtx_REG (Pmode, 29);
      rtx cr7 = gen_rtx_REG (CCUNSmode, CR7_REGNO);
      rtx not_more = gen_label_rtx ();
      rtx jump;

      jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
				   gen_rtx_GEU (VOIDmode, cr7, const0_rtx),
				   gen_rtx_LABEL_REF (VOIDmode, not_more),
				   pc_rtx);
      jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
      JUMP_LABEL (jump) = not_more;
      LABEL_NUSES (not_more) += 1;
      emit_move_insn (r12, r29);
      emit_label (not_more);
    }
}
/* Emit function prologue as insns.  */

void
rs6000_emit_prologue (void)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
  int fp_reg_size = 8;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx frame_reg_rtx = sp_reg_rtx;
  unsigned int cr_save_regno;
  rtx cr_save_rtx = NULL_RTX;
  rtx_insn *insn;
  int strategy;
  int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
			      && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
			      && call_used_regs[STATIC_CHAIN_REGNUM]);
  int using_split_stack = (flag_split_stack
			   && (lookup_attribute ("no_split_stack",
						 DECL_ATTRIBUTES (cfun->decl))
			       == NULL));

  /* Offset to top of frame for frame_reg and sp respectively.  */
  HOST_WIDE_INT frame_off = 0;
  HOST_WIDE_INT sp_off = 0;
  /* sp_adjust is the stack adjusting instruction, tracked so that the
     insn setting up the split-stack arg pointer can be emitted just
     prior to it, when r12 is not used here for other purposes.  */
  rtx_insn *sp_adjust = 0;

#if CHECKING_P
  /* Track and check usage of r0, r11, r12.  */
  int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
#define START_USE(R) do \
  {						\
    gcc_assert ((reg_inuse & (1 << (R))) == 0);	\
    reg_inuse |= 1 << (R);			\
  } while (0)
#define END_USE(R) do \
  {						\
    gcc_assert ((reg_inuse & (1 << (R))) != 0);	\
    reg_inuse &= ~(1 << (R));			\
  } while (0)
#define NOT_INUSE(R) do \
  {						\
    gcc_assert ((reg_inuse & (1 << (R))) == 0);	\
  } while (0)
#else
#define START_USE(R) do {} while (0)
#define END_USE(R) do {} while (0)
#define NOT_INUSE(R) do {} while (0)
#endif
  if (DEFAULT_ABI == ABI_ELFv2
      && !TARGET_SINGLE_PIC_BASE)
    {
      cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);

      /* With -mminimal-toc we may generate an extra use of r2 below.  */
      if (TARGET_TOC && TARGET_MINIMAL_TOC
	  && !constant_pool_empty_p ())
	cfun->machine->r2_setup_needed = true;
    }

  if (flag_stack_usage_info)
    current_function_static_stack_size = info->total_size;

  if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
    {
      HOST_WIDE_INT size = info->total_size;

      if (crtl->is_leaf && !cfun->calls_alloca)
	{
	  if (size > PROBE_INTERVAL && size > get_stack_check_protect ())
	    rs6000_emit_probe_stack_range (get_stack_check_protect (),
					   size - get_stack_check_protect ());
	}
      else if (size > 0)
	rs6000_emit_probe_stack_range (get_stack_check_protect (), size);
    }

  if (TARGET_FIX_AND_CONTINUE)
    {
      /* gdb on darwin arranges to forward a function from the old
	 address by modifying the first 5 instructions of the function
	 to branch to the overriding function.  This is necessary to
	 permit function pointers that point to the old function to
	 actually forward to the new function.  */
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
    }
  /* Handle world saves specially here.  */
  if (WORLD_SAVE_P (info))
    {
      int i, j, sz;
      rtx treg;
      rtvec p;
      rtx reg0;

      /* save_world expects lr in r0. */
      reg0 = gen_rtx_REG (Pmode, 0);
      if (info->lr_save_p)
	{
	  insn = emit_move_insn (reg0,
				 gen_rtx_REG (Pmode, LR_REGNO));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
	 assumptions about the offsets of various bits of the stack
	 frame.  */
      gcc_assert (info->gp_save_offset == -220
		  && info->fp_save_offset == -144
		  && info->lr_save_offset == 8
		  && info->cr_save_offset == 4
		  && info->push_p
		  && info->lr_save_p
		  && (!crtl->calls_eh_return
		      || info->ehrd_offset == -432)
		  && info->vrsave_save_offset == -224
		  && info->altivec_save_offset == -416);

      treg = gen_rtx_REG (SImode, 11);
      emit_move_insn (treg, GEN_INT (-info->total_size));

      /* SAVE_WORLD takes the caller's LR in R0 and the frame size
	 in R11.  It also clobbers R12, so beware!  */

      /* Preserve CR2 for save_world prologues */
      sz = 5;
      sz += 32 - info->first_gp_reg_save;
      sz += 64 - info->first_fp_reg_save;
      sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
      p = rtvec_alloc (sz);
      j = 0;
      RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
					    gen_rtx_REG (SImode,
							 LR_REGNO));
      RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
					gen_rtx_SYMBOL_REF (Pmode,
							    "*save_world"));
      /* We do floats first so that the instruction pattern matches
	 properly.  */
      for (i = 0; i < 64 - info->first_fp_reg_save; i++)
	RTVEC_ELT (p, j++)
	  = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
					  info->first_fp_reg_save + i),
			     frame_reg_rtx,
			     info->fp_save_offset + frame_off + 8 * i);
      for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
	RTVEC_ELT (p, j++)
	  = gen_frame_store (gen_rtx_REG (V4SImode,
					  info->first_altivec_reg_save + i),
			     frame_reg_rtx,
			     info->altivec_save_offset + frame_off + 16 * i);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	RTVEC_ELT (p, j++)
	  = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
			     frame_reg_rtx,
			     info->gp_save_offset + frame_off + reg_size * i);

      /* CR register traditionally saved as CR2.  */
      RTVEC_ELT (p, j++)
	= gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
			   frame_reg_rtx, info->cr_save_offset + frame_off);
      /* Explain about use of R0.  */
      if (info->lr_save_p)
	RTVEC_ELT (p, j++)
	  = gen_frame_store (reg0,
			     frame_reg_rtx, info->lr_save_offset + frame_off);
      /* Explain what happens to the stack pointer.  */
      {
	rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
	RTVEC_ELT (p, j++) = gen_rtx_SET (sp_reg_rtx, newval);
      }

      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
			    treg, GEN_INT (-info->total_size));
      sp_off = frame_off = info->total_size;
    }
  strategy = info->savres_strategy;

  /* For V.4, update stack before we do any saving and set back pointer.  */
  if (! WORLD_SAVE_P (info)
      && info->push_p
      && (DEFAULT_ABI == ABI_V4
	  || crtl->calls_eh_return))
    {
      bool need_r11 = (!(strategy & SAVE_INLINE_FPRS)
		       || !(strategy & SAVE_INLINE_GPRS)
		       || !(strategy & SAVE_INLINE_VRS));
      int ptr_regno = -1;
      rtx ptr_reg = NULL_RTX;
      int ptr_off = 0;

      if (info->total_size < 32767)
	frame_off = info->total_size;
      else if (need_r11)
	ptr_regno = 11;
      else if (info->cr_save_p
	       || info->lr_save_p
	       || info->first_fp_reg_save < 64
	       || info->first_gp_reg_save < 32
	       || info->altivec_size != 0
	       || info->vrsave_size != 0
	       || crtl->calls_eh_return)
	ptr_regno = 12;
      else
	{
	  /* The prologue won't be saving any regs so there is no need
	     to set up a frame register to access any frame save area.
	     We also won't be using frame_off anywhere below, but set
	     the correct value anyway to protect against future
	     changes to this function.  */
	  frame_off = info->total_size;
	}
      if (ptr_regno != -1)
	{
	  /* Set up the frame offset to that needed by the first
	     out-of-line save function.  */
	  START_USE (ptr_regno);
	  ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
	  frame_reg_rtx = ptr_reg;
	  if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
	    gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
	  else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
	    ptr_off = info->gp_save_offset + info->gp_size;
	  else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
	    ptr_off = info->altivec_save_offset + info->altivec_size;
	  frame_off = -ptr_off;
	}
      sp_adjust = rs6000_emit_allocate_stack (info->total_size,
					      ptr_reg, ptr_off);
      if (REGNO (frame_reg_rtx) == 12)
	sp_adjust = 0;
      sp_off = info->total_size;
      if (frame_reg_rtx != sp_reg_rtx)
	rs6000_emit_stack_tie (frame_reg_rtx, false);
    }
  /* If we use the link register, get it into r0.  */
  if (!WORLD_SAVE_P (info) && info->lr_save_p
      && !cfun->machine->lr_is_wrapped_separately)
    {
      rtx addr, reg, mem;

      reg = gen_rtx_REG (Pmode, 0);
      START_USE (0);
      insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
      RTX_FRAME_RELATED_P (insn) = 1;

      if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
			| SAVE_NOINLINE_FPRS_SAVES_LR)))
	{
	  addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
			       GEN_INT (info->lr_save_offset + frame_off));
	  mem = gen_rtx_MEM (Pmode, addr);
	  /* This should not be of rs6000_sr_alias_set, because of
	     __builtin_return_address.  */

	  insn = emit_move_insn (mem, reg);
	  rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
				NULL_RTX, NULL_RTX);
	  END_USE (0);
	}
    }
  /* If we need to save CR, put it into r12 or r11.  Choose r12 except when
     r12 will be needed by out-of-line gpr restore.  */
  cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
		   && !(strategy & (SAVE_INLINE_GPRS
				    | SAVE_NOINLINE_GPRS_SAVES_LR))
		   ? 11 : 12);
  if (!WORLD_SAVE_P (info)
      && info->cr_save_p
      && REGNO (frame_reg_rtx) != cr_save_regno
      && !(using_static_chain_p && cr_save_regno == 11)
      && !(using_split_stack && cr_save_regno == 12 && sp_adjust))
    {
      cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
      START_USE (cr_save_regno);
      rs6000_emit_prologue_move_from_cr (cr_save_rtx);
    }
  /* Do any required saving of fpr's.  If only one or two to save, do
     it ourselves.  Otherwise, call function.  */
  if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
    {
      int offset = info->fp_save_offset + frame_off;
      for (int i = info->first_fp_reg_save; i < 64; i++)
	{
	  if (save_reg_p (i)
	      && !cfun->machine->fpr_is_wrapped_separately[i - 32])
	    emit_frame_save (frame_reg_rtx, fp_reg_mode, i, offset,
			     sp_off - frame_off);

	  offset += fp_reg_size;
	}
    }
  else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
    {
      bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
      int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
      unsigned ptr_regno = ptr_regno_for_savres (sel);
      rtx ptr_reg = frame_reg_rtx;

      if (REGNO (frame_reg_rtx) == ptr_regno)
	gcc_checking_assert (frame_off == 0);
      else
	{
	  ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
	  NOT_INUSE (ptr_regno);
	  emit_insn (gen_add3_insn (ptr_reg,
				    frame_reg_rtx, GEN_INT (frame_off)));
	}
      insn = rs6000_emit_savres_rtx (info, ptr_reg,
				     info->fp_save_offset,
				     info->lr_save_offset,
				     DFmode, sel);
      rs6000_frame_related (insn, ptr_reg, sp_off,
			    NULL_RTX, NULL_RTX);
      if (lr)
	END_USE (0);
    }
  /* Save GPRs.  This is done as a PARALLEL if we are using
     the store-multiple instructions.  */
  if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
    {
      bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
      int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
      unsigned ptr_regno = ptr_regno_for_savres (sel);
      rtx ptr_reg = frame_reg_rtx;
      bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
      int end_save = info->gp_save_offset + info->gp_size;
      int ptr_off;

      if (ptr_regno == 12)
	sp_adjust = 0;
      if (!ptr_set_up)
	ptr_reg = gen_rtx_REG (Pmode, ptr_regno);

      /* Need to adjust r11 (r12) if we saved any FPRs.  */
      if (end_save + frame_off != 0)
	{
	  rtx offset = GEN_INT (end_save + frame_off);

	  if (ptr_set_up)
	    frame_off = -end_save;
	  else
	    NOT_INUSE (ptr_regno);
	  emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	}
      else if (!ptr_set_up)
	{
	  NOT_INUSE (ptr_regno);
	  emit_move_insn (ptr_reg, frame_reg_rtx);
	}
      ptr_off = -end_save;
      insn = rs6000_emit_savres_rtx (info, ptr_reg,
				     info->gp_save_offset + ptr_off,
				     info->lr_save_offset + ptr_off,
				     reg_mode, sel);
      rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
			    NULL_RTX, NULL_RTX);
      if (lr)
	END_USE (0);
    }
  else if (!WORLD_SAVE_P (info) && (strategy & SAVE_MULTIPLE))
    {
      rtvec p;
      int i;
      p = rtvec_alloc (32 - info->first_gp_reg_save);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	RTVEC_ELT (p, i)
	  = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
			     frame_reg_rtx,
			     info->gp_save_offset + frame_off + reg_size * i);
      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
			    NULL_RTX, NULL_RTX);
    }
  else if (!WORLD_SAVE_P (info))
    {
      int offset = info->gp_save_offset + frame_off;
      for (int i = info->first_gp_reg_save; i < 32; i++)
	{
	  if (save_reg_p (i)
	      && !cfun->machine->gpr_is_wrapped_separately[i])
	    emit_frame_save (frame_reg_rtx, reg_mode, i, offset,
			     sp_off - frame_off);

	  offset += reg_size;
	}
    }
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      rtvec p;

      for (i = 0; ; ++i)
	{
	  unsigned int regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;
	}

      p = rtvec_alloc (i);

      for (i = 0; ; ++i)
	{
	  unsigned int regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;

	  rtx set
	    = gen_frame_store (gen_rtx_REG (reg_mode, regno),
			       sp_reg_rtx,
			       info->ehrd_offset + sp_off + reg_size * (int) i);
	  RTVEC_ELT (p, i) = set;
	  RTX_FRAME_RELATED_P (set) = 1;
	}

      insn = emit_insn (gen_blockage ());
      RTX_FRAME_RELATED_P (insn) = 1;
      add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
    }
  /* In AIX ABI we need to make sure r2 is really saved.  */
  if (TARGET_AIX && crtl->calls_eh_return)
    {
      rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
      rtx join_insn, note;
      rtx_insn *save_insn;
      long toc_restore_insn;

      tmp_reg = gen_rtx_REG (Pmode, 11);
      tmp_reg_si = gen_rtx_REG (SImode, 11);
      if (using_static_chain_p)
	{
	  START_USE (0);
	  emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
	}
      else
	START_USE (11);
      emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
      /* Peek at instruction to which this function returns.  If it's
	 restoring r2, then we know we've already saved r2.  We can't
	 unconditionally save r2 because the value we have will already
	 be updated if we arrived at this function via a plt call or
	 toc adjusting stub.  */
      emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
      toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
			  + RS6000_TOC_SAVE_SLOT);
      hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
      emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
      compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
      validate_condition_mode (EQ, CCUNSmode);
      lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
      emit_insn (gen_rtx_SET (compare_result,
			      gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
      toc_save_done = gen_label_rtx ();
      jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
				   gen_rtx_EQ (VOIDmode, compare_result,
					       const0_rtx),
				   gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
				   pc_rtx);
      jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
      JUMP_LABEL (jump) = toc_save_done;
      LABEL_NUSES (toc_save_done) += 1;

      save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
				   TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
				   sp_off - frame_off);

      emit_label (toc_save_done);

      /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
	 have a CFG that has different saves along different paths.
	 Move the note to a dummy blockage insn, which describes that
	 R2 is unconditionally saved after the label.  */
      /* ??? An alternate representation might be a special insn pattern
	 containing both the branch and the store.  That might give the
	 code that minimizes the number of DW_CFA_advance opcodes more
	 freedom in placing the annotations.  */
      note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
      if (note)
	remove_note (save_insn, note);
      else
	note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
			       copy_rtx (PATTERN (save_insn)), NULL_RTX);
      RTX_FRAME_RELATED_P (save_insn) = 0;

      join_insn = emit_insn (gen_blockage ());
      REG_NOTES (join_insn) = note;
      RTX_FRAME_RELATED_P (join_insn) = 1;

      if (using_static_chain_p)
	{
	  emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
	  END_USE (0);
	}
      else
	END_USE (11);
    }
  /* Save CR if we use any that must be preserved.  */
  if (!WORLD_SAVE_P (info) && info->cr_save_p)
    {
      rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
			       GEN_INT (info->cr_save_offset + frame_off));
      rtx mem = gen_frame_mem (SImode, addr);

      /* If we didn't copy cr before, do so now using r0.  */
      if (cr_save_rtx == NULL_RTX)
	{
	  START_USE (0);
	  cr_save_rtx = gen_rtx_REG (SImode, 0);
	  rs6000_emit_prologue_move_from_cr (cr_save_rtx);
	}

      /* Saving CR requires a two-instruction sequence: one instruction
	 to move the CR to a general-purpose register, and a second
	 instruction that stores the GPR to memory.

	 We do not emit any DWARF CFI records for the first of these,
	 because we cannot properly represent the fact that CR is saved in
	 a register.  One reason is that we cannot express that multiple
	 CR fields are saved; another reason is that on 64-bit, the size
	 of the CR register in DWARF (4 bytes) differs from the size of
	 a general-purpose register.

	 This means if any intervening instruction were to clobber one of
	 the call-saved CR fields, we'd have incorrect CFI.  To prevent
	 this from happening, we mark the store to memory as a use of
	 those CR fields, which prevents any such instruction from being
	 scheduled in between the two instructions.  */

      rtx crsave_v[9];
      int n_crsave = 0;
      int i;

      crsave_v[n_crsave++] = gen_rtx_SET (mem, cr_save_rtx);
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  crsave_v[n_crsave++]
	    = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));

      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
					  gen_rtvec_v (n_crsave, crsave_v)));
      END_USE (REGNO (cr_save_rtx));

      /* Now, there's no way that dwarf2out_frame_debug_expr is going to
	 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
	 so we need to construct a frame expression manually.  */
      RTX_FRAME_RELATED_P (insn) = 1;

      /* Update address to be stack-pointer relative, like
	 rs6000_frame_related would do.  */
      addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
			   GEN_INT (info->cr_save_offset + sp_off));
      mem = gen_frame_mem (SImode, addr);

      if (DEFAULT_ABI == ABI_ELFv2)
	{
	  /* In the ELFv2 ABI we generate separate CFI records for each
	     CR field that was actually saved.  They all point to the
	     same 32-bit stack slot.  */
	  rtx crframe[8];
	  int n_crframe = 0;

	  for (i = 0; i < 8; i++)
	    if (save_reg_p (CR0_REGNO + i))
	      {
		crframe[n_crframe]
		  = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR0_REGNO + i));

		RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
		n_crframe++;
	      }

	  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			gen_rtx_PARALLEL (VOIDmode,
					  gen_rtvec_v (n_crframe, crframe)));
	}
      else
	{
	  /* In other ABIs, by convention, we use a single CR regnum to
	     represent the fact that all call-saved CR fields are saved.
	     We use CR2_REGNO to be compatible with gcc-2.95 on Linux.  */
	  rtx set = gen_rtx_SET (mem, gen_rtx_REG (SImode, CR2_REGNO));
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
	}
    }

  /* In the ELFv2 ABI we need to save all call-saved CR fields into
     *separate* slots if the routine calls __builtin_eh_return, so
     that they can be independently restored by the unwinder.  */
  if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
    {
      int i, cr_off = info->ehcr_offset;
      rtx crsave;

      /* ??? We might get better performance by using multiple mfocrf
	 instructions.  */
      crsave = gen_rtx_REG (SImode, 0);
      emit_insn (gen_prologue_movesi_from_cr (crsave));

      for (i = 0; i < 8; i++)
	if (!call_used_regs[CR0_REGNO + i])
	  {
	    rtvec p = rtvec_alloc (2);
	    RTVEC_ELT (p, 0)
	      = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
	    RTVEC_ELT (p, 1)
	      = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));

	    insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));

	    RTX_FRAME_RELATED_P (insn) = 1;
	    add_reg_note (insn, REG_FRAME_RELATED_EXPR,
			  gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
					   sp_reg_rtx, cr_off + sp_off));

	    cr_off += reg_size;
	  }
    }
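/* As emitted, the guarded pair is for example "mfcr 12" followed by
   "stw 12,8(1)"; the USEs of the saved CC registers in the store's
   PARALLEL are what keep the scheduler from separating the two
   instructions.  (Register and offset are illustrative.)  */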
  /* If we are emitting stack probes, but allocate no stack, then
     just note that in the dump file.  */
  if (flag_stack_clash_protection
      && dump_file
      && !info->push_p)
    dump_stack_clash_frame_info (NO_PROBE_NO_FRAME, false);

  /* Update stack and set back pointer unless this is V.4,
     for which it was done previously.  */
  if (!WORLD_SAVE_P (info) && info->push_p
      && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
    {
      rtx ptr_reg = NULL;
      int ptr_off = 0;

      /* If saving altivec regs we need to be able to address all save
	 locations using a 16-bit offset.  */
      if ((strategy & SAVE_INLINE_VRS) == 0
	  || (info->altivec_size != 0
	      && (info->altivec_save_offset + info->altivec_size - 16
		  + info->total_size - frame_off) > 32767)
	  || (info->vrsave_size != 0
	      && (info->vrsave_save_offset
		  + info->total_size - frame_off) > 32767))
	{
	  int sel = SAVRES_SAVE | SAVRES_VR;
	  unsigned ptr_regno = ptr_regno_for_savres (sel);

	  if (using_static_chain_p
	      && ptr_regno == STATIC_CHAIN_REGNUM)
	    ptr_regno = 12;
	  if (REGNO (frame_reg_rtx) != ptr_regno)
	    START_USE (ptr_regno);
	  ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
	  frame_reg_rtx = ptr_reg;
	  ptr_off = info->altivec_save_offset + info->altivec_size;
	  frame_off = -ptr_off;
	}
      else if (REGNO (frame_reg_rtx) == 1)
	frame_off = info->total_size;
      sp_adjust = rs6000_emit_allocate_stack (info->total_size,
					      ptr_reg, ptr_off);
      if (REGNO (frame_reg_rtx) == 12)
	sp_adjust = 0;
      sp_off = info->total_size;
      if (frame_reg_rtx != sp_reg_rtx)
	rs6000_emit_stack_tie (frame_reg_rtx, false);
    }
  /* Set frame pointer, if needed.  */
  if (frame_pointer_needed)
    {
      insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
			     sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  /* Save AltiVec registers if needed.  Save here because the red zone does
     not always include AltiVec registers.  */
  if (!WORLD_SAVE_P (info)
      && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
    {
      int end_save = info->altivec_save_offset + info->altivec_size;
      int ptr_off;
      /* Oddly, the vector save/restore functions point r0 at the end
	 of the save area, then use r11 or r12 to load offsets for
	 [reg+reg] addressing.  */
      rtx ptr_reg = gen_rtx_REG (Pmode, 0);
      int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
      rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

      gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);

      NOT_INUSE (0);
      if (scratch_regno == 12)
	sp_adjust = 0;
      if (end_save + frame_off != 0)
	{
	  rtx offset = GEN_INT (end_save + frame_off);

	  emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	}
      else
	emit_move_insn (ptr_reg, frame_reg_rtx);

      ptr_off = -end_save;
      insn = rs6000_emit_savres_rtx (info, scratch_reg,
				     info->altivec_save_offset + ptr_off,
				     0, V4SImode, SAVRES_SAVE | SAVRES_VR);
      rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
			    NULL_RTX, NULL_RTX);
      if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
	{
	  /* The oddity mentioned above clobbered our frame reg.  */
	  emit_move_insn (frame_reg_rtx, ptr_reg);
	  frame_off = ptr_off;
	}
    }
  else if (!WORLD_SAVE_P (info)
	   && info->altivec_size != 0)
    {
      int i;

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	  {
	    rtx areg, savereg, mem;
	    HOST_WIDE_INT offset;

	    offset = (info->altivec_save_offset + frame_off
		      + 16 * (i - info->first_altivec_reg_save));

	    savereg = gen_rtx_REG (V4SImode, i);

	    if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
	      {
		mem = gen_frame_mem (V4SImode,
				     gen_rtx_PLUS (Pmode, frame_reg_rtx,
						   GEN_INT (offset)));
		insn = emit_insn (gen_rtx_SET (mem, savereg));
		areg = NULL_RTX;
	      }
	    else
	      {
		NOT_INUSE (0);
		areg = gen_rtx_REG (Pmode, 0);
		emit_move_insn (areg, GEN_INT (offset));

		/* AltiVec addressing mode is [reg+reg].  */
		mem = gen_frame_mem (V4SImode,
				     gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));

		/* Rather than emitting a generic move, force use of the stvx
		   instruction, which we always want on ISA 2.07 (power8)
		   systems.  In particular we don't want xxpermdi/stxvd2x for
		   little endian.  */
		insn = emit_insn (gen_altivec_stvx_v4si_internal (mem, savereg));
	      }

	    rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
				  areg, GEN_INT (offset));
	  }
    }
  /* VRSAVE is a bit vector representing which AltiVec registers
     are used.  The OS uses this to determine which vector
     registers to save on a context switch.  We need to save
     VRSAVE on the stack frame, add whatever AltiVec registers we
     used in this function, and do the corresponding magic in the
     epilogue.  */

  if (!WORLD_SAVE_P (info) && info->vrsave_size != 0)
    {
      /* Get VRSAVE into a GPR.  Note that ABI_V4 and ABI_DARWIN might
	 be using r12 as frame_reg_rtx and r11 as the static chain
	 pointer for nested functions.  */
      int save_regno = 12;
      if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
	  && !using_static_chain_p)
	save_regno = 11;
      else if (using_split_stack || REGNO (frame_reg_rtx) == 12)
	{
	  save_regno = 11;
	  if (using_static_chain_p)
	    save_regno = 0;
	}
      NOT_INUSE (save_regno);

      emit_vrsave_prologue (info, save_regno, frame_off, frame_reg_rtx);
    }
  /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up.  */
  if (!TARGET_SINGLE_PIC_BASE
      && ((TARGET_TOC && TARGET_MINIMAL_TOC
	   && !constant_pool_empty_p ())
	  || (DEFAULT_ABI == ABI_V4
	      && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
	      && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
    {
      /* If emit_load_toc_table will use the link register, we need to save
	 it.  We use R12 for this purpose because emit_load_toc_table
	 can use register 0.  This allows us to use a plain 'blr' to return
	 from the procedure more often.  */
      int save_LR_around_toc_setup = (TARGET_ELF
				      && DEFAULT_ABI == ABI_V4
				      && flag_pic
				      && ! info->lr_save_p
				      && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
      if (save_LR_around_toc_setup)
	{
	  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
	  rtx tmp = gen_rtx_REG (Pmode, 12);

	  sp_adjust = 0;
	  insn = emit_move_insn (tmp, lr);
	  RTX_FRAME_RELATED_P (insn) = 1;

	  rs6000_emit_load_toc_table (TRUE);

	  insn = emit_move_insn (lr, tmp);
	  add_reg_note (insn, REG_CFA_RESTORE, lr);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      else
	rs6000_emit_load_toc_table (TRUE);
    }
#if TARGET_MACHO
  if (!TARGET_SINGLE_PIC_BASE
      && DEFAULT_ABI == ABI_DARWIN
      && flag_pic && crtl->uses_pic_offset_table)
    {
      rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
      rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);

      /* Save and restore LR locally around this call (in R0).  */
      if (!info->lr_save_p)
	emit_move_insn (gen_rtx_REG (Pmode, 0), lr);

      emit_insn (gen_load_macho_picbase (src));

      emit_move_insn (gen_rtx_REG (Pmode,
				   RS6000_PIC_OFFSET_TABLE_REGNUM),
		      lr);

      if (!info->lr_save_p)
	emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
    }
#endif
  /* If we need to, save the TOC register after doing the stack setup.
     Do not emit eh frame info for this save.  The unwinder wants info,
     conceptually attached to instructions in this function, about
     register values in the caller of this function.  This R2 may have
     already been changed from the value in the caller.
     We don't attempt to write accurate DWARF EH frame info for R2
     because code emitted by gcc for a (non-pointer) function call
     doesn't save and restore R2.  Instead, R2 is managed out-of-line
     by a linker generated plt call stub when the function resides in
     a shared library.  This behavior is costly to describe in DWARF,
     both in terms of the size of DWARF info and the time taken in the
     unwinder to interpret it.  R2 changes, apart from the
     calls_eh_return case earlier in this function, are handled by
     linux-unwind.h frob_update_context.  */
  if (rs6000_save_toc_in_prologue_p ()
      && !cfun->machine->toc_is_wrapped_separately)
    {
      rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
      emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
    }

  /* Set up the arg pointer (r12) for -fsplit-stack code.  */
  if (using_split_stack && split_stack_arg_pointer_used_p ())
    emit_split_stack_prologue (info, sp_adjust, frame_off, frame_reg_rtx);
}
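/* To summarize the ordering implemented above: (V.4/eh_return only)
   allocate the stack first; then move LR to r0 and CR to a GPR, save
   FPRs, GPRs, and EH data regs, handle the AIX r2 save, store CR;
   allocate the stack for the remaining ABIs; set up the frame pointer,
   save AltiVec regs and VRSAVE, set up the TOC/GOT and Darwin picbase,
   save the TOC, and finally set up the split-stack arg pointer.  */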
/* Output .extern statements for the save/restore routines we use.  */

static void
rs6000_output_savres_externs (FILE *file)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  if (TARGET_DEBUG_STACK)
    debug_stack_info (info);

  /* Write .extern for any function we will call to save and restore
     fp values.  */
  if (info->first_fp_reg_save < 64
      && !TARGET_MACHO
      && !TARGET_ELF)
    {
      char *name;
      int regno = info->first_fp_reg_save - 32;

      if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
	{
	  bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
	  int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
	  name = rs6000_savres_routine_name (regno, sel);
	  fprintf (file, "\t.extern %s\n", name);
	}
      if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
	{
	  bool lr = (info->savres_strategy
		     & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
	  int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
	  name = rs6000_savres_routine_name (regno, sel);
	  fprintf (file, "\t.extern %s\n", name);
	}
    }
}
/* Write function prologue.  */

static void
rs6000_output_function_prologue (FILE *file)
{
  if (!cfun->is_thunk)
    rs6000_output_savres_externs (file);

  /* ELFv2 ABI r2 setup code and local entry point.  This must follow
     immediately after the global entry point label.  */
  if (rs6000_global_entry_point_needed_p ())
    {
      const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);

      (*targetm.asm_out.internal_label) (file, "LCF", rs6000_pic_labelno);

      if (TARGET_CMODEL != CMODEL_LARGE)
	{
	  /* In the small and medium code models, we assume the TOC is less
	     than 2 GB away from the text section, so it can be computed via
	     the following two-instruction sequence.  */
	  char buf[256];

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
	  fprintf (file, "0:\taddis 2,12,.TOC.-");
	  assemble_name (file, buf);
	  fprintf (file, "@ha\n");
	  fprintf (file, "\taddi 2,2,.TOC.-");
	  assemble_name (file, buf);
	  fprintf (file, "@l\n");
	}
      else
	{
	  /* In the large code model, we allow arbitrary offsets between the
	     TOC and the text section, so we have to load the offset from
	     memory.  The data field is emitted directly before the global
	     entry point in rs6000_elf_declare_function_name.  */
	  char buf[256];

#ifdef HAVE_AS_ENTRY_MARKERS
	  /* If supported by the linker, emit a marker relocation.  If the
	     total code size of the final executable or shared library
	     happens to fit into 2 GB after all, the linker will replace
	     this code sequence with the sequence for the small or medium
	     code model.  */
	  fprintf (file, "\t.reloc .,R_PPC64_ENTRY\n");
#endif
	  fprintf (file, "\tld 2,");
	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
	  assemble_name (file, buf);
	  fprintf (file, "-");
	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
	  assemble_name (file, buf);
	  fprintf (file, "(12)\n");
	  fprintf (file, "\tadd 2,2,12\n");
	}

      fputs ("\t.localentry\t", file);
      assemble_name (file, name);
      fputs (",.-", file);
      assemble_name (file, name);
      fputs ("\n", file);
    }
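
  /* An illustrative sketch (editorial, not verbatim from the sources): for
     the small and medium code models the fprintf calls above produce a
     global entry sequence of roughly this shape, with "foo" standing in
     for the current function and r12 holding its global entry address:

	0:	addis 2,12,.TOC.-.LCF0@ha
		addi 2,2,.TOC.-.LCF0@l
		.localentry foo,.-foo

     The large code model instead loads the TOC offset from a data word
     emitted just before the global entry point.  */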
  /* Output -mprofile-kernel code.  This needs to be done here instead of
     in output_function_profile since it must go after the ELFv2 ABI
     local entry point.  */
  if (TARGET_PROFILE_KERNEL && crtl->profile)
    {
      gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
      gcc_assert (!TARGET_32BIT);

      asm_fprintf (file, "\tmflr %s\n", reg_names[0]);

      /* In the ELFv2 ABI we have no compiler stack word.  It must be
	 the responsibility of _mcount to preserve the static chain
	 register if required.  */
      if (DEFAULT_ABI != ABI_ELFv2
	  && cfun->static_chain_decl != NULL)
	{
	  asm_fprintf (file, "\tstd %s,24(%s)\n",
		       reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
	  fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
	  asm_fprintf (file, "\tld %s,24(%s)\n",
		       reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
	}
      else
	fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
    }

  rs6000_pic_labelno++;
}

/* -mprofile-kernel code calls mcount before the function prologue,
   so a profiled leaf function should stay a leaf function.  */

static bool
rs6000_keep_leaf_when_profiled ()
{
  return TARGET_PROFILE_KERNEL;
}
/* Non-zero if vmx regs are restored before the frame pop, zero if
   we restore after the pop when possible.  */
#define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0

/* Restoring cr is a two step process: loading a reg from the frame
   save, then moving the reg to cr.  For ABI_V4 we must let the
   unwinder know that the stack location is no longer valid at or
   before the stack deallocation, but we can't emit a cfa_restore for
   cr at the stack deallocation like we do for other registers.
   The trouble is that it is possible for the move to cr to be
   scheduled after the stack deallocation.  So say exactly where cr
   is located on each of the two insns.  */

static rtx
load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
{
  rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (SImode, regno);
  rtx_insn *insn = emit_move_insn (reg, mem);

  if (!exit_func && DEFAULT_ABI == ABI_V4)
    {
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
      rtx set = gen_rtx_SET (reg, cr);

      add_reg_note (insn, REG_CFA_REGISTER, set);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  return reg;
}
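
/* A sketch of the annotation above (editorial; register numbers are
   illustrative): the note attached to the load is conceptually

     (expr_list:REG_CFA_REGISTER (set (reg:SI <regno>) (reg:SI cr2)))

   telling the unwinder that, from this insn on, the saved CR value lives
   in the scratch GPR rather than in its stack slot.  */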
/* Reload CR from REG.  */

static void
restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
{
  int count = 0;
  int i;

  if (using_mfcr_multiple)
    {
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  count++;
      gcc_assert (count);
    }

  if (using_mfcr_multiple && count > 1)
    {
      rtx_insn *insn;
      rtvec p;
      int ndx;

      p = rtvec_alloc (count);

      ndx = 0;
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  {
	    rtvec r = rtvec_alloc (2);
	    RTVEC_ELT (r, 0) = reg;
	    RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
	    RTVEC_ELT (p, ndx) =
	      gen_rtx_SET (gen_rtx_REG (CCmode, CR0_REGNO + i),
			   gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
	    ndx++;
	  }
      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      gcc_assert (ndx == count);

      /* For the ELFv2 ABI we generate a CFA_RESTORE for each
	 CR field separately.  */
      if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
	{
	  for (i = 0; i < 8; i++)
	    if (save_reg_p (CR0_REGNO + i))
	      add_reg_note (insn, REG_CFA_RESTORE,
			    gen_rtx_REG (SImode, CR0_REGNO + i));

	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }
  else
    for (i = 0; i < 8; i++)
      if (save_reg_p (CR0_REGNO + i))
	{
	  rtx insn = emit_insn (gen_movsi_to_cr_one
				(gen_rtx_REG (CCmode, CR0_REGNO + i), reg));

	  /* For the ELFv2 ABI we generate a CFA_RESTORE for each
	     CR field separately, attached to the insn that in fact
	     restores this particular CR field.  */
	  if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
	    {
	      add_reg_note (insn, REG_CFA_RESTORE,
			    gen_rtx_REG (SImode, CR0_REGNO + i));

	      RTX_FRAME_RELATED_P (insn) = 1;
	    }
	}

  /* For other ABIs, we just generate a single CFA_RESTORE for CR2.  */
  if (!exit_func && DEFAULT_ABI != ABI_ELFv2
      && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
    {
      rtx_insn *insn = get_last_insn ();
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);

      add_reg_note (insn, REG_CFA_RESTORE, cr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
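
/* Worked example (editorial): if CR2, CR3 and CR4 were saved, the PARALLEL
   built above carries mask bits (1 << (7-2)) | (1 << (7-3)) | (1 << (7-4))
   = 32 + 16 + 8 = 56, so the multiple-field path boils down to a single
   "mtcrf 56,rN" restoring all three fields at once.  */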
/* Like cr, the move to lr instruction can be scheduled after the
   stack deallocation, but unlike cr, its stack frame save is still
   valid.  So we only need to emit the cfa_restore on the correct
   instruction.  */

static void
load_lr_save (int regno, rtx frame_reg_rtx, int offset)
{
  rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (Pmode, regno);

  emit_move_insn (reg, mem);
}

static void
restore_saved_lr (int regno, bool exit_func)
{
  rtx reg = gen_rtx_REG (Pmode, regno);
  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
  rtx_insn *insn = emit_move_insn (lr, reg);

  if (!exit_func && flag_shrink_wrap)
    {
      add_reg_note (insn, REG_CFA_RESTORE, lr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
static rtx
add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
{
  if (DEFAULT_ABI == ABI_ELFv2)
    {
      int i;
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  {
	    rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
					   cfa_restores);
	  }
    }
  else if (info->cr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
				   gen_rtx_REG (SImode, CR2_REGNO),
				   cfa_restores);

  if (info->lr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
				   gen_rtx_REG (Pmode, LR_REGNO),
				   cfa_restores);
  return cfa_restores;
}
/* Return true if OFFSET from stack pointer can be clobbered by signals.
   V.4 doesn't have any stack cushion, AIX ABIs have 220 or 288 bytes
   below stack pointer not clobbered by signals.  */

static bool
offset_below_red_zone_p (HOST_WIDE_INT offset)
{
  return offset < (DEFAULT_ABI == ABI_V4
		   ? 0
		   : TARGET_32BIT ? -220 : -288);
}

/* Append CFA_RESTORES to any existing REG_NOTES on the last insn.  */

static void
emit_cfa_restores (rtx cfa_restores)
{
  rtx_insn *insn = get_last_insn ();
  rtx *loc = &REG_NOTES (insn);

  while (*loc)
    loc = &XEXP (*loc, 1);
  *loc = cfa_restores;
  RTX_FRAME_RELATED_P (insn) = 1;
}
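
/* Worked example (editorial): on 64-bit AIX/ELFv2 the cushion is 288 bytes,
   so a save slot at offset -256 satisfies -256 >= -288 and stays valid after
   the frame pop, while a slot at -320 makes offset_below_red_zone_p return
   true and must be reloaded before the stack pointer is restored.  */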
/* Emit function epilogue as insns.  */

void
rs6000_emit_epilogue (int sibcall)
{
  rs6000_stack_t *info;
  int restoring_GPRs_inline;
  int restoring_FPRs_inline;
  int using_load_multiple;
  int using_mtcr_multiple;
  int use_backchain_to_restore_sp;
  int restore_lr;
  int strategy;
  HOST_WIDE_INT frame_off = 0;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
  rtx frame_reg_rtx = sp_reg_rtx;
  rtx cfa_restores = NULL_RTX;
  rtx insn = NULL_RTX;
  rtx cr_save_reg = NULL_RTX;
  machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  machine_mode fp_reg_mode = TARGET_HARD_FLOAT ? DFmode : SFmode;
  int fp_reg_size = 8;
  int i;
  bool exit_func;
  unsigned ptr_regno;

  info = rs6000_stack_info ();

  strategy = info->savres_strategy;
  using_load_multiple = strategy & REST_MULTIPLE;
  restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
  restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
  using_mtcr_multiple = (rs6000_tune == PROCESSOR_PPC601
			 || rs6000_tune == PROCESSOR_PPC603
			 || rs6000_tune == PROCESSOR_PPC750
			 || optimize_size);
  /* Restore via the backchain when we have a large frame, since this
     is more efficient than an addis, addi pair.  The second condition
     here will not trigger at the moment; we don't actually need a
     frame pointer for alloca, but the generic parts of the compiler
     give us one anyway.  */
  use_backchain_to_restore_sp = (info->total_size + (info->lr_save_p
						     ? info->lr_save_offset
						     : 0) > 32767
				 || (cfun->calls_alloca
				     && !frame_pointer_needed));
  restore_lr = (info->lr_save_p
		&& (restoring_FPRs_inline
		    || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
		&& (restoring_GPRs_inline
		    || info->first_fp_reg_save < 64)
		&& !cfun->machine->lr_is_wrapped_separately);
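
  /* Worked example (editorial): a frame of 40000 bytes fails the 16-bit
     signed immediate test above (40000 > 32767), so rather than an
     addis/addi pair the epilogue simply reloads the old stack pointer
     from the backchain word at offset 0 from r1.  */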
  if (WORLD_SAVE_P (info))
    {
      int i, j;
      char rname[30];
      const char *alloc_rname;
      rtvec p;
      rtx insn;

      /* eh_rest_world_r10 will return to the location saved in the LR
	 stack slot (which is not likely to be our caller.)
	 Input: R10 -- stack adjustment.  Clobbers R0, R11, R12, R7, R8.
	 rest_world is similar, except any R10 parameter is ignored.
	 The exception-handling stuff that was here in 2.95 is no
	 longer necessary.  */

      p = rtvec_alloc (9
		       + 32 - info->first_gp_reg_save
		       + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
		       + 63 + 1 - info->first_fp_reg_save);

      strcpy (rname, ((crtl->calls_eh_return) ?
		      "*eh_rest_world_r10" : "*rest_world"));
      alloc_rname = ggc_strdup (rname);

      j = 0;
      RTVEC_ELT (p, j++) = ret_rtx;
      RTVEC_ELT (p, j++)
	= gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
      /* The instruction pattern requires a clobber here;
	 it is shared with the restVEC helper. */
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));

      {
	/* CR register traditionally saved as CR2.  */
	rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
	RTVEC_ELT (p, j++)
	  = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
	if (flag_shrink_wrap)
	  {
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
					   gen_rtx_REG (Pmode, LR_REGNO),
					   cfa_restores);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
      }

      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	{
	  rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
	  RTVEC_ELT (p, j++)
	    = gen_frame_load (reg,
			      frame_reg_rtx, info->gp_save_offset + reg_size * i);
	  if (flag_shrink_wrap
	      && save_reg_p (info->first_gp_reg_save + i))
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}
      for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
	{
	  rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
	  RTVEC_ELT (p, j++)
	    = gen_frame_load (reg,
			      frame_reg_rtx, info->altivec_save_offset + 16 * i);
	  if (flag_shrink_wrap
	      && save_reg_p (info->first_altivec_reg_save + i))
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}
      for (i = 0; info->first_fp_reg_save + i <= 63; i++)
	{
	  rtx reg = gen_rtx_REG (TARGET_HARD_FLOAT ? DFmode : SFmode,
				 info->first_fp_reg_save + i);
	  RTVEC_ELT (p, j++)
	    = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
	  if (flag_shrink_wrap
	      && save_reg_p (info->first_fp_reg_save + i))
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
      RTVEC_ELT (p, j++)
	= gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
      insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));

      if (flag_shrink_wrap)
	{
	  REG_NOTES (insn) = cfa_restores;
	  add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      return;
    }
  /* frame_reg_rtx + frame_off points to the top of this stack frame.  */
  if (info->push_p)
    frame_off = info->total_size;

  /* Restore AltiVec registers if we must do so before adjusting the
     stack.  */
  if (info->altivec_size != 0
      && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	  || (DEFAULT_ABI != ABI_V4
	      && offset_below_red_zone_p (info->altivec_save_offset))))
    {
      int scratch_regno = ptr_regno_for_savres (SAVRES_VR);

      gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
      if (use_backchain_to_restore_sp)
	{
	  int frame_regno = 11;
	  /* Of r11 and r12, select the one not clobbered by an
	     out-of-line restore function for the frame register.  */
	  if ((strategy & REST_INLINE_VRS) == 0)
	    frame_regno = 11 + 12 - scratch_regno;
	  frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
	  emit_move_insn (frame_reg_rtx,
			  gen_rtx_MEM (Pmode, sp_reg_rtx));
	  frame_off = 0;
	}
      else if (frame_pointer_needed)
	frame_reg_rtx = hard_frame_pointer_rtx;

      if ((strategy & REST_INLINE_VRS) == 0)
	{
	  int end_save = info->altivec_save_offset + info->altivec_size;
	  int ptr_off;
	  rtx ptr_reg = gen_rtx_REG (Pmode, 0);
	  rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

	  if (end_save + frame_off != 0)
	    {
	      rtx offset = GEN_INT (end_save + frame_off);

	      emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	    }
	  else
	    emit_move_insn (ptr_reg, frame_reg_rtx);

	  ptr_off = -end_save;
	  insn = rs6000_emit_savres_rtx (info, scratch_reg,
					 info->altivec_save_offset + ptr_off,
					 0, V4SImode, SAVRES_VR);
	}
      else
	{
	  for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	      {
		rtx addr, areg, mem, insn;
		rtx reg = gen_rtx_REG (V4SImode, i);
		HOST_WIDE_INT offset
		  = (info->altivec_save_offset + frame_off
		     + 16 * (i - info->first_altivec_reg_save));

		if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
		  {
		    mem = gen_frame_mem (V4SImode,
					 gen_rtx_PLUS (Pmode, frame_reg_rtx,
						       GEN_INT (offset)));
		    insn = gen_rtx_SET (reg, mem);
		  }
		else
		  {
		    areg = gen_rtx_REG (Pmode, 0);
		    emit_move_insn (areg, GEN_INT (offset));

		    /* AltiVec addressing mode is [reg+reg].  */
		    addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
		    mem = gen_frame_mem (V4SImode, addr);

		    /* Rather than emitting a generic move, force use of the
		       lvx instruction, which we always want.  In particular we
		       don't want lxvd2x/xxpermdi for little endian.  */
		    insn = gen_altivec_lvx_v4si_internal (reg, mem);
		  }

		(void) emit_insn (insn);
	      }
	}

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (((strategy & REST_INLINE_VRS) == 0
	     || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
	    && (flag_shrink_wrap
		|| (offset_below_red_zone_p
		    (info->altivec_save_offset
		     + 16 * (i - info->first_altivec_reg_save)))))
	  {
	    rtx reg = gen_rtx_REG (V4SImode, i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
    }
  /* Restore VRSAVE if we must do so before adjusting the stack.  */
  if (info->vrsave_size != 0
      && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	  || (DEFAULT_ABI != ABI_V4
	      && offset_below_red_zone_p (info->vrsave_save_offset))))
    {
      rtx reg;

      if (frame_reg_rtx == sp_reg_rtx)
	{
	  if (use_backchain_to_restore_sp)
	    {
	      frame_reg_rtx = gen_rtx_REG (Pmode, 11);
	      emit_move_insn (frame_reg_rtx,
			      gen_rtx_MEM (Pmode, sp_reg_rtx));
	      frame_off = 0;
	    }
	  else if (frame_pointer_needed)
	    frame_reg_rtx = hard_frame_pointer_rtx;
	}

      reg = gen_rtx_REG (SImode, 12);
      emit_insn (gen_frame_load (reg, frame_reg_rtx,
				 info->vrsave_save_offset + frame_off));

      emit_insn (generate_set_vrsave (reg, info, 1));
    }
  /* If we have a large stack frame, restore the old stack pointer
     using the backchain.  */
  if (use_backchain_to_restore_sp)
    {
      if (frame_reg_rtx == sp_reg_rtx)
	{
	  /* Under V.4, don't reset the stack pointer until after we're done
	     loading the saved registers.  */
	  if (DEFAULT_ABI == ABI_V4)
	    frame_reg_rtx = gen_rtx_REG (Pmode, 11);

	  insn = emit_move_insn (frame_reg_rtx,
				 gen_rtx_MEM (Pmode, sp_reg_rtx));
	  frame_off = 0;
	}
      else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	       && DEFAULT_ABI == ABI_V4)
	/* frame_reg_rtx has been set up by the altivec restore.  */
	;
      else
	{
	  insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
	  frame_reg_rtx = sp_reg_rtx;
	}
    }
  /* If we have a frame pointer, we can restore the old stack pointer
     from it.  */
  else if (frame_pointer_needed)
    {
      frame_reg_rtx = sp_reg_rtx;
      if (DEFAULT_ABI == ABI_V4)
	frame_reg_rtx = gen_rtx_REG (Pmode, 11);
      /* Prevent reordering memory accesses against stack pointer restore.  */
      else if (cfun->calls_alloca
	       || offset_below_red_zone_p (-info->total_size))
	rs6000_emit_stack_tie (frame_reg_rtx, true);

      insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
				       GEN_INT (info->total_size)));
      frame_off = 0;
    }
  else if (info->push_p
	   && DEFAULT_ABI != ABI_V4
	   && !crtl->calls_eh_return)
    {
      /* Prevent reordering memory accesses against stack pointer restore.  */
      if (cfun->calls_alloca
	  || offset_below_red_zone_p (-info->total_size))
	rs6000_emit_stack_tie (frame_reg_rtx, false);
      insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
				       GEN_INT (info->total_size)));
      frame_off = 0;
    }
  if (insn && frame_reg_rtx == sp_reg_rtx)
    {
      if (cfa_restores)
	{
	  REG_NOTES (insn) = cfa_restores;
	  cfa_restores = NULL_RTX;
	}
      add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  /* Restore AltiVec registers if we have not done so already.  */
  if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
      && info->altivec_size != 0
      && (DEFAULT_ABI == ABI_V4
	  || !offset_below_red_zone_p (info->altivec_save_offset)))
    {
      if ((strategy & REST_INLINE_VRS) == 0)
	{
	  int end_save = info->altivec_save_offset + info->altivec_size;
	  int ptr_off;
	  rtx ptr_reg = gen_rtx_REG (Pmode, 0);
	  int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
	  rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

	  if (end_save + frame_off != 0)
	    {
	      rtx offset = GEN_INT (end_save + frame_off);

	      emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	    }
	  else
	    emit_move_insn (ptr_reg, frame_reg_rtx);

	  ptr_off = -end_save;
	  insn = rs6000_emit_savres_rtx (info, scratch_reg,
					 info->altivec_save_offset + ptr_off,
					 0, V4SImode, SAVRES_VR);
	  if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
	    {
	      /* Frame reg was clobbered by out-of-line save.  Restore it
		 from ptr_reg, and if we are calling out-of-line gpr or
		 fpr restore set up the correct pointer and offset.  */
	      unsigned newptr_regno = 1;
	      if (!restoring_GPRs_inline)
		{
		  bool lr = info->gp_save_offset + info->gp_size == 0;
		  int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
		  newptr_regno = ptr_regno_for_savres (sel);
		  end_save = info->gp_save_offset + info->gp_size;
		}
	      else if (!restoring_FPRs_inline)
		{
		  bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
		  int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
		  newptr_regno = ptr_regno_for_savres (sel);
		  end_save = info->fp_save_offset + info->fp_size;
		}

	      if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
		frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);

	      if (end_save + ptr_off != 0)
		{
		  rtx offset = GEN_INT (end_save + ptr_off);

		  frame_off = -end_save;
		  if (TARGET_32BIT)
		    emit_insn (gen_addsi3_carry (frame_reg_rtx,
						 ptr_reg, offset));
		  else
		    emit_insn (gen_adddi3_carry (frame_reg_rtx,
						 ptr_reg, offset));
		}
	      else
		{
		  frame_off = ptr_off;
		  emit_move_insn (frame_reg_rtx, ptr_reg);
		}
	    }
	}
      else
	{
	  for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	      {
		rtx addr, areg, mem, insn;
		rtx reg = gen_rtx_REG (V4SImode, i);
		HOST_WIDE_INT offset
		  = (info->altivec_save_offset + frame_off
		     + 16 * (i - info->first_altivec_reg_save));

		if (TARGET_P9_VECTOR && quad_address_offset_p (offset))
		  {
		    mem = gen_frame_mem (V4SImode,
					 gen_rtx_PLUS (Pmode, frame_reg_rtx,
						       GEN_INT (offset)));
		    insn = gen_rtx_SET (reg, mem);
		  }
		else
		  {
		    areg = gen_rtx_REG (Pmode, 0);
		    emit_move_insn (areg, GEN_INT (offset));

		    /* AltiVec addressing mode is [reg+reg].  */
		    addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
		    mem = gen_frame_mem (V4SImode, addr);

		    /* Rather than emitting a generic move, force use of the
		       lvx instruction, which we always want.  In particular we
		       don't want lxvd2x/xxpermdi for little endian.  */
		    insn = gen_altivec_lvx_v4si_internal (reg, mem);
		  }

		(void) emit_insn (insn);
	      }
	}

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (((strategy & REST_INLINE_VRS) == 0
	     || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
	    && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
	  {
	    rtx reg = gen_rtx_REG (V4SImode, i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
    }
  /* Restore VRSAVE if we have not done so already.  */
  if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
      && info->vrsave_size != 0
      && (DEFAULT_ABI == ABI_V4
	  || !offset_below_red_zone_p (info->vrsave_save_offset)))
    {
      rtx reg;

      reg = gen_rtx_REG (SImode, 12);
      emit_insn (gen_frame_load (reg, frame_reg_rtx,
				 info->vrsave_save_offset + frame_off));

      emit_insn (generate_set_vrsave (reg, info, 1));
    }
  /* If we exit by an out-of-line restore function on ABI_V4 then that
     function will deallocate the stack, so we don't need to worry
     about the unwinder restoring cr from an invalid stack frame
     location.  */
  exit_func = (!restoring_FPRs_inline
	       || (!restoring_GPRs_inline
		   && info->first_fp_reg_save == 64));

  /* In the ELFv2 ABI we need to restore all call-saved CR fields from
     *separate* slots if the routine calls __builtin_eh_return, so
     that they can be independently restored by the unwinder.  */
  if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
    {
      int i, cr_off = info->ehcr_offset;

      for (i = 0; i < 8; i++)
	if (!call_used_regs[CR0_REGNO + i])
	  {
	    rtx reg = gen_rtx_REG (SImode, 0);
	    emit_insn (gen_frame_load (reg, frame_reg_rtx,
				       cr_off + frame_off));

	    insn = emit_insn (gen_movsi_to_cr_one
			       (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));

	    if (!exit_func && flag_shrink_wrap)
	      {
		add_reg_note (insn, REG_CFA_RESTORE,
			      gen_rtx_REG (SImode, CR0_REGNO + i));

		RTX_FRAME_RELATED_P (insn) = 1;
	      }

	    cr_off += reg_size;
	  }
    }
  /* Get the old lr if we saved it.  If we are restoring registers
     out-of-line, then the out-of-line routines can do this for us.  */
  if (restore_lr && restoring_GPRs_inline)
    load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);

  /* Get the old cr if we saved it.  */
  if (info->cr_save_p)
    {
      unsigned cr_save_regno = 12;

      if (!restoring_GPRs_inline)
	{
	  /* Ensure we don't use the register used by the out-of-line
	     gpr register restore below.  */
	  bool lr = info->gp_save_offset + info->gp_size == 0;
	  int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
	  int gpr_ptr_regno = ptr_regno_for_savres (sel);

	  if (gpr_ptr_regno == 12)
	    cr_save_regno = 11;
	  gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
	}
      else if (REGNO (frame_reg_rtx) == 12)
	cr_save_regno = 11;

      cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
				  info->cr_save_offset + frame_off,
				  exit_func);
    }

  /* Set LR here to try to overlap restores below.  */
  if (restore_lr && restoring_GPRs_inline)
    restore_saved_lr (0, exit_func);

  /* Load exception handler data registers, if needed.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i, regno;

      if (TARGET_AIX)
	{
	  rtx reg = gen_rtx_REG (reg_mode, 2);
	  emit_insn (gen_frame_load (reg, frame_reg_rtx,
				     frame_off + RS6000_TOC_SAVE_SLOT));
	}

      for (i = 0; ; ++i)
	{
	  rtx mem;

	  regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;

	  mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
				      info->ehrd_offset + frame_off
				      + reg_size * (int) i);

	  emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
	}
    }
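
  /* Editorial note: on rs6000, EH_RETURN_DATA_REGNO (i) is r3+i for
     i < 4 and INVALID_REGNUM afterwards (see rs6000.h), so the loop above
     reloads at most the four exception data registers r3..r6.  */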
  /* Restore GPRs.  This is done as a PARALLEL if we are using
     the load-multiple instructions.  */
  if (!restoring_GPRs_inline)
    {
      /* We are jumping to an out-of-line function.  */
      rtx ptr_reg;
      int end_save = info->gp_save_offset + info->gp_size;
      bool can_use_exit = end_save == 0;
      int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
      int ptr_off;

      /* Emit stack reset code if we need it.  */
      ptr_regno = ptr_regno_for_savres (sel);
      ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
      if (can_use_exit)
	rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
      else if (end_save + frame_off != 0)
	emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
				  GEN_INT (end_save + frame_off)));
      else if (REGNO (frame_reg_rtx) != ptr_regno)
	emit_move_insn (ptr_reg, frame_reg_rtx);
      if (REGNO (frame_reg_rtx) == ptr_regno)
	frame_off = -end_save;

      if (can_use_exit && info->cr_save_p)
	restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);

      ptr_off = -end_save;
      rs6000_emit_savres_rtx (info, ptr_reg,
			      info->gp_save_offset + ptr_off,
			      info->lr_save_offset + ptr_off,
			      reg_mode, sel);
    }
  else if (using_load_multiple)
    {
      rtvec p;
      p = rtvec_alloc (32 - info->first_gp_reg_save);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	RTVEC_ELT (p, i)
	  = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
			    frame_reg_rtx,
			    info->gp_save_offset + frame_off + reg_size * i);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }
  else
    {
      int offset = info->gp_save_offset + frame_off;
      for (i = info->first_gp_reg_save; i < 32; i++)
	{
	  if (save_reg_p (i)
	      && !cfun->machine->gpr_is_wrapped_separately[i])
	    {
	      rtx reg = gen_rtx_REG (reg_mode, i);
	      emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
	    }

	  offset += reg_size;
	}
    }
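
  /* Editorial note: on 32-bit targets the REST_MULTIPLE strategy above
     becomes a single load-multiple, e.g. "lmw 20,OFFSET(r11)" to restore
     r20..r31 in one instruction instead of twelve separate loads.  */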
  if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
    {
      /* If the frame pointer was used then we can't delay emitting
	 a REG_CFA_DEF_CFA note.  This must happen on the insn that
	 restores the frame pointer, r31.  We may have already emitted
	 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
	 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
	 be harmless if emitted.  */
      if (frame_pointer_needed)
	{
	  insn = get_last_insn ();
	  add_reg_note (insn, REG_CFA_DEF_CFA,
			plus_constant (Pmode, frame_reg_rtx, frame_off));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      /* Set up cfa_restores.  We always need these when
	 shrink-wrapping.  If not shrink-wrapping then we only need
	 the cfa_restore when the stack location is no longer valid.
	 The cfa_restores must be emitted on or before the insn that
	 invalidates the stack, and of course must not be emitted
	 before the insn that actually does the restore.  The latter
	 is why it is a bad idea to emit the cfa_restores as a group
	 on the last instruction here that actually does a restore:
	 That insn may be reordered with respect to others doing
	 restores.  */
      if (flag_shrink_wrap
	  && !restoring_GPRs_inline
	  && info->first_fp_reg_save == 64)
	cfa_restores = add_crlr_cfa_restore (info, cfa_restores);

      for (i = info->first_gp_reg_save; i < 32; i++)
	if (save_reg_p (i)
	    && !cfun->machine->gpr_is_wrapped_separately[i])
	  {
	    rtx reg = gen_rtx_REG (reg_mode, i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
    }

  if (!restoring_GPRs_inline
      && info->first_fp_reg_save == 64)
    {
      /* We are jumping to an out-of-line function.  */
      if (cfa_restores)
	emit_cfa_restores (cfa_restores);
      return;
    }
  if (restore_lr && !restoring_GPRs_inline)
    {
      load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
      restore_saved_lr (0, exit_func);
    }

  /* Restore fpr's if we need to do it without calling a function.  */
  if (restoring_FPRs_inline)
    {
      int offset = info->fp_save_offset + frame_off;
      for (i = info->first_fp_reg_save; i < 64; i++)
	{
	  if (save_reg_p (i)
	      && !cfun->machine->fpr_is_wrapped_separately[i - 32])
	    {
	      rtx reg = gen_rtx_REG (fp_reg_mode, i);
	      emit_insn (gen_frame_load (reg, frame_reg_rtx, offset));
	      if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
		cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
					       cfa_restores);
	    }

	  offset += fp_reg_size;
	}
    }
  /* If we saved cr, restore it here.  Just those that were used.  */
  if (info->cr_save_p)
    restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);

  /* If this is V.4, unwind the stack pointer after all of the loads
     have been done, or set up r11 if we are restoring fp out of line.  */
  ptr_regno = 1;
  if (!restoring_FPRs_inline)
    {
      bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
      int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
      ptr_regno = ptr_regno_for_savres (sel);
    }

  insn = rs6000_emit_stack_reset (frame_reg_rtx, frame_off, ptr_regno);
  if (REGNO (frame_reg_rtx) == ptr_regno)
    frame_off = 0;

  if (insn && restoring_FPRs_inline)
    {
      if (cfa_restores)
	{
	  REG_NOTES (insn) = cfa_restores;
	  cfa_restores = NULL_RTX;
	}
      add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  if (crtl->calls_eh_return)
    {
      rtx sa = EH_RETURN_STACKADJ_RTX;
      emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
    }

  if (!sibcall && restoring_FPRs_inline)
    {
      if (cfa_restores)
	{
	  /* We can't hang the cfa_restores off a simple return,
	     since the shrink-wrap code sometimes uses an existing
	     return.  This means there might be a path from
	     pre-prologue code to this return, and dwarf2cfi code
	     wants the eh_frame unwinder state to be the same on
	     all paths to any point.  So we need to emit the
	     cfa_restores before the return.  For -m64 we really
	     don't need epilogue cfa_restores at all, except for
	     this irritating dwarf2cfi with shrink-wrap
	     requirement; the stack red-zone means eh_frame info
	     from the prologue telling the unwinder to restore
	     from the stack is perfectly good right to the end of
	     the function.  */
	  emit_insn (gen_blockage ());
	  emit_cfa_restores (cfa_restores);
	  cfa_restores = NULL_RTX;
	}

      emit_jump_insn (targetm.gen_simple_return ());
    }

  if (!sibcall && !restoring_FPRs_inline)
    {
      bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
      rtvec p = rtvec_alloc (3 + !!lr + 64 - info->first_fp_reg_save);
      int elt = 0;
      RTVEC_ELT (p, elt++) = ret_rtx;
      if (lr)
	RTVEC_ELT (p, elt++)
	  = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));

      /* We have to restore more than two FP registers, so branch to the
	 restore function.  It will return to our caller.  */
      int i;
      int reg;
      rtx sym;

      if (flag_shrink_wrap)
	cfa_restores = add_crlr_cfa_restore (info, cfa_restores);

      sym = rs6000_savres_routine_sym (info, SAVRES_FPR | (lr ? SAVRES_LR : 0));
      RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, sym);
      reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2) ? 1 : 11;
      RTVEC_ELT (p, elt++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));

      for (i = 0; i < 64 - info->first_fp_reg_save; i++)
	{
	  rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);

	  RTVEC_ELT (p, elt++)
	    = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
	  if (flag_shrink_wrap
	      && save_reg_p (info->first_fp_reg_save + i))
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}

      emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }

  if (cfa_restores)
    {
      if (sibcall)
	/* Ensure the cfa_restores are hung off an insn that won't
	   be reordered above other restores.  */
	emit_insn (gen_blockage ());

      emit_cfa_restores (cfa_restores);
    }
}
/* Write function epilogue.  */

static void
rs6000_output_function_epilogue (FILE *file)
{
#if TARGET_MACHO
  macho_branch_islands ();

  {
    rtx_insn *insn = get_last_insn ();
    rtx_insn *deleted_debug_label = NULL;

    /* Mach-O doesn't support labels at the end of objects, so if
       it looks like we might want one, take special action.

       First, collect any sequence of deleted debug labels.  */
    while (insn
	   && NOTE_P (insn)
	   && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
      {
	/* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
	   notes only, instead set their CODE_LABEL_NUMBER to -1,
	   otherwise there would be code generation differences
	   in between -g and -g0.  */
	if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
	  deleted_debug_label = insn;
	insn = PREV_INSN (insn);
      }

    /* Second, if we have:
       label:
	 barrier
       then this needs to be detected, so skip past the barrier.  */

    if (insn && BARRIER_P (insn))
      insn = PREV_INSN (insn);

    /* Up to now we've only seen notes or barriers.  */
    if (insn)
      {
	if (LABEL_P (insn)
	    || (NOTE_P (insn)
		&& NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL))
	  /* Trailing label: <barrier>.  */
	  fputs ("\tnop\n", file);
	else
	  {
	    /* Lastly, see if we have a completely empty function body.  */
	    while (insn && ! INSN_P (insn))
	      insn = PREV_INSN (insn);
	    /* If we don't find any insns, we've got an empty function body;
	       I.e. completely empty - without a return or branch.  This is
	       taken as the case where a function body has been removed
	       because it contains an inline __builtin_unreachable().  GCC
	       states that reaching __builtin_unreachable() means UB so we're
	       not obliged to do anything special; however, we want
	       non-zero-sized function bodies.  To meet this, and help the
	       user out, let's trap the case.  */
	    if (insn == NULL)
	      fputs ("\ttrap\n", file);
	  }
      }
    else if (deleted_debug_label)
      for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
	if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
	  CODE_LABEL_NUMBER (insn) = -1;
  }
#endif
  /* Output a traceback table here.  See /usr/include/sys/debug.h for info
     on its format.

     We don't output a traceback table if -finhibit-size-directive was
     used.  The documentation for -finhibit-size-directive reads
     ``don't output a @code{.size} assembler directive, or anything
     else that would cause trouble if the function is split in the
     middle, and the two halves are placed at locations far apart in
     memory.''  The traceback table has this property, since it
     includes the offset from the start of the function to the
     traceback table itself.

     System V.4 Powerpc's (and the embedded ABI derived from it) use a
     different traceback table.  */
  if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
      && ! flag_inhibit_size_directive
      && rs6000_traceback != traceback_none && !cfun->is_thunk)
    {
      const char *fname = NULL;
      const char *language_string = lang_hooks.name;
      int fixed_parms = 0, float_parms = 0, parm_info = 0;
      int i;
      int optional_tbtab;
      rs6000_stack_t *info = rs6000_stack_info ();

      if (rs6000_traceback == traceback_full)
	optional_tbtab = 1;
      else if (rs6000_traceback == traceback_part)
	optional_tbtab = 0;
      else
	optional_tbtab = !optimize_size && !TARGET_ELF;

      if (optional_tbtab)
	{
	  fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
	  while (*fname == '.')	/* V.4 encodes . in the name */
	    fname++;

	  /* Need label immediately before tbtab, so we can compute
	     its offset from the function start.  */
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
	  ASM_OUTPUT_LABEL (file, fname);
	}

      /* The .tbtab pseudo-op can only be used for the first eight
	 expressions, since it can't handle the possibly variable
	 length fields that follow.  However, if you omit the optional
	 fields, the assembler outputs zeros for all optional fields
	 anyways, giving each variable length field its minimum length
	 (as defined in sys/debug.h).  Thus we can not use the .tbtab
	 pseudo-op at all.  */

      /* An all-zero word flags the start of the tbtab, for debuggers
	 that have to find it by searching forward from the entry
	 point or from the current pc.  */
      fputs ("\t.long 0\n", file);

      /* Tbtab format type.  Use format type 0.  */
      fputs ("\t.byte 0,", file);

      /* Language type.  Unfortunately, there does not seem to be any
	 official way to discover the language being compiled, so we
	 use language_string.
	 C is 0.  Fortran is 1.  Pascal is 2.  Ada is 3.  C++ is 9.
	 Java is 13.  Objective-C is 14.  Objective-C++ isn't assigned
	 a number, so for now use 9.  LTO, Go and JIT aren't assigned numbers
	 either, so for now use 0.  */
      if (lang_GNU_C ()
	  || ! strcmp (language_string, "GNU GIMPLE")
	  || ! strcmp (language_string, "GNU Go")
	  || ! strcmp (language_string, "libgccjit"))
	i = 0;
      else if (! strcmp (language_string, "GNU F77")
	       || lang_GNU_Fortran ())
	i = 1;
      else if (! strcmp (language_string, "GNU Pascal"))
	i = 2;
      else if (! strcmp (language_string, "GNU Ada"))
	i = 3;
      else if (lang_GNU_CXX ()
	       || ! strcmp (language_string, "GNU Objective-C++"))
	i = 9;
      else if (! strcmp (language_string, "GNU Java"))
	i = 13;
      else if (! strcmp (language_string, "GNU Objective-C"))
	i = 14;
      else
	gcc_unreachable ();
      fprintf (file, "%d,", i);
      /* 8 single bit fields: global linkage (not set for C extern linkage,
	 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
	 from start of procedure stored in tbtab, internal function, function
	 has controlled storage, function has no toc, function uses fp,
	 function logs/aborts fp operations.  */
      /* Assume that fp operations are used if any fp reg must be saved.  */
      fprintf (file, "%d,",
	       (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));

      /* 6 bitfields: function is interrupt handler, name present in
	 proc table, function calls alloca, on condition directives
	 (controls stack walks, 3 bits), saves condition reg, saves
	 link reg.  */
      /* The `function calls alloca' bit seems to be set whenever reg 31 is
	 set up as a frame pointer, even when there is no alloca call.  */
      fprintf (file, "%d,",
	       ((optional_tbtab << 6)
		| ((optional_tbtab & frame_pointer_needed) << 5)
		| (info->cr_save_p << 1)
		| (info->lr_save_p)));

      /* 3 bitfields: saves backchain, fixup code, number of fpr saved
	 (6 bits).  */
      fprintf (file, "%d,",
	       (info->push_p << 7) | (64 - info->first_fp_reg_save));

      /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits).  */
      fprintf (file, "%d,", (32 - first_reg_to_save ()));

      if (optional_tbtab)
	{
	  /* Compute the parameter info from the function decl argument
	     list.  */
	  tree decl;
	  int next_parm_info_bit = 31;

	  for (decl = DECL_ARGUMENTS (current_function_decl);
	       decl; decl = DECL_CHAIN (decl))
	    {
	      rtx parameter = DECL_INCOMING_RTL (decl);
	      machine_mode mode = GET_MODE (parameter);

	      if (GET_CODE (parameter) == REG)
		{
		  if (SCALAR_FLOAT_MODE_P (mode))
		    {
		      int bits;

		      float_parms++;

		      switch (mode)
			{
			case E_SFmode:
			case E_SDmode:
			  bits = 0x2;
			  break;

			case E_DFmode:
			case E_DDmode:
			case E_TFmode:
			case E_TDmode:
			case E_IFmode:
			case E_KFmode:
			  bits = 0x3;
			  break;

			default:
			  gcc_unreachable ();
			}

		      /* If only one bit will fit, don't or in this entry.  */
		      if (next_parm_info_bit > 0)
			parm_info |= (bits << (next_parm_info_bit - 1));
		      next_parm_info_bit -= 2;
		    }
		  else
		    {
		      fixed_parms += ((GET_MODE_SIZE (mode)
				       + (UNITS_PER_WORD - 1))
				      / UNITS_PER_WORD);
		      next_parm_info_bit -= 1;
		    }
		}
	    }
	}
28605 /* This is actually the number of words of fixed point parameters; thus
28606 an 8 byte struct counts as 2; and thus the maximum value is 8. */
28607 fprintf (file
, "%d,", fixed_parms
);
28609 /* 2 bitfields: number of floating point parameters (7 bits), parameters
28611 /* This is actually the number of fp registers that hold parameters;
28612 and thus the maximum value is 13. */
28613 /* Set parameters on stack bit if parameters are not in their original
28614 registers, regardless of whether they are on the stack? Xlc
28615 seems to set the bit when not optimizing. */
28616 fprintf (file
, "%d\n", ((float_parms
<< 1) | (! optimize
)));
28618 if (optional_tbtab
)
28620 /* Optional fields follow. Some are variable length. */
28622 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single
28623 float, 11 double float. */
28624 /* There is an entry for each parameter in a register, in the order
28625 that they occur in the parameter list. Any intervening arguments
28626 on the stack are ignored. If the list overflows a long (max
28627 possible length 34 bits) then completely leave off all elements
28629 /* Only emit this long if there was at least one parameter. */
28630 if (fixed_parms
|| float_parms
)
28631 fprintf (file
, "\t.long %d\n", parm_info
);
28633 /* Offset from start of code to tb table. */
28634 fputs ("\t.long ", file
);
28635 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "LT");
28636 RS6000_OUTPUT_BASENAME (file
, fname
);
28638 rs6000_output_function_entry (file
, fname
);
28641 /* Interrupt handler mask. */
28642 /* Omit this long, since we never set the interrupt handler bit
28645 /* Number of CTL (controlled storage) anchors. */
28646 /* Omit this long, since the has_ctl bit is never set above. */
28648 /* Displacement into stack of each CTL anchor. */
28649 /* Omit this list of longs, because there are no CTL anchors. */
28651 /* Length of function name. */
28654 fprintf (file
, "\t.short %d\n", (int) strlen (fname
));
28656 /* Function name. */
28657 assemble_string (fname
, strlen (fname
));
28659 /* Register for alloca automatic storage; this is always reg 31.
28660 Only emit this if the alloca bit was set above. */
28661 if (frame_pointer_needed
)
28662 fputs ("\t.byte 31\n", file
);
28664 fputs ("\t.align 2\n", file
);
28668 /* Arrange to define .LCTOC1 label, if not already done. */
28672 if (!toc_initialized
)
28674 switch_to_section (toc_section
);
28675 switch_to_section (current_function_section ());
/* -fsplit-stack support.  */

/* A SYMBOL_REF for __morestack.  */
static GTY(()) rtx morestack_ref;

static rtx
gen_add3_const (rtx rt, rtx ra, long c)
{
  if (TARGET_64BIT)
    return gen_adddi3 (rt, ra, GEN_INT (c));
  else
    return gen_addsi3 (rt, ra, GEN_INT (c));
}
/* Emit -fsplit-stack prologue, which goes before the regular function
   prologue (at local entry point in the case of ELFv2).  */

void
rs6000_expand_split_stack_prologue (void)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  unsigned HOST_WIDE_INT allocate;
  long alloc_hi, alloc_lo;
  rtx r0, r1, r12, lr, ok_label, compare, jump, call_fusage;
  rtx_insn *insn;

  gcc_assert (flag_split_stack && reload_completed);

  if (!info->push_p)
    return;

  if (global_regs[29])
    {
      error ("%qs uses register r29", "-fsplit-stack");
      inform (DECL_SOURCE_LOCATION (global_regs_decl[29]),
	      "conflicts with %qD", global_regs_decl[29]);
    }

  allocate = info->total_size;
  if (allocate > (unsigned HOST_WIDE_INT) 1 << 31)
    {
      sorry ("Stack frame larger than 2G is not supported for -fsplit-stack");
      return;
    }
  if (morestack_ref == NULL_RTX)
    {
      morestack_ref = gen_rtx_SYMBOL_REF (Pmode, "__morestack");
      SYMBOL_REF_FLAGS (morestack_ref) |= (SYMBOL_FLAG_LOCAL
					   | SYMBOL_FLAG_FUNCTION);
    }

  r0 = gen_rtx_REG (Pmode, 0);
  r1 = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  r12 = gen_rtx_REG (Pmode, 12);
  emit_insn (gen_load_split_stack_limit (r0));
  /* Always emit two insns here to calculate the requested stack,
     so that the linker can edit them when adjusting size for calling
     non-split-stack code.  */
  alloc_hi = (-allocate + 0x8000) & ~0xffffL;
  alloc_lo = -allocate - alloc_hi;
  if (alloc_hi != 0)
    {
      emit_insn (gen_add3_const (r12, r1, alloc_hi));
      if (alloc_lo != 0)
	emit_insn (gen_add3_const (r12, r12, alloc_lo));
      else
	emit_insn (gen_nop ());
    }
  else
    {
      emit_insn (gen_add3_const (r12, r1, alloc_lo));
      emit_insn (gen_nop ());
    }

  compare = gen_rtx_REG (CCUNSmode, CR7_REGNO);
  emit_insn (gen_rtx_SET (compare, gen_rtx_COMPARE (CCUNSmode, r12, r0)));
  ok_label = gen_label_rtx ();
  jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
			       gen_rtx_GEU (VOIDmode, compare, const0_rtx),
			       gen_rtx_LABEL_REF (VOIDmode, ok_label),
			       pc_rtx);
  insn = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
  JUMP_LABEL (insn) = ok_label;
  /* Mark the jump as very likely to be taken.  */
  add_reg_br_prob_note (insn, profile_probability::very_likely ());

  lr = gen_rtx_REG (Pmode, LR_REGNO);
  insn = emit_move_insn (r0, lr);
  RTX_FRAME_RELATED_P (insn) = 1;
  insn = emit_insn (gen_frame_store (r0, r1, info->lr_save_offset));
  RTX_FRAME_RELATED_P (insn) = 1;

  insn = emit_call_insn (gen_call (gen_rtx_MEM (SImode, morestack_ref),
				   const0_rtx, const0_rtx));
  call_fusage = NULL_RTX;
  use_reg (&call_fusage, r12);
  /* Say the call uses r0, even though it doesn't, to stop regrename
     from twiddling with the insns saving lr, trashing args for cfun.
     The insns restoring lr are similarly protected by making
     split_stack_return use r0.  */
  use_reg (&call_fusage, r0);
  add_function_usage_to (insn, call_fusage);
  /* Indicate that this function can't jump to non-local gotos.  */
  make_reg_eh_region_note_nothrow_nononlocal (insn);
  emit_insn (gen_frame_load (r0, r1, info->lr_save_offset));
  insn = emit_move_insn (lr, r0);
  add_reg_note (insn, REG_CFA_RESTORE, lr);
  RTX_FRAME_RELATED_P (insn) = 1;
  emit_insn (gen_split_stack_return ());

  emit_label (ok_label);
  LABEL_NUSES (ok_label) = 1;
}
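
/* An illustrative sketch (editorial; TCB offsets and save slots are
   target-specific placeholders here) of the sequence emitted above for a
   frame that fits a single add:

	ld    0,<stack-limit-in-TCB>(13)
	addi  12,1,-<ALLOC>	# two linker-editable insns; one may be a nop
	nop
	cmpld 7,12,0
	bge   7,.Lok		# enough stack: skip the call
	mflr  0
	std   0,<lr_save_offset>(1)
	bl    __morestack
	ld    0,<lr_save_offset>(1)
	mtlr  0
	blr			# split_stack_return
   .Lok:
*/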
/* Return the internal arg pointer used for function incoming
   arguments.  When -fsplit-stack, the arg pointer is r12 so we need
   to copy it to a pseudo in order for it to be preserved over calls
   and suchlike.  We'd really like to use a pseudo here for the
   internal arg pointer but data-flow analysis is not prepared to
   accept pseudos as live at the beginning of a function.  */

static rtx
rs6000_internal_arg_pointer (void)
{
  if (flag_split_stack
      && (lookup_attribute ("no_split_stack", DECL_ATTRIBUTES (cfun->decl))
	  == NULL))
    {
      if (cfun->machine->split_stack_arg_pointer == NULL_RTX)
	{
	  rtx pat;

	  cfun->machine->split_stack_arg_pointer = gen_reg_rtx (Pmode);
	  REG_POINTER (cfun->machine->split_stack_arg_pointer) = 1;

	  /* Put the pseudo initialization right after the note at the
	     beginning of the function.  */
	  pat = gen_rtx_SET (cfun->machine->split_stack_arg_pointer,
			     gen_rtx_REG (Pmode, 12));
	  push_topmost_sequence ();
	  emit_insn_after (pat, get_insns ());
	  pop_topmost_sequence ();
	}
      rtx ret = plus_constant (Pmode, cfun->machine->split_stack_arg_pointer,
			       FIRST_PARM_OFFSET (current_function_decl));
      return copy_to_reg (ret);
    }
  return virtual_incoming_args_rtx;
}

/* We may have to tell the dataflow pass that the split stack prologue
   is initializing a register.  */

static void
rs6000_live_on_entry (bitmap regs)
{
  if (flag_split_stack)
    bitmap_set_bit (regs, 12);
}
/* Emit -fsplit-stack dynamic stack allocation space check.  */

void
rs6000_split_stack_space_check (rtx size, rtx label)
{
  rtx sp = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx limit = gen_reg_rtx (Pmode);
  rtx requested = gen_reg_rtx (Pmode);
  rtx cmp = gen_reg_rtx (CCUNSmode);
  rtx jump;

  emit_insn (gen_load_split_stack_limit (limit));
  if (CONST_INT_P (size))
    emit_insn (gen_add3_insn (requested, sp, GEN_INT (-INTVAL (size))));
  else
    {
      size = force_reg (Pmode, size);
      emit_move_insn (requested, gen_rtx_MINUS (Pmode, sp, size));
    }
  emit_insn (gen_rtx_SET (cmp, gen_rtx_COMPARE (CCUNSmode, requested, limit)));
  jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
			       gen_rtx_GEU (VOIDmode, cmp, const0_rtx),
			       gen_rtx_LABEL_REF (VOIDmode, label),
			       pc_rtx);
  jump = emit_jump_insn (gen_rtx_SET (pc_rtx, jump));
  JUMP_LABEL (jump) = label;
}
/* A C compound statement that outputs the assembler code for a thunk
   function, used to implement C++ virtual function calls with
   multiple inheritance.  The thunk acts as a wrapper around a virtual
   function, adjusting the implicit object parameter before handing
   control off to the real function.

   First, emit code to add the integer DELTA to the location that
   contains the incoming first argument.  Assume that this argument
   contains a pointer, and is the one used to pass the `this' pointer
   in C++.  This is the incoming argument *before* the function
   prologue, e.g. `%o0' on a sparc.  The addition must preserve the
   values of all other incoming arguments.

   After the addition, emit code to jump to FUNCTION, which is a
   `FUNCTION_DECL'.  This is a direct pure jump, not a call, and does
   not touch the return address.  Hence returning from FUNCTION will
   return to whoever called the current `thunk'.

   The effect must be as if FUNCTION had been called directly with the
   adjusted first argument.  This macro is responsible for emitting
   all of the code for a thunk function; output_function_prologue()
   and output_function_epilogue() are not invoked.

   The THUNK_FNDECL is redundant.  (DELTA and FUNCTION have already
   been extracted from it.)  It might possibly be useful on some
   targets, but probably not.

   If you do not define this macro, the target-independent code in the
   C++ frontend will generate a less efficient heavyweight thunk that
   calls FUNCTION instead of jumping to it.  The generic approach does
   not support varargs.  */

static void
rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
			HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
			tree function)
{
  rtx this_rtx, funexp;
  rtx_insn *insn;

  reload_completed = 1;
  epilogue_completed = 1;

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in r3.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, 4);
  else
    this_rtx = gen_rtx_REG (Pmode, 3);

  /* Apply the constant offset, if required.  */
  if (delta)
    emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));

  /* Apply the offset from the vtable, if required.  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx tmp = gen_rtx_REG (Pmode, 12);

      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
      if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
	{
	  emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
	}
      else
	{
	  rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);

	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
	}
      emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (!TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);

#if TARGET_MACHO
  if (MACHOPIC_INDIRECT)
    funexp = machopic_indirect_call_target (funexp);
#endif

  /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
     generate sibcall RTL explicitly.  */
  insn = emit_call_insn (
	   gen_rtx_PARALLEL (VOIDmode,
	     gen_rtvec (3,
			gen_rtx_CALL (VOIDmode,
				      funexp, const0_rtx),
			gen_rtx_USE (VOIDmode, const0_rtx),
			simple_return_rtx)));
  SIBLING_CALL_P (insn) = 1;
  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
}
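
/* Worked example (editorial): for a thunk with delta = -8 and no
   vcall_offset, the insns generated above amount to just

	addi 3,3,-8
	b <target function>

   adjusting the `this' pointer in place and transferring control with a
   plain branch, never touching LR or the stack.  */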
/* A quick summary of the various types of 'constant-pool tables'
   under PowerPC:

   Target       Flags           Name            One table per

   AIX          (none)          AIX TOC         object file
   AIX          -mfull-toc      AIX TOC         object file
   AIX          -mminimal-toc   AIX minimal TOC translation unit
   SVR4/EABI    (none)          SVR4 SDATA      object file
   SVR4/EABI    -fpic           SVR4 pic        object file
   SVR4/EABI    -fPIC           SVR4 PIC        translation unit
   SVR4/EABI    -mrelocatable   EABI TOC        function
   SVR4/EABI    -maix           AIX TOC         object file
   SVR4/EABI    -maix -mminimal-toc
                                AIX minimal TOC translation unit

   Name                 Reg.    Set by  entries       contains:
                                        made by  addrs? fp?     sum?

   AIX TOC              2       crt0    as       Y      option  option
   AIX minimal TOC      30      prolog  gcc      Y      Y       option
   SVR4 SDATA           13      crt0    gcc      N      Y       N
   SVR4 pic             30      prolog  ld       Y      not yet N
   SVR4 PIC             30      prolog  gcc      Y      option  option
   EABI TOC             30      prolog  gcc      Y      option  option

*/
/* Hash functions for the hash table.  */

static unsigned
rs6000_hash_constant (rtx k)
{
  enum rtx_code code = GET_CODE (k);
  machine_mode mode = GET_MODE (k);
  unsigned result = (code << 3) ^ mode;
  const char *format;
  int flen, fidx;

  format = GET_RTX_FORMAT (code);
  flen = strlen (format);
  fidx = 0;

  switch (code)
    {
    case LABEL_REF:
      return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));

    case CONST_WIDE_INT:
      {
	int i;
	flen = CONST_WIDE_INT_NUNITS (k);
	for (i = 0; i < flen; i++)
	  result = result * 613 + CONST_WIDE_INT_ELT (k, i);
	return result;
      }

    case CONST_DOUBLE:
      if (mode != VOIDmode)
	return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
      flen = 2;
      break;

    case CODE_LABEL:
      fidx = 3;
      break;

    default:
      break;
    }

  for (; fidx < flen; fidx++)
    switch (format[fidx])
      {
      case 's':
	{
	  unsigned i, len;
	  const char *str = XSTR (k, fidx);
	  len = strlen (str);
	  result = result * 613 + len;
	  for (i = 0; i < len; i++)
	    result = result * 613 + (unsigned) str[i];
	  break;
	}
      case 'u':
      case 'e':
	result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
	break;
      case 'i':
      case 'n':
	result = result * 613 + (unsigned) XINT (k, fidx);
	break;
      case 'w':
	if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
	  result = result * 613 + (unsigned) XWINT (k, fidx);
	else
	  {
	    size_t i;
	    for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
	      result = result * 613 + (unsigned) (XWINT (k, fidx)
						  >> CHAR_BIT * i);
	  }
	break;
      case '0':
	break;
      default:
	gcc_unreachable ();
      }

  return result;
}
hashval_t
toc_hasher::hash (toc_hash_struct *thc)
{
  return rs6000_hash_constant (thc->key) ^ thc->key_mode;
}

/* Compare H1 and H2 for equivalence.  */

bool
toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
{
  rtx r1 = h1->key;
  rtx r2 = h2->key;

  if (h1->key_mode != h2->key_mode)
    return 0;

  return rtx_equal_p (r1, r2);
}
/* These are the names given by the C++ front-end to vtables, and
   vtable-like objects.  Ideally, this logic should not be here;
   instead, there should be some programmatic way of inquiring as
   to whether or not an object is a vtable.  */

#define VTABLE_NAME_P(NAME)				\
  (strncmp ("_vt.", name, strlen ("_vt.")) == 0		\
  || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0	\
  || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0	\
  || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0	\
  || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
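/* For example, "_ZTV3Foo" (the Itanium C++ ABI mangling of the vtable for
   a hypothetical class Foo) matches the "_ZTV" prefix test; "_ZTI", "_ZTT"
   and "_ZTC" similarly cover typeinfo objects, VTTs and construction
   vtables.  Note the macro deliberately tests the local variable `name',
   not its NAME argument.  */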
#ifdef NO_DOLLAR_IN_LABEL
/* Return a GGC-allocated character string translating dollar signs in
   input NAME to underscores.  Used by XCOFF ASM_OUTPUT_LABELREF.  */

const char *
rs6000_xcoff_strip_dollar (const char *name)
{
  char *strip, *p;
  const char *q;
  size_t len;

  q = (const char *) strchr (name, '$');

  if (q == 0 || q == name)
    return name;

  len = strlen (name);
  strip = XALLOCAVEC (char, len + 1);
  strcpy (strip, name);
  p = strip + (q - name);
  while (p)
    {
      *p = '_';
      p = strchr (p + 1, '$');
    }

  return ggc_alloc_string (strip, len);
}
#endif
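/* Example: "foo$bar$baz" comes back as "foo_bar_baz".  A NAME whose first
   character is '$' (q == name) is returned unchanged, as is a NAME with
   no '$' at all (q == 0).  */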
void
rs6000_output_symbol_ref (FILE *file, rtx x)
{
  const char *name = XSTR (x, 0);

  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the identifier and not the
     symbol.  */
  if (VTABLE_NAME_P (name))
    RS6000_OUTPUT_BASENAME (file, name);
  else
    assemble_name (file, name);
}
/* Output a TOC entry.  We derive the entry name from what is being
   written.  */

void
output_toc (FILE *file, rtx x, int labelno, machine_mode mode)
{
  char buf[256];
  const char *name = buf;
  rtx base = x;
  HOST_WIDE_INT offset = 0;

  gcc_assert (!TARGET_NO_TOC);

  /* When the linker won't eliminate them, don't output duplicate
     TOC entries (this happens on AIX if there is any kind of TOC,
     and on SVR4 under -fPIC or -mrelocatable).  Don't do this for
     labels, however.  */
  if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
    {
      struct toc_hash_struct *h;

      /* Create toc_hash_table.  This can't be done at TARGET_OPTION_OVERRIDE
	 time because GGC is not initialized at that point.  */
      if (toc_hash_table == NULL)
	toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);

      h = ggc_alloc<toc_hash_struct> ();
      h->key = x;
      h->key_mode = mode;
      h->labelno = labelno;

      toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
      if (*found == NULL)
	*found = h;
      else  /* This is indeed a duplicate.
	       Set this label equal to that label.  */
	{
	  fputs ("\t.set ", file);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d,", labelno);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d\n", ((*found)->labelno));

	  if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
	      && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
		  || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
	    {
	      fputs ("\t.set ", file);
	      ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
	      fprintf (file, "%d,", labelno);
	      ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
	      fprintf (file, "%d\n", ((*found)->labelno));
	    }

	  return;
	}
    }
  /* If we're going to put a double constant in the TOC, make sure it's
     aligned properly when strict alignment is on.  */
  if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
      && STRICT_ALIGNMENT
      && GET_MODE_BITSIZE (mode) >= 64
      && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
    ASM_OUTPUT_ALIGN (file, 3);

  (*targetm.asm_out.internal_label) (file, "LC", labelno);
  /* Handle FP constants specially.  Note that if we have a minimal
     TOC, things we put here aren't actually in the TOC, so we can allow
     FP constants.  */
  if (GET_CODE (x) == CONST_DOUBLE &&
      (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode
       || GET_MODE (x) == IFmode || GET_MODE (x) == KFmode))
    {
      long k[4];

      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL128 (*CONST_DOUBLE_REAL_VALUE (x), k);
      else
	REAL_VALUE_TO_TARGET_LONG_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);

      if (TARGET_64BIT)
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff,
		     k[2] & 0xffffffff, k[3] & 0xffffffff);
	  fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
		   k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
		   k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
		   k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
		   k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff,
		     k[2] & 0xffffffff, k[3] & 0xffffffff);
	  fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff,
		   k[2] & 0xffffffff, k[3] & 0xffffffff);
	  return;
	}
    }
  else if (GET_CODE (x) == CONST_DOUBLE &&
	   (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
    {
      long k[2];

      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL64 (*CONST_DOUBLE_REAL_VALUE (x), k);
      else
	REAL_VALUE_TO_TARGET_DOUBLE (*CONST_DOUBLE_REAL_VALUE (x), k);

      if (TARGET_64BIT)
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FD_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff);
	  fprintf (file, "0x%lx%08lx\n",
		   k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
		   k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FD_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff);
	  fprintf (file, "0x%lx,0x%lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff);
	  return;
	}
    }
  else if (GET_CODE (x) == CONST_DOUBLE &&
	   (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
    {
      long l;

      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL32 (*CONST_DOUBLE_REAL_VALUE (x), l);
      else
	REAL_VALUE_TO_TARGET_SINGLE (*CONST_DOUBLE_REAL_VALUE (x), l);

      if (TARGET_64BIT)
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
	  if (WORDS_BIG_ENDIAN)
	    fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
	  else
	    fprintf (file, "0x%lx\n", l & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
	  fprintf (file, "0x%lx\n", l & 0xffffffff);
	  return;
	}
    }
  else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
    {
      unsigned HOST_WIDE_INT low;
      HOST_WIDE_INT high;

      low = INTVAL (x) & 0xffffffff;
      high = (HOST_WIDE_INT) INTVAL (x) >> 32;

      /* TOC entries are always Pmode-sized, so when big-endian
	 smaller integer constants in the TOC need to be padded.
	 (This is still a win over putting the constants in
	 a separate constant pool, because then we'd have
	 to have both a TOC entry _and_ the actual constant.)

	 For a 32-bit target, CONST_INT values are loaded and shifted
	 entirely within `low' and can be stored in one TOC entry.  */

      /* It would be easy to make this work, but it doesn't now.  */
      gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));

      if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
	{
	  low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
	  high = (HOST_WIDE_INT) low >> 32;
	  low &= 0xffffffff;
	}

      if (TARGET_64BIT)
	{
	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc ID_%lx_%lx[TC],",
		     (long) high & 0xffffffff, (long) low & 0xffffffff);
	  fprintf (file, "0x%lx%08lx\n",
		   (long) high & 0xffffffff, (long) low & 0xffffffff);
	  return;
	}
      else
	{
	  if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
	    {
	      if (TARGET_ELF || TARGET_MINIMAL_TOC)
		fputs ("\t.long ", file);
	      else
		fprintf (file, "\t.tc ID_%lx_%lx[TC],",
			 (long) high & 0xffffffff, (long) low & 0xffffffff);
	      fprintf (file, "0x%lx,0x%lx\n",
		       (long) high & 0xffffffff, (long) low & 0xffffffff);
	      return;
	    }
	  else
	    {
	      if (TARGET_ELF || TARGET_MINIMAL_TOC)
		fputs ("\t.long ", file);
	      else
		fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
	      fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
	      return;
	    }
	}
    }
  if (GET_CODE (x) == CONST)
    {
      gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);

      base = XEXP (XEXP (x, 0), 0);
      offset = INTVAL (XEXP (XEXP (x, 0), 1));
    }

  switch (GET_CODE (base))
    {
    case SYMBOL_REF:
      name = XSTR (base, 0);
      break;

    case LABEL_REF:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L",
				   CODE_LABEL_NUMBER (XEXP (base, 0)));
      break;

    case CODE_LABEL:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
      break;

    default:
      gcc_unreachable ();
    }
  if (TARGET_ELF || TARGET_MINIMAL_TOC)
    fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
  else
    {
      fputs ("\t.tc ", file);
      RS6000_OUTPUT_BASENAME (file, name);

      if (offset < 0)
	fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
      else if (offset)
	fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);

      /* Mark large TOC symbols on AIX with [TE] so they are mapped
	 after other TOC symbols, reducing overflow of small TOC access
	 to [TC] symbols.  */
      fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
	     ? "[TE]," : "[TC],", file);
    }

  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a TOC reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  if (VTABLE_NAME_P (name))
    {
      RS6000_OUTPUT_BASENAME (file, name);
      if (offset < 0)
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
      else if (offset > 0)
	fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
    }
  else
    output_addr_const (file, x);

  if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF)
    {
      switch (SYMBOL_REF_TLS_MODEL (base))
	{
	case 0:
	  break;

	case TLS_MODEL_LOCAL_EXEC:
	  fputs ("@le", file);
	  break;

	case TLS_MODEL_INITIAL_EXEC:
	  fputs ("@ie", file);
	  break;

	/* Use global-dynamic for local-dynamic.  */
	case TLS_MODEL_GLOBAL_DYNAMIC:
	case TLS_MODEL_LOCAL_DYNAMIC:
	  putc ('\n', file);
	  (*targetm.asm_out.internal_label) (file, "LCM", labelno);
	  fputs ("\t.tc .", file);
	  RS6000_OUTPUT_BASENAME (file, name);
	  fputs ("[TC],", file);
	  output_addr_const (file, x);
	  fputs ("@m", file);
	  break;

	default:
	  gcc_unreachable ();
	}
    }

  putc ('\n', file);
}
/* Output an assembler pseudo-op to write an ASCII string of N characters
   starting at P to FILE.

   On the RS/6000, we have to do this using the .byte operation and
   write out special characters outside the quoted string.
   Also, the assembler is broken; very long strings are truncated,
   so we must artificially break them up early.  */

void
output_ascii (FILE *file, const char *p, int n)
{
  char c;
  int i, count_string;
  const char *for_string = "\t.byte \"";
  const char *for_decimal = "\t.byte ";
  const char *to_close = NULL;

  count_string = 0;
  for (i = 0; i < n; i++)
    {
      c = *p++;
      if (c >= ' ' && c < 0177)
	{
	  if (for_string)
	    fputs (for_string, file);
	  putc (c, file);

	  /* Write two quotes to get one.  */
	  if (c == '"')
	    {
	      putc (c, file);
	      ++count_string;
	    }

	  for_string = NULL;
	  for_decimal = "\"\n\t.byte ";
	  to_close = "\"\n";
	  ++count_string;

	  if (count_string >= 512)
	    {
	      fputs (to_close, file);

	      for_string = "\t.byte \"";
	      for_decimal = "\t.byte ";
	      to_close = NULL;
	      count_string = 0;
	    }
	}
      else
	{
	  if (for_decimal)
	    fputs (for_decimal, file);
	  fprintf (file, "%d", c);

	  for_string = "\n\t.byte \"";
	  for_decimal = ", ";
	  to_close = "\n";
	  count_string = 0;
	}
    }

  /* Now close the string if we have written one.  Then end the line.  */
  if (to_close)
    fputs (to_close, file);
}
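/* For example, output_ascii (file, "hi\n", 3) emits (sketch):

	.byte "hi"
	.byte 10

   Printable characters accumulate inside one quoted .byte string, while
   the newline falls back to its decimal value on a fresh .byte op.  */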
/* Generate a unique section name for FILENAME for a section type
   represented by SECTION_DESC.  Output goes into BUF.

   SECTION_DESC can be any string, as long as it is different for each
   possible section type.

   We name the section in the same manner as xlc.  The name begins with an
   underscore followed by the filename (after stripping any leading directory
   names) with the last period replaced by the string SECTION_DESC.  If
   FILENAME does not contain a period, SECTION_DESC is appended to the end of
   the name.  */

void
rs6000_gen_section_name (char **buf, const char *filename,
			 const char *section_desc)
{
  const char *q, *after_last_slash, *last_period = 0;
  char *p;
  int len;

  after_last_slash = filename;
  for (q = filename; *q; q++)
    {
      if (*q == '/')
	after_last_slash = q + 1;
      else if (*q == '.')
	last_period = q;
    }

  len = strlen (after_last_slash) + strlen (section_desc) + 2;
  *buf = (char *) xmalloc (len);

  p = *buf;
  *p++ = '_';

  for (q = after_last_slash; *q; q++)
    {
      if (q == last_period)
	{
	  strcpy (p, section_desc);
	  p += strlen (section_desc);
	  break;
	}

      else if (ISALNUM (*q))
	*p++ = *q;
    }

  if (last_period == 0)
    strcpy (p, section_desc);
  else
    *p = '\0';
}
/* Emit profile function.  */

void
output_profile_hook (int labelno ATTRIBUTE_UNUSED)
{
  /* Non-standard profiling for kernels, which just saves LR then calls
     _mcount without worrying about arg saves.  The idea is to change
     the function prologue as little as possible as it isn't easy to
     account for arg save/restore code added just for _mcount.  */
  if (TARGET_PROFILE_KERNEL)
    return;

  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    {
#ifndef NO_PROFILE_COUNTERS
# define NO_PROFILE_COUNTERS 0
#endif
      if (NO_PROFILE_COUNTERS)
	emit_library_call (init_one_libfunc (RS6000_MCOUNT),
			   LCT_NORMAL, VOIDmode);
      else
	{
	  char buf[30];
	  const char *label_name;
	  rtx fun;

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
	  label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
	  fun = gen_rtx_SYMBOL_REF (Pmode, label_name);

	  emit_library_call (init_one_libfunc (RS6000_MCOUNT),
			     LCT_NORMAL, VOIDmode, fun, Pmode);
	}
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      const char *mcount_name = RS6000_MCOUNT;
      int caller_addr_regno = LR_REGNO;

      /* Be conservative and always set this, at least for now.  */
      crtl->uses_pic_offset_table = 1;

      /* For PIC code, set up a stub and collect the caller's address
	 from r0, which is where the prologue puts it.  */
      if (MACHOPIC_INDIRECT
	  && crtl->uses_pic_offset_table)
	caller_addr_regno = 0;

      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
			 LCT_NORMAL, VOIDmode,
			 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
    }
}
/* Write function profiler code.  */

void
output_function_profiler (FILE *file, int labelno)
{
  char buf[100];

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_V4:
      if (!TARGET_32BIT)
	{
	  warning (0, "no profiling of 64-bit code for this ABI");
	  return;
	}
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
      fprintf (file, "\tmflr %s\n", reg_names[0]);
      if (NO_PROFILE_COUNTERS)
	{
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	}
      else if (TARGET_SECURE_PLT && flag_pic)
	{
	  if (TARGET_LINK_STACK)
	    {
	      char name[32];
	      get_ppc476_thunk_name (name);
	      asm_fprintf (file, "\tbl %s\n", name);
	    }
	  else
	    asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
	  asm_fprintf (file, "\taddis %s,%s,",
		       reg_names[12], reg_names[12]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
	}
      else if (flag_pic == 1)
	{
	  fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
	  asm_fprintf (file, "\tlwz %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "@got(%s)\n", reg_names[12]);
	}
      else if (flag_pic > 1)
	{
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  /* Now, we need to get the address of the label.  */
	  if (TARGET_LINK_STACK)
	    {
	      char name[32];
	      get_ppc476_thunk_name (name);
	      asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
	      assemble_name (file, buf);
	      fputs ("-.\n1:", file);
	      asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
	      asm_fprintf (file, "\taddi %s,%s,4\n",
			   reg_names[11], reg_names[11]);
	    }
	  else
	    {
	      fputs ("\tbcl 20,31,1f\n\t.long ", file);
	      assemble_name (file, buf);
	      fputs ("-.\n1:", file);
	      asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
	    }
	  asm_fprintf (file, "\tlwz %s,0(%s)\n",
		       reg_names[0], reg_names[11]);
	  asm_fprintf (file, "\tadd %s,%s,%s\n",
		       reg_names[0], reg_names[0], reg_names[11]);
	}
      else
	{
	  asm_fprintf (file, "\tlis %s,", reg_names[12]);
	  assemble_name (file, buf);
	  fputs ("@ha\n", file);
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tla %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "@l(%s)\n", reg_names[12]);
	}

      /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH.  */
      fprintf (file, "\tbl %s%s\n",
	       RS6000_MCOUNT, flag_pic ? "@plt" : "");
      break;

    case ABI_AIX:
    case ABI_ELFv2:
    case ABI_DARWIN:
      /* Don't do anything, done in output_profile_hook ().  */
      break;
    }
}
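/* For ABI_V4 without PIC, the emitted profiling sequence looks roughly
   like this (illustrative; label rendering and the mcount name vary by
   target and come from ASM_GENERATE_INTERNAL_LABEL and RS6000_MCOUNT):

	mflr 0
	lis 12,.LP0@ha
	stw 0,4(1)
	la 0,.LP0@l(12)
	bl _mcount

   i.e. the saved LR goes into the caller's frame and the address of the
   per-call-site label is passed to the profiler.  */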
/* The following variable holds the last issued insn.  */

static rtx_insn *last_scheduled_insn;

/* The following variable helps to balance issuing of load and
   store instructions.  */

static int load_store_pendulum;

/* The following variable helps pair divide insns during scheduling.  */
static int divide_cnt;
/* The following variable helps pair and alternate vector and vector load
   insns during scheduling.  */
static int vec_pairing;
/* Power4 load update and store update instructions are cracked into a
   load or store and an integer insn which are executed in the same cycle.
   Branches have their own dispatch slot which does not count against the
   GCC issue rate, but it changes the program flow so there are no other
   instructions to issue in this cycle.  */

static int
rs6000_variable_issue_1 (rtx_insn *insn, int more)
{
  last_scheduled_insn = insn;
  if (GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    {
      cached_can_issue_more = more;
      return cached_can_issue_more;
    }

  if (insn_terminates_group_p (insn, current_group))
    {
      cached_can_issue_more = 0;
      return cached_can_issue_more;
    }

  /* If the insn has no reservation but we reach here, leave MORE
     unchanged.  */
  if (recog_memoized (insn) < 0)
    return more;

  if (rs6000_sched_groups)
    {
      if (is_microcoded_insn (insn))
	cached_can_issue_more = 0;
      else if (is_cracked_insn (insn))
	cached_can_issue_more = more > 2 ? more - 2 : 0;
      else
	cached_can_issue_more = more - 1;

      return cached_can_issue_more;
    }

  if (rs6000_tune == PROCESSOR_CELL && is_nonpipeline_insn (insn))
    return 0;

  cached_can_issue_more = more - 1;
  return cached_can_issue_more;
}

static int
rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
{
  int r = rs6000_variable_issue_1 (insn, more);
  if (verbose)
    fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
  return r;
}
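/* Worked example: on a dispatch-group machine (rs6000_sched_groups) with
   MORE == 4 slots left, a microcoded insn closes the group (0 slots
   remain), a cracked insn consumes two slots (2 remain), and any other
   insn consumes one (3 remain).  */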
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
rs6000_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn, int cost,
		    unsigned int)
{
  enum attr_type attr_type;

  if (recog_memoized (insn) < 0 || recog_memoized (dep_insn) < 0)
    return cost;

  switch (dep_type)
    {
    case REG_DEP_TRUE:
      {
	/* Data dependency; DEP_INSN writes a register that INSN reads
	   some cycles later.  */

	/* Separate a load from a narrower, dependent store.  */
	if ((rs6000_sched_groups || rs6000_tune == PROCESSOR_POWER9)
	    && GET_CODE (PATTERN (insn)) == SET
	    && GET_CODE (PATTERN (dep_insn)) == SET
	    && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
	    && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
	    && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
		> GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
	  return cost + 14;

	attr_type = get_attr_type (insn);

	switch (attr_type)
	  {
	  case TYPE_JMPREG:
	    /* Tell the first scheduling pass about the latency between
	       a mtctr and bctr (and mtlr and br/blr).  The first
	       scheduling pass will not know about this latency since
	       the mtctr instruction, which has the latency associated
	       to it, will be generated by reload.  */
	    return 4;
	  case TYPE_BRANCH:
	    /* Leave some extra cycles between a compare and its
	       dependent branch, to inhibit expensive mispredicts.  */
	    if ((rs6000_tune == PROCESSOR_PPC603
		 || rs6000_tune == PROCESSOR_PPC604
		 || rs6000_tune == PROCESSOR_PPC604e
		 || rs6000_tune == PROCESSOR_PPC620
		 || rs6000_tune == PROCESSOR_PPC630
		 || rs6000_tune == PROCESSOR_PPC750
		 || rs6000_tune == PROCESSOR_PPC7400
		 || rs6000_tune == PROCESSOR_PPC7450
		 || rs6000_tune == PROCESSOR_PPCE5500
		 || rs6000_tune == PROCESSOR_PPCE6500
		 || rs6000_tune == PROCESSOR_POWER4
		 || rs6000_tune == PROCESSOR_POWER5
		 || rs6000_tune == PROCESSOR_POWER7
		 || rs6000_tune == PROCESSOR_POWER8
		 || rs6000_tune == PROCESSOR_POWER9
		 || rs6000_tune == PROCESSOR_CELL)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))

	      switch (get_attr_type (dep_insn))
		{
		case TYPE_CMP:
		case TYPE_FPCOMPARE:
		case TYPE_CR_LOGICAL:
		  return cost + 2;
		case TYPE_EXTS:
		case TYPE_MUL:
		  if (get_attr_dot (dep_insn) == DOT_YES)
		    return cost + 2;
		  else
		    break;
		case TYPE_SHIFT:
		  if (get_attr_dot (dep_insn) == DOT_YES
		      && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
		    return cost + 2;
		  else
		    break;
		default:
		  break;
		}
	    break;

	  case TYPE_STORE:
	  case TYPE_FPSTORE:
	    if ((rs6000_tune == PROCESSOR_POWER6)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))
	      {
		if (GET_CODE (PATTERN (insn)) != SET)
		  /* If this happens, we have to extend this to schedule
		     optimally.  Return default for now.  */
		  return cost;

		/* Adjust the cost for the case where the value written
		   by a fixed point operation is used as the address
		   gen value on a store.  */
		switch (get_attr_type (dep_insn))
		  {
		  case TYPE_LOAD:
		    if (! rs6000_store_data_bypass_p (dep_insn, insn))
		      return get_attr_sign_extend (dep_insn)
			     == SIGN_EXTEND_YES ? 6 : 4;
		    break;
		  case TYPE_SHIFT:
		    if (! rs6000_store_data_bypass_p (dep_insn, insn))
		      return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
			     6 : 3;
		    break;
		  case TYPE_INTEGER:
		  case TYPE_ADD:
		  case TYPE_LOGICAL:
		  case TYPE_EXTS:
		  case TYPE_INSERT:
		    if (! rs6000_store_data_bypass_p (dep_insn, insn))
		      return 3;
		    break;
		  case TYPE_STORE:
		  case TYPE_FPLOAD:
		  case TYPE_FPSTORE:
		    if (get_attr_update (dep_insn) == UPDATE_YES
			&& ! rs6000_store_data_bypass_p (dep_insn, insn))
		      return 3;
		    break;
		  case TYPE_MUL:
		    if (! rs6000_store_data_bypass_p (dep_insn, insn))
		      return 17;
		    break;
		  case TYPE_DIV:
		    if (! rs6000_store_data_bypass_p (dep_insn, insn))
		      return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
		    break;
		  default:
		    break;
		  }
	      }
	    break;

	  case TYPE_LOAD:
	    if ((rs6000_tune == PROCESSOR_POWER6)
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0))
	      {
		/* Adjust the cost for the case where the value written
		   by a fixed point instruction is used within the address
		   gen portion of a subsequent load(u)(x).  */
		switch (get_attr_type (dep_insn))
		  {
		  case TYPE_LOAD:
		    if (set_to_load_agen (dep_insn, insn))
		      return get_attr_sign_extend (dep_insn)
			     == SIGN_EXTEND_YES ? 6 : 4;
		    break;
		  case TYPE_SHIFT:
		    if (set_to_load_agen (dep_insn, insn))
		      return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
			     6 : 3;
		    break;
		  case TYPE_INTEGER:
		  case TYPE_ADD:
		  case TYPE_LOGICAL:
		  case TYPE_EXTS:
		  case TYPE_INSERT:
		    if (set_to_load_agen (dep_insn, insn))
		      return 3;
		    break;
		  case TYPE_STORE:
		  case TYPE_FPLOAD:
		  case TYPE_FPSTORE:
		    if (get_attr_update (dep_insn) == UPDATE_YES
			&& set_to_load_agen (dep_insn, insn))
		      return 3;
		    break;
		  case TYPE_MUL:
		    if (set_to_load_agen (dep_insn, insn))
		      return 17;
		    break;
		  case TYPE_DIV:
		    if (set_to_load_agen (dep_insn, insn))
		      return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
		    break;
		  default:
		    break;
		  }
	      }
	    break;

	  case TYPE_FPLOAD:
	    if ((rs6000_tune == PROCESSOR_POWER6)
		&& get_attr_update (insn) == UPDATE_NO
		&& recog_memoized (dep_insn)
		&& (INSN_CODE (dep_insn) >= 0)
		&& (get_attr_type (dep_insn) == TYPE_MFFGPR))
	      return 2;
	    break;

	  default:
	    break;
	  }

	/* Fall out to return default cost.  */
      }
      break;

    case REG_DEP_OUTPUT:
      /* Output dependency; DEP_INSN writes a register that INSN writes some
	 cycles later.  */
      if ((rs6000_tune == PROCESSOR_POWER6)
	  && recog_memoized (dep_insn)
	  && (INSN_CODE (dep_insn) >= 0))
	{
	  attr_type = get_attr_type (insn);

	  switch (attr_type)
	    {
	    case TYPE_FP:
	    case TYPE_FPSIMPLE:
	      if (get_attr_type (dep_insn) == TYPE_FP
		  || get_attr_type (dep_insn) == TYPE_FPSIMPLE)
		return 1;
	      break;
	    case TYPE_FPLOAD:
	      if (get_attr_update (insn) == UPDATE_NO
		  && get_attr_type (dep_insn) == TYPE_MFFGPR)
		return 2;
	      break;
	    default:
	      break;
	    }
	}
      /* Fall through, no cost for output dependency.  */
      /* FALLTHRU */

    case REG_DEP_ANTI:
      /* Anti dependency; DEP_INSN reads a register that INSN writes some
	 cycles later.  */
      return 0;

    default:
      gcc_unreachable ();
    }

  return cost;
}
/* Debug version of rs6000_adjust_cost.  */

static int
rs6000_debug_adjust_cost (rtx_insn *insn, int dep_type, rtx_insn *dep_insn,
			  int cost, unsigned int dw)
{
  int ret = rs6000_adjust_cost (insn, dep_type, dep_insn, cost, dw);

  if (ret != cost)
    {
      const char *dep;

      switch (dep_type)
	{
	default:	     dep = "unknown dependency";  break;
	case REG_DEP_TRUE:   dep = "data dependency";	  break;
	case REG_DEP_OUTPUT: dep = "output dependency";	  break;
	case REG_DEP_ANTI:   dep = "anti dependency";	  break;
	}

      fprintf (stderr,
	       "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
	       "%s, insn:\n", ret, cost, dep);

      debug_rtx (insn);
    }

  return ret;
}
/* Return true if INSN is microcoded, false otherwise.  */

static bool
is_microcoded_insn (rtx_insn *insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_tune == PROCESSOR_CELL)
    return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;

  if (rs6000_sched_groups
      && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
    {
      enum attr_type type = get_attr_type (insn);
      if ((type == TYPE_LOAD
	   && get_attr_update (insn) == UPDATE_YES
	   && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
	  || ((type == TYPE_LOAD || type == TYPE_STORE)
	      && get_attr_update (insn) == UPDATE_YES
	      && get_attr_indexed (insn) == INDEXED_YES)
	  || type == TYPE_MFCR)
	return true;
    }

  return false;
}
/* Return true if INSN is cracked into 2 instructions by the processor
   (and therefore occupies 2 issue slots).  */

static bool
is_cracked_insn (rtx_insn *insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups
      && (rs6000_tune == PROCESSOR_POWER4 || rs6000_tune == PROCESSOR_POWER5))
    {
      enum attr_type type = get_attr_type (insn);
      if ((type == TYPE_LOAD
	   && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
	   && get_attr_update (insn) == UPDATE_NO)
	  || (type == TYPE_LOAD
	      && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
	      && get_attr_update (insn) == UPDATE_YES
	      && get_attr_indexed (insn) == INDEXED_NO)
	  || (type == TYPE_STORE
	      && get_attr_update (insn) == UPDATE_YES
	      && get_attr_indexed (insn) == INDEXED_NO)
	  || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
	      && get_attr_update (insn) == UPDATE_YES)
	  || (type == TYPE_CR_LOGICAL
	      && get_attr_cr_logical_3op (insn) == CR_LOGICAL_3OP_YES)
	  || (type == TYPE_EXTS
	      && get_attr_dot (insn) == DOT_YES)
	  || (type == TYPE_SHIFT
	      && get_attr_dot (insn) == DOT_YES
	      && get_attr_var_shift (insn) == VAR_SHIFT_NO)
	  || (type == TYPE_MUL
	      && get_attr_dot (insn) == DOT_YES)
	  || type == TYPE_DIV
	  || (type == TYPE_INSERT
	      && get_attr_size (insn) == SIZE_32))
	return true;
    }

  return false;
}
/* Return true if INSN can be issued only from the branch slot.  */

static bool
is_branch_slot_insn (rtx_insn *insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_BRANCH || type == TYPE_JMPREG)
	return true;
      return false;
    }

  return false;
}
/* Return true if OUT_INSN sets a value that is used in the address
   generation computation of IN_INSN.  */

static bool
set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;

  /* For performance reasons, only handle the simple case where
     both loads are a single_set.  */
  out_set = single_set (out_insn);
  if (out_set)
    {
      in_set = single_set (in_insn);
      if (in_set)
	return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
    }

  return false;
}
/* Try to determine the base/offset/size parts of the given MEM.
   Return true if successful, false if the values could not all be
   determined.

   This function only looks for REG or REG+CONST address forms.
   REG+REG address form will return false.  */

static bool
get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
		  HOST_WIDE_INT *size)
{
  rtx addr_rtx;
  if MEM_SIZE_KNOWN_P (mem)
    *size = MEM_SIZE (mem);
  else
    return false;

  addr_rtx = (XEXP (mem, 0));
  if (GET_CODE (addr_rtx) == PRE_MODIFY)
    addr_rtx = XEXP (addr_rtx, 1);

  *offset = 0;
  while (GET_CODE (addr_rtx) == PLUS
	 && CONST_INT_P (XEXP (addr_rtx, 1)))
    {
      *offset += INTVAL (XEXP (addr_rtx, 1));
      addr_rtx = XEXP (addr_rtx, 0);
    }
  if (!REG_P (addr_rtx))
    return false;

  *base = addr_rtx;
  return true;
}
/* Return true if the target storage locations of MEM1 and MEM2 are
   adjacent.  */

static bool
adjacent_mem_locations (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
	    && ((off1 + size1 == off2)
		|| (off2 + size2 == off1)));

  return false;
}
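/* Example: two 8-byte stores to 0(r9) and 8(r9) satisfy off1 + size1
   == off2 and are adjacent; stores to 0(r9) and 16(r9) are not.  A
   REG+REG address makes get_memref_parts fail, so we conservatively
   report non-adjacent.  */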
/* Return true if it can be determined that the two MEM locations
   overlap by at least 1 byte based on base reg/offset/size.  */

static bool
mem_locations_overlap (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
	    && (((off1 <= off2) && (off1 + size1 > off2))
		|| ((off2 <= off1) && (off2 + size2 > off1))));

  return false;
}
/* A C statement (sans semicolon) to update the integer scheduling
   priority INSN_PRIORITY (INSN).  Increase the priority to execute the
   INSN earlier, reduce the priority to execute INSN later.  Do not
   define this macro if you do not need to adjust the scheduling
   priorities of insns.  */

static int
rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
{
  rtx load_mem, str_mem;
  /* On machines (like the 750) which have asymmetric integer units,
     where one integer unit can do multiply and divides and the other
     can't, reduce the priority of multiply/divide so it is scheduled
     before other integer operations.  */

#if 0
  if (! INSN_P (insn))
    return priority;

  if (GET_CODE (PATTERN (insn)) == USE)
    return priority;

  switch (rs6000_tune) {
  case PROCESSOR_PPC750:
    switch (get_attr_type (insn))
      {
      default:
	break;

      case TYPE_MUL:
      case TYPE_DIV:
	fprintf (stderr, "priority was %#x (%d) before adjustment\n",
		 priority, priority);
	if (priority >= 0 && priority < 0x01000000)
	  priority >>= 3;
	break;
      }
  }
#endif

  if (insn_must_be_first_in_group (insn)
      && reload_completed
      && current_sched_info->sched_max_insns_priority
      && rs6000_sched_restricted_insns_priority)
    {
      /* Prioritize insns that can be dispatched only in the first
	 dispatch slot.  */
      if (rs6000_sched_restricted_insns_priority == 1)
	/* Attach highest priority to insn.  This means that in
	   haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
	   precede 'priority' (critical path) considerations.  */
	return current_sched_info->sched_max_insns_priority;
      else if (rs6000_sched_restricted_insns_priority == 2)
	/* Increase priority of insn by a minimal amount.  This means that in
	   haifa-sched.c:ready_sort(), only 'priority' (critical path)
	   considerations precede dispatch-slot restriction considerations.  */
	return (priority + 1);
    }

  if (rs6000_tune == PROCESSOR_POWER6
      && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
	  || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
    /* Attach highest priority to insn if the scheduler has just issued two
       stores and this instruction is a load, or two loads and this instruction
       is a store.  Power6 wants loads and stores scheduled alternately
       when possible.  */
    return current_sched_info->sched_max_insns_priority;

  return priority;
}
/* Return true if the instruction is nonpipelined on the Cell.  */
static bool
is_nonpipeline_insn (rtx_insn *insn)
{
  enum attr_type type;
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  type = get_attr_type (insn);
  if (type == TYPE_MUL
      || type == TYPE_DIV
      || type == TYPE_SDIV
      || type == TYPE_DDIV
      || type == TYPE_SSQRT
      || type == TYPE_DSQRT
      || type == TYPE_MFCR
      || type == TYPE_MFCRF
      || type == TYPE_MFJMPR)
    return true;

  return false;
}
30471 rs6000_issue_rate (void)
30473 /* Unless scheduling for register pressure, use issue rate of 1 for
30474 first scheduling pass to decrease degradation. */
30475 if (!reload_completed
&& !flag_sched_pressure
)
30478 switch (rs6000_tune
) {
30479 case PROCESSOR_RS64A
:
30480 case PROCESSOR_PPC601
: /* ? */
30481 case PROCESSOR_PPC7450
:
30483 case PROCESSOR_PPC440
:
30484 case PROCESSOR_PPC603
:
30485 case PROCESSOR_PPC750
:
30486 case PROCESSOR_PPC7400
:
30487 case PROCESSOR_PPC8540
:
30488 case PROCESSOR_PPC8548
:
30489 case PROCESSOR_CELL
:
30490 case PROCESSOR_PPCE300C2
:
30491 case PROCESSOR_PPCE300C3
:
30492 case PROCESSOR_PPCE500MC
:
30493 case PROCESSOR_PPCE500MC64
:
30494 case PROCESSOR_PPCE5500
:
30495 case PROCESSOR_PPCE6500
:
30496 case PROCESSOR_TITAN
:
30498 case PROCESSOR_PPC476
:
30499 case PROCESSOR_PPC604
:
30500 case PROCESSOR_PPC604e
:
30501 case PROCESSOR_PPC620
:
30502 case PROCESSOR_PPC630
:
30504 case PROCESSOR_POWER4
:
30505 case PROCESSOR_POWER5
:
30506 case PROCESSOR_POWER6
:
30507 case PROCESSOR_POWER7
:
30509 case PROCESSOR_POWER8
:
30511 case PROCESSOR_POWER9
:
/* Return how many instructions to look ahead for better insn
   scheduling.  */

static int
rs6000_use_sched_lookahead (void)
{
  switch (rs6000_tune)
    {
    case PROCESSOR_PPC8540:
    case PROCESSOR_PPC8548:
      return 4;

    case PROCESSOR_CELL:
      return (reload_completed ? 8 : 0);

    default:
      return 0;
    }
}
/* We are choosing insn from the ready queue.  Return zero if INSN can be
   chosen.  */

static int
rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
{
  if (ready_index == 0)
    return 0;

  if (rs6000_tune != PROCESSOR_CELL)
    return 0;

  gcc_assert (insn != NULL_RTX && INSN_P (insn));

  if (!reload_completed
      || is_nonpipeline_insn (insn)
      || is_microcoded_insn (insn))
    return 1;

  return 0;
}
/* Determine if PAT refers to memory.  If so, set MEM_REF to the MEM rtx
   and return true.  */

static bool
find_mem_ref (rtx pat, rtx *mem_ref)
{
  const char *fmt;
  int i, j;

  /* stack_tie does not produce any real memory traffic.  */
  if (tie_operand (pat, VOIDmode))
    return false;

  if (GET_CODE (pat) == MEM)
    {
      *mem_ref = pat;
      return true;
    }

  /* Recursively process the pattern.  */
  fmt = GET_RTX_FORMAT (GET_CODE (pat));

  for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
	{
	  if (find_mem_ref (XEXP (pat, i), mem_ref))
	    return true;
	}
      else if (fmt[i] == 'E')
	for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
	  {
	    if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
	      return true;
	  }
    }

  return false;
}
/* Determine if PAT is a PATTERN of a load insn.  */

static bool
is_load_insn1 (rtx pat, rtx *load_mem)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return find_mem_ref (SET_SRC (pat), load_mem);

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
	  return true;
    }

  return false;
}

/* Determine if INSN loads from memory.  */

static bool
is_load_insn (rtx insn, rtx *load_mem)
{
  if (!insn || !INSN_P (insn))
    return false;

  return is_load_insn1 (PATTERN (insn), load_mem);
}
/* Determine if PAT is a PATTERN of a store insn.  */

static bool
is_store_insn1 (rtx pat, rtx *str_mem)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return find_mem_ref (SET_DEST (pat), str_mem);

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
	if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
	  return true;
    }

  return false;
}

/* Determine if INSN stores to memory.  */

static bool
is_store_insn (rtx insn, rtx *str_mem)
{
  if (!insn || !INSN_P (insn))
    return false;

  return is_store_insn1 (PATTERN (insn), str_mem);
}
/* Return whether TYPE is a Power9 pairable vector instruction type.  */

static bool
is_power9_pairable_vec_type (enum attr_type type)
{
  switch (type)
    {
    case TYPE_VECSIMPLE:
    case TYPE_VECCOMPLEX:
    case TYPE_VECFLOAT:
    case TYPE_VECDOUBLE:
      return true;
    default:
      break;
    }
  return false;
}
/* Returns whether the dependence between INSN and NEXT is considered
   costly by the given target.  */

static bool
rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
{
  rtx insn;
  rtx next;
  rtx load_mem, str_mem;

  /* If the flag is not enabled - no dependence is considered costly;
     allow all dependent insns in the same group.
     This is the most aggressive option.  */
  if (rs6000_sched_costly_dep == no_dep_costly)
    return false;

  /* If the flag is set to 1 - a dependence is always considered costly;
     do not allow dependent instructions in the same group.
     This is the most conservative option.  */
  if (rs6000_sched_costly_dep == all_deps_costly)
    return true;

  insn = DEP_PRO (dep);
  next = DEP_CON (dep);

  if (rs6000_sched_costly_dep == store_to_load_dep_costly
      && is_load_insn (next, &load_mem)
      && is_store_insn (insn, &str_mem))
    /* Prevent load after store in the same group.  */
    return true;

  if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
      && is_load_insn (next, &load_mem)
      && is_store_insn (insn, &str_mem)
      && DEP_TYPE (dep) == REG_DEP_TRUE
      && mem_locations_overlap (str_mem, load_mem))
    /* Prevent load after store in the same group if it is a true
       dependence.  */
    return true;

  /* The flag is set to X; dependences with latency >= X are considered costly,
     and will not be scheduled in the same group.  */
  if (rs6000_sched_costly_dep <= max_dep_latency
      && ((cost - distance) >= (int) rs6000_sched_costly_dep))
    return true;

  return false;
}
/* Return the next insn after INSN that is found before TAIL is reached,
   skipping any "non-active" insns - insns that will not actually occupy
   an issue slot.  Return NULL_RTX if such an insn is not found.  */

static rtx_insn *
get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
{
  if (insn == NULL_RTX || insn == tail)
    return NULL;

  while (1)
    {
      insn = NEXT_INSN (insn);
      if (insn == NULL_RTX || insn == tail)
	return NULL;

      if (CALL_P (insn)
	  || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
	  || (NONJUMP_INSN_P (insn)
	      && GET_CODE (PATTERN (insn)) != USE
	      && GET_CODE (PATTERN (insn)) != CLOBBER
	      && INSN_CODE (insn) != CODE_FOR_stack_tie))
	break;
    }
  return insn;
}
/* Do Power9 specific sched_reorder2 reordering of ready list.  */

static int
power9_sched_reorder2 (rtx_insn **ready, int lastpos)
{
  int pos;
  int i;
  rtx_insn *tmp;
  enum attr_type type, type2;

  type = get_attr_type (last_scheduled_insn);

  /* Try to issue fixed point divides back-to-back in pairs so they will be
     routed to separate execution units and execute in parallel.  */
  if (type == TYPE_DIV && divide_cnt == 0)
    {
      /* First divide has been scheduled.  */
      divide_cnt = 1;

      /* Scan the ready list looking for another divide, if found move it
	 to the end of the list so it is chosen next.  */
      pos = lastpos;
      while (pos >= 0)
	{
	  if (recog_memoized (ready[pos]) >= 0
	      && get_attr_type (ready[pos]) == TYPE_DIV)
	    {
	      tmp = ready[pos];
	      for (i = pos; i < lastpos; i++)
		ready[i] = ready[i + 1];
	      ready[lastpos] = tmp;
	      break;
	    }
	  pos--;
	}
    }
  else
    {
      /* Last insn was the 2nd divide or not a divide, reset the counter.  */
      divide_cnt = 0;

      /* The best dispatch throughput for vector and vector load insns can be
	 achieved by interleaving a vector and vector load such that they'll
	 dispatch to the same superslice.  If this pairing cannot be achieved
	 then it is best to pair vector insns together and vector load insns
	 together.

	 To aid in this pairing, vec_pairing maintains the current state with
	 the following values:

	 0 : Initial state, no vecload/vector pairing has been started.

	 1 : A vecload or vector insn has been issued and a candidate for
	     pairing has been found and moved to the end of the ready
	     list.  */
      if (type == TYPE_VECLOAD)
	{
	  /* Issued a vecload.  */
	  if (vec_pairing == 0)
	    {
	      int vecload_pos = -1;
	      /* We issued a single vecload, look for a vector insn to pair it
		 with.  If one isn't found, try to pair another vecload.  */
	      pos = lastpos;
	      while (pos >= 0)
		{
		  if (recog_memoized (ready[pos]) >= 0)
		    {
		      type2 = get_attr_type (ready[pos]);
		      if (is_power9_pairable_vec_type (type2))
			{
			  /* Found a vector insn to pair with, move it to the
			     end of the ready list so it is scheduled next.  */
			  tmp = ready[pos];
			  for (i = pos; i < lastpos; i++)
			    ready[i] = ready[i + 1];
			  ready[lastpos] = tmp;
			  vec_pairing = 1;
			  return cached_can_issue_more;
			}
		      else if (type2 == TYPE_VECLOAD && vecload_pos == -1)
			/* Remember position of first vecload seen.  */
			vecload_pos = pos;
		    }
		  pos--;
		}
	      if (vecload_pos >= 0)
		{
		  /* Didn't find a vector to pair with but did find a vecload,
		     move it to the end of the ready list.  */
		  tmp = ready[vecload_pos];
		  for (i = vecload_pos; i < lastpos; i++)
		    ready[i] = ready[i + 1];
		  ready[lastpos] = tmp;
		  vec_pairing = 1;
		  return cached_can_issue_more;
		}
	    }
	}
      else if (is_power9_pairable_vec_type (type))
	{
	  /* Issued a vector operation.  */
	  if (vec_pairing == 0)
	    {
	      int vec_pos = -1;
	      /* We issued a single vector insn, look for a vecload to pair it
		 with.  If one isn't found, try to pair another vector.  */
	      pos = lastpos;
	      while (pos >= 0)
		{
		  if (recog_memoized (ready[pos]) >= 0)
		    {
		      type2 = get_attr_type (ready[pos]);
		      if (type2 == TYPE_VECLOAD)
			{
			  /* Found a vecload insn to pair with, move it to the
			     end of the ready list so it is scheduled next.  */
			  tmp = ready[pos];
			  for (i = pos; i < lastpos; i++)
			    ready[i] = ready[i + 1];
			  ready[lastpos] = tmp;
			  vec_pairing = 1;
			  return cached_can_issue_more;
			}
		      else if (is_power9_pairable_vec_type (type2)
			       && vec_pos == -1)
			/* Remember position of first vector insn seen.  */
			vec_pos = pos;
		    }
		  pos--;
		}
	      if (vec_pos >= 0)
		{
		  /* Didn't find a vecload to pair with but did find a vector
		     insn, move it to the end of the ready list.  */
		  tmp = ready[vec_pos];
		  for (i = vec_pos; i < lastpos; i++)
		    ready[i] = ready[i + 1];
		  ready[lastpos] = tmp;
		  vec_pairing = 1;
		  return cached_can_issue_more;
		}
	    }
	}

      /* We've either finished a vec/vecload pair, couldn't find an insn to
	 continue the current pair, or the last insn had nothing to do with
	 pairing.  In any case, reset the state.  */
      vec_pairing = 0;
    }

  return cached_can_issue_more;
}
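/* Example of the pairing effect: if a vector op (say an xvadddp) was
   just scheduled, the first vecload (e.g. an lxv form) found on the
   ready list is moved to ready[lastpos], which the scheduler picks next,
   letting the pair dispatch to the same superslice.  The opcode names
   here are only illustrative.  */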
/* We are about to begin issuing insns for this clock cycle.  */

static int
rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
		      rtx_insn **ready ATTRIBUTE_UNUSED,
		      int *pn_ready ATTRIBUTE_UNUSED,
		      int clock_var ATTRIBUTE_UNUSED)
{
  int n_ready = *pn_ready;

  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder :\n");

  /* Reorder the ready list, if the second to last ready insn
     is a nonpipeline insn.  */
  if (rs6000_tune == PROCESSOR_CELL && n_ready > 1)
    {
      if (is_nonpipeline_insn (ready[n_ready - 1])
	  && (recog_memoized (ready[n_ready - 2]) > 0))
	/* Simply swap first two insns.  */
	std::swap (ready[n_ready - 1], ready[n_ready - 2]);
    }

  if (rs6000_tune == PROCESSOR_POWER6)
    load_store_pendulum = 0;

  return rs6000_issue_rate ();
}
/* Like rs6000_sched_reorder, but called after issuing each insn.  */

static int
rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
		       int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
{
  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder2 :\n");

  /* For Power6, we need to handle some special cases to try and keep the
     store queue from overflowing and triggering expensive flushes.

     This code monitors how load and store instructions are being issued
     and skews the ready list one way or the other to increase the likelihood
     that a desired instruction is issued at the proper time.

     A couple of things are done.  First, we maintain a "load_store_pendulum"
     to track the current state of load/store issue.

       - If the pendulum is at zero, then no loads or stores have been
	 issued in the current cycle so we do nothing.

       - If the pendulum is 1, then a single load has been issued in this
	 cycle and we attempt to locate another load in the ready list to
	 issue with it.

       - If the pendulum is -2, then two stores have already been
	 issued in this cycle, so we increase the priority of the first load
	 in the ready list to increase its likelihood of being chosen first
	 in the next cycle.

       - If the pendulum is -1, then a single store has been issued in this
	 cycle and we attempt to locate another store in the ready list to
	 issue with it, preferring a store to an adjacent memory location to
	 facilitate store pairing in the store queue.

       - If the pendulum is 2, then two loads have already been
	 issued in this cycle, so we increase the priority of the first store
	 in the ready list to increase its likelihood of being chosen first
	 in the next cycle.

       - If the pendulum < -2 or > 2, then do nothing.

     Note: This code covers the most common scenarios.  There exist non
	   load/store instructions which make use of the LSU and which
	   would need to be accounted for to strictly model the behavior
	   of the machine.  Those instructions are currently unaccounted
	   for to help minimize compile time overhead of this code.  */
  if (rs6000_tune == PROCESSOR_POWER6 && last_scheduled_insn)
    {
      int pos;
      int i;
      rtx_insn *tmp;
      rtx load_mem, str_mem;

      if (is_store_insn (last_scheduled_insn, &str_mem))
	/* Issuing a store, swing the load_store_pendulum to the left.  */
	load_store_pendulum--;
      else if (is_load_insn (last_scheduled_insn, &load_mem))
	/* Issuing a load, swing the load_store_pendulum to the right.  */
	load_store_pendulum++;
      else
	return cached_can_issue_more;

      /* If the pendulum is balanced, or there is only one instruction on
	 the ready list, then all is well, so return.  */
      if ((load_store_pendulum == 0) || (*pn_ready <= 1))
	return cached_can_issue_more;

      if (load_store_pendulum == 1)
	{
	  /* A load has been issued in this cycle.  Scan the ready list
	     for another load to issue with it.  */
	  pos = *pn_ready - 1;

	  while (pos >= 0)
	    {
	      if (is_load_insn (ready[pos], &load_mem))
		{
		  /* Found a load.  Move it to the head of the ready list,
		     and adjust its priority so that it is more likely to
		     stay there.  */
		  tmp = ready[pos];
		  for (i = pos; i < *pn_ready - 1; i++)
		    ready[i] = ready[i + 1];
		  ready[*pn_ready - 1] = tmp;

		  if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
		    INSN_PRIORITY (tmp)++;
		  break;
		}
	      pos--;
	    }
	}
      else if (load_store_pendulum == -2)
	{
	  /* Two stores have been issued in this cycle.  Increase the
	     priority of the first load in the ready list to favor it for
	     issuing in the next cycle.  */
	  pos = *pn_ready - 1;

	  while (pos >= 0)
	    {
	      if (is_load_insn (ready[pos], &load_mem)
		  && !sel_sched_p ()
		  && INSN_PRIORITY_KNOWN (ready[pos]))
		{
		  INSN_PRIORITY (ready[pos])++;

		  /* Adjust the pendulum to account for the fact that a load
		     was found and increased in priority.  This is to prevent
		     increasing the priority of multiple loads.  */
		  load_store_pendulum--;
		  break;
		}
	      pos--;
	    }
	}
      else if (load_store_pendulum == -1)
	{
	  /* A store has been issued in this cycle.  Scan the ready list for
	     another store to issue with it, preferring a store to an adjacent
	     memory location.  */
	  int first_store_pos = -1;

	  pos = *pn_ready - 1;

	  while (pos >= 0)
	    {
	      if (is_store_insn (ready[pos], &str_mem))
		{
		  rtx str_mem2;
		  /* Maintain the index of the first store found on the
		     list.  */
		  if (first_store_pos == -1)
		    first_store_pos = pos;

		  if (is_store_insn (last_scheduled_insn, &str_mem2)
		      && adjacent_mem_locations (str_mem, str_mem2))
		    {
		      /* Found an adjacent store.  Move it to the head of the
			 ready list, and adjust its priority so that it is
			 more likely to stay there.  */
		      tmp = ready[pos];
		      for (i = pos; i < *pn_ready - 1; i++)
			ready[i] = ready[i + 1];
		      ready[*pn_ready - 1] = tmp;

		      if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
			INSN_PRIORITY (tmp)++;

		      first_store_pos = -1;

		      break;
		    }
		}
	      pos--;
	    }

	  if (first_store_pos >= 0)
	    {
	      /* An adjacent store wasn't found, but a non-adjacent store was,
		 so move the non-adjacent store to the front of the ready
		 list, and adjust its priority so that it is more likely to
		 stay there.  */
	      tmp = ready[first_store_pos];
	      for (i = first_store_pos; i < *pn_ready - 1; i++)
		ready[i] = ready[i + 1];
	      ready[*pn_ready - 1] = tmp;
	      if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
		INSN_PRIORITY (tmp)++;
	    }
	}
      else if (load_store_pendulum == 2)
	{
	  /* Two loads have been issued in this cycle.  Increase the priority
	     of the first store in the ready list to favor it for issuing in
	     the next cycle.  */
	  pos = *pn_ready - 1;

	  while (pos >= 0)
	    {
	      if (is_store_insn (ready[pos], &str_mem)
		  && !sel_sched_p ()
		  && INSN_PRIORITY_KNOWN (ready[pos]))
		{
		  INSN_PRIORITY (ready[pos])++;

		  /* Adjust the pendulum to account for the fact that a store
		     was found and increased in priority.  This is to prevent
		     increasing the priority of multiple stores.  */
		  load_store_pendulum++;

		  break;
		}
	      pos--;
	    }
	}
    }

  /* Do Power9 dependent reordering if necessary.  */
  if (rs6000_tune == PROCESSOR_POWER9 && last_scheduled_insn
      && recog_memoized (last_scheduled_insn) >= 0)
    return power9_sched_reorder2 (ready, *pn_ready - 1);

  return cached_can_issue_more;
}
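/* Example of the pendulum at work: after two stores issue in one cycle
   (pendulum == -2), the first load found on the ready list gets an
   INSN_PRIORITY bump, so the next cycle is more likely to begin with a
   load and the store queue gets a chance to drain.  */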
/* Return whether the presence of INSN causes a dispatch group termination
   of group WHICH_GROUP.

   If WHICH_GROUP == current_group, this function will return true if INSN
   causes the termination of the current group (i.e, the dispatch group to
   which INSN belongs).  This means that INSN will be the last insn in the
   group it belongs to.

   If WHICH_GROUP == previous_group, this function will return true if INSN
   causes the termination of the previous group (i.e, the dispatch group that
   precedes the group to which INSN belongs).  This means that INSN will be
   the first insn in the group it belongs to.  */

static bool
insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
{
  bool first, last;

  if (! insn)
    return false;

  first = insn_must_be_first_in_group (insn);
  last = insn_must_be_last_in_group (insn);

  if (first && last)
    return true;

  if (which_group == current_group)
    return last;
  else if (which_group == previous_group)
    return first;

  return false;
}
static bool
insn_must_be_first_in_group (rtx_insn *insn)
{
  enum attr_type type;

  if (!insn
      || DEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_tune)
    {
    case PROCESSOR_POWER5:
      if (is_cracked_insn (insn))
	return true;

      /* FALLTHRU */

    case PROCESSOR_POWER4:
      if (is_microcoded_insn (insn))
	return true;

      if (!rs6000_sched_groups)
	return false;

      type = get_attr_type (insn);

      switch (type)
	{
	case TYPE_CR_LOGICAL:
	  return true;
	default:
	  break;
	}
      break;

    case PROCESSOR_POWER6:
      type = get_attr_type (insn);

      switch (type)
	{
	case TYPE_FPCOMPARE:
	  return true;
	case TYPE_SHIFT:
	  if (get_attr_dot (insn) == DOT_NO
	      || get_attr_var_shift (insn) == VAR_SHIFT_NO)
	    return true;
	  break;
	case TYPE_DIV:
	  if (get_attr_size (insn) == SIZE_32)
	    return true;
	  break;
	case TYPE_LOAD:
	case TYPE_STORE:
	case TYPE_FPLOAD:
	case TYPE_FPSTORE:
	  if (get_attr_update (insn) == UPDATE_YES)
	    return true;
	  break;
	default:
	  break;
	}
      break;

    case PROCESSOR_POWER7:
      type = get_attr_type (insn);

      switch (type)
	{
	case TYPE_CR_LOGICAL:
	  return true;
	case TYPE_MUL:
	case TYPE_SHIFT:
	case TYPE_EXTS:
	  if (get_attr_dot (insn) == DOT_YES)
	    return true;
	  break;
	case TYPE_LOAD:
	  if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
	      || get_attr_update (insn) == UPDATE_YES)
	    return true;
	  break;
	case TYPE_STORE:
	case TYPE_FPLOAD:
	case TYPE_FPSTORE:
	  if (get_attr_update (insn) == UPDATE_YES)
	    return true;
	  break;
	default:
	  break;
	}
      break;

    case PROCESSOR_POWER8:
      type = get_attr_type (insn);

      switch (type)
	{
	case TYPE_CR_LOGICAL:
	case TYPE_VECSTORE:
	  return true;
	case TYPE_SHIFT:
	case TYPE_EXTS:
	case TYPE_MUL:
	  if (get_attr_dot (insn) == DOT_YES)
	    return true;
	  break;
	case TYPE_LOAD:
	  if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
	      || get_attr_update (insn) == UPDATE_YES)
	    return true;
	  break;
	case TYPE_STORE:
	  if (get_attr_update (insn) == UPDATE_YES
	      && get_attr_indexed (insn) == INDEXED_YES)
	    return true;
	  break;
	default:
	  break;
	}
      break;

    default:
      break;
    }

  return false;
}
static bool
insn_must_be_last_in_group (rtx_insn *insn)
{
  enum attr_type type;

  if (!insn
      || DEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_tune) {
  case PROCESSOR_POWER4:
  case PROCESSOR_POWER5:
    if (is_microcoded_insn (insn))
      return true;

    if (is_branch_slot_insn (insn))
      return true;

    break;
  case PROCESSOR_POWER6:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_FPCOMPARE:
	return true;
      case TYPE_SHIFT:
	if (get_attr_dot (insn) == DOT_NO
	    || get_attr_var_shift (insn) == VAR_SHIFT_NO)
	  return true;
	break;
      case TYPE_DIV:
	if (get_attr_size (insn) == SIZE_32)
	  return true;
	break;
      default:
	break;
      }
    break;
  case PROCESSOR_POWER7:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_LOAD:
	if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
	    && get_attr_update (insn) == UPDATE_YES)
	  return true;
	break;
      case TYPE_STORE:
	if (get_attr_update (insn) == UPDATE_YES
	    && get_attr_indexed (insn) == INDEXED_YES)
	  return true;
	break;
      default:
	break;
      }
    break;
  case PROCESSOR_POWER8:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_LOAD:
	if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
	    && get_attr_update (insn) == UPDATE_YES)
	  return true;
	break;
      case TYPE_STORE:
	if (get_attr_update (insn) == UPDATE_YES
	    && get_attr_indexed (insn) == INDEXED_YES)
	  return true;
	break;
      default:
	break;
      }
    break;
  default:
    break;
  }

  return false;
}
/* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
   dispatch group) from the insns in GROUP_INSNS.  Return false otherwise.  */

static bool
is_costly_group (rtx *group_insns, rtx next_insn)
{
  int i;
  int issue_rate = rs6000_issue_rate ();

  for (i = 0; i < issue_rate; i++)
    {
      sd_iterator_def sd_it;
      dep_t dep;
      rtx insn = group_insns[i];

      if (!insn)
	continue;

      FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
	{
	  rtx next = DEP_CON (dep);

	  if (next == next_insn
	      && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
	    return true;
	}
    }

  return false;
}
31520 /* Utility of the function redefine_groups.
31521 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
31522 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
31523 to keep it "far" (in a separate group) from GROUP_INSNS, following
31524 one of the following schemes, depending on the value of the flag
31525 -minsert_sched_nops = X:
31526 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
31527 in order to force NEXT_INSN into a separate group.
31528 (2) X < sched_finish_regroup_exact: insert exactly X nops.
31529 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
31530 insertion (has a group just ended, how many vacant issue slots remain in the
31531 last group, and how many dispatch groups were encountered so far). */
static int
force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
                 rtx_insn *next_insn, bool *group_end, int can_issue_more,
                 int *group_count)
{
  rtx nop;
  bool force;
  int issue_rate = rs6000_issue_rate ();
  bool end = *group_end;
  int i;

  if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
    return can_issue_more;

  if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
    return can_issue_more;

  force = is_costly_group (group_insns, next_insn);
  if (!force)
    return can_issue_more;

  if (sched_verbose > 6)
    fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
             *group_count, can_issue_more);

  if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
    {
      if (*group_end)
        can_issue_more = 0;

      /* Since only a branch can be issued in the last issue_slot, it is
         sufficient to insert 'can_issue_more - 1' nops if next_insn is not
         a branch.  If next_insn is a branch, we insert 'can_issue_more' nops;
         in this case the last nop will start a new group and the branch
         will be forced to the new group.  */
      if (can_issue_more && !is_branch_slot_insn (next_insn))
        can_issue_more--;

      /* Do we have a special group ending nop? */
      if (rs6000_tune == PROCESSOR_POWER6 || rs6000_tune == PROCESSOR_POWER7
          || rs6000_tune == PROCESSOR_POWER8)
        {
          nop = gen_group_ending_nop ();
          emit_insn_before (nop, next_insn);
          can_issue_more = 0;
        }
      else
        while (can_issue_more > 0)
          {
            nop = gen_nop ();
            emit_insn_before (nop, next_insn);
            can_issue_more--;
          }

      *group_end = true;
      return 0;
    }

  if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
    {
      int n_nops = rs6000_sched_insert_nops;

      /* Nops can't be issued from the branch slot, so the effective
         issue_rate for nops is 'issue_rate - 1'.  */
      if (can_issue_more == 0)
        can_issue_more = issue_rate;
      can_issue_more--;
      if (can_issue_more == 0)
        {
          can_issue_more = issue_rate - 1;
          (*group_count)++;
          end = true;
          for (i = 0; i < issue_rate; i++)
            {
              group_insns[i] = 0;
            }
        }

      while (n_nops > 0)
        {
          nop = gen_nop ();
          emit_insn_before (nop, next_insn);
          if (can_issue_more == issue_rate - 1) /* new group begins */
            end = false;
          can_issue_more--;
          if (can_issue_more == 0)
            {
              can_issue_more = issue_rate - 1;
              (*group_count)++;
              end = true;
              for (i = 0; i < issue_rate; i++)
                {
                  group_insns[i] = 0;
                }
            }
          n_nops--;
        }

      /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1').  */
      can_issue_more++;

      /* Is next_insn going to start a new group?  */
      *group_end
        = (end
           || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
           || (can_issue_more <= 2 && is_cracked_insn (next_insn))
           || (can_issue_more < issue_rate &&
               insn_terminates_group_p (next_insn, previous_group)));
      if (*group_end && end)
        (*group_count)--;

      if (sched_verbose > 6)
        fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
                 *group_count, can_issue_more);

      return can_issue_more;
    }

  return can_issue_more;
}
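
/* Illustrative sketch (not compiled): how many nops the
   sched_finish_regroup_exact scheme above ends up emitting, assuming
   issue_rate == 4.  The numbers follow directly from the code above;
   the helper name is hypothetical.  */
#if 0
static int
nops_needed_example (int can_issue_more, bool next_is_branch)
{
  /* A non-branch only needs the non-branch slots cleared, so one fewer
     nop; a branch gets a full 'can_issue_more' nops so that the last nop
     opens a new group and the branch lands in it.  */
  return next_is_branch ? can_issue_more : can_issue_more - 1;
}
#endif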
/* This function tries to synch the dispatch groups that the compiler "sees"
   with the dispatch groups that the processor dispatcher is expected to
   form in practice.  It tries to achieve this synchronization by forcing the
   estimated processor grouping on the compiler (as opposed to the function
   'pad_groups' which tries to force the scheduler's grouping on the
   processor).

   The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
   examines the (estimated) dispatch groups that will be formed by the
   processor dispatcher.  It marks these group boundaries to reflect the
   estimated processor grouping, overriding the grouping that the scheduler
   had marked.  Depending on the value of the flag '-minsert-sched-nops' this
   function can force certain insns into separate groups or force a certain
   distance between them by inserting nops, for example, if there exists a
   "costly dependence" between the insns.

   The function estimates the group boundaries that the processor will form
   as follows:  It keeps track of how many vacant issue slots are available
   after each insn.  A subsequent insn will start a new group if one of the
   following 4 cases applies:
   - no more vacant issue slots remain in the current dispatch group.
   - only the last issue slot, which is the branch slot, is vacant, but the
     next insn is not a branch.
   - only the last 2 or less issue slots, including the branch slot, are
     vacant, which means that a cracked insn (which occupies two issue slots)
     can't be issued in this group.
   - less than 'issue_rate' slots are vacant, and the next insn always needs
     to start a new group.  */
static int
redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
                 rtx_insn *tail)
{
  rtx_insn *insn, *next_insn;
  int issue_rate;
  int can_issue_more;
  int slot, i;
  bool group_end;
  int group_count = 0;
  rtx *group_insns;

  /* Initialize.  */
  issue_rate = rs6000_issue_rate ();
  group_insns = XALLOCAVEC (rtx, issue_rate);
  for (i = 0; i < issue_rate; i++)
    {
      group_insns[i] = 0;
    }
  can_issue_more = issue_rate;
  slot = 0;
  insn = get_next_active_insn (prev_head_insn, tail);
  group_end = false;

  while (insn != NULL_RTX)
    {
      slot = (issue_rate - can_issue_more);
      group_insns[slot] = insn;
      can_issue_more =
        rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
      if (insn_terminates_group_p (insn, current_group))
        can_issue_more = 0;

      next_insn = get_next_active_insn (insn, tail);
      if (next_insn == NULL_RTX)
        return group_count + 1;

      /* Is next_insn going to start a new group?  */
      group_end
        = (can_issue_more == 0
           || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
           || (can_issue_more <= 2 && is_cracked_insn (next_insn))
           || (can_issue_more < issue_rate &&
               insn_terminates_group_p (next_insn, previous_group)));

      can_issue_more = force_new_group (sched_verbose, dump, group_insns,
                                        next_insn, &group_end, can_issue_more,
                                        &group_count);

      if (group_end)
        {
          group_count++;
          can_issue_more = 0;
          for (i = 0; i < issue_rate; i++)
            {
              group_insns[i] = 0;
            }
        }

      if (GET_MODE (next_insn) == TImode && can_issue_more)
        PUT_MODE (next_insn, VOIDmode);
      else if (!can_issue_more && GET_MODE (next_insn) != TImode)
        PUT_MODE (next_insn, TImode);

      insn = next_insn;
      if (can_issue_more == 0)
        can_issue_more = issue_rate;
    } /* while */

  return group_count;
}
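
/* Minimal sketch (not compiled): the convention used above is that an insn
   which begins a dispatch group is marked with TImode.  A hypothetical walk
   that counts groups by that marking:  */
#if 0
static int
count_marked_groups_example (rtx_insn *head, rtx_insn *tail)
{
  int groups = 0;
  for (rtx_insn *insn = head; insn != tail; insn = NEXT_INSN (insn))
    if (NONDEBUG_INSN_P (insn) && GET_MODE (insn) == TImode)
      groups++;  /* TImode marks the first insn of a new dispatch group.  */
  return groups;
}
#endif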
/* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
   dispatch group boundaries that the scheduler had marked.  Pad with nops
   any dispatch groups which have vacant issue slots, in order to force the
   scheduler's grouping on the processor dispatcher.  The function
   returns the number of dispatch groups found.  */
static int
pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
            rtx_insn *tail)
{
  rtx_insn *insn, *next_insn;
  rtx nop;
  int issue_rate;
  int can_issue_more;
  int group_end;
  int group_count = 0;

  /* Initialize issue_rate.  */
  issue_rate = rs6000_issue_rate ();
  can_issue_more = issue_rate;

  insn = get_next_active_insn (prev_head_insn, tail);
  next_insn = get_next_active_insn (insn, tail);

  while (insn != NULL_RTX)
    {
      can_issue_more =
        rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);

      group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);

      if (next_insn == NULL_RTX)
        break;

      if (group_end)
        {
          /* If the scheduler had marked group termination at this location
             (between insn and next_insn), and neither insn nor next_insn will
             force group termination, pad the group with nops to force group
             termination.  */
          if (can_issue_more
              && (rs6000_sched_insert_nops == sched_finish_pad_groups)
              && !insn_terminates_group_p (insn, current_group)
              && !insn_terminates_group_p (next_insn, previous_group))
            {
              if (!is_branch_slot_insn (next_insn))
                can_issue_more--;

              while (can_issue_more)
                {
                  nop = gen_nop ();
                  emit_insn_before (nop, next_insn);
                  can_issue_more--;
                }
            }

          can_issue_more = issue_rate;
          group_count++;
        }

      insn = next_insn;
      next_insn = get_next_active_insn (insn, tail);
    }

  return group_count;
}
/* We're beginning a new block.  Initialize data structures as necessary.  */

static void
rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
                   int sched_verbose ATTRIBUTE_UNUSED,
                   int max_ready ATTRIBUTE_UNUSED)
{
  last_scheduled_insn = NULL;
  load_store_pendulum = 0;
  divide_cnt = 0;
  vec_pairing = 0;
}
/* The following function is called at the end of scheduling BB.
   After reload, it inserts nops at insn group bundling.  */

static void
rs6000_sched_finish (FILE *dump, int sched_verbose)
{
  int n_groups;

  if (sched_verbose)
    fprintf (dump, "=== Finishing schedule.\n");

  if (reload_completed && rs6000_sched_groups)
    {
      /* Do not run sched_finish hook when selective scheduling enabled.  */
      if (sel_sched_p ())
        return;

      if (rs6000_sched_insert_nops == sched_finish_none)
        return;

      if (rs6000_sched_insert_nops == sched_finish_pad_groups)
        n_groups = pad_groups (dump, sched_verbose,
                               current_sched_info->prev_head,
                               current_sched_info->next_tail);
      else
        n_groups = redefine_groups (dump, sched_verbose,
                                    current_sched_info->prev_head,
                                    current_sched_info->next_tail);

      if (sched_verbose >= 6)
        {
          fprintf (dump, "ngroups = %d\n", n_groups);
          print_rtl (dump, current_sched_info->prev_head);
          fprintf (dump, "Done finish_sched\n");
        }
    }
}
struct rs6000_sched_context
{
  short cached_can_issue_more;
  rtx_insn *last_scheduled_insn;
  int load_store_pendulum;
  int divide_cnt;
  int vec_pairing;
};

typedef struct rs6000_sched_context rs6000_sched_context_def;
typedef rs6000_sched_context_def *rs6000_sched_context_t;
/* Allocate store for new scheduling context.  */
static void *
rs6000_alloc_sched_context (void)
{
  return xmalloc (sizeof (rs6000_sched_context_def));
}
/* If CLEAN_P is true then initializes _SC with clean data,
   and from the global context otherwise.  */
static void
rs6000_init_sched_context (void *_sc, bool clean_p)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  if (clean_p)
    {
      sc->cached_can_issue_more = 0;
      sc->last_scheduled_insn = NULL;
      sc->load_store_pendulum = 0;
      sc->divide_cnt = 0;
      sc->vec_pairing = 0;
    }
  else
    {
      sc->cached_can_issue_more = cached_can_issue_more;
      sc->last_scheduled_insn = last_scheduled_insn;
      sc->load_store_pendulum = load_store_pendulum;
      sc->divide_cnt = divide_cnt;
      sc->vec_pairing = vec_pairing;
    }
}
/* Sets the global scheduling context to the one pointed to by _SC.  */
static void
rs6000_set_sched_context (void *_sc)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  gcc_assert (sc != NULL);

  cached_can_issue_more = sc->cached_can_issue_more;
  last_scheduled_insn = sc->last_scheduled_insn;
  load_store_pendulum = sc->load_store_pendulum;
  divide_cnt = sc->divide_cnt;
  vec_pairing = sc->vec_pairing;
}
/* Free _SC.  */
static void
rs6000_free_sched_context (void *_sc)
{
  gcc_assert (_sc != NULL);

  free (_sc);
}
static bool
rs6000_sched_can_speculate_insn (rtx_insn *insn)
{
  switch (get_attr_type (insn))
    {
    case TYPE_DIV:
    case TYPE_SDIV:
    case TYPE_DDIV:
    case TYPE_VECDIV:
    case TYPE_INSERT:
    case TYPE_EXT:
      return false;

    default:
      return true;
    }
}
/* Length in units of the trampoline for entering a nested function.  */

static int
rs6000_trampoline_size (void)
{
  int ret = 0;

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_AIX:
      ret = (TARGET_32BIT) ? 12 : 24;
      break;

    case ABI_ELFv2:
      gcc_assert (!TARGET_32BIT);
      ret = 32;
      break;

    case ABI_DARWIN:
    case ABI_V4:
      ret = (TARGET_32BIT) ? 40 : 48;
      break;
    }

  return ret;
}
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */

static void
rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
{
  int regsize = (TARGET_32BIT) ? 4 : 8;
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx ctx_reg = force_reg (Pmode, cxt);
  rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    /* Under AIX, just build the 3 word function descriptor */
    case ABI_AIX:
      {
        rtx fnmem, fn_reg, toc_reg;

        if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
          error ("you cannot take the address of a nested function if you use "
                 "the %qs option", "-mno-pointers-to-nested-functions");

        fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
        fn_reg = gen_reg_rtx (Pmode);
        toc_reg = gen_reg_rtx (Pmode);

  /* Macro to shorten the code expansions below.  */
# define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)

        m_tramp = replace_equiv_address (m_tramp, addr);

        emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
        emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
        emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
        emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
        emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);

# undef MEM_PLUS
      }
      break;

    /* Under V.4/eabi/darwin, __trampoline_setup does the real work.  */
    case ABI_ELFv2:
    case ABI_DARWIN:
    case ABI_V4:
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
                         LCT_NORMAL, VOIDmode,
                         addr, Pmode,
                         GEN_INT (rs6000_trampoline_size ()), SImode,
                         fnaddr, Pmode,
                         ctx_reg, Pmode);
      break;
    }
}
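
/* Illustrative only (not compiled): the 3-word AIX function descriptor
   filled in by the ABI_AIX case above, expressed as a struct.  The word
   size follows 'regsize' (4 bytes for -m32, 8 for -m64); the struct name
   is hypothetical.  */
#if 0
struct aix_func_descriptor_example
{
  void *code_addr;     /* entry point, copied from the target's descriptor */
  void *toc_value;     /* TOC pointer, copied likewise (offset regsize) */
  void *static_chain;  /* CXT, stored at offset 2*regsize */
};
#endif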
/* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
   identifier as an argument, so the front end shouldn't look it up.  */

static bool
rs6000_attribute_takes_identifier_p (const_tree attr_id)
{
  return is_attribute_p ("altivec", attr_id);
}
/* Handle the "altivec" attribute.  The attribute may have
   arguments as follows:

        __attribute__((altivec(vector__)))
        __attribute__((altivec(pixel__)))       (always followed by 'unsigned short')
        __attribute__((altivec(bool__)))        (always followed by 'unsigned')

   and may appear more than once (e.g., 'vector bool char') in a
   given declaration.  */
static tree
rs6000_handle_altivec_attribute (tree *node,
                                 tree name ATTRIBUTE_UNUSED,
                                 tree args,
                                 int flags ATTRIBUTE_UNUSED,
                                 bool *no_add_attrs)
{
  tree type = *node, result = NULL_TREE;
  machine_mode mode;
  int unsigned_p;
  char altivec_type
    = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
        && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
       ? *IDENTIFIER_POINTER (TREE_VALUE (args))
       : '?');

  while (POINTER_TYPE_P (type)
         || TREE_CODE (type) == FUNCTION_TYPE
         || TREE_CODE (type) == METHOD_TYPE
         || TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  mode = TYPE_MODE (type);

  /* Check for invalid AltiVec type qualifiers.  */
  if (type == long_double_type_node)
    error ("use of %<long double%> in AltiVec types is invalid");
  else if (type == boolean_type_node)
    error ("use of boolean types in AltiVec types is invalid");
  else if (TREE_CODE (type) == COMPLEX_TYPE)
    error ("use of %<complex%> in AltiVec types is invalid");
  else if (DECIMAL_FLOAT_MODE_P (mode))
    error ("use of decimal floating point types in AltiVec types is invalid");
  else if (!TARGET_VSX)
    {
      if (type == long_unsigned_type_node || type == long_integer_type_node)
        {
          if (TARGET_64BIT)
            error ("use of %<long%> in AltiVec types is invalid for "
                   "64-bit code without %qs", "-mvsx");
          else if (rs6000_warn_altivec_long)
            warning (0, "use of %<long%> in AltiVec types is deprecated; "
                     "use %<int%>");
        }
      else if (type == long_long_unsigned_type_node
               || type == long_long_integer_type_node)
        error ("use of %<long long%> in AltiVec types is invalid without %qs",
               "-mvsx");
      else if (type == double_type_node)
        error ("use of %<double%> in AltiVec types is invalid without %qs",
               "-mvsx");
    }

  switch (altivec_type)
    {
    case 'v':
      unsigned_p = TYPE_UNSIGNED (type);
      switch (mode)
        {
        case E_TImode:
          result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
          break;
        case E_DImode:
          result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
          break;
        case E_SImode:
          result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
          break;
        case E_HImode:
          result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
          break;
        case E_QImode:
          result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
          break;
        case E_SFmode: result = V4SF_type_node; break;
        case E_DFmode: result = V2DF_type_node; break;
          /* If the user says 'vector int bool', we may be handed the 'bool'
             attribute _before_ the 'vector' attribute, and so select the
             proper type in the 'b' case below.  */
        case E_V4SImode: case E_V8HImode: case E_V16QImode: case E_V4SFmode:
        case E_V2DImode: case E_V2DFmode:
          result = type;
        default: break;
        }
      break;
    case 'b':
      switch (mode)
        {
        case E_DImode: case E_V2DImode: result = bool_V2DI_type_node; break;
        case E_SImode: case E_V4SImode: result = bool_V4SI_type_node; break;
        case E_HImode: case E_V8HImode: result = bool_V8HI_type_node; break;
        case E_QImode: case E_V16QImode: result = bool_V16QI_type_node;
        default: break;
        }
      break;
    case 'p':
      switch (mode)
        {
        case E_V8HImode: result = pixel_V8HI_type_node;
        default: break;
        }
    default: break;
    }

  /* Propagate qualifiers attached to the element type
     onto the vector type.  */
  if (result && result != type && TYPE_QUALS (type))
    result = build_qualified_type (result, TYPE_QUALS (type));

  *no_add_attrs = true;  /* No need to hang on to the attribute.  */

  if (result)
    *node = lang_hooks.types.reconstruct_complex_type (*node, result);

  return NULL_TREE;
}
/* AltiVec defines five built-in scalar types that serve as vector
   elements; we must teach the compiler how to mangle them.  The 128-bit
   floating point mangling is target-specific as well.  */

static const char *
rs6000_mangle_type (const_tree type)
{
  type = TYPE_MAIN_VARIANT (type);

  if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
      && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
    return NULL;

  if (type == bool_char_type_node) return "U6__boolc";
  if (type == bool_short_type_node) return "U6__bools";
  if (type == pixel_type_node) return "u7__pixel";
  if (type == bool_int_type_node) return "U6__booli";
  if (type == bool_long_long_type_node) return "U6__boolx";

  if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IBM_P (TYPE_MODE (type)))
    return "g";
  if (SCALAR_FLOAT_TYPE_P (type) && FLOAT128_IEEE_P (TYPE_MODE (type)))
    return ieee128_mangling_gcc_8_1 ? "U10__float128" : "u9__ieee128";

  /* For all other types, use the default mangling.  */
  return NULL;
}
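
/* Illustrative only: sample mangled spellings produced by the table above,
   using the Itanium vendor-extended forms ('u'/'U' + length + name).

     'vector bool int' element type  -> "U6__booli"  (vendor qualifier)
     'vector pixel' element type     -> "u7__pixel"  (vendor builtin type)
     IBM double-double 128-bit float -> "g"
     IEEE 128-bit float              -> "u9__ieee128" (or "U10__float128"
                                        for GCC 8.1 compatibility)  */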
/* Handle a "longcall" or "shortcall" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_longcall_attribute (tree *node, tree name,
                                  tree args ATTRIBUTE_UNUSED,
                                  int flags ATTRIBUTE_UNUSED,
                                  bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Set longcall attributes on all functions declared when
   rs6000_default_long_calls is true.  */
static void
rs6000_set_default_type_attributes (tree type)
{
  if (rs6000_default_long_calls
      && (TREE_CODE (type) == FUNCTION_TYPE
          || TREE_CODE (type) == METHOD_TYPE))
    TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
                                        NULL_TREE,
                                        TYPE_ATTRIBUTES (type));

#if TARGET_MACHO
  darwin_set_default_type_attributes (type);
#endif
}
/* Return a reference suitable for calling a function with the
   longcall attribute.  */

rtx
rs6000_longcall_ref (rtx call_ref)
{
  const char *call_name;
  tree node;

  if (GET_CODE (call_ref) != SYMBOL_REF)
    return call_ref;

  /* System V adds '.' to the internal name, so skip them.  */
  call_name = XSTR (call_ref, 0);
  if (*call_name == '.')
    {
      while (*call_name == '.')
        call_name++;

      node = get_identifier (call_name);
      call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
    }

  return force_reg (Pmode, call_ref);
}
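
/* Minimal usage sketch (not compiled): the longcall attribute that this
   helper supports.  A call to such a function is forced through a register
   (CTR) instead of a plain 'bl'.  The function name is hypothetical.  */
#if 0
extern void far_away_function (void) __attribute__ ((longcall));

void
caller_example (void)
{
  far_away_function ();  /* emitted via mtctr/bctrl rather than bl */
}
#endif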
#ifndef TARGET_USE_MS_BITFIELD_LAYOUT
#define TARGET_USE_MS_BITFIELD_LAYOUT 0
#endif
/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_struct_attribute (tree *node, tree name,
                                tree args ATTRIBUTE_UNUSED,
                                int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree *type = NULL;
  if (DECL_P (*node))
    {
      if (TREE_CODE (*node) == TYPE_DECL)
        type = &TREE_TYPE (*node);
    }
  else
    type = node;

  if (!(type && (TREE_CODE (*type) == RECORD_TYPE
                 || TREE_CODE (*type) == UNION_TYPE)))
    {
      warning (OPT_Wattributes, "%qE attribute ignored", name);
      *no_add_attrs = true;
    }

  else if ((is_attribute_p ("ms_struct", name)
            && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
           || ((is_attribute_p ("gcc_struct", name)
                && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
    {
      warning (OPT_Wattributes, "%qE incompatible attribute ignored",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
static bool
rs6000_ms_bitfield_layout_p (const_tree record_type)
{
  return (TARGET_USE_MS_BITFIELD_LAYOUT &&
          !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
    || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
}
#ifdef USING_ELFOS_H

/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
      && TARGET_MINIMAL_TOC)
    {
      if (!toc_initialized)
        {
          fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
          ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
          (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
          fprintf (asm_out_file, "\t.tc ");
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
          fprintf (asm_out_file, "\n");

          fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
          ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
          fprintf (asm_out_file, " = .+32768\n");
          toc_initialized = 1;
        }
      else
        fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
    }
  else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    {
      fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
      if (!toc_initialized)
        {
          ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
          toc_initialized = 1;
        }
    }
  else
    {
      fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
      if (!toc_initialized)
        {
          ASM_OUTPUT_ALIGN (asm_out_file, TARGET_64BIT ? 3 : 2);
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
          fprintf (asm_out_file, " = .+32768\n");
          toc_initialized = 1;
        }
    }
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_elf_asm_init_sections (void)
{
  toc_section
    = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);

  sdata2_section
    = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
                           SDATA2_SECTION_ASM_OP);
}
/* Implement TARGET_SELECT_RTX_SECTION.  */

static section *
rs6000_elf_select_rtx_section (machine_mode mode, rtx x,
                               unsigned HOST_WIDE_INT align)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}
/* For a SYMBOL_REF, set generic flags and then perform some
   target-specific processing.

   When the AIX ABI is requested on a non-AIX system, replace the
   function name with the real name (with a leading .) rather than the
   function descriptor name.  This saves a lot of overriding code to
   read the prefixes.  */

static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (first
      && TREE_CODE (decl) == FUNCTION_DECL
      && !TARGET_AIX
      && DEFAULT_ABI == ABI_AIX)
    {
      rtx sym_ref = XEXP (rtl, 0);
      size_t len = strlen (XSTR (sym_ref, 0));
      char *str = XALLOCAVEC (char, len + 2);
      str[0] = '.';
      memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
      XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
    }
}
static bool
compare_section_name (const char *section, const char *templ)
{
  int len;

  len = strlen (templ);
  return (strncmp (section, templ, len) == 0
          && (section[len] == 0 || section[len] == '.'));
}
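
/* Illustrative only: compare_section_name matches a section template
   exactly or as a dot-separated prefix, so

     compare_section_name (".sdata", ".sdata")     -> true
     compare_section_name (".sdata.foo", ".sdata") -> true  (section[len] == '.')
     compare_section_name (".sdata2", ".sdata")    -> false (section[len] == '2')  */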
bool
rs6000_elf_in_small_data_p (const_tree decl)
{
  if (rs6000_sdata == SDATA_NONE)
    return false;

  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (decl) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
    {
      const char *section = DECL_SECTION_NAME (decl);
      if (compare_section_name (section, ".sdata")
          || compare_section_name (section, ".sdata2")
          || compare_section_name (section, ".gnu.linkonce.s")
          || compare_section_name (section, ".sbss")
          || compare_section_name (section, ".sbss2")
          || compare_section_name (section, ".gnu.linkonce.sb")
          || strcmp (section, ".PPC.EMB.sdata0") == 0
          || strcmp (section, ".PPC.EMB.sbss0") == 0)
        return true;
    }
  else
    {
      /* If we are told not to put readonly data in sdata, then don't.  */
      if (TREE_READONLY (decl) && rs6000_sdata != SDATA_EABI
          && !rs6000_readonly_in_sdata)
        return false;

      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));

      if (size > 0
          && size <= g_switch_value
          /* If it's not public, and we're not going to reference it there,
             there's no need to put it in the small data section.  */
          && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
        return true;
    }

  return false;
}
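
/* Minimal sketch (not compiled): declarations the predicate above would
   place in small data, given -msdata and a suitable -G threshold.  The
   variable names are hypothetical.  */
#if 0
int counter_example;  /* small data if its size is <= g_switch_value */
int pinned_example __attribute__ ((section (".sdata")));  /* explicit */
#endif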
#endif /* USING_ELFOS_H */

/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  */

static bool
rs6000_use_blocks_for_constant_p (machine_mode mode, const_rtx x)
{
  return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
}

/* Do not place thread-local symbols refs in the object blocks.  */

static bool
rs6000_use_blocks_for_decl_p (const_tree decl)
{
  return !DECL_THREAD_LOCAL_P (decl);
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.

   r0 is special and we must not select it as an address
   register by this routine since our caller will try to
   increment the returned register via an "la" instruction.  */

rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG
          && REGNO (XEXP (addr, 0)) != 0)
        addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG
               && REGNO (XEXP (addr, 1)) != 0)
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
        addr = XEXP (addr, 0);
      else
        gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
  return addr;
}
void
rs6000_fatal_bad_address (rtx op)
{
  fatal_insn ("bad address", op);
}
#if TARGET_MACHO

typedef struct branch_island_d {
  tree function_name;
  tree label_name;
  int line_number;
} branch_island;

static vec<branch_island, va_gc> *branch_islands;
/* Remember to generate a branch island for far calls to the given
   function.  */

static void
add_compiler_branch_island (tree label_name, tree function_name,
                            int line_number)
{
  branch_island bi = {function_name, label_name, line_number};
  vec_safe_push (branch_islands, bi);
}
/* Generate far-jump branch islands for everything recorded in
   branch_islands.  Invoked immediately after the last instruction of
   the epilogue has been emitted; the branch islands must be appended
   to, and contiguous with, the function body.  Mach-O stubs are
   generated in machopic_output_stub().  */
static void
macho_branch_islands (void)
{
  char tmp_buf[512];

  while (!vec_safe_is_empty (branch_islands))
    {
      branch_island *bi = &branch_islands->last ();
      const char *label = IDENTIFIER_POINTER (bi->label_name);
      const char *name = IDENTIFIER_POINTER (bi->function_name);
      char name_buf[512];
      /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF().  */
      if (name[0] == '*' || name[0] == '&')
        strcpy (name_buf, name+1);
      else
        {
          name_buf[0] = '_';
          strcpy (name_buf+1, name);
        }
      strcpy (tmp_buf, "\n");
      strcat (tmp_buf, label);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
        dbxout_stabd (N_SLINE, bi->line_number);
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
      if (flag_pic)
        {
          if (TARGET_LINK_STACK)
            {
              char name[32];
              get_ppc476_thunk_name (name);
              strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
              strcat (tmp_buf, name);
              strcat (tmp_buf, "\n");
              strcat (tmp_buf, label);
              strcat (tmp_buf, "_pic:\n\tmflr r11\n");
            }
          else
            {
              strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
              strcat (tmp_buf, label);
              strcat (tmp_buf, "_pic\n");
              strcat (tmp_buf, label);
              strcat (tmp_buf, "_pic:\n\tmflr r11\n");
            }

          strcat (tmp_buf, "\taddis r11,r11,ha16(");
          strcat (tmp_buf, name_buf);
          strcat (tmp_buf, " - ");
          strcat (tmp_buf, label);
          strcat (tmp_buf, "_pic)\n");

          strcat (tmp_buf, "\tmtlr r0\n");

          strcat (tmp_buf, "\taddi r12,r11,lo16(");
          strcat (tmp_buf, name_buf);
          strcat (tmp_buf, " - ");
          strcat (tmp_buf, label);
          strcat (tmp_buf, "_pic)\n");

          strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
        }
      else
        {
          strcat (tmp_buf, ":\nlis r12,hi16(");
          strcat (tmp_buf, name_buf);
          strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
          strcat (tmp_buf, name_buf);
          strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
        }
      output_asm_insn (tmp_buf, 0);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
        dbxout_stabd (N_SLINE, bi->line_number);
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
      branch_islands->pop ();
    }
}
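
/* Illustrative only: the non-PIC island the loop above emits for a far
   call to a function 'foo' via a label 'L42' (names hypothetical):

     L42: lis r12,hi16(_foo)
          ori r12,r12,lo16(_foo)
          mtctr r12
          bctr  */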
/* NO_PREVIOUS_DEF checks in the link list whether the function name is
   already there or not.  */

static bool
no_previous_def (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return false;
  return true;
}
/* GET_PREV_LABEL gets the label name from the previous definition of
   the function.  */

static tree
get_prev_label (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return bi->label_name;
  return NULL_TREE;
}
/* INSN is either a function call or a millicode call.  It may have an
   unconditional jump in its delay slot.

   CALL_DEST is the routine we are calling.  */

char *
output_call (rtx_insn *insn, rtx *operands, int dest_operand_number,
             int cookie_operand_number)
{
  static char buf[256];
  if (darwin_emit_branch_islands
      && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
      && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
    {
      tree labelname;
      tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));

      if (no_previous_def (funname))
        {
          rtx label_rtx = gen_label_rtx ();
          char *label_buf, temp_buf[256];
          ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
                                       CODE_LABEL_NUMBER (label_rtx));
          label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
          labelname = get_identifier (label_buf);
          add_compiler_branch_island (labelname, funname, insn_line (insn));
        }
      else
        labelname = get_prev_label (funname);

      /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
         instruction will reach 'foo', otherwise link as 'bl L42'".
         "L42" should be a 'branch island', that will do a far jump to
         'foo'.  Branch islands are generated in
         macho_branch_islands().  */
      sprintf (buf, "jbsr %%z%d,%.246s",
               dest_operand_number, IDENTIFIER_POINTER (labelname));
    }
  else
    sprintf (buf, "bl %%z%d", dest_operand_number);
  return buf;
}
/* Generate PIC and indirect symbol stubs.  */

void
machopic_output_stub (FILE *file, const char *symb, const char *stub)
{
  unsigned int length;
  char *symbol_name, *lazy_ptr_name;
  char *local_label_0;
  static int label = 0;

  /* Lose our funky encoding stuff so it doesn't contaminate the stub.  */
  symb = (*targetm.strip_name_encoding) (symb);

  length = strlen (symb);

  symbol_name = XALLOCAVEC (char, length + 32);
  GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);

  lazy_ptr_name = XALLOCAVEC (char, length + 32);
  GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);

  if (flag_pic == 2)
    switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
  else
    switch_to_section (darwin_sections[machopic_symbol_stub1_section]);

  if (flag_pic == 2)
    {
      fprintf (file, "\t.align 5\n");

      fprintf (file, "%s:\n", stub);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

      label++;
      local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
      sprintf (local_label_0, "\"L%011d$spb\"", label);

      fprintf (file, "\tmflr r0\n");
      if (TARGET_LINK_STACK)
        {
          char name[32];
          get_ppc476_thunk_name (name);
          fprintf (file, "\tbl %s\n", name);
          fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
        }
      else
        {
          fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
          fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
        }
      fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
               lazy_ptr_name, local_label_0);
      fprintf (file, "\tmtlr r0\n");
      fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
               (TARGET_64BIT ? "ldu" : "lwzu"),
               lazy_ptr_name, local_label_0);
      fprintf (file, "\tmtctr r12\n");
      fprintf (file, "\tbctr\n");
    }
  else
    {
      fprintf (file, "\t.align 4\n");

      fprintf (file, "%s:\n", stub);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

      fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
      fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
               (TARGET_64BIT ? "ldu" : "lwzu"),
               lazy_ptr_name);
      fprintf (file, "\tmtctr r12\n");
      fprintf (file, "\tbctr\n");
    }

  switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
  fprintf (file, "%s:\n", lazy_ptr_name);
  fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
  fprintf (file, "%sdyld_stub_binding_helper\n",
           (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
}
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go into a reg.  This is REG if non
   zero, otherwise we allocate register(s) as necessary.  */

#define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
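
/* Illustrative only: SMALL_INT accepts exactly the signed 16-bit
   immediates, i.e. the range [-0x8000, 0x7fff].  */
#if 0
/* SMALL_INT (GEN_INT (0x7fff))  -> true
   SMALL_INT (GEN_INT (-0x8000)) -> true
   SMALL_INT (GEN_INT (0x8000))  -> false  */
#endif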
rtx
rs6000_machopic_legitimize_pic_address (rtx orig, machine_mode mode,
                                        rtx reg)
{
  rtx base, offset;

  if (reg == NULL && !reload_completed)
    reg = gen_reg_rtx (Pmode);

  if (GET_CODE (orig) == CONST)
    {
      rtx reg_temp;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      /* Use a different reg for the intermediate value, as
         it will be marked UNCHANGING.  */
      reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
      base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
                                                     Pmode, reg_temp);
      offset =
        rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
                                                Pmode, reg_temp);

      if (GET_CODE (offset) == CONST_INT)
        {
          if (SMALL_INT (offset))
            return plus_constant (Pmode, base, INTVAL (offset));
          else if (!reload_completed)
            offset = force_reg (Pmode, offset);
          else
            {
              rtx mem = force_const_mem (Pmode, orig);
              return machopic_legitimize_pic_address (mem, Pmode, reg);
            }
        }
      return gen_rtx_PLUS (Pmode, base, offset);
    }

  /* Fall back on generic machopic code.  */
  return machopic_legitimize_pic_address (orig, mode, reg);
}
/* Output a .machine directive for the Darwin assembler, and call
   the generic start_file routine.  */

static void
rs6000_darwin_file_start (void)
{
  static const struct
  {
    const char *arg;
    const char *name;
    HOST_WIDE_INT if_set;
  } mapping[] = {
    { "ppc64", "ppc64", MASK_64BIT },
    { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
    { "power4", "ppc970", 0 },
    { "G5", "ppc970", 0 },
    { "7450", "ppc7450", 0 },
    { "7400", "ppc7400", MASK_ALTIVEC },
    { "G4", "ppc7400", 0 },
    { "750", "ppc750", 0 },
    { "740", "ppc750", 0 },
    { "G3", "ppc750", 0 },
    { "604e", "ppc604e", 0 },
    { "604", "ppc604", 0 },
    { "603e", "ppc603", 0 },
    { "603", "ppc603", 0 },
    { "601", "ppc601", 0 },
    { NULL, "ppc", 0 } };
  const char *cpu_id = "";
  size_t i;

  rs6000_file_start ();
  darwin_file_start ();

  /* Determine the argument to -mcpu=.  Default to G3 if not specified.  */

  if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
    cpu_id = rs6000_default_cpu;

  if (global_options_set.x_rs6000_cpu_index)
    cpu_id = processor_target_table[rs6000_cpu_index].name;

  /* Look through the mapping array.  Pick the first name that either
     matches the argument, has a bit set in IF_SET that is also set
     in the target flags, or has a NULL name.  */

  i = 0;
  while (mapping[i].arg != NULL
         && strcmp (mapping[i].arg, cpu_id) != 0
         && (mapping[i].if_set & rs6000_isa_flags) == 0)
    i++;

  fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
}

#endif /* TARGET_MACHO */
static int
rs6000_elf_reloc_rw_mask (void)
{
  if (flag_pic)
    return 3;
  else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    return 2;
  else
    return 0;
}
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   This differs from default_named_section_asm_out_constructor in
   that we have special handling for -mrelocatable.  */

static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_asm_out_constructor (rtx symbol, int priority)
{
  const char *section = ".ctors";
  char buf[16];

  if (priority != DEFAULT_INIT_PRIORITY)
    {
      sprintf (buf, ".ctors.%.5u",
               /* Invert the numbering so the linker puts us in the proper
                  order; constructors are run from right to left, and the
                  linker sorts in increasing order.  */
               MAX_INIT_PRIORITY - priority);
      section = buf;
    }

  switch_to_section (get_section (section, SECTION_WRITE, NULL));
  assemble_align (POINTER_SIZE);

  if (DEFAULT_ABI == ABI_V4
      && (TARGET_RELOCATABLE || flag_pic > 1))
    {
      fputs ("\t.long (", asm_out_file);
      output_addr_const (asm_out_file, symbol);
      fputs (")@fixup\n", asm_out_file);
    }
  else
    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}
static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_asm_out_destructor (rtx symbol, int priority)
{
  const char *section = ".dtors";
  char buf[16];

  if (priority != DEFAULT_INIT_PRIORITY)
    {
      sprintf (buf, ".dtors.%.5u",
               /* Invert the numbering so the linker puts us in the proper
                  order; constructors are run from right to left, and the
                  linker sorts in increasing order.  */
               MAX_INIT_PRIORITY - priority);
      section = buf;
    }

  switch_to_section (get_section (section, SECTION_WRITE, NULL));
  assemble_align (POINTER_SIZE);

  if (DEFAULT_ABI == ABI_V4
      && (TARGET_RELOCATABLE || flag_pic > 1))
    {
      fputs ("\t.long (", asm_out_file);
      output_addr_const (asm_out_file, symbol);
      fputs (")@fixup\n", asm_out_file);
    }
  else
    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}
void
rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
{
  if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
    {
      fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
      ASM_OUTPUT_LABEL (file, name);
      fputs (DOUBLE_INT_ASM_OP, file);
      rs6000_output_function_entry (file, name);
      fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
      if (DOT_SYMBOLS)
        {
          fputs ("\t.size\t", file);
          assemble_name (file, name);
          fputs (",24\n\t.type\t.", file);
          assemble_name (file, name);
          fputs (",@function\n", file);
          if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
            {
              fputs ("\t.globl\t.", file);
              assemble_name (file, name);
              putc ('\n', file);
            }
        }
      else
        ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
      ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
      rs6000_output_function_entry (file, name);
      fputs (":\n", file);
      return;
    }

  int uses_toc;
  if (DEFAULT_ABI == ABI_V4
      && (TARGET_RELOCATABLE || flag_pic > 1)
      && !TARGET_SECURE_PLT
      && (!constant_pool_empty_p () || crtl->profile)
      && (uses_toc = uses_TOC ()))
    {
      char buf[256];

      if (uses_toc == 2)
        switch_to_other_text_partition ();
      (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);

      fprintf (file, "\t.long ");
      assemble_name (file, toc_label_name);
      need_toc_init = 1;
      putc ('-', file);
      ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
      assemble_name (file, buf);
      putc ('\n', file);
      if (uses_toc == 2)
        switch_to_other_text_partition ();
    }

  ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
  ASM_DECLARE_RESULT (file, DECL_RESULT (decl));

  if (TARGET_CMODEL == CMODEL_LARGE && rs6000_global_entry_point_needed_p ())
    {
      char buf[256];

      (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);

      fprintf (file, "\t.quad .TOC.-");
      ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
      assemble_name (file, buf);
      putc ('\n', file);
    }

  if (DEFAULT_ABI == ABI_AIX)
    {
      const char *desc_name, *orig_name;

      orig_name = (*targetm.strip_name_encoding) (name);
      desc_name = orig_name;
      while (*desc_name == '.')
        desc_name++;

      if (TREE_PUBLIC (decl))
        fprintf (file, "\t.globl %s\n", desc_name);

      fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
      fprintf (file, "%s:\n", desc_name);
      fprintf (file, "\t.long %s\n", orig_name);
      fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
      fputs ("\t.long 0\n", file);
      fprintf (file, "\t.previous\n");
    }
  ASM_OUTPUT_LABEL (file, name);
}
static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_elf_file_end (void)
{
#ifdef HAVE_AS_GNU_ATTRIBUTE
  /* ??? The value emitted depends on options active at file end.
     Assume anyone using #pragma or attributes that might change
     options knows what they are doing.  */
  if ((TARGET_64BIT || DEFAULT_ABI == ABI_V4)
      && rs6000_passes_float)
    {
      int fp;

      if (TARGET_HARD_FLOAT)
        fp = 1;
      else
        fp = 2;
      if (rs6000_passes_long_double)
        {
          if (!TARGET_LONG_DOUBLE_128)
            fp |= 2 * 4;
          else if (TARGET_IEEEQUAD)
            fp |= 3 * 4;
          else
            fp |= 1 * 4;
        }
      fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n", fp);
    }
  if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
    {
      if (rs6000_passes_vector)
        fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
                 (TARGET_ALTIVEC_ABI ? 2 : 1));
      if (rs6000_returns_struct)
        fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
                 aix_struct_return ? 2 : 1);
    }
#endif
#if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
  if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
    file_end_indicate_exec_stack ();
#endif

  if (flag_split_stack)
    file_end_indicate_split_stack ();

  if (cpu_builtin_p)
    {
      /* We have expanded a CPU builtin, so we need to emit a reference to
         the special symbol that LIBC uses to declare it supports the
         AT_PLATFORM and AT_HWCAP/AT_HWCAP2 in the TCB feature.  */
      switch_to_section (data_section);
      fprintf (asm_out_file, "\t.align %u\n", TARGET_32BIT ? 2 : 3);
      fprintf (asm_out_file, "\t%s %s\n",
               TARGET_32BIT ? ".long" : ".quad", tcb_verification_symbol);
    }
}

#if TARGET_XCOFF
#ifndef HAVE_XCOFF_DWARF_EXTRAS
#define HAVE_XCOFF_DWARF_EXTRAS 0
#endif

static enum unwind_info_type
rs6000_xcoff_debug_unwind_info (void)
{
  return UI_NONE;
}

static void
rs6000_xcoff_asm_output_anchor (rtx symbol)
{
  char buffer[100];

  sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
           SYMBOL_REF_BLOCK_OFFSET (symbol));
  fprintf (asm_out_file, "%s", SET_ASM_OP);
  RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
  fprintf (asm_out_file, ",");
  RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
  fprintf (asm_out_file, "\n");
}
static void
rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
{
  fputs (GLOBAL_ASM_OP, stream);
  RS6000_OUTPUT_BASENAME (stream, name);
  putc ('\n', stream);
}
/* A get_unnamed_decl callback, used for read-only sections.  PTR
   points to the section string variable.  */

static void
rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
           *(const char *const *) directive,
           XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

/* Likewise for read-write sections.  */

static void
rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
           *(const char *const *) directive,
           XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

static void
rs6000_xcoff_output_tls_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
           *(const char *const *) directive,
           XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}
/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if (TARGET_MINIMAL_TOC)
    {
      /* toc_section is always selected at least once from
         rs6000_xcoff_file_start, so this is guaranteed to
         always be defined once and only once in each file.  */
      if (!toc_initialized)
        {
          fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
          fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
          toc_initialized = 1;
        }
      fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
               (TARGET_32BIT ? "" : ",3"));
    }
  else
    fputs ("\t.toc\n", asm_out_file);
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_xcoff_asm_init_sections (void)
{
  read_only_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
                           &xcoff_read_only_section_name);

  private_data_section
    = get_unnamed_section (SECTION_WRITE,
                           rs6000_xcoff_output_readwrite_section_asm_op,
                           &xcoff_private_data_section_name);

  tls_data_section
    = get_unnamed_section (SECTION_TLS,
                           rs6000_xcoff_output_tls_section_asm_op,
                           &xcoff_tls_data_section_name);

  tls_private_data_section
    = get_unnamed_section (SECTION_TLS,
                           rs6000_xcoff_output_tls_section_asm_op,
                           &xcoff_private_data_section_name);

  read_only_private_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
                           &xcoff_private_data_section_name);

  toc_section
    = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);

  readonly_data_section = read_only_data_section;
}
static int
rs6000_xcoff_reloc_rw_mask (void)
{
  return 3;
}

static void
rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
                                tree decl ATTRIBUTE_UNUSED)
{
  int smclass;
  static const char * const suffix[5] = { "PR", "RO", "RW", "TL", "XO" };

  if (flags & SECTION_EXCLUDE)
    smclass = 4;
  else if (flags & SECTION_DEBUG)
    {
      fprintf (asm_out_file, "\t.dwsect %s\n", name);
      return;
    }
  else if (flags & SECTION_CODE)
    smclass = 0;
  else if (flags & SECTION_TLS)
    smclass = 3;
  else if (flags & SECTION_WRITE)
    smclass = 2;
  else
    smclass = 1;

  fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
           (flags & SECTION_CODE) ? "." : "",
           name, suffix[smclass], flags & SECTION_ENTSIZE);
}
#define IN_NAMED_SECTION(DECL) \
  ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
   && DECL_SECTION_NAME (DECL) != NULL)

static section *
rs6000_xcoff_select_section (tree decl, int reloc,
                             unsigned HOST_WIDE_INT align)
{
  /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
     named section.  */
  if (align > BIGGEST_ALIGNMENT)
    {
      resolve_unique_section (decl, reloc, true);
      if (IN_NAMED_SECTION (decl))
        return get_named_section (decl, NULL, reloc);
    }

  if (decl_readonly_section (decl, reloc))
    {
      if (TREE_PUBLIC (decl))
        return read_only_data_section;
      else
        return read_only_private_data_section;
    }
  else
    {
#if HAVE_AS_TLS
      if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
        {
          if (TREE_PUBLIC (decl))
            return tls_data_section;
          else if (bss_initializer_p (decl))
            {
              /* Convert to COMMON to emit in BSS.  */
              DECL_COMMON (decl) = 1;
              return tls_comm_section;
            }
          else
            return tls_private_data_section;
        }
      else
#endif
        if (TREE_PUBLIC (decl))
          return data_section;
        else
          return private_data_section;
    }
}
static void
rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;

  /* Use select_section for private data and uninitialized data with
     alignment <= BIGGEST_ALIGNMENT.  */
  if (!TREE_PUBLIC (decl)
      || DECL_COMMON (decl)
      || (DECL_INITIAL (decl) == NULL_TREE
          && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
      || DECL_INITIAL (decl) == error_mark_node
      || (flag_zero_initialized_in_bss
          && initializer_zerop (DECL_INITIAL (decl))))
    return;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = (*targetm.strip_name_encoding) (name);
  set_decl_section_name (decl, name);
}
/* Select section for constant in constant pool.

   On RS/6000, all constants are in the private read-only data area.
   However, if this is being placed in the TOC it must be output as a
   toc entry.  */

static section *
rs6000_xcoff_select_rtx_section (machine_mode mode, rtx x,
                                 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return read_only_private_data_section;
}
/* Remove any trailing [DS] or the like from the symbol name.  */

static const char *
rs6000_xcoff_strip_name_encoding (const char *name)
{
  size_t len;
  if (*name == '*')
    name++;
  len = strlen (name);
  if (name[len - 1] == ']')
    return ggc_alloc_string (name, len - 4);
  else
    return name;
}
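
/* Illustrative only: the mapping-class suffix stripping done above.

     "foo[DS]" -> "foo"   (trailing ']' => drop the 4-character "[XX]")
     "*bar"    -> "bar"   (leading '*' skipped)
     "baz"     -> "baz"   (returned unchanged)  */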
/* Section attributes.  AIX is always PIC.  */

static unsigned int
rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int align;
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  /* Align to at least UNIT size.  */
  if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
    align = MIN_UNITS_PER_WORD;
  else
    /* Increase alignment of large objects if not already stricter.  */
    align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
                 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
                 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);

  return flags | (exact_log2 (align) & SECTION_ENTSIZE);
}
/* Output at beginning of assembler file.

   Initialize the section names for the RS/6000 at this point.

   Specify filename, including full path, to assembler.

   We want to go into the TOC section so at least one .toc will be emitted.
   Also, in order to output proper .bs/.es pairs, we need at least one static
   [RW] section emitted.

   Finally, declare mcount when profiling to make the assembler happy.  */

static void
rs6000_xcoff_file_start (void)
{
  rs6000_gen_section_name (&xcoff_bss_section_name,
                           main_input_filename, ".bss_");
  rs6000_gen_section_name (&xcoff_private_data_section_name,
                           main_input_filename, ".rw_");
  rs6000_gen_section_name (&xcoff_read_only_section_name,
                           main_input_filename, ".ro_");
  rs6000_gen_section_name (&xcoff_tls_data_section_name,
                           main_input_filename, ".tls_");
  rs6000_gen_section_name (&xcoff_tbss_section_name,
                           main_input_filename, ".tbss_[UL]");

  fputs ("\t.file\t", asm_out_file);
  output_quoted_string (asm_out_file, main_input_filename);
  fputc ('\n', asm_out_file);
  if (write_symbols != NO_DEBUG)
    switch_to_section (private_data_section);
  switch_to_section (toc_section);
  switch_to_section (text_section);
  if (profile_flag)
    fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
  rs6000_file_start ();
}
/* Output at end of assembler file.
   On the RS/6000, referencing data should automatically pull in text.  */

static void
rs6000_xcoff_file_end (void)
{
  switch_to_section (text_section);
  fputs ("_section_.text:\n", asm_out_file);
  switch_to_section (data_section);
  fputs (TARGET_32BIT
         ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
         asm_out_file);
}
struct declare_alias_data
{
  FILE *file;
  bool function_descriptor;
};

/* Declare alias N.  A helper function for for_node_and_aliases.  */

static bool
rs6000_declare_alias (struct symtab_node *n, void *d)
{
  struct declare_alias_data *data = (struct declare_alias_data *)d;
  /* Main symbol is output specially, because varasm machinery does part of
     the job for us - we do not need to declare .globl/lglobs and such.  */
  if (!n->alias || n->weakref)
    return false;

  if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
    return false;

  /* Prevent assemble_alias from trying to use .set pseudo operation
     that does not behave as expected by the middle-end.  */
  TREE_ASM_WRITTEN (n->decl) = true;

  const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
  char *buffer = (char *) alloca (strlen (name) + 2);
  char *p;
  int dollar_inside = 0;

  strcpy (buffer, name);
  p = strchr (buffer, '$');
  while (p) {
    *p = '_';
    dollar_inside++;
    p = strchr (p + 1, '$');
  }
  if (TREE_PUBLIC (n->decl))
    {
      if (!RS6000_WEAK || !DECL_WEAK (n->decl))
        {
          if (dollar_inside) {
              if (data->function_descriptor)
                fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
              fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
            }
          if (data->function_descriptor)
            {
              fputs ("\t.globl .", data->file);
              RS6000_OUTPUT_BASENAME (data->file, buffer);
              putc ('\n', data->file);
            }
          fputs ("\t.globl ", data->file);
          RS6000_OUTPUT_BASENAME (data->file, buffer);
          putc ('\n', data->file);
        }
#ifdef ASM_WEAKEN_DECL
      else if (DECL_WEAK (n->decl) && !data->function_descriptor)
        ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
#endif
    }
  else
    {
      if (dollar_inside)
        {
          if (data->function_descriptor)
            fprintf(data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
          fprintf(data->file, "\t.rename %s,\"%s\"\n", buffer, name);
        }
      if (data->function_descriptor)
        {
          fputs ("\t.lglobl .", data->file);
          RS6000_OUTPUT_BASENAME (data->file, buffer);
          putc ('\n', data->file);
        }
      fputs ("\t.lglobl ", data->file);
      RS6000_OUTPUT_BASENAME (data->file, buffer);
      putc ('\n', data->file);
    }
  if (data->function_descriptor)
    fputs (".", data->file);
  RS6000_OUTPUT_BASENAME (data->file, buffer);
  fputs (":\n", data->file);
  return false;
}
#ifdef HAVE_GAS_HIDDEN
/* Helper function to calculate visibility of a DECL
   and return the value as a const string.  */

static const char *
rs6000_xcoff_visibility (tree decl)
{
  static const char * const visibility_types[] = {
    "", ",protected", ",hidden", ",internal"
  };

  enum symbol_visibility vis = DECL_VISIBILITY (decl);
  return visibility_types[vis];
}
#endif
/* This macro produces the initial definition of a function name.
   On the RS/6000, we need to place an extra '.' in the function name and
   output the function descriptor.
   Dollar signs are converted to underscores.

   The csect for the function will have already been created when
   text_section was selected.  We do have to go back to that csect, however.

   The third and fourth parameters to the .function pseudo-op (16 and 044)
   are placeholders which no longer have any use.

   Because AIX assembler's .set command has unexpected semantics, we output
   all aliases as alternative labels in front of the definition.  */

void
rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
{
  char *buffer = (char *) alloca (strlen (name) + 1);
  char *p;
  int dollar_inside = 0;
  struct declare_alias_data data = {file, false};

  strcpy (buffer, name);
  p = strchr (buffer, '$');
  while (p) {
    *p = '_';
    dollar_inside++;
    p = strchr (p + 1, '$');
  }
  if (TREE_PUBLIC (decl))
    {
      if (!RS6000_WEAK || !DECL_WEAK (decl))
        {
          if (dollar_inside) {
              fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
              fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
            }
          fputs ("\t.globl .", file);
          RS6000_OUTPUT_BASENAME (file, buffer);
#ifdef HAVE_GAS_HIDDEN
          fputs (rs6000_xcoff_visibility (decl), file);
#endif
          putc ('\n', file);
        }
    }
  else
    {
      if (dollar_inside) {
          fprintf(file, "\t.rename .%s,\".%s\"\n", buffer, name);
          fprintf(file, "\t.rename %s,\"%s\"\n", buffer, name);
        }
      fputs ("\t.lglobl .", file);
      RS6000_OUTPUT_BASENAME (file, buffer);
      putc ('\n', file);
    }
  fputs ("\t.csect ", file);
  RS6000_OUTPUT_BASENAME (file, buffer);
  fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
  RS6000_OUTPUT_BASENAME (file, buffer);
  fputs (":\n", file);
  symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
                                                        &data, true);
  fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
  RS6000_OUTPUT_BASENAME (file, buffer);
  fputs (", TOC[tc0], 0\n", file);

  in_section = NULL;
  switch_to_section (function_section (decl));
  putc ('.', file);
  RS6000_OUTPUT_BASENAME (file, buffer);
  fputs (":\n", file);
  data.function_descriptor = true;
  symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
                                                        &data, true);
  if (!DECL_IGNORED_P (decl))
    {
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
        xcoffout_declare_function (file, decl, buffer);
      else if (write_symbols == DWARF2_DEBUG)
        {
          name = (*targetm.strip_name_encoding) (name);
          fprintf (file, "\t.function .%s,.%s,2,0\n", name, name);
        }
    }
  return;
}
/* Output assembly language to globalize a symbol from a DECL,
   possibly with visibility.  */

void
rs6000_xcoff_asm_globalize_decl_name (FILE *stream, tree decl)
{
  const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
  fputs (GLOBAL_ASM_OP, stream);
  RS6000_OUTPUT_BASENAME (stream, name);
#ifdef HAVE_GAS_HIDDEN
  fputs (rs6000_xcoff_visibility (decl), stream);
#endif
  putc ('\n', stream);
}
/* Output assembly language to define a symbol as COMMON from a DECL,
   possibly with visibility.  */

void
rs6000_xcoff_asm_output_aligned_decl_common (FILE *stream,
                                             tree decl ATTRIBUTE_UNUSED,
                                             const char *name,
                                             unsigned HOST_WIDE_INT size,
                                             unsigned HOST_WIDE_INT align)
{
  unsigned HOST_WIDE_INT align2 = 2;

  if (align > 32)
    align2 = floor_log2 (align / BITS_PER_UNIT);
  else if (size > 4)
    align2 = 3;

  fputs (COMMON_ASM_OP, stream);
  RS6000_OUTPUT_BASENAME (stream, name);

  fprintf (stream,
           "," HOST_WIDE_INT_PRINT_UNSIGNED "," HOST_WIDE_INT_PRINT_UNSIGNED,
           size, align2);

#ifdef HAVE_GAS_HIDDEN
  if (decl != NULL)
    fputs (rs6000_xcoff_visibility (decl), stream);
#endif
  putc ('\n', stream);
}
/* This macro produces the initial definition of a object (variable) name.
   Because AIX assembler's .set command has unexpected semantics, we output
   all aliases as alternative labels in front of the definition.  */

void
rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
{
  struct declare_alias_data data = {file, false};
  RS6000_OUTPUT_BASENAME (file, name);
  fputs (":\n", file);
  symtab_node::get_create (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
                                                               &data, true);
}
/* Override the default 'SYMBOL-.' syntax with AIX compatible 'SYMBOL-$'.  */

void
rs6000_asm_output_dwarf_pcrel (FILE *file, int size, const char *label)
{
  fputs (integer_asm_op (size, FALSE), file);
  assemble_name (file, label);
  fputs ("-$", file);
}
/* Output a symbol offset relative to the dbase for the current object.
   We use __gcc_unwind_dbase as an arbitrary base for dbase and assume
   signed offsets.

   __gcc_unwind_dbase is embedded in all executables/libraries through
   libgcc/config/rs6000/crtdbase.S.  */

void
rs6000_asm_output_dwarf_datarel (FILE *file, int size, const char *label)
{
  fputs (integer_asm_op (size, FALSE), file);
  assemble_name (file, label);
  fputs ("-__gcc_unwind_dbase", file);
}
#ifdef HAVE_AS_TLS
static void
rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
{
  rtx symbol;
  int flags;
  const char *symname;

  default_encode_section_info (decl, rtl, first);

  /* Careful not to prod global register variables.  */
  if (!MEM_P (rtl))
    return;
  symbol = XEXP (rtl, 0);
  if (GET_CODE (symbol) != SYMBOL_REF)
    return;

  flags = SYMBOL_REF_FLAGS (symbol);

  if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
    flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;

  SYMBOL_REF_FLAGS (symbol) = flags;

  /* Append mapping class to extern decls.  */
  symname = XSTR (symbol, 0);
  if (decl /* sync condition with assemble_external () */
      && DECL_P (decl) && DECL_EXTERNAL (decl) && TREE_PUBLIC (decl)
      && ((TREE_CODE (decl) == VAR_DECL && !DECL_THREAD_LOCAL_P (decl))
	  || TREE_CODE (decl) == FUNCTION_DECL)
      && symname[strlen (symname) - 1] != ']')
    {
      char *newname = (char *) alloca (strlen (symname) + 5);
      strcpy (newname, symname);
      strcat (newname, (TREE_CODE (decl) == FUNCTION_DECL
			? "[DS]" : "[UA]"));
      XSTR (symbol, 0) = ggc_strdup (newname);
    }
}
#endif /* HAVE_AS_TLS */
#endif /* TARGET_XCOFF */
void
rs6000_asm_weaken_decl (FILE *stream, tree decl,
			const char *name, const char *val)
{
  fputs ("\t.weak\t", stream);
  RS6000_OUTPUT_BASENAME (stream, name);
  if (decl && TREE_CODE (decl) == FUNCTION_DECL
      && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
    {
      if (TARGET_XCOFF)
	fputs ("[DS]", stream);
#if TARGET_XCOFF && HAVE_GAS_HIDDEN
      if (decl)
	fputs (rs6000_xcoff_visibility (decl), stream);
#endif
      fputs ("\n\t.weak\t.", stream);
      RS6000_OUTPUT_BASENAME (stream, name);
    }
#if TARGET_XCOFF && HAVE_GAS_HIDDEN
  if (decl)
    fputs (rs6000_xcoff_visibility (decl), stream);
#endif
  fputc ('\n', stream);
  if (val)
    {
#ifdef ASM_OUTPUT_DEF
      ASM_OUTPUT_DEF (stream, name, val);
#endif
      if (decl && TREE_CODE (decl) == FUNCTION_DECL
	  && DEFAULT_ABI == ABI_AIX && DOT_SYMBOLS)
	{
	  fputs ("\t.set\t.", stream);
	  RS6000_OUTPUT_BASENAME (stream, name);
	  fputs (",.", stream);
	  RS6000_OUTPUT_BASENAME (stream, val);
	  fputc ('\n', stream);
	}
    }
}
/* Return true if INSN should not be copied.  */

static bool
rs6000_cannot_copy_insn_p (rtx_insn *insn)
{
  return recog_memoized (insn) >= 0
	 && get_attr_cannot_copy (insn);
}
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
rs6000_rtx_costs (rtx x, machine_mode mode, int outer_code,
		  int opno ATTRIBUTE_UNUSED, int *total, bool speed)
{
  int code = GET_CODE (x);

  switch (code)
    {
      /* On the RS/6000, if it is valid in the insn, it is free.  */
    case CONST_INT:
      if (((outer_code == SET
	    || outer_code == PLUS
	    || outer_code == MINUS)
	   && (satisfies_constraint_I (x)
	       || satisfies_constraint_L (x)))
	  || (outer_code == AND
	      && (satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))))
	  || ((outer_code == IOR || outer_code == XOR)
	      && (satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))))
	  || outer_code == ASHIFT
	  || outer_code == ASHIFTRT
	  || outer_code == LSHIFTRT
	  || outer_code == ROTATE
	  || outer_code == ROTATERT
	  || outer_code == ZERO_EXTRACT
	  || (outer_code == MULT
	      && satisfies_constraint_I (x))
	  || ((outer_code == DIV || outer_code == UDIV
	       || outer_code == MOD || outer_code == UMOD)
	      && exact_log2 (INTVAL (x)) >= 0)
	  || (outer_code == COMPARE
	      && (satisfies_constraint_I (x)
		  || satisfies_constraint_K (x)))
	  || ((outer_code == EQ || outer_code == NE)
	      && (satisfies_constraint_I (x)
		  || satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))))
	  || (outer_code == GTU
	      && satisfies_constraint_I (x))
	  || (outer_code == LTU
	      && satisfies_constraint_P (x)))
	{
	  *total = 0;
	  return true;
	}
      else if ((outer_code == PLUS
		&& reg_or_add_cint_operand (x, VOIDmode))
	       || (outer_code == MINUS
		   && reg_or_sub_cint_operand (x, VOIDmode))
	       || ((outer_code == SET
		    || outer_code == IOR
		    || outer_code == XOR)
		   && (INTVAL (x)
		       & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      /* FALLTHRU */

    case CONST_DOUBLE:
    case CONST_WIDE_INT:
    case CONST:
    case HIGH:
    case SYMBOL_REF:
      *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
      return true;

    case MEM:
      /* When optimizing for size, MEM should be slightly more expensive
	 than generating address, e.g., (plus (reg) (const)).
	 L1 cache latency is about two instructions.  */
      *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
      if (rs6000_slow_unaligned_access (mode, MEM_ALIGN (x)))
	*total += COSTS_N_INSNS (100);
      return true;

    case PLUS:
    case MINUS:
      if (FLOAT_MODE_P (mode))
	*total = rs6000_cost->fp;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && satisfies_constraint_I (XEXP (x, 1)))
	{
	  if (INTVAL (XEXP (x, 1)) >= -256
	      && INTVAL (XEXP (x, 1)) <= 255)
	    *total = rs6000_cost->mulsi_const9;
	  else
	    *total = rs6000_cost->mulsi_const;
	}
      else if (mode == SFmode)
	*total = rs6000_cost->fp;
      else if (FLOAT_MODE_P (mode))
	*total = rs6000_cost->dmul;
      else if (mode == DImode)
	*total = rs6000_cost->muldi;
      else
	*total = rs6000_cost->mulsi;
      return false;

    case FMA:
      if (mode == SFmode)
	*total = rs6000_cost->fp;
      else
	*total = rs6000_cost->dmul;
      break;

    case DIV:
    case MOD:
      if (FLOAT_MODE_P (mode))
	{
	  *total = mode == DFmode ? rs6000_cost->ddiv
				  : rs6000_cost->sdiv;
	  return false;
	}
      /* FALLTHRU */

    case UDIV:
    case UMOD:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
	{
	  if (code == DIV || code == MOD)
	    /* Shift, addze */
	    *total = COSTS_N_INSNS (2);
	  else
	    /* Shift */
	    *total = COSTS_N_INSNS (1);
	}
      else
	{
	  if (GET_MODE (XEXP (x, 1)) == DImode)
	    *total = rs6000_cost->divdi;
	  else
	    *total = rs6000_cost->divsi;
	}
      /* Add in shift and subtract for MOD unless we have a mod instruction. */
      if (!TARGET_MODULO && (code == MOD || code == UMOD))
	*total += COSTS_N_INSNS (2);
      return false;

    case CTZ:
      *total = COSTS_N_INSNS (TARGET_CTZ ? 1 : 4);
      return false;

    case FFS:
      *total = COSTS_N_INSNS (4);
      return false;

    case POPCOUNT:
      *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
      return false;

    case PARITY:
      *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
      return false;

    case NOT:
      if (outer_code == AND || outer_code == IOR || outer_code == XOR)
	*total = 0;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case AND:
      if (CONST_INT_P (XEXP (x, 1)))
	{
	  rtx left = XEXP (x, 0);
	  rtx_code left_code = GET_CODE (left);

	  /* rotate-and-mask: 1 insn.  */
	  if ((left_code == ROTATE
	       || left_code == ASHIFT
	       || left_code == LSHIFTRT)
	      && rs6000_is_valid_shift_mask (XEXP (x, 1), left, mode))
	    {
	      *total = rtx_cost (XEXP (left, 0), mode, left_code, 0, speed);
	      if (!CONST_INT_P (XEXP (left, 1)))
		*total += rtx_cost (XEXP (left, 1), SImode, left_code, 1, speed);
	      *total += COSTS_N_INSNS (1);
	      return true;
	    }

	  /* rotate-and-mask (no rotate), andi., andis.: 1 insn.  */
	  HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
	  if (rs6000_is_valid_and_mask (XEXP (x, 1), mode)
	      || (val & 0xffff) == val
	      || (val & 0xffff0000) == val
	      || ((val & 0xffff) == 0 && mode == SImode))
	    {
	      *total = rtx_cost (left, mode, AND, 0, speed);
	      *total += COSTS_N_INSNS (1);
	      return true;
	    }

	  /* 2 insns.  */
	  if (rs6000_is_valid_2insn_and (XEXP (x, 1), mode))
	    {
	      *total = rtx_cost (left, mode, AND, 0, speed);
	      *total += COSTS_N_INSNS (2);
	      return true;
	    }
	}

      *total = COSTS_N_INSNS (1);
      return false;

    case IOR:
      /* FALLTHRU */
    case CLZ:
      *total = COSTS_N_INSNS (1);
      return false;

    case XOR:
    case ZERO_EXTRACT:
      *total = COSTS_N_INSNS (1);
      return false;

    case ASHIFT:
      /* The EXTSWSLI instruction is a combined instruction.  Don't count both
	 the sign extend and shift separately within the insn.  */
      if (TARGET_EXTSWSLI && mode == DImode
	  && GET_CODE (XEXP (x, 0)) == SIGN_EXTEND
	  && GET_MODE (XEXP (XEXP (x, 0), 0)) == SImode)
	{
	  *total = 0;
	  return false;
	}
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* Handle mul_highpart.  */
      if (outer_code == TRUNCATE
	  && GET_CODE (XEXP (x, 0)) == MULT)
	{
	  if (mode == DImode)
	    *total = rs6000_cost->muldi;
	  else
	    *total = rs6000_cost->mulsi;
	  return true;
	}
      else if (outer_code == AND)
	*total = 0;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (GET_CODE (XEXP (x, 0)) == MEM)
	*total = 0;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case COMPARE:
    case NEG:
    case ABS:
      if (!FLOAT_MODE_P (mode))
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = rs6000_cost->fp;
      return false;

    case FLOAT_EXTEND:
      if (mode == DFmode)
	*total = rs6000_cost->sfdf_convert;
      else
	*total = rs6000_cost->fp;
      return false;

    case UNSPEC:
      switch (XINT (x, 1))
	{
	case UNSPEC_FRSP:
	  *total = rs6000_cost->fp;
	  return true;

	default:
	  break;
	}
      break;

    case CALL:
    case IF_THEN_ELSE:
      if (!speed)
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      else if (FLOAT_MODE_P (mode) && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT)
	{
	  *total = rs6000_cost->fp;
	  return false;
	}
      break;

    case NE:
    case EQ:
    case GTU:
    case LTU:
      /* Carry bit requires mode == Pmode.
	 NEG or PLUS already counted so only add one.  */
      if (mode == Pmode
	  && (outer_code == NEG || outer_code == PLUS))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      /* FALLTHRU */

    case GT:
    case LT:
    case UNORDERED:
      if (outer_code == SET)
	{
	  if (XEXP (x, 1) == const0_rtx)
	    {
	      *total = COSTS_N_INSNS (2);
	      return true;
	    }
	  else
	    {
	      *total = COSTS_N_INSNS (3);
	      return false;
	    }
	}
      /* CC COMPARE.  */
      if (outer_code == COMPARE)
	{
	  *total = 0;
	  return true;
	}
      break;

    default:
      break;
    }

  return false;
}
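/* Editorial example, added for exposition (not from the original source):
   in (plus (reg) (const_int 12)) the constant satisfies the "I"
   constraint (a signed 16-bit immediate), so the CONST_INT case above
   reports it as free -- the addition becomes a single addi and the
   constant needs no separate instruction.  A 32-bit constant such as
   0x12345678 used under SET instead matches the second test and is
   costed at one instruction.  */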
/* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost.  */

static bool
rs6000_debug_rtx_costs (rtx x, machine_mode mode, int outer_code,
			int opno, int *total, bool speed)
{
  bool ret = rs6000_rtx_costs (x, mode, outer_code, opno, total, speed);

  fprintf (stderr,
	   "\nrs6000_rtx_costs, return = %s, mode = %s, outer_code = %s, "
	   "opno = %d, total = %d, speed = %s, x:\n",
	   ret ? "complete" : "scan inner",
	   GET_MODE_NAME (mode),
	   GET_RTX_NAME (outer_code),
	   opno,
	   *total,
	   speed ? "true" : "false");

  debug_rtx (x);

  return ret;
}
static int
rs6000_insn_cost (rtx_insn *insn, bool speed)
{
  if (recog_memoized (insn) < 0)
    return 0;

  if (!speed)
    return get_attr_length (insn);

  int cost = get_attr_cost (insn);
  if (cost > 0)
    return cost;

  int n = get_attr_length (insn) / 4;
  enum attr_type type = get_attr_type (insn);

  switch (type)
    {
    case TYPE_LOAD:
    case TYPE_FPLOAD:
    case TYPE_VECLOAD:
      cost = COSTS_N_INSNS (n + 1);
      break;

    case TYPE_MUL:
      switch (get_attr_size (insn))
	{
	case SIZE_8:
	  cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const9;
	  break;
	case SIZE_16:
	  cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi_const;
	  break;
	case SIZE_32:
	  cost = COSTS_N_INSNS (n - 1) + rs6000_cost->mulsi;
	  break;
	case SIZE_64:
	  cost = COSTS_N_INSNS (n - 1) + rs6000_cost->muldi;
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    case TYPE_DIV:
      switch (get_attr_size (insn))
	{
	case SIZE_32:
	  cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divsi;
	  break;
	case SIZE_64:
	  cost = COSTS_N_INSNS (n - 1) + rs6000_cost->divdi;
	  break;
	default:
	  gcc_unreachable ();
	}
      break;

    case TYPE_FP:
      cost = n * rs6000_cost->fp;
      break;
    case TYPE_DMUL:
      cost = n * rs6000_cost->dmul;
      break;
    case TYPE_SDIV:
      cost = n * rs6000_cost->sdiv;
      break;
    case TYPE_DDIV:
      cost = n * rs6000_cost->ddiv;
      break;

    case TYPE_SYNC:
    case TYPE_LOAD_L:
    case TYPE_MFCR:
    case TYPE_MFCRF:
      cost = COSTS_N_INSNS (n + 2);
      break;

    default:
      cost = COSTS_N_INSNS (n);
    }

  return cost;
}
/* Debug form of ADDRESS_COST that is selected if -mdebug=cost.  */

static int
rs6000_debug_address_cost (rtx x, machine_mode mode,
			   addr_space_t as, bool speed)
{
  int ret = TARGET_ADDRESS_COST (x, mode, as, speed);

  fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
	   ret, speed ? "true" : "false");
  debug_rtx (x);

  return ret;
}
/* A C expression returning the cost of moving data from a register of class
   CLASS1 to one of CLASS2.  */

static int
rs6000_register_move_cost (machine_mode mode,
			   reg_class_t from, reg_class_t to)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  /* Moves from/to GENERAL_REGS.  */
  if (reg_classes_intersect_p (to, GENERAL_REGS)
      || reg_classes_intersect_p (from, GENERAL_REGS))
    {
      reg_class_t rclass = from;

      if (! reg_classes_intersect_p (to, GENERAL_REGS))
	rclass = to;

      if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
	ret = (rs6000_memory_move_cost (mode, rclass, false)
	       + rs6000_memory_move_cost (mode, GENERAL_REGS, false));

      /* It's more expensive to move CR_REGS than CR0_REGS because of the
	 shift.  */
      else if (rclass == CR_REGS)
	ret = 4;

      /* For those processors that have slow LR/CTR moves, make them more
	 expensive than memory in order to bias spills to memory.  */
      else if ((rs6000_tune == PROCESSOR_POWER6
		|| rs6000_tune == PROCESSOR_POWER7
		|| rs6000_tune == PROCESSOR_POWER8
		|| rs6000_tune == PROCESSOR_POWER9)
	       && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
	ret = 6 * hard_regno_nregs (0, mode);

      else
	/* A move will cost one instruction per GPR moved.  */
	ret = 2 * hard_regno_nregs (0, mode);
    }

  /* If we have VSX, we can easily move between FPR or Altivec registers.  */
  else if (VECTOR_MEM_VSX_P (mode)
	   && reg_classes_intersect_p (to, VSX_REGS)
	   && reg_classes_intersect_p (from, VSX_REGS))
    ret = 2 * hard_regno_nregs (FIRST_FPR_REGNO, mode);

  /* Moving between two similar registers is just one instruction.  */
  else if (reg_classes_intersect_p (to, from))
    ret = (FLOAT128_2REG_P (mode)) ? 4 : 2;

  /* Everything else has to go through GENERAL_REGS.  */
  else
    ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
	   + rs6000_register_move_cost (mode, from, GENERAL_REGS));

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
	fprintf (stderr,
		 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
		 ret, GET_MODE_NAME (mode), reg_class_names[from],
		 reg_class_names[to]);
      dbg_cost_ctrl--;
    }

  return ret;
}
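/* Editorial note, added for exposition: with the rules above, moving a
   DFmode value between FLOAT_REGS and GENERAL_REGS is priced as a store
   plus a load via rs6000_memory_move_cost, since there is no cheap
   direct path; this biases the register allocator toward keeping such
   values within a single register file.  */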
/* A C expression returning the cost of moving data of MODE from a register
   to or from memory.  */

static int
rs6000_memory_move_cost (machine_mode mode, reg_class_t rclass,
			 bool in ATTRIBUTE_UNUSED)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  if (reg_classes_intersect_p (rclass, GENERAL_REGS))
    ret = 4 * hard_regno_nregs (0, mode);
  else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
	    || reg_classes_intersect_p (rclass, VSX_REGS)))
    ret = 4 * hard_regno_nregs (32, mode);
  else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
    ret = 4 * hard_regno_nregs (FIRST_ALTIVEC_REGNO, mode);
  else
    ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
	fprintf (stderr,
		 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
		 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
      dbg_cost_ctrl--;
    }

  return ret;
}
/* Returns a code for a target-specific builtin that implements
   reciprocal of the function, or NULL_TREE if not available.  */

static tree
rs6000_builtin_reciprocal (tree fndecl)
{
  switch (DECL_FUNCTION_CODE (fndecl))
    {
    case VSX_BUILTIN_XVSQRTDP:
      if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
	return NULL_TREE;

      return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];

    case VSX_BUILTIN_XVSQRTSP:
      if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
	return NULL_TREE;

      return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];

    default:
      return NULL_TREE;
    }
}
/* Load up a constant.  If the mode is a vector mode, splat the value across
   all of the vector elements.  */

static rtx
rs6000_load_constant_and_splat (machine_mode mode, REAL_VALUE_TYPE dconst)
{
  rtx reg;

  if (mode == SFmode || mode == DFmode)
    {
      rtx d = const_double_from_real_value (dconst, mode);
      reg = force_reg (mode, d);
    }
  else if (mode == V4SFmode)
    {
      rtx d = const_double_from_real_value (dconst, SFmode);
      rtvec v = gen_rtvec (4, d, d, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else if (mode == V2DFmode)
    {
      rtx d = const_double_from_real_value (dconst, DFmode);
      rtvec v = gen_rtvec (2, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else
    gcc_unreachable ();

  return reg;
}
/* Generate an FMA instruction.  */

static void
rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
{
  machine_mode mode = GET_MODE (target);
  rtx dst;

  dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}
/* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a).  */

static void
rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
{
  machine_mode mode = GET_MODE (dst);
  rtx r;

  /* This is a tad more complicated, since the fnma_optab is for
     a different expression: fma(-m1, m2, a), which is the same
     thing except in the case of signed zeros.

     Fortunately we know that if FMA is supported that FNMSUB is
     also supported in the ISA.  Just expand it directly.  */

  gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);

  r = gen_rtx_NEG (mode, a);
  r = gen_rtx_FMA (mode, m1, m2, r);
  r = gen_rtx_NEG (mode, r);
  emit_insn (gen_rtx_SET (dst, r));
}
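/* Editorial example of the signed-zero subtlety described above, using
   hypothetical values: with m1 = m2 = a = 1.0, the FNMSUB form computes
   -fma(1.0, 1.0, -1.0) = -(1.0 - 1.0) = -(+0.0) = -0.0, whereas
   fnma_optab's fma(-1.0, 1.0, 1.0) = 1.0 - 1.0 = +0.0.  Under
   round-to-nearest x - x is +0.0, so the two expansions differ exactly
   when m1 * m2 == a.  */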
/* Newton-Raphson approximation of floating point divide DST = N/D.  If NOTE_P,
   add a reg_note saying that this was a division.  Support both scalar and
   vector divide.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
{
  machine_mode mode = GET_MODE (dst);
  rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
  int i;

  /* Low precision estimates guarantee 5 bits of accuracy.  High
     precision estimates guarantee 14 bits of accuracy.  SFmode
     requires 23 bits of accuracy.  DFmode requires 52 bits of
     accuracy.  Each pass at least doubles the accuracy, leading
     to the following.  */
  int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
  if (mode == DFmode || mode == V2DFmode)
    passes++;

  enum insn_code code = optab_handler (smul_optab, mode);
  insn_gen_fn gen_mul = GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  one = rs6000_load_constant_and_splat (mode, dconst1);

  /* x0 = 1./d estimate */
  x0 = gen_reg_rtx (mode);
  emit_insn (gen_rtx_SET (x0, gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
					      UNSPEC_FRES)));

  /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i).  */
  if (passes > 1) {

    /* e0 = 1. - d * x0  */
    e0 = gen_reg_rtx (mode);
    rs6000_emit_nmsub (e0, d, x0, one);

    /* x1 = x0 + e0 * x0  */
    x1 = gen_reg_rtx (mode);
    rs6000_emit_madd (x1, e0, x0, x0);

    for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
	 ++i, xprev = xnext, eprev = enext) {

      /* enext = eprev * eprev  */
      enext = gen_reg_rtx (mode);
      emit_insn (gen_mul (enext, eprev, eprev));

      /* xnext = xprev + enext * xprev  */
      xnext = gen_reg_rtx (mode);
      rs6000_emit_madd (xnext, enext, xprev, xprev);
    }

  } else
    xprev = x0;

  /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i).  */

  /* u = n * xprev  */
  u = gen_reg_rtx (mode);
  emit_insn (gen_mul (u, n, xprev));

  /* v = n - (d * u)  */
  v = gen_reg_rtx (mode);
  rs6000_emit_nmsub (v, d, u, n);

  /* dst = (v * xprev) + u  */
  rs6000_emit_madd (dst, v, xprev, u);

  if (note_p)
    add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
}
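/* Editorial check of the pass counts above, added for exposition: a
   low-precision estimate starts with 5 good bits and each pass at least
   doubles them, so 3 passes give 5 -> 10 -> 20 -> 40 >= 23 bits for
   SFmode, and the extra DFmode pass gives 80 >= 52 bits.  With
   -mrecip-precision the estimate starts at 14 bits, so one pass reaches
   28 >= 23 bits and two passes reach 56 >= 52 bits.  */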
/* Goldschmidt's Algorithm for single/double-precision floating point
   sqrt and rsqrt.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swsqrt (rtx dst, rtx src, bool recip)
{
  machine_mode mode = GET_MODE (src);
  rtx e = gen_reg_rtx (mode);
  rtx g = gen_reg_rtx (mode);
  rtx h = gen_reg_rtx (mode);

  /* Low precision estimates guarantee 5 bits of accuracy.  High
     precision estimates guarantee 14 bits of accuracy.  SFmode
     requires 23 bits of accuracy.  DFmode requires 52 bits of
     accuracy.  Each pass at least doubles the accuracy, leading
     to the following.  */
  int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
  if (mode == DFmode || mode == V2DFmode)
    passes++;

  int i;
  rtx mhalf;
  enum insn_code code = optab_handler (smul_optab, mode);
  insn_gen_fn gen_mul = GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  mhalf = rs6000_load_constant_and_splat (mode, dconsthalf);

  /* e = rsqrt estimate */
  emit_insn (gen_rtx_SET (e, gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
					     UNSPEC_RSQRT)));

  /* If (src == 0.0) filter infinity to prevent NaN for sqrt(0.0).  */
  if (!recip)
    {
      rtx zero = force_reg (mode, CONST0_RTX (mode));

      if (mode == SFmode)
	{
	  rtx target = emit_conditional_move (e, GT, src, zero, mode,
					      e, zero, mode, 0);
	  if (target != e)
	    emit_move_insn (e, target);
	}
      else
	{
	  rtx cond = gen_rtx_GT (VOIDmode, e, zero);
	  rs6000_emit_vector_cond_expr (e, e, zero, cond, src, zero);
	}
    }

  /* g = sqrt estimate.  */
  emit_insn (gen_mul (g, e, src));
  /* h = 1/(2*sqrt) estimate.  */
  emit_insn (gen_mul (h, e, mhalf));

  if (recip)
    {
      if (passes == 1)
	{
	  rtx t = gen_reg_rtx (mode);
	  rs6000_emit_nmsub (t, g, h, mhalf);
	  /* Apply correction directly to 1/rsqrt estimate.  */
	  rs6000_emit_madd (dst, e, t, e);
	}
      else
	{
	  for (i = 0; i < passes; i++)
	    {
	      rtx t1 = gen_reg_rtx (mode);
	      rtx g1 = gen_reg_rtx (mode);
	      rtx h1 = gen_reg_rtx (mode);

	      rs6000_emit_nmsub (t1, g, h, mhalf);
	      rs6000_emit_madd (g1, g, t1, g);
	      rs6000_emit_madd (h1, h, t1, h);

	      g = g1;
	      h = h1;
	    }
	  /* Multiply by 2 for 1/rsqrt.  */
	  emit_insn (gen_add3_insn (dst, h, h));
	}
    }
  else
    {
      rtx t = gen_reg_rtx (mode);
      rs6000_emit_nmsub (t, g, h, mhalf);
      rs6000_emit_madd (dst, g, t, g);
    }
}
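/* Editorial sketch of one Goldschmidt step with made-up numbers: for
   src = 4.0 and an estimate e = 0.52, g = e*src = 2.08 and
   h = e*0.5 = 0.26.  The residual t = 0.5 - g*h = -0.0408 computed by
   rs6000_emit_nmsub then yields g' = g + g*t = 1.995136 and
   h' = h + h*t = 0.249392 -- each pass roughly squares the relative
   error toward sqrt(4) = 2 and 1/(2*sqrt(4)) = 0.25.  */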
/* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
   (Power7) targets.  DST is the target, and SRC is the argument operand.  */

void
rs6000_emit_popcount (rtx dst, rtx src)
{
  machine_mode mode = GET_MODE (dst);
  rtx tmp1, tmp2;

  /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can.  */
  if (TARGET_POPCNTD)
    {
      if (mode == SImode)
	emit_insn (gen_popcntdsi2 (dst, src));
      else
	emit_insn (gen_popcntddi2 (dst, src));
      return;
    }

  tmp1 = gen_reg_rtx (mode);

  if (mode == SImode)
    {
      emit_insn (gen_popcntbsi2 (tmp1, src));
      tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
			  NULL_RTX, 0);
      tmp2 = force_reg (SImode, tmp2);
      emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
    }
  else
    {
      emit_insn (gen_popcntbdi2 (tmp1, src));
      tmp2 = expand_mult (DImode, tmp1,
			  GEN_INT ((HOST_WIDE_INT)
				   0x01010101 << 32 | 0x01010101),
			  NULL_RTX, 0);
      tmp2 = force_reg (DImode, tmp2);
      emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
    }
}
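/* Editorial worked example of the multiply trick above (values are
   hypothetical): for SImode src = 0xff01f003, popcntb produces the
   per-byte counts 0x08010402.  Multiplying by 0x01010101 accumulates
   every byte into the most significant byte:
   0x08010402 * 0x01010101 == 0x0f070602 (mod 2^32), and the final
   shift right by 24 extracts 0x0f == 15, the full population count.  */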
/* Emit parity intrinsic on TARGET_POPCNTB targets.  DST is the
   target, and SRC is the argument operand.  */

void
rs6000_emit_parity (rtx dst, rtx src)
{
  machine_mode mode = GET_MODE (dst);
  rtx tmp;

  tmp = gen_reg_rtx (mode);

  /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can.  */
  if (TARGET_CMPB)
    {
      if (mode == SImode)
	{
	  emit_insn (gen_popcntbsi2 (tmp, src));
	  emit_insn (gen_paritysi2_cmpb (dst, tmp));
	}
      else
	{
	  emit_insn (gen_popcntbdi2 (tmp, src));
	  emit_insn (gen_paritydi2_cmpb (dst, tmp));
	}
      return;
    }

  if (mode == SImode)
    {
      /* Is mult+shift >= shift+xor+shift+xor?  */
      if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
	{
	  rtx tmp1, tmp2, tmp3, tmp4;

	  tmp1 = gen_reg_rtx (SImode);
	  emit_insn (gen_popcntbsi2 (tmp1, src));

	  tmp2 = gen_reg_rtx (SImode);
	  emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
	  tmp3 = gen_reg_rtx (SImode);
	  emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));

	  tmp4 = gen_reg_rtx (SImode);
	  emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
	  emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
	}
      else
	rs6000_emit_popcount (tmp, src);
      emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
    }
  else
    {
      /* Is mult+shift >= shift+xor+shift+xor+shift+xor?  */
      if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
	{
	  rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;

	  tmp1 = gen_reg_rtx (DImode);
	  emit_insn (gen_popcntbdi2 (tmp1, src));

	  tmp2 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
	  tmp3 = gen_reg_rtx (DImode);
	  emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));

	  tmp4 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
	  tmp5 = gen_reg_rtx (DImode);
	  emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));

	  tmp6 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
	  emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
	}
      else
	rs6000_emit_popcount (tmp, src);
      emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
    }
}
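/* Editorial example for the SImode shift/xor path, continuing the byte
   counts 0x08010402 from above: folding by 16 gives
   0x08010402 ^ 0x00000801 == 0x08010c03, folding by 8 gives
   0x08010c03 ^ 0x0008010c == 0x08090d0f, and the final AND with 1 keeps
   the low bit of 0x0f, i.e. 1, the parity of the 15 set bits.  */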
/* Expand an Altivec constant permutation for little endian mode.
   OP0 and OP1 are the input vectors and TARGET is the output vector.
   SEL specifies the constant permutation vector.

   There are two issues: First, the two input operands must be
   swapped so that together they form a double-wide array in LE
   order.  Second, the vperm instruction has surprising behavior
   in LE mode: it interprets the elements of the source vectors
   in BE mode ("left to right") and interprets the elements of
   the destination vector in LE mode ("right to left").  To
   correct for this, we must subtract each element of the permute
   control vector from 31.

   For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
   with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
   We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
   serve as the permute control vector.  Then, in BE mode,

     vperm 9,10,11,12

   places the desired result in vr9.  However, in LE mode the
   vector contents will be

     vr10 = 00000003 00000002 00000001 00000000
     vr11 = 00000007 00000006 00000005 00000004

   The result of the vperm using the same permute control vector is

     vr9  = 05000000 07000000 01000000 03000000

   That is, the leftmost 4 bytes of vr10 are interpreted as the
   source for the rightmost 4 bytes of vr9, and so on.

   If we change the permute control vector to

     vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}

   and issue the vperm with the two source operands reversed, we obtain
   the desired

     vr9 = 00000006 00000004 00000002 00000000.  */
static void
altivec_expand_vec_perm_const_le (rtx target, rtx op0, rtx op1,
				  const vec_perm_indices &sel)
{
  unsigned int i;
  rtx perm[16];
  rtx constv, unspec;

  /* Unpack and adjust the constant selector.  */
  for (i = 0; i < 16; ++i)
    {
      unsigned int elt = 31 - (sel[i] & 31);
      perm[i] = GEN_INT (elt);
    }

  /* Expand to a permute, swapping the inputs and using the
     adjusted selector.  */
  if (!REG_P (op0))
    op0 = force_reg (V16QImode, op0);
  if (!REG_P (op1))
    op1 = force_reg (V16QImode, op1);

  constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
  constv = force_reg (V16QImode, constv);
  unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
			   UNSPEC_VPERM);
  if (!REG_P (target))
    {
      rtx tmp = gen_reg_rtx (V16QImode);
      emit_move_insn (tmp, unspec);
      unspec = tmp;
    }

  emit_move_insn (target, unspec);
}
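/* Editorial illustration: the adjustment above maps a BE-numbered
   selector element 0 to 31 - 0 = 31 and element 31 to 0, so together
   with the swapped operands the vperm reads the double-wide input
   right to left -- exactly the LE element order derived in the long
   comment before this function.  */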
/* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
   permute control vector.  But here it's not a constant, so we must
   generate a vector NAND or NOR to do the adjustment.  */

void
altivec_expand_vec_perm_le (rtx operands[4])
{
  rtx notx, iorx, unspec;
  rtx target = operands[0];
  rtx op0 = operands[1];
  rtx op1 = operands[2];
  rtx sel = operands[3];
  rtx tmp = target;
  rtx norreg = gen_reg_rtx (V16QImode);
  machine_mode mode = GET_MODE (target);

  /* Get everything in regs so the pattern matches.  */
  if (!REG_P (op0))
    op0 = force_reg (mode, op0);
  if (!REG_P (op1))
    op1 = force_reg (mode, op1);
  if (!REG_P (sel))
    sel = force_reg (V16QImode, sel);
  if (!REG_P (target))
    tmp = gen_reg_rtx (mode);

  if (TARGET_P9_VECTOR)
    {
      unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, sel),
			       UNSPEC_VPERMR);
    }
  else
    {
      /* Invert the selector with a VNAND if available, else a VNOR.
	 The VNAND is preferred for future fusion opportunities.  */
      notx = gen_rtx_NOT (V16QImode, sel);
      iorx = (TARGET_P8_VECTOR
	      ? gen_rtx_IOR (V16QImode, notx, notx)
	      : gen_rtx_AND (V16QImode, notx, notx));
      emit_insn (gen_rtx_SET (norreg, iorx));

      /* Permute with operands reversed and adjusted selector.  */
      unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
			       UNSPEC_VPERM);
    }

  /* Copy into target, possibly by way of a register.  */
  if (!REG_P (target))
    {
      emit_move_insn (tmp, unspec);
      unspec = tmp;
    }

  emit_move_insn (target, unspec);
}
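/* Editorial note, added for exposition: both forms above compute the
   bitwise complement, since x NAND x == ~(x & x) == ~x and
   x NOR x == ~(x | x) == ~x.  Because vperm only uses the low five
   bits of each selector byte, ~b is congruent to 31 - b (mod 32);
   e.g. a selector byte 0x03 becomes 0xfc, whose low five bits are
   28 == 31 - 3.  */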
/* Expand an Altivec constant permutation.  Return true if we match
   an efficient implementation; false to fall back to VPERM.

   OP0 and OP1 are the input vectors and TARGET is the output vector.
   SEL specifies the constant permutation vector.  */

static bool
altivec_expand_vec_perm_const (rtx target, rtx op0, rtx op1,
			       const vec_perm_indices &sel)
{
  struct altivec_perm_insn {
    HOST_WIDE_INT mask;
    enum insn_code impl;
    unsigned char perm[16];
  };
  static const struct altivec_perm_insn patterns[] = {
    { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
      {  1,  3,  5,  7,  9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
    { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
      {  2,  3,  6,  7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
       : CODE_FOR_altivec_vmrglb_direct),
      {  0, 16,  1, 17,  2, 18,  3, 19,  4, 20,  5, 21,  6, 22,  7, 23 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
       : CODE_FOR_altivec_vmrglh_direct),
      {  0,  1, 16, 17,  2,  3, 18, 19,  4,  5, 20, 21,  6,  7, 22, 23 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
       : CODE_FOR_altivec_vmrglw_direct),
      {  0,  1,  2,  3, 16, 17, 18, 19,  4,  5,  6,  7, 20, 21, 22, 23 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
       : CODE_FOR_altivec_vmrghb_direct),
      {  8, 24,  9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
       : CODE_FOR_altivec_vmrghh_direct),
      {  8,  9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
       : CODE_FOR_altivec_vmrghw_direct),
      {  8,  9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
    { OPTION_MASK_P8_VECTOR,
      (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgew_v4sf_direct
       : CODE_FOR_p8_vmrgow_v4sf_direct),
      {  0,  1,  2,  3, 16, 17, 18, 19,  8,  9, 10, 11, 24, 25, 26, 27 } },
    { OPTION_MASK_P8_VECTOR,
      (BYTES_BIG_ENDIAN ? CODE_FOR_p8_vmrgow_v4sf_direct
       : CODE_FOR_p8_vmrgew_v4sf_direct),
      {  4,  5,  6,  7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
  };

  unsigned int i, j, elt, which;
  unsigned char perm[16];
  rtx x;
  bool one_vec;

  /* Unpack the constant selector.  */
  for (i = which = 0; i < 16; ++i)
    {
      elt = sel[i] & 31;
      which |= (elt < 16 ? 1 : 2);
      perm[i] = elt;
    }

  /* Simplify the constant selector based on operands.  */
  switch (which)
    {
    default:
      gcc_unreachable ();

    case 3:
      one_vec = false;
      if (!rtx_equal_p (op0, op1))
	break;
      /* FALLTHRU */

    case 2:
      for (i = 0; i < 16; ++i)
	perm[i] &= 15;
      op0 = op1;
      one_vec = true;
      break;

    case 1:
      op1 = op0;
      one_vec = true;
      break;
    }

  /* Look for splat patterns.  */
  if (one_vec)
    {
      elt = perm[0];

      for (i = 0; i < 16; ++i)
	if (perm[i] != elt)
	  break;
      if (i == 16)
	{
	  if (!BYTES_BIG_ENDIAN)
	    elt = 15 - elt;
	  emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
	  return true;
	}

      if (elt % 2 == 0)
	{
	  for (i = 0; i < 16; i += 2)
	    if (perm[i] != elt || perm[i + 1] != elt + 1)
	      break;
	  if (i == 16)
	    {
	      int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
	      x = gen_reg_rtx (V8HImode);
	      emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
						    GEN_INT (field)));
	      emit_move_insn (target, gen_lowpart (V16QImode, x));
	      return true;
	    }
	}

      if (elt % 4 == 0)
	{
	  for (i = 0; i < 16; i += 4)
	    if (perm[i] != elt
		|| perm[i + 1] != elt + 1
		|| perm[i + 2] != elt + 2
		|| perm[i + 3] != elt + 3)
	      break;
	  if (i == 16)
	    {
	      int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
	      x = gen_reg_rtx (V4SImode);
	      emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
						    GEN_INT (field)));
	      emit_move_insn (target, gen_lowpart (V16QImode, x));
	      return true;
	    }
	}
    }

  /* Look for merge and pack patterns.  */
  for (j = 0; j < ARRAY_SIZE (patterns); ++j)
    {
      bool swapped;

      if ((patterns[j].mask & rs6000_isa_flags) == 0)
	continue;

      elt = patterns[j].perm[0];
      if (perm[0] == elt)
	swapped = false;
      else if (perm[0] == elt + 16)
	swapped = true;
      else
	continue;
      for (i = 1; i < 16; ++i)
	{
	  elt = patterns[j].perm[i];
	  if (swapped)
	    elt = (elt >= 16 ? elt - 16 : elt + 16);
	  else if (one_vec && elt >= 16)
	    elt -= 16;
	  if (perm[i] != elt)
	    break;
	}
      if (i == 16)
	{
	  enum insn_code icode = patterns[j].impl;
	  machine_mode omode = insn_data[icode].operand[0].mode;
	  machine_mode imode = insn_data[icode].operand[1].mode;

	  /* For little-endian, don't use vpkuwum and vpkuhum if the
	     underlying vector type is not V4SI and V8HI, respectively.
	     For example, using vpkuwum with a V8HI picks up the even
	     halfwords (BE numbering) when the even halfwords (LE
	     numbering) are what we need.  */
	  if (!BYTES_BIG_ENDIAN
	      && icode == CODE_FOR_altivec_vpkuwum_direct
	      && ((GET_CODE (op0) == REG
		   && GET_MODE (op0) != V4SImode)
		  || (GET_CODE (op0) == SUBREG
		      && GET_MODE (XEXP (op0, 0)) != V4SImode)))
	    continue;
	  if (!BYTES_BIG_ENDIAN
	      && icode == CODE_FOR_altivec_vpkuhum_direct
	      && ((GET_CODE (op0) == REG
		   && GET_MODE (op0) != V8HImode)
		  || (GET_CODE (op0) == SUBREG
		      && GET_MODE (XEXP (op0, 0)) != V8HImode)))
	    continue;

	  /* For little-endian, the two input operands must be swapped
	     (or swapped back) to ensure proper right-to-left numbering
	     at the hardware level.  */
	  if (swapped ^ !BYTES_BIG_ENDIAN)
	    std::swap (op0, op1);
	  if (imode != V16QImode)
	    {
	      op0 = gen_lowpart (imode, op0);
	      op1 = gen_lowpart (imode, op1);
	    }
	  if (omode == V16QImode)
	    x = target;
	  else
	    x = gen_reg_rtx (omode);
	  emit_insn (GEN_FCN (icode) (x, op0, op1));
	  if (omode != V16QImode)
	    emit_move_insn (target, gen_lowpart (V16QImode, x));
	  return true;
	}
    }

  if (!BYTES_BIG_ENDIAN)
    {
      altivec_expand_vec_perm_const_le (target, op0, op1, sel);
      return true;
    }

  return false;
}
/* Expand a VSX Permute Doubleword constant permutation.
   Return true if we match an efficient implementation.  */

static bool
rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
				unsigned char perm0, unsigned char perm1)
{
  rtx x;

  /* If both selectors come from the same operand, fold to single op.  */
  if ((perm0 & 2) == (perm1 & 2))
    {
      if (perm0 & 2)
	op0 = op1;
      else
	op1 = op0;
    }
  /* If both operands are equal, fold to simpler permutation.  */
  if (rtx_equal_p (op0, op1))
    {
      perm0 = perm0 & 1;
      perm1 = (perm1 & 1) + 2;
    }
  /* If the first selector comes from the second operand, swap.  */
  else if (perm0 & 2)
    {
      if (perm1 & 2)
	return false;
      perm0 -= 2;
      perm1 += 2;
      std::swap (op0, op1);
    }
  /* If the second selector does not come from the second operand, fail.  */
  else if ((perm1 & 2) == 0)
    return false;

  /* Success!  */
  if (target != NULL)
    {
      machine_mode vmode, dmode;
      rtvec v;

      vmode = GET_MODE (target);
      gcc_assert (GET_MODE_NUNITS (vmode) == 2);
      dmode = mode_for_vector (GET_MODE_INNER (vmode), 4).require ();
      x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
      v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
      x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
      emit_insn (gen_rtx_SET (target, x));
    }
  return true;
}
35246 rs6000_vectorize_vec_perm_const (machine_mode vmode
, rtx target
, rtx op0
,
35247 rtx op1
, const vec_perm_indices
&sel
)
35249 bool testing_p
= !target
;
35251 /* AltiVec (and thus VSX) can handle arbitrary permutations. */
35252 if (TARGET_ALTIVEC
&& testing_p
)
35255 /* Check for ps_merge* or xxpermdi insns. */
35256 if ((vmode
== V2DFmode
|| vmode
== V2DImode
) && VECTOR_MEM_VSX_P (vmode
))
35260 op0
= gen_raw_REG (vmode
, LAST_VIRTUAL_REGISTER
+ 1);
35261 op1
= gen_raw_REG (vmode
, LAST_VIRTUAL_REGISTER
+ 2);
35263 if (rs6000_expand_vec_perm_const_1 (target
, op0
, op1
, sel
[0], sel
[1]))
35267 if (TARGET_ALTIVEC
)
35269 /* Force the target-independent code to lower to V16QImode. */
35270 if (vmode
!= V16QImode
)
35272 if (altivec_expand_vec_perm_const (target
, op0
, op1
, sel
))
/* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.
   OP0 and OP1 are the input vectors and TARGET is the output vector.
   PERM specifies the constant permutation vector.  */

static void
rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
			   machine_mode vmode, const vec_perm_builder &perm)
{
  rtx x = expand_vec_perm_const (vmode, op0, op1, perm, BLKmode, target);
  if (x != target)
    emit_move_insn (target, x);
}
/* Expand an extract even operation.  */

void
rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
{
  machine_mode vmode = GET_MODE (target);
  unsigned i, nelt = GET_MODE_NUNITS (vmode);
  vec_perm_builder perm (nelt, nelt, 1);

  for (i = 0; i < nelt; i++)
    perm.quick_push (i * 2);

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
}
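/* Editorial example: for a V4SI target the loop above pushes the
   selector {0, 2, 4, 6} -- the even elements of OP0 followed by the
   even elements of OP1, which are numbered 4 and 6 in the concatenated
   8-element input.  */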
/* Expand a vector interleave operation.  */

void
rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
{
  machine_mode vmode = GET_MODE (target);
  unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
  vec_perm_builder perm (nelt, nelt, 1);

  high = (highp ? 0 : nelt / 2);
  for (i = 0; i < nelt / 2; i++)
    {
      perm.quick_push (i + high);
      perm.quick_push (i + nelt + high);
    }

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, perm);
}
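/* Editorial example: for V4SI the selector is {0, 4, 1, 5} when HIGHP
   and {2, 6, 3, 7} otherwise, pairing each element of the chosen half
   of OP0 with the corresponding element of OP1.  */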
/* Scale a V2DF vector SRC by two to the SCALE and place in TGT.  */
void
rs6000_scale_v2df (rtx tgt, rtx src, int scale)
{
  HOST_WIDE_INT hwi_scale (scale);
  REAL_VALUE_TYPE r_pow;
  rtvec v = rtvec_alloc (2);
  rtx elt;
  rtx scale_vec = gen_reg_rtx (V2DFmode);
  (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
  elt = const_double_from_real_value (r_pow, DFmode);
  RTVEC_ELT (v, 0) = elt;
  RTVEC_ELT (v, 1) = elt;
  rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
  emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
}
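/* Editorial example: rs6000_scale_v2df (tgt, src, 3) splats 2^3 == 8.0
   into both lanes of the V2DF constant and multiplies, scaling each
   element of SRC by 8; a negative SCALE likewise divides by a power of
   two, since real_powi raises dconst2 to hwi_scale.  */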
/* Return an RTX representing where to find the function value of a
   function returning MODE.  */
static rtx
rs6000_complex_function_value (machine_mode mode)
{
  unsigned int regno;
  rtx r1, r2;
  machine_mode inner = GET_MODE_INNER (mode);
  unsigned int inner_bytes = GET_MODE_UNIT_SIZE (mode);

  if (TARGET_FLOAT128_TYPE
      && (mode == KCmode
	  || (mode == TCmode && TARGET_IEEEQUAD)))
    regno = ALTIVEC_ARG_RETURN;

  else if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
    regno = FP_ARG_RETURN;

  else
    {
      regno = GP_ARG_RETURN;

      /* 32-bit is OK since it'll go in r3/r4.  */
      if (TARGET_32BIT && inner_bytes >= 4)
	return gen_rtx_REG (mode, regno);
    }

  if (inner_bytes >= 8)
    return gen_rtx_REG (mode, regno);

  r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
			  const0_rtx);
  r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
			  GEN_INT (inner_bytes));
  return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
}
/* Return an rtx describing a return value of MODE as a PARALLEL
   in N_ELTS registers, each of mode ELT_MODE, starting at REGNO,
   stride REG_STRIDE.  */

static rtx
rs6000_parallel_return (machine_mode mode,
			int n_elts, machine_mode elt_mode,
			unsigned int regno, unsigned int reg_stride)
{
  rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
  int i;

  for (i = 0; i < n_elts; i++)
    {
      rtx r = gen_rtx_REG (elt_mode, regno);
      rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
      XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
      regno += reg_stride;
    }

  return par;
}
/* Target hook for TARGET_FUNCTION_VALUE.

   An integer value is in r3 and a floating-point value is in fp1,
   unless -msoft-float.  */

static rtx
rs6000_function_value (const_tree valtype,
		       const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
		       bool outgoing ATTRIBUTE_UNUSED)
{
  machine_mode mode;
  unsigned int regno;
  machine_mode elt_mode;
  int n_elts;

  /* Special handling for structs in darwin64.  */
  if (TARGET_MACHO
      && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed as
	 an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
      if (valret)
	return valret;
      /* Otherwise fall through to standard ABI rules.  */
    }

  mode = TYPE_MODE (valtype);

  /* The ELFv2 ABI returns homogeneous VFP aggregates in registers.  */
  if (rs6000_discover_homogeneous_aggregate (mode, valtype, &elt_mode, &n_elts))
    {
      int first_reg, n_regs;

      if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (elt_mode))
	{
	  /* _Decimal128 must use even/odd register pairs.  */
	  first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
	  n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
	}
      else
	{
	  first_reg = ALTIVEC_ARG_RETURN;
	  n_regs = 1;
	}

      return rs6000_parallel_return (mode, n_elts, elt_mode, first_reg, n_regs);
    }

  /* Some return value types need be split in -mpowerpc64, 32bit ABI.  */
  if (TARGET_32BIT && TARGET_POWERPC64)
    switch (mode)
      {
      default:
	break;
      case E_DImode:
      case E_SCmode:
      case E_DCmode:
      case E_TCmode:
	int count = GET_MODE_SIZE (mode) / 4;
	return rs6000_parallel_return (mode, count, SImode, GP_ARG_RETURN, 1);
      }

  if ((INTEGRAL_TYPE_P (valtype)
       && GET_MODE_BITSIZE (mode) < (TARGET_32BIT ? 32 : 64))
      || POINTER_TYPE_P (valtype))
    mode = TARGET_32BIT ? SImode : DImode;

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT
	   && !FLOAT128_VECTOR_P (mode))
    regno = FP_ARG_RETURN;
  else if (TREE_CODE (valtype) == COMPLEX_TYPE
	   && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if ((TREE_CODE (valtype) == VECTOR_TYPE || FLOAT128_VECTOR_P (mode))
	   && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
	   && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
    regno = ALTIVEC_ARG_RETURN;
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}
/* Define how to find the value returned by a library function
   assuming the value has mode MODE.  */
static rtx
rs6000_libcall_value (machine_mode mode)
{
  unsigned int regno;

  /* Long long return value need be split in -mpowerpc64, 32bit ABI.  */
  if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
    return rs6000_parallel_return (mode, 2, SImode, GP_ARG_RETURN, 1);

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_MODE_NOT_VECTOR_P (mode) && TARGET_HARD_FLOAT)
    regno = FP_ARG_RETURN;
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
	   && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
    regno = ALTIVEC_ARG_RETURN;
  else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}
/* Compute register pressure classes.  We implement the target hook to avoid
   IRA picking something like NON_SPECIAL_REGS as a pressure class, which can
   lead to incorrect estimates of the number of available registers and
   therefore increased register pressure/spill.  */
static int
rs6000_compute_pressure_classes (enum reg_class *pressure_classes)
{
  int n;

  n = 0;
  pressure_classes[n++] = GENERAL_REGS;
  if (TARGET_VSX)
    pressure_classes[n++] = VSX_REGS;
  else
    {
      if (TARGET_ALTIVEC)
	pressure_classes[n++] = ALTIVEC_REGS;
      if (TARGET_HARD_FLOAT)
	pressure_classes[n++] = FLOAT_REGS;
    }
  pressure_classes[n++] = CR_REGS;
  pressure_classes[n++] = SPECIAL_REGS;

  return n;
}
/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.

   For the RS/6000, if frame pointer elimination is being done, we would like
   to convert ap into fp, not sp.

   We need r30 if -mminimal-toc was specified, and there are constant pool
   references.  */

static bool
rs6000_can_eliminate (const int from, const int to)
{
  return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
	  ? ! frame_pointer_needed
	  : from == RS6000_PIC_OFFSET_TABLE_REGNUM
	  ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC
	    || constant_pool_empty_p ()
	  : true);
}
/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */
HOST_WIDE_INT
rs6000_initial_elimination_offset (int from, int to)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  HOST_WIDE_INT offset;

  if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? 0 : -info->total_size;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    {
      offset = info->push_p ? 0 : -info->total_size;
      if (FRAME_GROWS_DOWNWARD)
	offset += info->fixed_size + info->vars_size + info->parm_size;
    }
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = FRAME_GROWS_DOWNWARD
	     ? info->fixed_size + info->vars_size + info->parm_size
	     : 0;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = info->total_size;
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? info->total_size : 0;
  else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}
/* Fill in sizes of registers used by unwinder.  */

static void
rs6000_init_dwarf_reg_sizes_extra (tree address)
{
  if (TARGET_MACHO && ! TARGET_ALTIVEC)
    {
      int i;
      machine_mode mode = TYPE_MODE (char_type_node);
      rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
      rtx mem = gen_rtx_MEM (BLKmode, addr);
      rtx value = gen_int_mode (16, mode);

      /* On Darwin, libgcc may be built to run on both G3 and G4/5.
	 The unwinder still needs to know the size of Altivec registers.  */

      for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
	{
	  int column = DWARF_REG_TO_UNWIND_COLUMN
		       (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
	  HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);

	  emit_move_insn (adjust_address (mem, mode, offset), value);
	}
    }
}
/* Map internal gcc register numbers to debug format register numbers.
   FORMAT specifies the type of debug register number to use:
     0 -- debug information, except for frame-related sections
     1 -- DWARF .debug_frame section
     2 -- DWARF .eh_frame section  */

unsigned int
rs6000_dbx_register_number (unsigned int regno, unsigned int format)
{
  /* Except for the above, we use the internal number for non-DWARF
     debug information, and also for .eh_frame.  */
  if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)
    return regno;

  /* On some platforms, we use the standard DWARF register
     numbering for .debug_info and .debug_frame.  */
#ifdef RS6000_USE_DWARF_NUMBERING
  if (regno <= 63)
    return regno;
  if (regno == LR_REGNO)
    return 108;
  if (regno == CTR_REGNO)
    return 109;
  /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
     translated any combination of CR2, CR3, CR4 saves to a save of CR2.
     The actual code emitted saves the whole of CR, so we map CR2_REGNO
     to the DWARF reg for CR.  */
  if (format == 1 && regno == CR2_REGNO)
    return 64;
  if (CR_REGNO_P (regno))
    return regno - CR0_REGNO + 86;
  if (regno == CA_REGNO)
    return 101;  /* XER */
  if (ALTIVEC_REGNO_P (regno))
    return regno - FIRST_ALTIVEC_REGNO + 1124;
  if (regno == VRSAVE_REGNO)
    return 356;
  if (regno == VSCR_REGNO)
    return 67;
#endif
  return regno;
}
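/* Editorial example of the mapping above: CR3 (CR0_REGNO + 3) maps to
   DWARF register 89 (86 + 3) and AltiVec register v2 to 1126
   (1124 + 2), while GPRs and FPRs in 0..63 keep their internal
   numbers.  */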
/* target hook eh_return_filter_mode */
static scalar_int_mode
rs6000_eh_return_filter_mode (void)
{
  return TARGET_32BIT ? SImode : word_mode;
}
/* Target hook for translate_mode_attribute.  */
static machine_mode
rs6000_translate_mode_attribute (machine_mode mode)
{
  if ((FLOAT128_IEEE_P (mode)
       && ieee128_float_type_node == long_double_type_node)
      || (FLOAT128_IBM_P (mode)
	  && ibm128_float_type_node == long_double_type_node))
    return COMPLEX_MODE_P (mode) ? E_TCmode : E_TFmode;
  return mode;
}
/* Target hook for scalar_mode_supported_p.  */
static bool
rs6000_scalar_mode_supported_p (scalar_mode mode)
{
  /* -m32 does not support TImode.  This is the default, from
     default_scalar_mode_supported_p.  For -m32 -mpowerpc64 we want the
     same ABI as for -m32.  But default_scalar_mode_supported_p allows
     integer modes of precision 2 * BITS_PER_WORD, which matches TImode
     for -mpowerpc64.  */
  if (TARGET_32BIT && mode == TImode)
    return false;

  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();
  else if (TARGET_FLOAT128_TYPE && (mode == KFmode || mode == IFmode))
    return true;
  else
    return default_scalar_mode_supported_p (mode);
}
/* Target hook for vector_mode_supported_p.  */
static bool
rs6000_vector_mode_supported_p (machine_mode mode)
{
  /* There is no vector form for IEEE 128-bit.  If we return true for IEEE
     128-bit, the compiler might try to widen IEEE 128-bit to IBM
     double-double.  */
  if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode) && !FLOAT128_IEEE_P (mode))
    return true;
  else
    return false;
}
/* Target hook for floatn_mode.  */
static opt_scalar_float_mode
rs6000_floatn_mode (int n, bool extended)
{
  if (extended)
    {
      switch (n)
	{
	case 32:
	  return DFmode;

	case 64:
	  if (TARGET_FLOAT128_TYPE)
	    return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
	  else
	    return opt_scalar_float_mode ();

	case 128:
	  return opt_scalar_float_mode ();

	default:
	  /* Those are the only valid _FloatNx types.  */
	  gcc_unreachable ();
	}
    }
  else
    {
      switch (n)
	{
	case 32:
	  return SFmode;

	case 64:
	  return DFmode;

	case 128:
	  if (TARGET_FLOAT128_TYPE)
	    return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;
	  else
	    return opt_scalar_float_mode ();

	default:
	  return opt_scalar_float_mode ();
	}
    }
}
/* Target hook for c_mode_for_suffix.  */
static machine_mode
rs6000_c_mode_for_suffix (char suffix)
{
  if (TARGET_FLOAT128_TYPE)
    {
      if (suffix == 'q' || suffix == 'Q')
	return (FLOAT128_IEEE_P (TFmode)) ? TFmode : KFmode;

      /* At the moment, we are not defining a suffix for IBM extended double.
	 If/when the default for -mabi=ieeelongdouble is changed, and we want
	 to support __ibm128 constants in legacy library code, we may need to
	 re-evaluate this decision.  Currently, c-lex.c only supports 'w' and
	 'q' as machine dependent suffixes.  The x86_64 port uses 'w' for
	 __float80 constants.  */
    }

  return VOIDmode;
}
/* Target hook for invalid_arg_for_unprototyped_fn.  */
static const char *
invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
{
  return (!rs6000_darwin64_abi
	  && typelist == 0
	  && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
	  && (funcdecl == NULL_TREE
	      || (TREE_CODE (funcdecl) == FUNCTION_DECL
		  && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
	 ? N_("AltiVec argument passed to unprototyped function")
	 : NULL;
}
/* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
   setup by using __stack_chk_fail_local hidden function instead of
   calling __stack_chk_fail directly.  Otherwise it is better to call
   __stack_chk_fail directly.  */

static tree ATTRIBUTE_UNUSED
rs6000_stack_protect_fail (void)
{
  return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	 ? default_hidden_stack_protect_fail ()
	 : default_external_stack_protect_fail ();
}
/* Implement the TARGET_ASAN_SHADOW_OFFSET hook.  */

#if TARGET_ELF
static unsigned HOST_WIDE_INT
rs6000_asan_shadow_offset (void)
{
  return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
}
#endif
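/* Editorial sketch, assuming the generic ASan shadow mapping
   shadow = (addr >> 3) + offset: on a 64-bit target the byte at
   0x100000000 is shadowed at (0x100000000 >> 3) + (1 << 41)
   == 0x20000000 + 0x20000000000 == 0x20020000000.  */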
/* Mask options that we want to support inside of attribute((target)) and
   #pragma GCC target operations.  Note, we do not include things like
   64/32-bit, endianness, hard/soft floating point, etc. that would have
   different calling sequences.  */

struct rs6000_opt_mask {
  const char *name;		/* option name */
  HOST_WIDE_INT mask;		/* mask to set */
  bool invert;			/* invert sense of mask */
  bool valid_target;		/* option is a target option */
};
static struct rs6000_opt_mask const rs6000_opt_masks[] =
{
  { "altivec",			OPTION_MASK_ALTIVEC,		false, true  },
  { "cmpb",			OPTION_MASK_CMPB,		false, true  },
  { "crypto",			OPTION_MASK_CRYPTO,		false, true  },
  { "direct-move",		OPTION_MASK_DIRECT_MOVE,	false, true  },
  { "dlmzb",			OPTION_MASK_DLMZB,		false, true  },
  { "efficient-unaligned-vsx",	OPTION_MASK_EFFICIENT_UNALIGNED_VSX,
								false, true  },
  { "float128",			OPTION_MASK_FLOAT128_KEYWORD,	false, true  },
  { "float128-hardware",	OPTION_MASK_FLOAT128_HW,	false, true  },
  { "fprnd",			OPTION_MASK_FPRND,		false, true  },
  { "hard-dfp",			OPTION_MASK_DFP,		false, true  },
  { "htm",			OPTION_MASK_HTM,		false, true  },
  { "isel",			OPTION_MASK_ISEL,		false, true  },
  { "mfcrf",			OPTION_MASK_MFCRF,		false, true  },
  { "mfpgpr",			OPTION_MASK_MFPGPR,		false, true  },
  { "modulo",			OPTION_MASK_MODULO,		false, true  },
  { "mulhw",			OPTION_MASK_MULHW,		false, true  },
  { "multiple",			OPTION_MASK_MULTIPLE,		false, true  },
  { "popcntb",			OPTION_MASK_POPCNTB,		false, true  },
  { "popcntd",			OPTION_MASK_POPCNTD,		false, true  },
  { "power8-fusion",		OPTION_MASK_P8_FUSION,		false, true  },
  { "power8-fusion-sign",	OPTION_MASK_P8_FUSION_SIGN,	false, true  },
  { "power8-vector",		OPTION_MASK_P8_VECTOR,		false, true  },
  { "power9-fusion",		OPTION_MASK_P9_FUSION,		false, true  },
  { "power9-minmax",		OPTION_MASK_P9_MINMAX,		false, true  },
  { "power9-misc",		OPTION_MASK_P9_MISC,		false, true  },
  { "power9-vector",		OPTION_MASK_P9_VECTOR,		false, true  },
  { "powerpc-gfxopt",		OPTION_MASK_PPC_GFXOPT,		false, true  },
  { "powerpc-gpopt",		OPTION_MASK_PPC_GPOPT,		false, true  },
  { "quad-memory",		OPTION_MASK_QUAD_MEMORY,	false, true  },
  { "quad-memory-atomic",	OPTION_MASK_QUAD_MEMORY_ATOMIC,	false, true  },
  { "recip-precision",		OPTION_MASK_RECIP_PRECISION,	false, true  },
  { "save-toc-indirect",	OPTION_MASK_SAVE_TOC_INDIRECT,	false, true  },
  { "string",			0,				false, true  },
  { "toc-fusion",		OPTION_MASK_TOC_FUSION,		false, true  },
  { "update",			OPTION_MASK_NO_UPDATE,		true , true  },
  { "vsx",			OPTION_MASK_VSX,		false, true  },
#ifdef OPTION_MASK_64BIT
#if TARGET_AIX_OS
  { "aix64",			OPTION_MASK_64BIT,		false, false },
  { "aix32",			OPTION_MASK_64BIT,		true,  false },
#else
  { "64",			OPTION_MASK_64BIT,		false, false },
  { "32",			OPTION_MASK_64BIT,		true,  false },
#endif
#endif
#ifdef OPTION_MASK_EABI
  { "eabi",			OPTION_MASK_EABI,		false, false },
#endif
#ifdef OPTION_MASK_LITTLE_ENDIAN
  { "little",			OPTION_MASK_LITTLE_ENDIAN,	false, false },
  { "big",			OPTION_MASK_LITTLE_ENDIAN,	true,  false },
#endif
#ifdef OPTION_MASK_RELOCATABLE
  { "relocatable",		OPTION_MASK_RELOCATABLE,	false, false },
#endif
#ifdef OPTION_MASK_STRICT_ALIGN
  { "strict-align",		OPTION_MASK_STRICT_ALIGN,	false, false },
#endif
  { "soft-float",		OPTION_MASK_SOFT_FLOAT,		false, false },
  { "string",			0,				false, false },
};
/* Builtin mask mapping for printing the flags.  */
static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
{
  { "altivec",		 RS6000_BTM_ALTIVEC,	false, false },
  { "vsx",		 RS6000_BTM_VSX,	false, false },
  { "fre",		 RS6000_BTM_FRE,	false, false },
  { "fres",		 RS6000_BTM_FRES,	false, false },
  { "frsqrte",		 RS6000_BTM_FRSQRTE,	false, false },
  { "frsqrtes",		 RS6000_BTM_FRSQRTES,	false, false },
  { "popcntd",		 RS6000_BTM_POPCNTD,	false, false },
  { "cell",		 RS6000_BTM_CELL,	false, false },
  { "power8-vector",	 RS6000_BTM_P8_VECTOR,	false, false },
  { "power9-vector",	 RS6000_BTM_P9_VECTOR,	false, false },
  { "power9-misc",	 RS6000_BTM_P9_MISC,	false, false },
  { "crypto",		 RS6000_BTM_CRYPTO,	false, false },
  { "htm",		 RS6000_BTM_HTM,	false, false },
  { "hard-dfp",		 RS6000_BTM_DFP,	false, false },
  { "hard-float",	 RS6000_BTM_HARD_FLOAT,	false, false },
  { "long-double-128",	 RS6000_BTM_LDBL128,	false, false },
  { "powerpc64",	 RS6000_BTM_POWERPC64,	false, false },
  { "float128",		 RS6000_BTM_FLOAT128,	false, false },
  { "float128-hw",	 RS6000_BTM_FLOAT128_HW, false, false },
};
/* Option variables that we want to support inside attribute((target)) and
   #pragma GCC target operations.  */

struct rs6000_opt_var {
  const char *name;		/* option name */
  size_t global_offset;		/* offset of the option in global_options.  */
  size_t target_offset;		/* offset of the option in target options.  */
};

static struct rs6000_opt_var const rs6000_opt_vars[] =
{
  { "friz",
    offsetof (struct gcc_options, x_TARGET_FRIZ),
    offsetof (struct cl_target_option, x_TARGET_FRIZ), },
  { "avoid-indexed-addresses",
    offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
    offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
  { "longcall",
    offsetof (struct gcc_options, x_rs6000_default_long_calls),
    offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
  { "optimize-swaps",
    offsetof (struct gcc_options, x_rs6000_optimize_swaps),
    offsetof (struct cl_target_option, x_rs6000_optimize_swaps), },
  { "allow-movmisalign",
    offsetof (struct gcc_options, x_TARGET_ALLOW_MOVMISALIGN),
    offsetof (struct cl_target_option, x_TARGET_ALLOW_MOVMISALIGN), },
  { "sched-groups",
    offsetof (struct gcc_options, x_TARGET_SCHED_GROUPS),
    offsetof (struct cl_target_option, x_TARGET_SCHED_GROUPS), },
  { "always-hint",
    offsetof (struct gcc_options, x_TARGET_ALWAYS_HINT),
    offsetof (struct cl_target_option, x_TARGET_ALWAYS_HINT), },
  { "align-branch-targets",
    offsetof (struct gcc_options, x_TARGET_ALIGN_BRANCH_TARGETS),
    offsetof (struct cl_target_option, x_TARGET_ALIGN_BRANCH_TARGETS), },
  { "tls-markers",
    offsetof (struct gcc_options, x_tls_markers),
    offsetof (struct cl_target_option, x_tls_markers), },
  { "sched-prolog",
    offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
    offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
  { "sched-epilog",
    offsetof (struct gcc_options, x_TARGET_SCHED_PROLOG),
    offsetof (struct cl_target_option, x_TARGET_SCHED_PROLOG), },
  { "speculate-indirect-jumps",
    offsetof (struct gcc_options, x_rs6000_speculate_indirect_jumps),
    offsetof (struct cl_target_option, x_rs6000_speculate_indirect_jumps), },
};
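
/* As an illustrative sketch (user code, not part of this file), the names in
   rs6000_opt_masks and rs6000_opt_vars above are what appears inside
   attribute((target)) or #pragma GCC target:

	__attribute__((__target__("vsx,no-avoid-indexed-addresses")))
	void foo (void) { ... }

	#pragma GCC target ("cpu=power9,htm")

   Each comma-separated entry is matched against these tables, with a "no-"
   prefix selecting the inverted form of the option.  */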
/* Inner function to handle attribute((target("..."))) and #pragma GCC target
   parsing.  Return true if there were no errors.  */

static bool
rs6000_inner_target_options (tree args, bool attr_p)
{
  bool ret = true;

  if (args == NULL_TREE)
    ;

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
	{
	  bool error_p = false;
	  bool not_valid_p = false;
	  const char *cpu_opt = NULL;

	  p = NULL;
	  if (strncmp (q, "cpu=", 4) == 0)
	    {
	      int cpu_index = rs6000_cpu_name_lookup (q+4);
	      if (cpu_index >= 0)
		rs6000_cpu_index = cpu_index;
	      else
		{
		  error_p = true;
		  cpu_opt = q+4;
		}
	    }
	  else if (strncmp (q, "tune=", 5) == 0)
	    {
	      int tune_index = rs6000_cpu_name_lookup (q+5);
	      if (tune_index >= 0)
		rs6000_tune_index = tune_index;
	      else
		{
		  error_p = true;
		  cpu_opt = q+5;
		}
	    }
	  else
	    {
	      size_t i;
	      bool invert = false;
	      char *r = q;

	      error_p = true;
	      if (strncmp (r, "no-", 3) == 0)
		{
		  invert = true;
		  r += 3;
		}

	      for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
		if (strcmp (r, rs6000_opt_masks[i].name) == 0)
		  {
		    HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;

		    if (!rs6000_opt_masks[i].valid_target)
		      not_valid_p = true;
		    else
		      {
			error_p = false;
			rs6000_isa_flags_explicit |= mask;

			/* VSX needs altivec, so -mvsx automagically sets
			   altivec and disables -mavoid-indexed-addresses.  */
			if (!invert)
			  {
			    if (mask == OPTION_MASK_VSX)
			      {
				mask |= OPTION_MASK_ALTIVEC;
				TARGET_AVOID_XFORM = 0;
			      }
			  }

			if (rs6000_opt_masks[i].invert)
			  invert = !invert;

			if (invert)
			  rs6000_isa_flags &= ~mask;
			else
			  rs6000_isa_flags |= mask;
		      }
		    break;
		  }

	      if (error_p && !not_valid_p)
		{
		  for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
		    if (strcmp (r, rs6000_opt_vars[i].name) == 0)
		      {
			size_t j = rs6000_opt_vars[i].global_offset;
			*((int *) ((char *)&global_options + j)) = !invert;
			error_p = false;
			not_valid_p = false;
			break;
		      }
		}
	    }

	  if (error_p)
	    {
	      const char *eprefix, *esuffix;

	      ret = false;
	      if (attr_p)
		{
		  eprefix = "__attribute__((__target__(";
		  esuffix = ")))";
		}
	      else
		{
		  eprefix = "#pragma GCC target ";
		  esuffix = "";
		}

	      if (cpu_opt)
		error ("invalid cpu %qs for %s%qs%s", cpu_opt, eprefix,
		       q, esuffix);

	      else if (not_valid_p)
		error ("%s%qs%s is not allowed", eprefix, q, esuffix);

	      else
		error ("%s%qs%s is invalid", eprefix, q, esuffix);
	    }
	}
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
	{
	  tree value = TREE_VALUE (args);
	  if (value)
	    {
	      bool ret2 = rs6000_inner_target_options (value, attr_p);
	      if (!ret2)
		ret = false;
	    }
	  args = TREE_CHAIN (args);
	}
      while (args != NULL_TREE);
    }

  else
    {
      error ("attribute %<target%> argument not a string");
      return false;
    }

  return ret;
}
/* Print out the target options as a list for -mdebug=target.  */

static void
rs6000_debug_target_options (tree args, const char *prefix)
{
  if (args == NULL_TREE)
    fprintf (stderr, "%s<NULL>", prefix);

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
	{
	  p = NULL;
	  fprintf (stderr, "%s\"%s\"", prefix, q);
	  prefix = ", ";
	}
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
	{
	  tree value = TREE_VALUE (args);
	  if (value)
	    {
	      rs6000_debug_target_options (value, prefix);
	      prefix = ", ";
	    }
	  args = TREE_CHAIN (args);
	}
      while (args != NULL_TREE);
    }

  else
    gcc_unreachable ();
}
/* Hook to validate attribute((target("..."))).  */

static bool
rs6000_valid_attribute_p (tree fndecl,
			  tree ARG_UNUSED (name),
			  tree args,
			  int flags)
{
  struct cl_target_option cur_target;
  bool ret;
  tree old_optimize;
  tree new_target, new_optimize;
  tree func_optimize;

  gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));

  if (TARGET_DEBUG_TARGET)
    {
      tree tname = DECL_NAME (fndecl);
      fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
      if (tname)
	fprintf (stderr, "function: %.*s\n",
		 (int) IDENTIFIER_LENGTH (tname),
		 IDENTIFIER_POINTER (tname));
      else
	fprintf (stderr, "function: unknown\n");

      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

      if (flags)
	fprintf (stderr, "flags: 0x%x\n", flags);

      fprintf (stderr, "--------------------\n");
    }

  /* attribute((target("default"))) does nothing, beyond
     affecting multi-versioning.  */
  if (TREE_VALUE (args)
      && TREE_CODE (TREE_VALUE (args)) == STRING_CST
      && TREE_CHAIN (args) == NULL_TREE
      && strcmp (TREE_STRING_POINTER (TREE_VALUE (args)), "default") == 0)
    return true;

  old_optimize = build_optimization_node (&global_options);
  func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);

  /* If the function changed the optimization levels as well as setting target
     options, start with the optimizations specified.  */
  if (func_optimize && func_optimize != old_optimize)
    cl_optimization_restore (&global_options,
			     TREE_OPTIMIZATION (func_optimize));

  /* The target attributes may also change some optimization flags, so update
     the optimization options if necessary.  */
  cl_target_option_save (&cur_target, &global_options);
  rs6000_cpu_index = rs6000_tune_index = -1;
  ret = rs6000_inner_target_options (args, true);

  /* Set up any additional state.  */
  if (ret)
    {
      ret = rs6000_option_override_internal (false);
      new_target = build_target_option_node (&global_options);
    }
  else
    new_target = NULL;

  new_optimize = build_optimization_node (&global_options);

  if (!new_target)
    ret = false;

  else if (fndecl)
    {
      DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;

      if (old_optimize != new_optimize)
	DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
    }

  cl_target_option_restore (&global_options, &cur_target);

  if (old_optimize != new_optimize)
    cl_optimization_restore (&global_options,
			     TREE_OPTIMIZATION (old_optimize));

  return ret;
}
/* Hook to validate the current #pragma GCC target and set the state, and
   update the macros based on what was changed.  If ARGS is NULL, then
   POP_TARGET is used to reset the options.  */

bool
rs6000_pragma_target_parse (tree args, tree pop_target)
{
  tree prev_tree = build_target_option_node (&global_options);
  tree cur_tree;
  struct cl_target_option *prev_opt, *cur_opt;
  HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
  HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;

  if (TARGET_DEBUG_TARGET)
    {
      fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

      if (pop_target)
	{
	  fprintf (stderr, "pop_target:\n");
	  debug_tree (pop_target);
	}
      else
	fprintf (stderr, "pop_target: <NULL>\n");

      fprintf (stderr, "--------------------\n");
    }

  if (! args)
    {
      cur_tree = ((pop_target)
		  ? pop_target
		  : target_option_default_node);
      cl_target_option_restore (&global_options,
				TREE_TARGET_OPTION (cur_tree));
    }
  else
    {
      rs6000_cpu_index = rs6000_tune_index = -1;
      if (!rs6000_inner_target_options (args, false)
	  || !rs6000_option_override_internal (false)
	  || (cur_tree = build_target_option_node (&global_options))
	     == NULL_TREE)
	{
	  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
	    fprintf (stderr, "invalid pragma\n");

	  return false;
	}
    }

  target_option_current_node = cur_tree;
  rs6000_activate_target_options (target_option_current_node);

  /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
     change the macros that are defined.  */
  if (rs6000_target_modify_macros_ptr)
    {
      prev_opt    = TREE_TARGET_OPTION (prev_tree);
      prev_bumask = prev_opt->x_rs6000_builtin_mask;
      prev_flags  = prev_opt->x_rs6000_isa_flags;

      cur_opt     = TREE_TARGET_OPTION (cur_tree);
      cur_flags   = cur_opt->x_rs6000_isa_flags;
      cur_bumask  = cur_opt->x_rs6000_builtin_mask;

      diff_bumask = (prev_bumask ^ cur_bumask);
      diff_flags  = (prev_flags ^ cur_flags);

      if ((diff_flags != 0) || (diff_bumask != 0))
	{
	  /* Delete old macros.  */
	  rs6000_target_modify_macros_ptr (false,
					   prev_flags & diff_flags,
					   prev_bumask & diff_bumask);

	  /* Define new macros.  */
	  rs6000_target_modify_macros_ptr (true,
					   cur_flags & diff_flags,
					   cur_bumask & diff_bumask);
	}
    }

  return true;
}
/* Remember the last target of rs6000_set_current_function.  */
static GTY(()) tree rs6000_previous_fndecl;

/* Restore target's globals from NEW_TREE and invalidate the
   rs6000_previous_fndecl cache.  */

void
rs6000_activate_target_options (tree new_tree)
{
  cl_target_option_restore (&global_options, TREE_TARGET_OPTION (new_tree));
  if (TREE_TARGET_GLOBALS (new_tree))
    restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
  else if (new_tree == target_option_default_node)
    restore_target_globals (&default_target_globals);
  else
    TREE_TARGET_GLOBALS (new_tree) = save_target_globals_default_opts ();
  rs6000_previous_fndecl = NULL_TREE;
}
/* Establish appropriate back-end context for processing the function
   FNDECL.  The argument might be NULL to indicate processing at top
   level, outside of any function scope.  */
static void
rs6000_set_current_function (tree fndecl)
{
  if (TARGET_DEBUG_TARGET)
    {
      fprintf (stderr, "\n==================== rs6000_set_current_function");

      if (fndecl)
	fprintf (stderr, ", fndecl %s (%p)",
		 (DECL_NAME (fndecl)
		  ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
		  : "<unknown>"), (void *)fndecl);

      if (rs6000_previous_fndecl)
	fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);

      fprintf (stderr, "\n");
    }

  /* Only change the context if the function changes.  This hook is called
     several times in the course of compiling a function, and we don't want to
     slow things down too much or call target_reinit when it isn't safe.  */
  if (fndecl == rs6000_previous_fndecl)
    return;

  tree old_tree;
  if (rs6000_previous_fndecl == NULL_TREE)
    old_tree = target_option_current_node;
  else if (DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl))
    old_tree = DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl);
  else
    old_tree = target_option_default_node;

  tree new_tree;
  if (fndecl == NULL_TREE)
    {
      if (old_tree != target_option_current_node)
	new_tree = target_option_current_node;
      else
	new_tree = NULL_TREE;
    }
  else
    {
      new_tree = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
      if (new_tree == NULL_TREE)
	new_tree = target_option_default_node;
    }

  if (TARGET_DEBUG_TARGET)
    {
      if (new_tree)
	{
	  fprintf (stderr, "\nnew fndecl target specific options:\n");
	  debug_tree (new_tree);
	}

      if (old_tree)
	{
	  fprintf (stderr, "\nold fndecl target specific options:\n");
	  debug_tree (old_tree);
	}

      if (old_tree != NULL_TREE || new_tree != NULL_TREE)
	fprintf (stderr, "--------------------\n");
    }

  if (new_tree && old_tree != new_tree)
    rs6000_activate_target_options (new_tree);

  if (fndecl)
    rs6000_previous_fndecl = fndecl;
}
/* Save the current options */

static void
rs6000_function_specific_save (struct cl_target_option *ptr,
			       struct gcc_options *opts)
{
  ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
  ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
}

/* Restore the current options */

static void
rs6000_function_specific_restore (struct gcc_options *opts,
				  struct cl_target_option *ptr)
{
  opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
  opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
  (void) rs6000_option_override_internal (false);
}

/* Print the current options */

static void
rs6000_function_specific_print (FILE *file, int indent,
				struct cl_target_option *ptr)
{
  rs6000_print_isa_options (file, indent, "Isa options set",
			    ptr->x_rs6000_isa_flags);

  rs6000_print_isa_options (file, indent, "Isa options explicit",
			    ptr->x_rs6000_isa_flags_explicit);
}
/* Helper function to print the current isa or misc options on a line.  */

static void
rs6000_print_options_internal (FILE *file,
			       int indent,
			       const char *string,
			       HOST_WIDE_INT flags,
			       const char *prefix,
			       const struct rs6000_opt_mask *opts,
			       size_t num_elements)
{
  size_t i;
  size_t start_column = 0;
  size_t cur_column;
  size_t max_column = 120;
  size_t prefix_len = strlen (prefix);
  size_t comma_len = 0;
  const char *comma = "";

  if (indent)
    start_column += fprintf (file, "%*s", indent, "");

  if (!flags)
    {
      fprintf (stderr, DEBUG_FMT_S, string, "<none>");
      return;
    }

  start_column += fprintf (stderr, DEBUG_FMT_WX, string, flags);

  /* Print the various mask options.  */
  cur_column = start_column;
  for (i = 0; i < num_elements; i++)
    {
      bool invert = opts[i].invert;
      const char *name = opts[i].name;
      const char *no_str = "";
      HOST_WIDE_INT mask = opts[i].mask;
      size_t len = comma_len + prefix_len + strlen (name);

      if (!invert)
	{
	  if ((flags & mask) == 0)
	    {
	      no_str = "no-";
	      len += sizeof ("no-") - 1;
	    }

	  flags &= ~mask;
	}

      else
	{
	  if ((flags & mask) != 0)
	    {
	      no_str = "no-";
	      len += sizeof ("no-") - 1;
	    }

	  flags &= ~mask;
	}

      cur_column += len;
      if (cur_column > max_column)
	{
	  fprintf (stderr, ", \\\n%*s", (int)start_column, "");
	  cur_column = start_column + len;
	  comma = "";
	}

      fprintf (file, "%s%s%s%s", comma, prefix, no_str, name);
      comma = ", ";
      comma_len = sizeof (", ") - 1;
    }

  fputs ("\n", file);
}
/* Helper function to print the current isa options on a line.  */

static void
rs6000_print_isa_options (FILE *file, int indent, const char *string,
			  HOST_WIDE_INT flags)
{
  rs6000_print_options_internal (file, indent, string, flags, "-m",
				 &rs6000_opt_masks[0],
				 ARRAY_SIZE (rs6000_opt_masks));
}

/* Helper function to print the current builtin options on a line.  */

static void
rs6000_print_builtin_options (FILE *file, int indent, const char *string,
			      HOST_WIDE_INT flags)
{
  rs6000_print_options_internal (file, indent, string, flags, "",
				 &rs6000_builtin_mask_names[0],
				 ARRAY_SIZE (rs6000_builtin_mask_names));
}
/* If the user used -mno-vsx, we need to turn off all of the implicit ISA 2.06,
   2.07, and 3.0 options that relate to the vector unit (-mdirect-move,
   -mupper-regs-df, etc.).

   If the user used -mno-power8-vector, we need to turn off all of the implicit
   ISA 2.07 and 3.0 options that relate to the vector unit.

   If the user used -mno-power9-vector, we need to turn off all of the implicit
   ISA 3.0 options that relate to the vector unit.

   This function does not handle explicit options such as the user specifying
   -mdirect-move.  These are handled in rs6000_option_override_internal, and
   the appropriate error is given if needed.

   We return a mask of all of the implicit options that should not be enabled
   by default.  */

static HOST_WIDE_INT
rs6000_disable_incompatible_switches (void)
{
  HOST_WIDE_INT ignore_masks = rs6000_isa_flags_explicit;
  size_t i, j;

  static const struct {
    const HOST_WIDE_INT no_flag;	/* flag explicitly turned off.  */
    const HOST_WIDE_INT dep_flags;	/* flags that depend on this option.  */
    const char *const name;		/* name of the switch.  */
  } flags[] = {
    { OPTION_MASK_P9_VECTOR,	OTHER_P9_VECTOR_MASKS,	"power9-vector"	},
    { OPTION_MASK_P8_VECTOR,	OTHER_P8_VECTOR_MASKS,	"power8-vector"	},
    { OPTION_MASK_VSX,		OTHER_VSX_VECTOR_MASKS,	"vsx"		},
  };

  for (i = 0; i < ARRAY_SIZE (flags); i++)
    {
      HOST_WIDE_INT no_flag = flags[i].no_flag;

      if ((rs6000_isa_flags & no_flag) == 0
	  && (rs6000_isa_flags_explicit & no_flag) != 0)
	{
	  HOST_WIDE_INT dep_flags = flags[i].dep_flags;
	  HOST_WIDE_INT set_flags = (rs6000_isa_flags_explicit
				     & rs6000_isa_flags
				     & dep_flags);

	  if (set_flags)
	    {
	      for (j = 0; j < ARRAY_SIZE (rs6000_opt_masks); j++)
		if ((set_flags & rs6000_opt_masks[j].mask) != 0)
		  {
		    set_flags &= ~rs6000_opt_masks[j].mask;
		    error ("%<-mno-%s%> turns off %<-m%s%>",
			   flags[i].name,
			   rs6000_opt_masks[j].name);
		  }

	      gcc_assert (!set_flags);
	    }

	  rs6000_isa_flags &= ~dep_flags;
	  ignore_masks |= no_flag | dep_flags;
	}
    }

  return ignore_masks;
}
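
/* For example, compiling with "-mcpu=power9 -mno-vsx" reaches the code
   above: options like power9-vector that were only implicitly enabled by
   the cpu selection are silently cleared via dep_flags, whereas an
   explicit "-mno-vsx -mpower9-vector" pair instead reports
   "-mno-vsx turns off -mpower9-vector" through the error () call.  */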
/* Helper function for printing the function name when debugging.  */

static const char *
get_decl_name (tree fn)
{
  tree name;

  if (!fn)
    return "<null>";

  name = DECL_NAME (fn);
  if (!name)
    return "<no-name>";

  return IDENTIFIER_POINTER (name);
}
/* Return the clone id of the target we are compiling code for in a target
   clone.  The clone id is ordered from 0 (default) to CLONE_MAX-1 and gives
   the priority list for the target clones (ordered from lowest to
   highest).  */

static int
rs6000_clone_priority (tree fndecl)
{
  tree fn_opts = DECL_FUNCTION_SPECIFIC_TARGET (fndecl);
  HOST_WIDE_INT isa_masks;
  int ret = CLONE_DEFAULT;
  tree attrs = lookup_attribute ("target", DECL_ATTRIBUTES (fndecl));
  const char *attrs_str = NULL;

  attrs = TREE_VALUE (TREE_VALUE (attrs));
  attrs_str = TREE_STRING_POINTER (attrs);

  /* Return priority zero for default function.  Return the ISA needed for the
     function if it is not the default.  */
  if (strcmp (attrs_str, "default") != 0)
    {
      if (fn_opts == NULL_TREE)
	fn_opts = target_option_default_node;

      if (!fn_opts || !TREE_TARGET_OPTION (fn_opts))
	isa_masks = rs6000_isa_flags;
      else
	isa_masks = TREE_TARGET_OPTION (fn_opts)->x_rs6000_isa_flags;

      for (ret = CLONE_MAX - 1; ret != 0; ret--)
	if ((rs6000_clone_map[ret].isa_mask & isa_masks) != 0)
	  break;
    }

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_get_function_version_priority (%s) => %d\n",
	     get_decl_name (fndecl), ret);

  return ret;
}
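
/* A hypothetical example of the attribute this priority applies to:

	__attribute__((target_clones("cpu=power9,default")))
	long mod_func (long a, long b) { return a % b; }

   The "default" version gets CLONE_DEFAULT (0); the power9 version maps to
   a higher clone id via rs6000_clone_map, and dispatch prefers the highest
   clone id whose ISA bits are supported on the running machine.  */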
/* This compares the priority of target features in function DECL1 and DECL2.
   It returns positive value if DECL1 is higher priority, negative value if
   DECL2 is higher priority and 0 if they are the same.  Note, priorities are
   ordered from lowest (CLONE_DEFAULT) to highest (currently CLONE_ISA_3_0).  */

static int
rs6000_compare_version_priority (tree decl1, tree decl2)
{
  int priority1 = rs6000_clone_priority (decl1);
  int priority2 = rs6000_clone_priority (decl2);
  int ret = priority1 - priority2;

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_compare_version_priority (%s, %s) => %d\n",
	     get_decl_name (decl1), get_decl_name (decl2), ret);

  return ret;
}
/* Make a dispatcher declaration for the multi-versioned function DECL.
   Calls to DECL function will be replaced with calls to the dispatcher
   by the front-end.  Returns the decl of the dispatcher function.  */

static tree
rs6000_get_function_versions_dispatcher (void *decl)
{
  tree fn = (tree) decl;
  struct cgraph_node *node = NULL;
  struct cgraph_node *default_node = NULL;
  struct cgraph_function_version_info *node_v = NULL;
  struct cgraph_function_version_info *first_v = NULL;

  tree dispatch_decl = NULL;

  struct cgraph_function_version_info *default_version_info = NULL;
  gcc_assert (fn != NULL && DECL_FUNCTION_VERSIONED (fn));

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_get_function_versions_dispatcher (%s)\n",
	     get_decl_name (fn));

  node = cgraph_node::get (fn);
  gcc_assert (node != NULL);

  node_v = node->function_version ();
  gcc_assert (node_v != NULL);

  if (node_v->dispatcher_resolver != NULL)
    return node_v->dispatcher_resolver;

  /* Find the default version and make it the first node.  */
  first_v = node_v;
  /* Go to the beginning of the chain.  */
  while (first_v->prev != NULL)
    first_v = first_v->prev;

  default_version_info = first_v;
  while (default_version_info != NULL)
    {
      const tree decl2 = default_version_info->this_node->decl;
      if (is_function_default_version (decl2))
	break;
      default_version_info = default_version_info->next;
    }

  /* If there is no default node, just return NULL.  */
  if (default_version_info == NULL)
    return NULL;

  /* Make default info the first node.  */
  if (first_v != default_version_info)
    {
      default_version_info->prev->next = default_version_info->next;
      if (default_version_info->next)
	default_version_info->next->prev = default_version_info->prev;
      first_v->prev = default_version_info;
      default_version_info->next = first_v;
      default_version_info->prev = NULL;
    }

  default_node = default_version_info->this_node;

#ifndef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
  error_at (DECL_SOURCE_LOCATION (default_node->decl),
	    "target_clones attribute needs GLIBC (2.23 and newer) that "
	    "exports hardware capability bits");
#else

  if (targetm.has_ifunc_p ())
    {
      struct cgraph_function_version_info *it_v = NULL;
      struct cgraph_node *dispatcher_node = NULL;
      struct cgraph_function_version_info *dispatcher_version_info = NULL;

      /* Right now, the dispatching is done via ifunc.  */
      dispatch_decl = make_dispatcher_decl (default_node->decl);

      dispatcher_node = cgraph_node::get_create (dispatch_decl);
      gcc_assert (dispatcher_node != NULL);
      dispatcher_node->dispatcher_function = 1;
      dispatcher_version_info
	= dispatcher_node->insert_new_function_version ();
      dispatcher_version_info->next = default_version_info;
      dispatcher_node->definition = 1;

      /* Set the dispatcher for all the versions.  */
      it_v = default_version_info;
      while (it_v != NULL)
	{
	  it_v->dispatcher_resolver = dispatch_decl;
	  it_v = it_v->next;
	}
    }
  else
    {
      error_at (DECL_SOURCE_LOCATION (default_node->decl),
		"multiversioning needs ifunc which is not supported "
		"on this target");
    }
#endif

  return dispatch_decl;
}
/* Make the resolver function decl to dispatch the versions of a multi-
   versioned function, DEFAULT_DECL.  Create an empty basic block in the
   resolver and store the pointer in EMPTY_BB.  Return the decl of the resolver
   function.  */

static tree
make_resolver_func (const tree default_decl,
		    const tree dispatch_decl,
		    basic_block *empty_bb)
{
  /* Make the resolver function static.  The resolver function returns
     void *.  */
  tree decl_name = clone_function_name (default_decl, "resolver");
  const char *resolver_name = IDENTIFIER_POINTER (decl_name);
  tree type = build_function_type_list (ptr_type_node, NULL_TREE);
  tree decl = build_fn_decl (resolver_name, type);
  SET_DECL_ASSEMBLER_NAME (decl, decl_name);

  DECL_NAME (decl) = decl_name;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;

  /* Resolver is not external, body is generated.  */
  DECL_EXTERNAL (decl) = 0;
  DECL_EXTERNAL (dispatch_decl) = 0;

  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);
  DECL_STATIC_CONSTRUCTOR (decl) = 0;

  /* Build result decl and add to function_decl.  */
  tree t = build_decl (UNKNOWN_LOCATION, RESULT_DECL, NULL_TREE, ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_RESULT (decl) = t;

  gimplify_function_tree (decl);
  push_cfun (DECL_STRUCT_FUNCTION (decl));
  *empty_bb = init_lowered_empty_function (decl, false,
					   profile_count::uninitialized ());

  cgraph_node::add_new_function (decl, true);
  symtab->call_cgraph_insertion_hooks (cgraph_node::get_create (decl));

  pop_cfun ();

  /* Mark dispatch_decl as "ifunc" with resolver as resolver_name.  */
  DECL_ATTRIBUTES (dispatch_decl)
    = make_attribute ("ifunc", resolver_name, DECL_ATTRIBUTES (dispatch_decl));

  cgraph_node::create_same_body_alias (dispatch_decl, decl);

  return decl;
}
/* This adds a condition to the basic_block NEW_BB in function FUNCTION_DECL to
   return a pointer to VERSION_DECL if we are running on a machine that
   supports the index CLONE_ISA hardware architecture bits.  This function will
   be called during version dispatch to decide which function version to
   execute.  It returns the basic block at the end, to which more conditions
   can be added.  */

static basic_block
add_condition_to_bb (tree function_decl, tree version_decl,
		     int clone_isa, basic_block new_bb)
{
  push_cfun (DECL_STRUCT_FUNCTION (function_decl));

  gcc_assert (new_bb != NULL);
  gimple_seq gseq = bb_seq (new_bb);

  tree convert_expr = build1 (CONVERT_EXPR, ptr_type_node,
			      build_fold_addr_expr (version_decl));
  tree result_var = create_tmp_var (ptr_type_node);
  gimple *convert_stmt = gimple_build_assign (result_var, convert_expr);
  gimple *return_stmt = gimple_build_return (result_var);

  if (clone_isa == CLONE_DEFAULT)
    {
      gimple_seq_add_stmt (&gseq, convert_stmt);
      gimple_seq_add_stmt (&gseq, return_stmt);
      set_bb_seq (new_bb, gseq);
      gimple_set_bb (convert_stmt, new_bb);
      gimple_set_bb (return_stmt, new_bb);
      pop_cfun ();
      return new_bb;
    }

  tree bool_zero = build_int_cst (bool_int_type_node, 0);
  tree cond_var = create_tmp_var (bool_int_type_node);
  tree predicate_decl = rs6000_builtin_decls[(int) RS6000_BUILTIN_CPU_SUPPORTS];
  const char *arg_str = rs6000_clone_map[clone_isa].name;
  tree predicate_arg = build_string_literal (strlen (arg_str) + 1, arg_str);
  gimple *call_cond_stmt = gimple_build_call (predicate_decl, 1, predicate_arg);
  gimple_call_set_lhs (call_cond_stmt, cond_var);

  gimple_set_block (call_cond_stmt, DECL_INITIAL (function_decl));
  gimple_set_bb (call_cond_stmt, new_bb);
  gimple_seq_add_stmt (&gseq, call_cond_stmt);

  gimple *if_else_stmt = gimple_build_cond (NE_EXPR, cond_var, bool_zero,
					    NULL_TREE, NULL_TREE);
  gimple_set_block (if_else_stmt, DECL_INITIAL (function_decl));
  gimple_set_bb (if_else_stmt, new_bb);
  gimple_seq_add_stmt (&gseq, if_else_stmt);

  gimple_seq_add_stmt (&gseq, convert_stmt);
  gimple_seq_add_stmt (&gseq, return_stmt);
  set_bb_seq (new_bb, gseq);

  basic_block bb1 = new_bb;
  edge e12 = split_block (bb1, if_else_stmt);
  basic_block bb2 = e12->dest;
  e12->flags &= ~EDGE_FALLTHRU;
  e12->flags |= EDGE_TRUE_VALUE;

  edge e23 = split_block (bb2, return_stmt);
  gimple_set_bb (convert_stmt, bb2);
  gimple_set_bb (return_stmt, bb2);

  basic_block bb3 = e23->dest;
  make_edge (bb1, bb3, EDGE_FALSE_VALUE);

  remove_edge (e23);
  make_edge (bb2, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);

  pop_cfun ();

  return bb3;
}
/* This function generates the dispatch function for multi-versioned functions.
   DISPATCH_DECL is the function which will contain the dispatch logic.
   FNDECLS are the function choices for dispatch, and is a tree chain.
   EMPTY_BB is the basic block pointer in DISPATCH_DECL in which the dispatch
   code is generated.  */

static int
dispatch_function_versions (tree dispatch_decl,
			    void *fndecls_p,
			    basic_block *empty_bb)
{
  int ix;
  tree ele;
  vec<tree> *fndecls;
  tree clones[CLONE_MAX];

  if (TARGET_DEBUG_TARGET)
    fputs ("dispatch_function_versions, top\n", stderr);

  gcc_assert (dispatch_decl != NULL
	      && fndecls_p != NULL
	      && empty_bb != NULL);

  /* fndecls_p is actually a vector.  */
  fndecls = static_cast<vec<tree> *> (fndecls_p);

  /* At least one more version other than the default.  */
  gcc_assert (fndecls->length () >= 2);

  /* The first version in the vector is the default decl.  */
  memset ((void *) clones, '\0', sizeof (clones));
  clones[CLONE_DEFAULT] = (*fndecls)[0];

  /* On the PowerPC, we do not need to call __builtin_cpu_init, which is a NOP
     on the PowerPC (on the x86_64, it is not a NOP).  The builtin function
     __builtin_cpu_support ensures that the TOC fields are setup by requiring a
     recent glibc.  If we ever need to call __builtin_cpu_init, we would need
     to insert the code here to do the call.  */

  for (ix = 1; fndecls->iterate (ix, &ele); ++ix)
    {
      int priority = rs6000_clone_priority (ele);
      if (!clones[priority])
	clones[priority] = ele;
    }

  for (ix = CLONE_MAX - 1; ix >= 0; ix--)
    if (clones[ix])
      {
	if (TARGET_DEBUG_TARGET)
	  fprintf (stderr, "dispatch_function_versions, clone %d, %s\n",
		   ix, get_decl_name (clones[ix]));

	*empty_bb = add_condition_to_bb (dispatch_decl, clones[ix], ix,
					 *empty_bb);
      }

  return 0;
}
/* Generate the dispatching code body to dispatch multi-versioned function
   DECL.  The target hook is called to process the "target" attributes and
   provide the code to dispatch the right function at run-time.  NODE points
   to the dispatcher decl whose body will be created.  */

static tree
rs6000_generate_version_dispatcher_body (void *node_p)
{
  tree resolver;
  basic_block empty_bb;
  struct cgraph_node *node = (cgraph_node *) node_p;
  struct cgraph_function_version_info *ninfo = node->function_version ();

  if (ninfo->dispatcher_resolver)
    return ninfo->dispatcher_resolver;

  /* node is going to be an alias, so remove the finalized bit.  */
  node->definition = false;

  /* The first version in the chain corresponds to the default version.  */
  ninfo->dispatcher_resolver = resolver
    = make_resolver_func (ninfo->next->this_node->decl, node->decl, &empty_bb);

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_get_function_versions_dispatcher, %s\n",
	     get_decl_name (resolver));

  push_cfun (DECL_STRUCT_FUNCTION (resolver));
  auto_vec<tree, 2> fn_ver_vec;

  for (struct cgraph_function_version_info *vinfo = ninfo->next;
       vinfo;
       vinfo = vinfo->next)
    {
      struct cgraph_node *version = vinfo->this_node;
      /* Check for virtual functions here again, as by this time it should
	 have been determined if this function needs a vtable index or
	 not.  This happens for methods in derived classes that override
	 virtual methods in base classes but are not explicitly marked as
	 virtual.  */
      if (DECL_VINDEX (version->decl))
	sorry ("Virtual function multiversioning not supported");

      fn_ver_vec.safe_push (version->decl);
    }

  dispatch_function_versions (resolver, &fn_ver_vec, &empty_bb);
  cgraph_edge::rebuild_edges ();
  pop_cfun ();
  return resolver;
}
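
/* Conceptually (a sketch, not emitted text), the resolver built above for
   the target_clones example in rs6000_clone_priority behaves like:

	static void *mod_func_resolver (void)
	{
	  if (__builtin_cpu_supports ("arch_3_00"))
	    return mod_func_power9;
	  return mod_func_default;
	}

   with the dispatcher symbol marked as an ifunc whose resolver is this
   function.  The string tested comes from rs6000_clone_map[clone_isa].name;
   the exact names used here are illustrative.  */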
/* Hook to determine if one function can safely inline another.  */

static bool
rs6000_can_inline_p (tree caller, tree callee)
{
  bool ret = false;
  tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
  tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);

  /* If callee has no option attributes, then it is ok to inline.  */
  if (!callee_tree)
    ret = true;

  /* If caller has no option attributes, but callee does then it is not ok to
     inline.  */
  else if (!caller_tree)
    ret = false;

  else
    {
      struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
      struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);

      /* Callee's options should be a subset of the caller's, i.e. a vsx
	 function can inline an altivec function but a non-vsx function
	 can't inline a vsx function.  */
      if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
	  == callee_opts->x_rs6000_isa_flags)
	ret = true;
    }

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
	     get_decl_name (caller), get_decl_name (callee),
	     (ret ? "can" : "cannot"));

  return ret;
}
/* Allocate a stack temp and fixup the address so it meets the particular
   memory requirements (either offsettable or REG+REG addressing).  */

rtx
rs6000_allocate_stack_temp (machine_mode mode,
			    bool offsettable_p,
			    bool reg_reg_p)
{
  rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  rtx addr = XEXP (stack, 0);
  int strict_p = reload_completed;

  if (!legitimate_indirect_address_p (addr, strict_p))
    {
      if (offsettable_p
	  && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
	stack = replace_equiv_address (stack, copy_addr_to_reg (addr));

      else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
	stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
    }

  return stack;
}
/* Given a memory reference, if it is not a reg or reg+reg addressing, convert
   to such a form to deal with memory reference instructions like STFIWX that
   only take reg+reg addressing.  */

rtx
rs6000_address_for_fpconvert (rtx x)
{
  rtx addr;

  gcc_assert (MEM_P (x));
  addr = XEXP (x, 0);
  if (can_create_pseudo_p ()
      && ! legitimate_indirect_address_p (addr, reload_completed)
      && ! legitimate_indexed_address_p (addr, reload_completed))
    {
      if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
	{
	  rtx reg = XEXP (addr, 0);
	  HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
	  rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
	  gcc_assert (REG_P (reg));
	  emit_insn (gen_add3_insn (reg, reg, size_rtx));
	  addr = reg;
	}
      else if (GET_CODE (addr) == PRE_MODIFY)
	{
	  rtx reg = XEXP (addr, 0);
	  rtx expr = XEXP (addr, 1);
	  gcc_assert (REG_P (reg));
	  gcc_assert (GET_CODE (expr) == PLUS);
	  emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
	  addr = reg;
	}

      x = replace_equiv_address (x, copy_addr_to_reg (addr));
    }

  return x;
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   On the RS/6000, all integer constants are acceptable, most won't be valid
   for particular insns, though.  Only easy FP constants are acceptable.  */

static bool
rs6000_legitimate_constant_p (machine_mode mode, rtx x)
{
  if (TARGET_ELF && tls_referenced_p (x))
    return false;

  return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
	  || GET_MODE (x) == VOIDmode
	  || (TARGET_POWERPC64 && mode == DImode)
	  || easy_fp_constant (x, mode)
	  || easy_vector_constant (x, mode));
}
/* Return TRUE iff the sequence ending in LAST sets the static chain.  */

static bool
chain_already_loaded (rtx_insn *last)
{
  for (; last != NULL; last = PREV_INSN (last))
    {
      if (NONJUMP_INSN_P (last))
	{
	  rtx patt = PATTERN (last);

	  if (GET_CODE (patt) == SET)
	    {
	      rtx lhs = XEXP (patt, 0);

	      if (REG_P (lhs) && REGNO (lhs) == STATIC_CHAIN_REGNUM)
		return true;
	    }
	}
    }
  return false;
}
/* Expand code to perform a call under the AIX or ELFv2 ABI.  */

void
rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
{
  const bool direct_call_p
    = GET_CODE (func_desc) == SYMBOL_REF && SYMBOL_REF_FUNCTION_P (func_desc);
  rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
  rtx toc_load = NULL_RTX;
  rtx toc_restore = NULL_RTX;
  rtx func_addr;
  rtx abi_reg = NULL_RTX;
  rtx call[4];
  int n_call;
  rtx insn;

  /* Handle longcall attributes.  */
  if (INTVAL (cookie) & CALL_LONG)
    func_desc = rs6000_longcall_ref (func_desc);

  /* Handle indirect calls.  */
  if (GET_CODE (func_desc) != SYMBOL_REF
      || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
    {
      /* Save the TOC into its reserved slot before the call,
	 and prepare to restore it after the call.  */
      rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
      rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
      rtx stack_toc_mem = gen_frame_mem (Pmode,
					 gen_rtx_PLUS (Pmode, stack_ptr,
						       stack_toc_offset));
      rtx stack_toc_unspec = gen_rtx_UNSPEC (Pmode,
					     gen_rtvec (1, stack_toc_offset),
					     UNSPEC_TOCSLOT);
      toc_restore = gen_rtx_SET (toc_reg, stack_toc_unspec);

      /* Can we optimize saving the TOC in the prologue or
	 do we need to do it at every call?  */
      if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
	cfun->machine->save_toc_in_prologue = true;
      else
	{
	  MEM_VOLATILE_P (stack_toc_mem) = 1;
	  emit_move_insn (stack_toc_mem, toc_reg);
	}

      if (DEFAULT_ABI == ABI_ELFv2)
	{
	  /* A function pointer in the ELFv2 ABI is just a plain address, but
	     the ABI requires it to be loaded into r12 before the call.  */
	  func_addr = gen_rtx_REG (Pmode, 12);
	  emit_move_insn (func_addr, func_desc);
	  abi_reg = func_addr;
	}
      else
	{
	  /* A function pointer under AIX is a pointer to a data area whose
	     first word contains the actual address of the function, whose
	     second word contains a pointer to its TOC, and whose third word
	     contains a value to place in the static chain register (r11).
	     Note that if we load the static chain, our "trampoline" need
	     not have any executable code.  */

	  /* Load up address of the actual function.  */
	  func_desc = force_reg (Pmode, func_desc);
	  func_addr = gen_reg_rtx (Pmode);
	  emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));

	  /* Prepare to load the TOC of the called function.  Note that the
	     TOC load must happen immediately before the actual call so
	     that unwinding the TOC registers works correctly.  See the
	     comment in frob_update_context.  */
	  rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
	  rtx func_toc_mem = gen_rtx_MEM (Pmode,
					  gen_rtx_PLUS (Pmode, func_desc,
							func_toc_offset));
	  toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);

	  /* If we have a static chain, load it up.  But, if the call was
	     originally direct, the 3rd word has not been written since no
	     trampoline has been built, so we ought not to load it, lest we
	     override a static chain value.  */
	  if (!direct_call_p
	      && TARGET_POINTERS_TO_NESTED_FUNCTIONS
	      && !chain_already_loaded (get_current_sequence ()->next->last))
	    {
	      rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
	      rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
	      rtx func_sc_mem = gen_rtx_MEM (Pmode,
					     gen_rtx_PLUS (Pmode, func_desc,
							   func_sc_offset));
	      emit_move_insn (sc_reg, func_sc_mem);
	      abi_reg = sc_reg;
	    }
	}
    }
  else
    {
      /* Direct calls use the TOC: for local calls, the callee will
	 assume the TOC register is set; for non-local calls, the
	 PLT stub needs the TOC register.  */
      abi_reg = toc_reg;
      func_addr = func_desc;
    }

  /* Create the call.  */
  call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
  if (value != NULL_RTX)
    call[0] = gen_rtx_SET (value, call[0]);
  n_call = 1;

  if (toc_load)
    call[n_call++] = toc_load;
  if (toc_restore)
    call[n_call++] = toc_restore;

  call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));

  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
  insn = emit_call_insn (insn);

  /* Mention all registers defined by the ABI to hold information
     as uses in CALL_INSN_FUNCTION_USAGE.  */
  if (abi_reg)
    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
}
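
/* For reference (an illustrative assembly sketch, with register numbers
   and offsets that vary by ABI), an indirect call built above roughly
   expands to:

	std 2,40(1)	# save TOC in its reserved stack slot
	ld 12,0(3)	# actual function address from the descriptor
	ld 2,8(3)	# callee's TOC, loaded just before the call
	ld 11,16(3)	# optional static chain word
	mtctr 12
	bctrl
	ld 2,40(1)	# toc_restore after the call returns  */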
/* Expand code to perform a sibling call under the AIX or ELFv2 ABI.  */

void
rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
{
  rtx call[2];
  rtx insn;

  gcc_assert (INTVAL (cookie) == 0);

  /* Create the call.  */
  call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
  if (value != NULL_RTX)
    call[0] = gen_rtx_SET (value, call[0]);

  call[1] = simple_return_rtx;

  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
  insn = emit_call_insn (insn);

  /* Note use of the TOC register.  */
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
}
/* Return whether we need to always update the saved TOC pointer when we update
   the stack pointer.  */

static bool
rs6000_save_toc_in_prologue_p (void)
{
  return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
}

#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif

/* Fills in the label name that should be used for a 476 link stack thunk.  */

void
get_ppc476_thunk_name (char name[32])
{
  gcc_assert (TARGET_LINK_STACK);

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__ppc476.get_thunk");
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
}
/* This function emits the simple thunk routine that is used to preserve
   the link stack on the 476 cpu.  */

static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_code_end (void)
{
  char name[32];
  tree decl;

  if (!TARGET_LINK_STACK)
    return;

  get_ppc476_thunk_name (name);

  decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
		     build_function_type_list (void_type_node, NULL_TREE));
  DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
				   NULL_TREE, void_type_node);
  TREE_PUBLIC (decl) = 1;
  TREE_STATIC (decl) = 1;

#if RS6000_WEAK
  if (USE_HIDDEN_LINKONCE && !TARGET_XCOFF)
    {
      cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
      targetm.asm_out.unique_section (decl, 0);
      switch_to_section (get_named_section (decl, NULL, 0));
      DECL_WEAK (decl) = 1;
      ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
      targetm.asm_out.globalize_label (asm_out_file, name);
      targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
      ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
    }
  else
#endif
    {
      switch_to_section (text_section);
      ASM_OUTPUT_LABEL (asm_out_file, name);
    }

  DECL_INITIAL (decl) = make_node (BLOCK);
  current_function_decl = decl;
  allocate_struct_function (decl, false);
  init_function_start (decl);
  first_function_block_is_cold = false;
  /* Make sure unwind info is emitted for the thunk if needed.  */
  final_start_function (emit_barrier (), asm_out_file, 1);

  fputs ("\tblr\n", asm_out_file);

  final_end_function ();
  init_insn_lengths ();
  free_after_compilation (cfun);
  set_cfun (NULL);
  current_function_decl = NULL;
}
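
/* The emitted thunk is simply (a sketch of the assembly output):

	__ppc476.get_thunk:
		blr

   A "bl __ppc476.get_thunk" followed by mflr lets code obtain its own
   address while keeping the 476 link stack balanced, since the bl/blr
   pair matches a push with a pop.  */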
/* Add r30 to hard reg set if the prologue sets it up and it is not
   pic_offset_table_rtx.  */

static void
rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
{
  if (!TARGET_SINGLE_PIC_BASE
      && TARGET_TOC
      && TARGET_MINIMAL_TOC
      && !constant_pool_empty_p ())
    add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
  if (cfun->machine->split_stack_argp_used)
    add_to_hard_reg_set (&set->set, Pmode, 12);

  /* Make sure the hard reg set doesn't include r2, which was possibly added
     via PIC_OFFSET_TABLE_REGNUM.  */
  if (TARGET_TOC)
    remove_from_hard_reg_set (&set->set, Pmode, TOC_REGNUM);
}
/* Helper function for rs6000_split_logical to emit a logical instruction after
   splitting the operation to single GPR registers.

   DEST is the destination register.
   OP1 and OP2 are the input source registers.
   CODE is the base operation (AND, IOR, XOR, NOT).
   MODE is the machine mode.
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.  */

static void
rs6000_split_logical_inner (rtx dest,
			    rtx op1,
			    rtx op2,
			    enum rtx_code code,
			    machine_mode mode,
			    bool complement_final_p,
			    bool complement_op1_p,
			    bool complement_op2_p)
{
  rtx bool_rtx;

  /* Optimize AND of 0/0xffffffff and IOR/XOR of 0.  */
  if (op2 && GET_CODE (op2) == CONST_INT
      && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
      && !complement_final_p && !complement_op1_p && !complement_op2_p)
    {
      HOST_WIDE_INT mask = GET_MODE_MASK (mode);
      HOST_WIDE_INT value = INTVAL (op2) & mask;

      /* Optimize AND of 0 to just set 0.  Optimize AND of -1 to be a move.  */
      if (code == AND)
	{
	  if (value == 0)
	    {
	      emit_insn (gen_rtx_SET (dest, const0_rtx));
	      return;
	    }

	  else if (value == mask)
	    {
	      if (!rtx_equal_p (dest, op1))
		emit_insn (gen_rtx_SET (dest, op1));
	      return;
	    }
	}

      /* Optimize IOR/XOR of 0 to be a simple move.  Split large operations
	 into separate ORI/ORIS or XORI/XORIS instructions.  */
      else if (code == IOR || code == XOR)
	{
	  if (value == 0)
	    {
	      if (!rtx_equal_p (dest, op1))
		emit_insn (gen_rtx_SET (dest, op1));
	      return;
	    }
	}
    }

  if (code == AND && mode == SImode
      && !complement_final_p && !complement_op1_p && !complement_op2_p)
    {
      emit_insn (gen_andsi3 (dest, op1, op2));
      return;
    }

  if (complement_op1_p)
    op1 = gen_rtx_NOT (mode, op1);

  if (complement_op2_p)
    op2 = gen_rtx_NOT (mode, op2);

  /* For canonical RTL, if only one arm is inverted it is the first.  */
  if (!complement_op1_p && complement_op2_p)
    std::swap (op1, op2);

  bool_rtx = ((code == NOT)
	      ? gen_rtx_NOT (mode, op1)
	      : gen_rtx_fmt_ee (code, mode, op1, op2));

  if (complement_final_p)
    bool_rtx = gen_rtx_NOT (mode, bool_rtx);

  emit_insn (gen_rtx_SET (dest, bool_rtx));
}
/* Split a DImode AND/IOR/XOR with a constant on a 32-bit system.  These
   operations are split immediately during RTL generation to allow for more
   optimizations of the AND/IOR/XOR.

   OPERANDS is an array containing the destination and two input operands.
   CODE is the base operation (AND, IOR, XOR, NOT).
   MODE is the machine mode.
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
   CLOBBER_REG is either NULL or a scratch register of type CC to allow
   formation of the AND instructions.  */

static void
rs6000_split_logical_di (rtx operands[3],
			 enum rtx_code code,
			 bool complement_final_p,
			 bool complement_op1_p,
			 bool complement_op2_p)
{
  const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
  const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
  const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
  enum hi_lo { hi = 0, lo = 1 };
  rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
  size_t i;

  op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
  op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
  op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
  op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);

  if (code == NOT)
    op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
  else
    {
      if (GET_CODE (operands[2]) != CONST_INT)
	{
	  op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
	  op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
	}
      else
	{
	  HOST_WIDE_INT value = INTVAL (operands[2]);
	  HOST_WIDE_INT value_hi_lo[2];

	  gcc_assert (!complement_final_p);
	  gcc_assert (!complement_op1_p);
	  gcc_assert (!complement_op2_p);

	  value_hi_lo[hi] = value >> 32;
	  value_hi_lo[lo] = value & lower_32bits;

	  for (i = 0; i < 2; i++)
	    {
	      HOST_WIDE_INT sub_value = value_hi_lo[i];

	      if (sub_value & sign_bit)
		sub_value |= upper_32bits;

	      op2_hi_lo[i] = GEN_INT (sub_value);

	      /* If this is an AND instruction, check to see if we need to load
		 the value in a register.  */
	      if (code == AND && sub_value != -1 && sub_value != 0
		  && !and_operand (op2_hi_lo[i], SImode))
		op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
	    }
	}
    }

  for (i = 0; i < 2; i++)
    {
      /* Split large IOR/XOR operations.  */
      if ((code == IOR || code == XOR)
	  && GET_CODE (op2_hi_lo[i]) == CONST_INT
	  && !complement_final_p
	  && !complement_op1_p
	  && !complement_op2_p
	  && !logical_const_operand (op2_hi_lo[i], SImode))
	{
	  HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
	  HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
	  HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
	  rtx tmp = gen_reg_rtx (SImode);

	  /* Make sure the constant is sign extended.  */
	  if ((hi_16bits & sign_bit) != 0)
	    hi_16bits |= upper_32bits;

	  rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
				      code, SImode, false, false, false);

	  rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
				      code, SImode, false, false, false);
	}
      else
	rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
				    code, SImode, complement_final_p,
				    complement_op1_p, complement_op2_p);
    }

  return;
}
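
/* For instance (illustrative only), splitting

	(set (reg:DI 3) (ior:DI (reg:DI 3) (const_int 0x12345678)))

   on a 32-bit target handles the two SImode halves separately.  The high
   half constant is 0, so it becomes a simple move; the low half constant
   does not satisfy logical_const_operand, so it is emitted as the pair

	oris 4,4,0x1234
	ori 4,4,0x5678

   via the two rs6000_split_logical_inner calls above (register numbers
   are illustrative).  */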
/* Split the insns that make up boolean operations operating on multiple GPR
   registers.  The boolean MD patterns ensure that the inputs either are
   exactly the same as the output registers, or there is no overlap.

   OPERANDS is an array containing the destination and two input operands.
   CODE is the base operation (AND, IOR, XOR, NOT).
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.  */

void
rs6000_split_logical (rtx operands[3],
		      enum rtx_code code,
		      bool complement_final_p,
		      bool complement_op1_p,
		      bool complement_op2_p)
{
  machine_mode mode = GET_MODE (operands[0]);
  machine_mode sub_mode;
  rtx op0, op1, op2;
  int sub_size, regno0, regno1, nregs, i;

  /* If this is DImode, use the specialized version that can run before
     register allocation.  */
  if (mode == DImode && !TARGET_POWERPC64)
    {
      rs6000_split_logical_di (operands, code, complement_final_p,
			       complement_op1_p, complement_op2_p);
      return;
    }

  op0 = operands[0];
  op1 = operands[1];
  op2 = (code == NOT) ? NULL_RTX : operands[2];
  sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
  sub_size = GET_MODE_SIZE (sub_mode);
  regno0 = REGNO (op0);
  regno1 = REGNO (op1);

  gcc_assert (reload_completed);
  gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
  gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));

  nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
  gcc_assert (nregs > 1);

  if (op2 && REG_P (op2))
    gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));

  for (i = 0; i < nregs; i++)
    {
      int offset = i * sub_size;
      rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
      rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
      rtx sub_op2 = ((code == NOT)
		     ? NULL_RTX
		     : simplify_subreg (sub_mode, op2, mode, offset));

      rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
				  complement_final_p, complement_op1_p,
				  complement_op2_p);
    }

  return;
}
/* Return true if the peephole2 can combine a load involving a combination of
   an addis instruction and a load with an offset that can be fused together on
   a power8.  */

bool
fusion_gpr_load_p (rtx addis_reg,	/* register set via addis.  */
		   rtx addis_value,	/* addis value.  */
		   rtx target,		/* target register that is loaded.  */
		   rtx mem)		/* bottom part of the memory addr.  */
{
  rtx addr;
  rtx base_reg;

  /* Validate arguments.  */
  if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
    return false;

  if (!base_reg_operand (target, GET_MODE (target)))
    return false;

  if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
    return false;

  /* Allow sign/zero extension.  */
  if (GET_CODE (mem) == ZERO_EXTEND
      || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
    mem = XEXP (mem, 0);

  if (!MEM_P (mem))
    return false;

  if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
    return false;

  addr = XEXP (mem, 0);			/* either PLUS or LO_SUM.  */
  if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
    return false;

  /* Validate that the register used to load the high value is either the
     register being loaded, or we can safely replace its use.

     This function is only called from the peephole2 pass and we assume that
     there are 2 instructions in the peephole (addis and load), so we want to
     check if the target register was not used in the memory address and the
     register to hold the addis result is dead after the peephole.  */
  if (REGNO (addis_reg) != REGNO (target))
    {
      if (reg_mentioned_p (target, mem))
	return false;

      if (!peep2_reg_dead_p (2, addis_reg))
	return false;

      /* If the target register being loaded is the stack pointer, we must
	 avoid loading any other value into it, even temporarily.  */
      if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
	return false;
    }

  base_reg = XEXP (addr, 0);
  return REGNO (addis_reg) == REGNO (base_reg);
}
/* During the peephole2 pass, adjust and expand the insns for a load fusion
   sequence.  We adjust the addis register to use the target register.  If the
   load sign extends, we adjust the code to do the zero extending load, and an
   explicit sign extension later since the fusion only covers zero extending
   loads.

   The operands are:
	operands[0]	register set with addis (to be replaced with target)
	operands[1]	value set via addis
	operands[2]	target register being loaded
	operands[3]	D-form memory reference using operands[0].  */

void
expand_fusion_gpr_load (rtx *operands)
{
  rtx addis_value = operands[1];
  rtx target = operands[2];
  rtx orig_mem = operands[3];
  rtx new_addr, new_mem, orig_addr, offset;
  enum rtx_code plus_or_lo_sum;
  machine_mode target_mode = GET_MODE (target);
  machine_mode extend_mode = target_mode;
  machine_mode ptr_mode = Pmode;
  enum rtx_code extend = UNKNOWN;

  if (GET_CODE (orig_mem) == ZERO_EXTEND
      || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
    {
      extend = GET_CODE (orig_mem);
      orig_mem = XEXP (orig_mem, 0);
      target_mode = GET_MODE (orig_mem);
    }

  gcc_assert (MEM_P (orig_mem));

  orig_addr = XEXP (orig_mem, 0);
  plus_or_lo_sum = GET_CODE (orig_addr);
  gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);

  offset = XEXP (orig_addr, 1);
  new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
  new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);

  if (extend != UNKNOWN)
    new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);

  new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
			    UNSPEC_FUSION_GPR);
  emit_insn (gen_rtx_SET (target, new_mem));

  if (extend == SIGN_EXTEND)
    {
      int sub_off = ((BYTES_BIG_ENDIAN)
		     ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
		     : 0);
      rtx sign_reg
	= simplify_subreg (target_mode, target, extend_mode, sub_off);

      emit_insn (gen_rtx_SET (target,
			      gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
    }

  return;
}
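
/* As a sketch of the final output, after this expansion and the
   emit_fusion_* helpers below, a fused GPR load prints as a back to back
   pair such as:

	addis 9,2,.LC0@toc@ha
	lwz 9,.LC0@toc@l(9)

   which a power8 front end can fuse into one dispatch group (register
   numbers and symbol names are illustrative).  */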
/* Emit the addis instruction that will be part of a fused instruction
   sequence.  */

void
emit_fusion_addis (rtx target, rtx addis_value)
{
  rtx fuse_ops[10];
  const char *addis_str = NULL;

  /* Emit the addis instruction.  */
  fuse_ops[0] = target;
  if (satisfies_constraint_L (addis_value))
    {
      fuse_ops[1] = addis_value;
      addis_str = "lis %0,%v1";
    }

  else if (GET_CODE (addis_value) == PLUS)
    {
      rtx op0 = XEXP (addis_value, 0);
      rtx op1 = XEXP (addis_value, 1);

      if (REG_P (op0) && CONST_INT_P (op1)
          && satisfies_constraint_L (op1))
        {
          fuse_ops[1] = op0;
          fuse_ops[2] = op1;
          addis_str = "addis %0,%1,%v2";
        }
    }

  else if (GET_CODE (addis_value) == HIGH)
    {
      rtx value = XEXP (addis_value, 0);
      if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
        {
          fuse_ops[1] = XVECEXP (value, 0, 0);  /* symbol ref.  */
          fuse_ops[2] = XVECEXP (value, 0, 1);  /* TOC register.  */
          if (TARGET_ELF)
            addis_str = "addis %0,%2,%1@toc@ha";

          else if (TARGET_XCOFF)
            addis_str = "addis %0,%1@u(%2)";

          else
            gcc_unreachable ();
        }

      else if (GET_CODE (value) == PLUS)
        {
          rtx op0 = XEXP (value, 0);
          rtx op1 = XEXP (value, 1);

          if (GET_CODE (op0) == UNSPEC
              && XINT (op0, 1) == UNSPEC_TOCREL
              && CONST_INT_P (op1))
            {
              fuse_ops[1] = XVECEXP (op0, 0, 0);  /* symbol ref.  */
              fuse_ops[2] = XVECEXP (op0, 0, 1);  /* TOC register.  */
              fuse_ops[3] = op1;
              if (TARGET_ELF)
                addis_str = "addis %0,%2,%1+%3@toc@ha";

              else if (TARGET_XCOFF)
                addis_str = "addis %0,%1+%3@u(%2)";

              else
                gcc_unreachable ();
            }
        }

      else if (satisfies_constraint_L (value))
        {
          fuse_ops[1] = value;
          addis_str = "lis %0,%v1";
        }

      else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
        {
          fuse_ops[1] = value;
          addis_str = "lis %0,%1@ha";
        }
    }

  if (!addis_str)
    fatal_insn ("Could not generate addis value for fusion", addis_value);

  output_asm_insn (addis_str, fuse_ops);
}
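/* The templates above produce instructions such as (operand values are
   hypothetical examples):

        lis 9,0x1234                 L-constraint constant
        addis 9,10,0x1234            register plus L-constraint constant
        addis 9,2,sym@toc@ha         HIGH of a TOC-relative unspec (ELF)

   The matching low part is emitted separately by emit_fusion_load_store.  */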
/* Emit a D-form load or store instruction that is the second instruction
   of a fusion sequence.  */

void
emit_fusion_load_store (rtx load_store_reg, rtx addis_reg, rtx offset,
                        const char *insn_str)
{
  rtx fuse_ops[10];
  char insn_template[80];

  fuse_ops[0] = load_store_reg;
  fuse_ops[1] = addis_reg;

  if (CONST_INT_P (offset) && satisfies_constraint_I (offset))
    {
      sprintf (insn_template, "%s %%0,%%2(%%1)", insn_str);
      fuse_ops[2] = offset;
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (GET_CODE (offset) == UNSPEC
           && XINT (offset, 1) == UNSPEC_TOCREL)
    {
      if (TARGET_ELF)
        sprintf (insn_template, "%s %%0,%%2@toc@l(%%1)", insn_str);

      else if (TARGET_XCOFF)
        sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);

      else
        gcc_unreachable ();

      fuse_ops[2] = XVECEXP (offset, 0, 0);
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (GET_CODE (offset) == PLUS
           && GET_CODE (XEXP (offset, 0)) == UNSPEC
           && XINT (XEXP (offset, 0), 1) == UNSPEC_TOCREL
           && CONST_INT_P (XEXP (offset, 1)))
    {
      rtx tocrel_unspec = XEXP (offset, 0);
      if (TARGET_ELF)
        sprintf (insn_template, "%s %%0,%%2+%%3@toc@l(%%1)", insn_str);

      else if (TARGET_XCOFF)
        sprintf (insn_template, "%s %%0,%%2+%%3@l(%%1)", insn_str);

      else
        gcc_unreachable ();

      fuse_ops[2] = XVECEXP (tocrel_unspec, 0, 0);
      fuse_ops[3] = XEXP (offset, 1);
      output_asm_insn (insn_template, fuse_ops);
    }

  else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (offset))
    {
      sprintf (insn_template, "%s %%0,%%2@l(%%1)", insn_str);

      fuse_ops[2] = offset;
      output_asm_insn (insn_template, fuse_ops);
    }

  else
    fatal_insn ("Unable to generate load/store offset for fusion", offset);

  return;
}
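/* Taken together with emit_fusion_addis, a typical fused TOC-relative access
   on ELF looks like (hypothetical register and symbol):

        addis 9,2,sym@toc@ha
        lwz 9,sym@toc@l(9)

   i.e. the addis and the D-form memory operation are emitted back to back so
   the hardware can fuse them.  */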
/* Wrap a TOC address that can be fused to indicate that special fusion
   processing is needed.  */

rtx
fusion_wrap_memory_address (rtx old_mem)
{
  rtx old_addr = XEXP (old_mem, 0);
  rtvec v = gen_rtvec (1, old_addr);
  rtx new_addr = gen_rtx_UNSPEC (Pmode, v, UNSPEC_FUSION_ADDIS);
  return replace_equiv_address_nv (old_mem, new_addr, false);
}
/* Given an address, convert it into the addis and load offset parts.
   Addresses created during the peephole2 process look like:
        (lo_sum (high (unspec [(sym)] UNSPEC_TOCREL))
                (unspec [(...)] UNSPEC_TOCREL))

   Addresses created via toc fusion look like:
        (unspec [(unspec [(...)] UNSPEC_TOCREL)] UNSPEC_FUSION_ADDIS)  */

static void
fusion_split_address (rtx addr, rtx *p_hi, rtx *p_lo)
{
  rtx hi, lo;

  if (GET_CODE (addr) == UNSPEC && XINT (addr, 1) == UNSPEC_FUSION_ADDIS)
    {
      lo = XVECEXP (addr, 0, 0);
      hi = gen_rtx_HIGH (Pmode, lo);
    }
  else if (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
    {
      hi = XEXP (addr, 0);
      lo = XEXP (addr, 1);
    }
  else
    gcc_unreachable ();

  *p_hi = hi;
  *p_lo = lo;
}
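/* Illustration of the split: for (lo_sum (high (sym)) (sym)) the HIGH part
   goes to *p_hi and the low part to *p_lo, while for the wrapped form
   (unspec [(sym)] UNSPEC_FUSION_ADDIS) the HIGH rtx is regenerated around
   the inner address.  */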
/* Return a string to fuse an addis instruction with a GPR load into the same
   register that the addis instruction set.  The address that is used is the
   logical address that was formed during peephole2:
        (lo_sum (high) (low-part))

   Or the address is the TOC address that is wrapped before register
   allocation:
        (unspec [(addr) (toc-reg)] UNSPEC_FUSION_ADDIS)

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_gpr_load (rtx target, rtx mem)
{
  rtx addis_value;
  rtx addr;
  rtx load_offset;
  const char *load_str = NULL;
  machine_mode mode;

  if (GET_CODE (mem) == ZERO_EXTEND)
    mem = XEXP (mem, 0);

  gcc_assert (REG_P (target) && MEM_P (mem));

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &addis_value, &load_offset);

  /* Now emit the load instruction to the same register.  */
  mode = GET_MODE (mem);
  switch (mode)
    {
    case E_QImode:
      load_str = "lbz";
      break;

    case E_HImode:
      load_str = "lhz";
      break;

    case E_SImode:
    case E_SFmode:
      load_str = "lwz";
      break;

    case E_DImode:
    case E_DFmode:
      gcc_assert (TARGET_POWERPC64);
      load_str = "ld";
      break;

    default:
      fatal_insn ("Bad GPR fusion", gen_rtx_SET (target, mem));
    }

  /* Emit the addis instruction.  */
  emit_fusion_addis (target, addis_value);

  /* Emit the D-form load instruction.  */
  emit_fusion_load_store (target, target, load_offset, load_str);

  return "";
}
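/* A sketch of how this is used: the peephole2 pass matches an addis feeding
   a dependent load into the same register, rewrites the pair via
   expand_fusion_gpr_load, and the resulting insn's output template calls
   emit_fusion_gpr_load so that both machine instructions are emitted
   adjacently for the fusion hardware.  */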
/* Return true if the peephole2 can combine a load/store involving a
   combination of an addis instruction and the memory operation.  This was
   added to the ISA 3.0 (power9) hardware.  */

bool
fusion_p9_p (rtx addis_reg,     /* register set via addis.  */
             rtx addis_value,   /* addis value.  */
             rtx dest,          /* destination (memory or register).  */
             rtx src)           /* source (register or memory).  */
{
  rtx addr, mem, offset;
  machine_mode mode = GET_MODE (src);

  /* Validate arguments.  */
  if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
    return false;

  if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
    return false;

  /* Ignore extend operations that are part of the load.  */
  if (GET_CODE (src) == FLOAT_EXTEND || GET_CODE (src) == ZERO_EXTEND)
    src = XEXP (src, 0);

  /* Test for memory<-register or register<-memory.  */
  if (fpr_reg_operand (src, mode) || int_reg_operand (src, mode))
    {
      if (!MEM_P (dest))
        return false;

      mem = dest;
    }

  else if (MEM_P (src))
    {
      if (!fpr_reg_operand (dest, mode) && !int_reg_operand (dest, mode))
        return false;

      mem = src;
    }

  else
    return false;

  addr = XEXP (mem, 0);                 /* either PLUS or LO_SUM.  */
  if (GET_CODE (addr) == PLUS)
    {
      if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
        return false;

      return satisfies_constraint_I (XEXP (addr, 1));
    }

  else if (GET_CODE (addr) == LO_SUM)
    {
      if (!rtx_equal_p (addis_reg, XEXP (addr, 0)))
        return false;

      offset = XEXP (addr, 1);
      if (TARGET_XCOFF || (TARGET_ELF && TARGET_POWERPC64))
        return small_toc_ref (offset, GET_MODE (offset));

      else if (TARGET_ELF && !TARGET_POWERPC64)
        return CONSTANT_P (offset);
    }

  return false;
}
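/* In other words, the fusible addresses are either a D-form offset from the
   addis result, e.g. (plus (reg) (const_int 16)) with a constant satisfying
   the "I" constraint, or a (lo_sum (reg) (offset)) whose offset is a small
   TOC reference (AIX and 64-bit ELF) or an arbitrary constant (32-bit ELF).
   (The example constant is illustrative.)  */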
/* During the peephole2 pass, adjust and expand the insns for an extended
   fusion load sequence.

   The operands are:
        operands[0]  register set with addis
        operands[1]  value set via addis
        operands[2]  target register being loaded
        operands[3]  D-form memory reference using operands[0].

   This is similar to the fusion introduced with power8, except it scales to
   both loads/stores and does not require the result register to be the same
   as the base register.  At the moment, we only do this if the register set
   with addis is dead.  */

void
expand_fusion_p9_load (rtx *operands)
{
  rtx tmp_reg = operands[0];
  rtx addis_value = operands[1];
  rtx target = operands[2];
  rtx orig_mem = operands[3];
  rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn;
  enum rtx_code plus_or_lo_sum;
  machine_mode target_mode = GET_MODE (target);
  machine_mode extend_mode = target_mode;
  machine_mode ptr_mode = Pmode;
  enum rtx_code extend = UNKNOWN;

  if (GET_CODE (orig_mem) == FLOAT_EXTEND || GET_CODE (orig_mem) == ZERO_EXTEND)
    {
      extend = GET_CODE (orig_mem);
      orig_mem = XEXP (orig_mem, 0);
      target_mode = GET_MODE (orig_mem);
    }

  gcc_assert (MEM_P (orig_mem));

  orig_addr = XEXP (orig_mem, 0);
  plus_or_lo_sum = GET_CODE (orig_addr);
  gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);

  offset = XEXP (orig_addr, 1);
  new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
  new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);

  if (extend != UNKNOWN)
    new_mem = gen_rtx_fmt_e (extend, extend_mode, new_mem);

  new_mem = gen_rtx_UNSPEC (extend_mode, gen_rtvec (1, new_mem),
                            UNSPEC_FUSION_P9);

  set = gen_rtx_SET (target, new_mem);
  clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
  emit_insn (insn);

  return;
}
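/* The insn built above has the shape (illustrative modes and registers):

        (parallel [(set (reg:DI target)
                        (unspec:DI [(mem:DI (plus ...))] UNSPEC_FUSION_P9))
                   (clobber (reg:DI tmp))])

   keeping the addis scratch register live as an explicit clobber until the
   fused pair is output.  */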
/* During the peephole2 pass, adjust and expand the insns for an extended
   fusion store sequence.

   The operands are:
        operands[0]  register set with addis
        operands[1]  value set via addis
        operands[2]  target D-form memory being stored to
        operands[3]  register being stored

   This is similar to the fusion introduced with power8, except it scales to
   both loads/stores and does not require the result register to be the same
   as the base register.  At the moment, we only do this if the register set
   with addis is dead.  */

void
expand_fusion_p9_store (rtx *operands)
{
  rtx tmp_reg = operands[0];
  rtx addis_value = operands[1];
  rtx orig_mem = operands[2];
  rtx src = operands[3];
  rtx new_addr, new_mem, orig_addr, offset, set, clobber, insn, new_src;
  enum rtx_code plus_or_lo_sum;
  machine_mode target_mode = GET_MODE (orig_mem);
  machine_mode ptr_mode = Pmode;

  gcc_assert (MEM_P (orig_mem));

  orig_addr = XEXP (orig_mem, 0);
  plus_or_lo_sum = GET_CODE (orig_addr);
  gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);

  offset = XEXP (orig_addr, 1);
  new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_value, offset);
  new_mem = replace_equiv_address_nv (orig_mem, new_addr, false);

  new_src = gen_rtx_UNSPEC (target_mode, gen_rtvec (1, src),
                            UNSPEC_FUSION_P9);

  set = gen_rtx_SET (new_mem, new_src);
  clobber = gen_rtx_CLOBBER (VOIDmode, tmp_reg);
  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber));
  emit_insn (insn);

  return;
}
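/* The store form mirrors the load (illustrative shape):

        (parallel [(set (mem:DF (plus ...))
                        (unspec:DF [(reg:DF src)] UNSPEC_FUSION_P9))
                   (clobber (reg:DI tmp))])  */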
/* Return a string to fuse an addis instruction with a load using extended
   fusion.  The address that is used is the logical address that was formed
   during peephole2:
        (lo_sum (high) (low-part))

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_p9_load (rtx reg, rtx mem, rtx tmp_reg)
{
  machine_mode mode = GET_MODE (reg);
  rtx hi;
  rtx lo;
  rtx addr;
  const char *load_string;
  int r;

  if (GET_CODE (mem) == FLOAT_EXTEND || GET_CODE (mem) == ZERO_EXTEND)
    {
      mem = XEXP (mem, 0);
      mode = GET_MODE (mem);
    }

  if (GET_CODE (reg) == SUBREG)
    {
      gcc_assert (SUBREG_BYTE (reg) == 0);
      reg = SUBREG_REG (reg);
    }

  if (!REG_P (reg))
    fatal_insn ("emit_fusion_p9_load, bad reg #1", reg);

  r = REGNO (reg);
  if (FP_REGNO_P (r))
    {
      if (mode == SFmode)
        load_string = "lfs";
      else if (mode == DFmode || mode == DImode)
        load_string = "lfd";
      else
        gcc_unreachable ();
    }
  else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
    {
      if (mode == SFmode)
        load_string = "lxssp";
      else if (mode == DFmode || mode == DImode)
        load_string = "lxsd";
      else
        gcc_unreachable ();
    }
  else if (INT_REGNO_P (r))
    {
      switch (mode)
        {
        case E_QImode:
          load_string = "lbz";
          break;

        case E_HImode:
          load_string = "lhz";
          break;

        case E_SImode:
        case E_SFmode:
          load_string = "lwz";
          break;

        case E_DImode:
        case E_DFmode:
          if (!TARGET_POWERPC64)
            gcc_unreachable ();
          load_string = "ld";
          break;

        default:
          gcc_unreachable ();
        }
    }
  else
    fatal_insn ("emit_fusion_p9_load, bad reg #2", reg);

  if (!MEM_P (mem))
    fatal_insn ("emit_fusion_p9_load not MEM", mem);

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &hi, &lo);

  /* Emit the addis instruction.  */
  emit_fusion_addis (tmp_reg, hi);

  /* Emit the D-form load instruction.  */
  emit_fusion_load_store (reg, tmp_reg, lo, load_string);

  return "";
}
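/* Unlike the power8 GPR sequence, the power9 form may load into a different
   register than the addis scratch, e.g. (hypothetical registers and symbol):

        addis 11,2,sym@toc@ha
        lfd 1,sym@toc@l(11)

   where r11 is the clobbered temporary and f1 receives the value.  */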
/* Return a string to fuse an addis instruction with a store using extended
   fusion.  The address that is used is the logical address that was formed
   during peephole2:
        (lo_sum (high) (low-part))

   The code is complicated, so we call output_asm_insn directly, and just
   return "".  */

const char *
emit_fusion_p9_store (rtx mem, rtx reg, rtx tmp_reg)
{
  machine_mode mode = GET_MODE (reg);
  rtx hi;
  rtx lo;
  rtx addr;
  const char *store_string;
  int r;

  if (GET_CODE (reg) == SUBREG)
    {
      gcc_assert (SUBREG_BYTE (reg) == 0);
      reg = SUBREG_REG (reg);
    }

  if (!REG_P (reg))
    fatal_insn ("emit_fusion_p9_store, bad reg #1", reg);

  r = REGNO (reg);
  if (FP_REGNO_P (r))
    {
      if (mode == SFmode)
        store_string = "stfs";
      else if (mode == DFmode)
        store_string = "stfd";
      else
        gcc_unreachable ();
    }
  else if (ALTIVEC_REGNO_P (r) && TARGET_P9_VECTOR)
    {
      if (mode == SFmode)
        store_string = "stxssp";
      else if (mode == DFmode || mode == DImode)
        store_string = "stxsd";
      else
        gcc_unreachable ();
    }
  else if (INT_REGNO_P (r))
    {
      switch (mode)
        {
        case E_QImode:
          store_string = "stb";
          break;

        case E_HImode:
          store_string = "sth";
          break;

        case E_SImode:
        case E_SFmode:
          store_string = "stw";
          break;

        case E_DImode:
        case E_DFmode:
          if (!TARGET_POWERPC64)
            gcc_unreachable ();
          store_string = "std";
          break;

        default:
          gcc_unreachable ();
        }
    }
  else
    fatal_insn ("emit_fusion_p9_store, bad reg #2", reg);

  if (!MEM_P (mem))
    fatal_insn ("emit_fusion_p9_store not MEM", mem);

  addr = XEXP (mem, 0);
  fusion_split_address (addr, &hi, &lo);

  /* Emit the addis instruction.  */
  emit_fusion_addis (tmp_reg, hi);

  /* Emit the D-form store instruction.  */
  emit_fusion_load_store (reg, tmp_reg, lo, store_string);

  return "";
}
#ifdef RS6000_GLIBC_ATOMIC_FENV
/* Function declarations for rs6000_atomic_assign_expand_fenv.  */
static tree atomic_hold_decl, atomic_clear_decl, atomic_update_decl;
#endif

/* Implement TARGET_ATOMIC_ASSIGN_EXPAND_FENV hook.  */

static void
rs6000_atomic_assign_expand_fenv (tree *hold, tree *clear, tree *update)
{
  if (!TARGET_HARD_FLOAT)
    {
#ifdef RS6000_GLIBC_ATOMIC_FENV
      if (atomic_hold_decl == NULL_TREE)
        {
          atomic_hold_decl
            = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
                          get_identifier ("__atomic_feholdexcept"),
                          build_function_type_list (void_type_node,
                                                    double_ptr_type_node,
                                                    NULL_TREE));
          TREE_PUBLIC (atomic_hold_decl) = 1;
          DECL_EXTERNAL (atomic_hold_decl) = 1;
        }

      if (atomic_clear_decl == NULL_TREE)
        {
          atomic_clear_decl
            = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
                          get_identifier ("__atomic_feclearexcept"),
                          build_function_type_list (void_type_node,
                                                    NULL_TREE));
          TREE_PUBLIC (atomic_clear_decl) = 1;
          DECL_EXTERNAL (atomic_clear_decl) = 1;
        }

      tree const_double = build_qualified_type (double_type_node,
                                                TYPE_QUAL_CONST);
      tree const_double_ptr = build_pointer_type (const_double);
      if (atomic_update_decl == NULL_TREE)
        {
          atomic_update_decl
            = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
                          get_identifier ("__atomic_feupdateenv"),
                          build_function_type_list (void_type_node,
                                                    const_double_ptr,
                                                    NULL_TREE));
          TREE_PUBLIC (atomic_update_decl) = 1;
          DECL_EXTERNAL (atomic_update_decl) = 1;
        }

      tree fenv_var = create_tmp_var_raw (double_type_node);
      TREE_ADDRESSABLE (fenv_var) = 1;
      tree fenv_addr = build1 (ADDR_EXPR, double_ptr_type_node, fenv_var);

      *hold = build_call_expr (atomic_hold_decl, 1, fenv_addr);
      *clear = build_call_expr (atomic_clear_decl, 0);
      *update = build_call_expr (atomic_update_decl, 1,
                                 fold_convert (const_double_ptr, fenv_addr));
#endif
      return;
    }

  tree mffs = rs6000_builtin_decls[RS6000_BUILTIN_MFFS];
  tree mtfsf = rs6000_builtin_decls[RS6000_BUILTIN_MTFSF];
  tree call_mffs = build_call_expr (mffs, 0);

  /* Generates the equivalent of feholdexcept (&fenv_var):

     *fenv_var = __builtin_mffs ();
     double fenv_hold;
     *(uint64_t*)&fenv_hold = *(uint64_t*)fenv_var & 0xffffffff00000007LL;
     __builtin_mtfsf (0xff, fenv_hold);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT hold_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000007);

  tree fenv_var = create_tmp_var_raw (double_type_node);

  tree hold_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_var, call_mffs);

  tree fenv_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, fenv_var);
  tree fenv_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
                              build_int_cst (uint64_type_node,
                                             hold_exception_mask));

  tree fenv_hold_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
                                 fenv_llu_and);

  tree hold_mtfsf = build_call_expr (mtfsf, 2,
                                     build_int_cst (unsigned_type_node, 0xff),
                                     fenv_hold_mtfsf);

  *hold = build2 (COMPOUND_EXPR, void_type_node, hold_mffs, hold_mtfsf);

  /* Generates the equivalent of feclearexcept (FE_ALL_EXCEPT):

     double fenv_clear = __builtin_mffs ();
     *(uint64_t*)&fenv_clear &= 0xffffffff00000000LL;
     __builtin_mtfsf (0xff, fenv_clear);  */

  /* Mask to clear everything except for the rounding modes and non-IEEE
     arithmetic flag.  */
  const unsigned HOST_WIDE_INT clear_exception_mask =
    HOST_WIDE_INT_C (0xffffffff00000000);

  tree fenv_clear = create_tmp_var_raw (double_type_node);

  tree clear_mffs = build2 (MODIFY_EXPR, void_type_node, fenv_clear,
                            call_mffs);

  tree fenv_clean_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node,
                                fenv_clear);
  tree fenv_clear_llu_and = build2 (BIT_AND_EXPR, uint64_type_node,
                                    fenv_clean_llu,
                                    build_int_cst (uint64_type_node,
                                                   clear_exception_mask));

  tree fenv_clear_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
                                  fenv_clear_llu_and);

  tree clear_mtfsf = build_call_expr (mtfsf, 2,
                                      build_int_cst (unsigned_type_node, 0xff),
                                      fenv_clear_mtfsf);

  *clear = build2 (COMPOUND_EXPR, void_type_node, clear_mffs, clear_mtfsf);

  /* Generates the equivalent of feupdateenv (&fenv_var):

     double old_fenv = __builtin_mffs ();
     double fenv_update;
     *(uint64_t*)&fenv_update = (*(uint64_t*)&old_fenv & 0xffffffff1fffff00LL)
                                | (*(uint64_t*)fenv_var & 0x1ff80fff);
     __builtin_mtfsf (0xff, fenv_update);  */

  const unsigned HOST_WIDE_INT update_exception_mask =
    HOST_WIDE_INT_C (0xffffffff1fffff00);
  const unsigned HOST_WIDE_INT new_exception_mask =
    HOST_WIDE_INT_C (0x1ff80fff);

  tree old_fenv = create_tmp_var_raw (double_type_node);
  tree update_mffs = build2 (MODIFY_EXPR, void_type_node, old_fenv,
                             call_mffs);

  tree old_llu = build1 (VIEW_CONVERT_EXPR, uint64_type_node, old_fenv);
  tree old_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, old_llu,
                             build_int_cst (uint64_type_node,
                                            update_exception_mask));

  tree new_llu_and = build2 (BIT_AND_EXPR, uint64_type_node, fenv_llu,
                             build_int_cst (uint64_type_node,
                                            new_exception_mask));

  tree new_llu_mask = build2 (BIT_IOR_EXPR, uint64_type_node,
                              old_llu_and, new_llu_and);

  tree fenv_update_mtfsf = build1 (VIEW_CONVERT_EXPR, double_type_node,
                                   new_llu_mask);

  tree update_mtfsf = build_call_expr (mtfsf, 2,
                                       build_int_cst (unsigned_type_node, 0xff),
                                       fenv_update_mtfsf);

  *update = build2 (COMPOUND_EXPR, void_type_node, update_mffs, update_mtfsf);
}
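/* Note on the masks above: the hold mask's low bits (0x7) correspond to the
   FPSCR rounding-mode bits (RN) plus the non-IEEE mode bit (NI), matching
   the "rounding modes and non-IEEE arithmetic flag" wording in the comments;
   the update masks likewise partition the FPSCR into the bits kept from the
   current environment and the bits restored from the saved one.  */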
void
rs6000_generate_float2_double_code (rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  /* The destination of the vmrgew instruction layout is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after
     the vmrgew instruction will be correct.  */
  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp0, src1, src2,
                                           GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2df_be (rtx_tmp1, src1, src2,
                                           GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  emit_insn (gen_vsx_xvcdpsp (rtx_tmp2, rtx_tmp0));
  emit_insn (gen_vsx_xvcdpsp (rtx_tmp3, rtx_tmp1));

  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}
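/* Net effect (illustrative): for V2DF inputs a = {a0, a1} and b = {b0, b1},
   the sequence computes the V4SF result

        dst = { (float) a0, (float) a1, (float) b0, (float) b1 }

   with the permutes arranged so the element order comes out the same on
   big- and little-endian.  */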
void
rs6000_generate_float2_code (bool signed_convert, rtx dst, rtx src1, rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DImode);
  rtx_tmp1 = gen_reg_rtx (V2DImode);

  /* The destination of the vmrgew instruction layout is:
     rtx_tmp2[0] rtx_tmp3[0] rtx_tmp2[1] rtx_tmp3[1].
     Set up rtx_tmp0 and rtx_tmp1 to ensure the order of the elements after
     the vmrgew instruction will be correct.  */
  if (BYTES_BIG_ENDIAN)
    {
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp0, src1, src2, GEN_INT (0)));
      emit_insn (gen_vsx_xxpermdi_v2di_be (rtx_tmp1, src1, src2, GEN_INT (3)));
    }
  else
    {
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp0, src1, src2, GEN_INT (3)));
      emit_insn (gen_vsx_xxpermdi_v2di (rtx_tmp1, src1, src2, GEN_INT (0)));
    }

  rtx_tmp2 = gen_reg_rtx (V4SFmode);
  rtx_tmp3 = gen_reg_rtx (V4SFmode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvsxdsp (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvuxdsp (rtx_tmp3, rtx_tmp1));
    }

  if (BYTES_BIG_ENDIAN)
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp2, rtx_tmp3));
  else
    emit_insn (gen_p8_vmrgew_v4sf (dst, rtx_tmp3, rtx_tmp2));
}
void
rs6000_generate_vsigned2_code (bool signed_convert, rtx dst, rtx src1,
                               rtx src2)
{
  rtx rtx_tmp0, rtx_tmp1, rtx_tmp2, rtx_tmp3;

  rtx_tmp0 = gen_reg_rtx (V2DFmode);
  rtx_tmp1 = gen_reg_rtx (V2DFmode);

  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp0, src1, src2, GEN_INT (0)));
  emit_insn (gen_vsx_xxpermdi_v2df (rtx_tmp1, src1, src2, GEN_INT (3)));

  rtx_tmp2 = gen_reg_rtx (V4SImode);
  rtx_tmp3 = gen_reg_rtx (V4SImode);

  if (signed_convert)
    {
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpsxws (rtx_tmp3, rtx_tmp1));
    }
  else
    {
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp2, rtx_tmp0));
      emit_insn (gen_vsx_xvcvdpuxws (rtx_tmp3, rtx_tmp1));
    }

  emit_insn (gen_p8_vmrgew_v4si (dst, rtx_tmp2, rtx_tmp3));
}
/* Implement the TARGET_OPTAB_SUPPORTED_P hook.  */

static bool
rs6000_optab_supported_p (int op, machine_mode mode1, machine_mode,
                          optimization_type opt_type)
{
  switch (op)
    {
    case rsqrt_optab:
      return (opt_type == OPTIMIZE_FOR_SPEED
              && RS6000_RECIP_AUTO_RSQRTE_P (mode1));

    default:
      return true;
    }
}
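/* Illustrative effect: the rsqrt optab (reciprocal square root expansion) is
   only advertised when optimizing for speed and when
   RS6000_RECIP_AUTO_RSQRTE_P allows the estimate instructions for the mode,
   so that expansion is never chosen at -Os.  */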
/* Implement TARGET_CONSTANT_ALIGNMENT.  */

static HOST_WIDE_INT
rs6000_constant_alignment (const_tree exp, HOST_WIDE_INT align)
{
  if (TREE_CODE (exp) == STRING_CST
      && (STRICT_ALIGNMENT || !optimize_size))
    return MAX (align, BITS_PER_WORD);
  return align;
}
/* Implement TARGET_STARTING_FRAME_OFFSET.  */

static HOST_WIDE_INT
rs6000_starting_frame_offset (void)
{
  if (FRAME_GROWS_DOWNWARD)
    return 0;
  return RS6000_STARTING_FRAME_OFFSET;
}
/* Create an alias for a mangled name where we have changed the mangling (in
   GCC 8.1, we used U10__float128, and now we use u9__ieee128).  This is
   called via the target hook TARGET_ASM_GLOBALIZE_DECL_NAME.  */

#if TARGET_ELF && RS6000_WEAK
static void
rs6000_globalize_decl_name (FILE *stream, tree decl)
{
  const char *name = XSTR (XEXP (DECL_RTL (decl), 0), 0);

  targetm.asm_out.globalize_label (stream, name);

  if (rs6000_passes_ieee128 && name[0] == '_' && name[1] == 'Z')
    {
      tree save_asm_name = DECL_ASSEMBLER_NAME (decl);
      const char *old_name;

      ieee128_mangling_gcc_8_1 = true;
      lang_hooks.set_decl_assembler_name (decl);
      old_name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
      SET_DECL_ASSEMBLER_NAME (decl, save_asm_name);
      ieee128_mangling_gcc_8_1 = false;

      if (strcmp (name, old_name) != 0)
        {
          fprintf (stream, "\t.weak %s\n", old_name);
          fprintf (stream, "\t.set %s,%s\n", old_name, name);
        }
    }
}
#endif
struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"