/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2014 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "print-tree.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "sched-int.h"
#include "hash-table.h"
#include "basic-block.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimple-expr.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "target-globals.h"
#include "tree-pass.h"
#include "xcoffout.h"		/* get declarations of xcoff_*_section_name */
#include "gstab.h"		/* for N_SLINE */
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)	((A) < (B) ? (A) : (B))
#define max(A,B)	((A) > (B) ? (A) : (B))
/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in save_size */
  int vrsave_size;		/* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;	/* size of altivec alignment padding */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
} rs6000_stack_t;
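
/* Illustrative sketch (not built): rs6000_stack_info later in this file
   fills in a rs6000_stack_t and derives total_size from the component
   areas.  The relation below is only an approximation for orientation --
   the real code applies ABI-specific rounding -- and the helper name here
   is hypothetical.  */
#if 0
static HOST_WIDE_INT
rs6000_stack_sketch_total (const rs6000_stack_t *info)
{
  return (info->fixed_size	/* linkage/fixed area */
	  + info->vars_size	/* local variables */
	  + info->parm_size	/* outgoing parameter area */
	  + info->save_size);	/* register save areas */
}
#endif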
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section */
static int rs6000_pic_labelno;

/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;
#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized;
char toc_label_name[10];
/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combination of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	 RECIP_ALL },
  { "none",	 RECIP_NONE },
  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		  | RECIP_V2DF_DIV) },
  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		  | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
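
/* Illustrative sketch (not built): how a single -mrecip= token could be
   matched against recip_options to accumulate a mask.  The real handling
   (including the "!" prefix and error reporting) lives in the option
   override code later in this file; the helper name here is hypothetical.  */
#if 0
static unsigned int
rs6000_recip_sketch_parse (const char *arg)
{
  unsigned int mask = 0;
  size_t i;

  for (i = 0; i < ARRAY_SIZE (recip_options); i++)
    if (! strcmp (arg, recip_options[i].string))
      {
	mask |= recip_options[i].mask;
	break;
      }

  return mask;
}
#endif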
/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */
enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE,
  SPE_ACC_TYPE,
  SPEFSCR_REG_TYPE
};
/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)

/* Register classes we care about in secondary reload or go if legitimate
   address.  We only need to worry about GPR, FPR, and Altivec registers here,
   along with an ANY field that is the OR of the 3 register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,			/* General purpose registers.  */
  RELOAD_REG_FPR,			/* Traditional floating point regs.  */
  RELOAD_REG_VMX,			/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,			/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};
/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   3 classes.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;	/* Register class name.  */
  int reg;		/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive which types can do PRE_MODIFY instead of
   PRE_INC and PRE_DEC, so keep track of separate bits for these two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */

/* Register type masks based on the type, of valid addressing modes.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks.  */
  bool scalar_in_vmx_p;			/* Scalar value can go in VMX.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (enum machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (enum machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
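
/* Illustrative sketch (not built): the same addr_mask lookup pattern can be
   used to query other addressing capabilities, e.g. reg+reg (indexed)
   addressing for a mode.  This helper is hypothetical and only mirrors the
   two functions above.  */
#if 0
static inline bool
mode_supports_indexed_sketch_p (enum machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_INDEXED)
	  != 0);
}
#endif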
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};

const struct processor_costs *rs6000_cost;
/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
struct processor_costs size32_cost = {
444 COSTS_N_INSNS (1), /* mulsi */
445 COSTS_N_INSNS (1), /* mulsi_const */
446 COSTS_N_INSNS (1), /* mulsi_const9 */
447 COSTS_N_INSNS (1), /* muldi */
448 COSTS_N_INSNS (1), /* divsi */
449 COSTS_N_INSNS (1), /* divdi */
450 COSTS_N_INSNS (1), /* fp */
451 COSTS_N_INSNS (1), /* dmul */
452 COSTS_N_INSNS (1), /* sdiv */
453 COSTS_N_INSNS (1), /* ddiv */
/* Instruction size costs on 64bit processors.  */
struct processor_costs size64_cost = {
463 COSTS_N_INSNS (1), /* mulsi */
464 COSTS_N_INSNS (1), /* mulsi_const */
465 COSTS_N_INSNS (1), /* mulsi_const9 */
466 COSTS_N_INSNS (1), /* muldi */
467 COSTS_N_INSNS (1), /* divsi */
468 COSTS_N_INSNS (1), /* divdi */
469 COSTS_N_INSNS (1), /* fp */
470 COSTS_N_INSNS (1), /* dmul */
471 COSTS_N_INSNS (1), /* sdiv */
472 COSTS_N_INSNS (1), /* ddiv */
/* Instruction costs on RS64A processors.  */
struct processor_costs rs64a_cost = {
482 COSTS_N_INSNS (20), /* mulsi */
483 COSTS_N_INSNS (12), /* mulsi_const */
484 COSTS_N_INSNS (8), /* mulsi_const9 */
485 COSTS_N_INSNS (34), /* muldi */
486 COSTS_N_INSNS (65), /* divsi */
487 COSTS_N_INSNS (67), /* divdi */
488 COSTS_N_INSNS (4), /* fp */
489 COSTS_N_INSNS (4), /* dmul */
490 COSTS_N_INSNS (31), /* sdiv */
491 COSTS_N_INSNS (31), /* ddiv */
492 128, /* cache line size */
/* Instruction costs on MPCCORE processors.  */
struct processor_costs mpccore_cost = {
501 COSTS_N_INSNS (2), /* mulsi */
502 COSTS_N_INSNS (2), /* mulsi_const */
503 COSTS_N_INSNS (2), /* mulsi_const9 */
504 COSTS_N_INSNS (2), /* muldi */
505 COSTS_N_INSNS (6), /* divsi */
506 COSTS_N_INSNS (6), /* divdi */
507 COSTS_N_INSNS (4), /* fp */
508 COSTS_N_INSNS (5), /* dmul */
509 COSTS_N_INSNS (10), /* sdiv */
510 COSTS_N_INSNS (17), /* ddiv */
511 32, /* cache line size */
/* Instruction costs on PPC403 processors.  */
struct processor_costs ppc403_cost = {
520 COSTS_N_INSNS (4), /* mulsi */
521 COSTS_N_INSNS (4), /* mulsi_const */
522 COSTS_N_INSNS (4), /* mulsi_const9 */
523 COSTS_N_INSNS (4), /* muldi */
524 COSTS_N_INSNS (33), /* divsi */
525 COSTS_N_INSNS (33), /* divdi */
526 COSTS_N_INSNS (11), /* fp */
527 COSTS_N_INSNS (11), /* dmul */
528 COSTS_N_INSNS (11), /* sdiv */
529 COSTS_N_INSNS (11), /* ddiv */
530 32, /* cache line size */
/* Instruction costs on PPC405 processors.  */
struct processor_costs ppc405_cost = {
539 COSTS_N_INSNS (5), /* mulsi */
540 COSTS_N_INSNS (4), /* mulsi_const */
541 COSTS_N_INSNS (3), /* mulsi_const9 */
542 COSTS_N_INSNS (5), /* muldi */
543 COSTS_N_INSNS (35), /* divsi */
544 COSTS_N_INSNS (35), /* divdi */
545 COSTS_N_INSNS (11), /* fp */
546 COSTS_N_INSNS (11), /* dmul */
547 COSTS_N_INSNS (11), /* sdiv */
548 COSTS_N_INSNS (11), /* ddiv */
549 32, /* cache line size */
/* Instruction costs on PPC440 processors.  */
struct processor_costs ppc440_cost = {
558 COSTS_N_INSNS (3), /* mulsi */
559 COSTS_N_INSNS (2), /* mulsi_const */
560 COSTS_N_INSNS (2), /* mulsi_const9 */
561 COSTS_N_INSNS (3), /* muldi */
562 COSTS_N_INSNS (34), /* divsi */
563 COSTS_N_INSNS (34), /* divdi */
564 COSTS_N_INSNS (5), /* fp */
565 COSTS_N_INSNS (5), /* dmul */
566 COSTS_N_INSNS (19), /* sdiv */
567 COSTS_N_INSNS (33), /* ddiv */
568 32, /* cache line size */
/* Instruction costs on PPC476 processors.  */
struct processor_costs ppc476_cost = {
577 COSTS_N_INSNS (4), /* mulsi */
578 COSTS_N_INSNS (4), /* mulsi_const */
579 COSTS_N_INSNS (4), /* mulsi_const9 */
580 COSTS_N_INSNS (4), /* muldi */
581 COSTS_N_INSNS (11), /* divsi */
582 COSTS_N_INSNS (11), /* divdi */
583 COSTS_N_INSNS (6), /* fp */
584 COSTS_N_INSNS (6), /* dmul */
585 COSTS_N_INSNS (19), /* sdiv */
586 COSTS_N_INSNS (33), /* ddiv */
587 32, /* l1 cache line size */
/* Instruction costs on PPC601 processors.  */
struct processor_costs ppc601_cost = {
596 COSTS_N_INSNS (5), /* mulsi */
597 COSTS_N_INSNS (5), /* mulsi_const */
598 COSTS_N_INSNS (5), /* mulsi_const9 */
599 COSTS_N_INSNS (5), /* muldi */
600 COSTS_N_INSNS (36), /* divsi */
601 COSTS_N_INSNS (36), /* divdi */
602 COSTS_N_INSNS (4), /* fp */
603 COSTS_N_INSNS (5), /* dmul */
604 COSTS_N_INSNS (17), /* sdiv */
605 COSTS_N_INSNS (31), /* ddiv */
606 32, /* cache line size */
/* Instruction costs on PPC603 processors.  */
struct processor_costs ppc603_cost = {
615 COSTS_N_INSNS (5), /* mulsi */
616 COSTS_N_INSNS (3), /* mulsi_const */
617 COSTS_N_INSNS (2), /* mulsi_const9 */
618 COSTS_N_INSNS (5), /* muldi */
619 COSTS_N_INSNS (37), /* divsi */
620 COSTS_N_INSNS (37), /* divdi */
621 COSTS_N_INSNS (3), /* fp */
622 COSTS_N_INSNS (4), /* dmul */
623 COSTS_N_INSNS (18), /* sdiv */
624 COSTS_N_INSNS (33), /* ddiv */
625 32, /* cache line size */
/* Instruction costs on PPC604 processors.  */
struct processor_costs ppc604_cost = {
634 COSTS_N_INSNS (4), /* mulsi */
635 COSTS_N_INSNS (4), /* mulsi_const */
636 COSTS_N_INSNS (4), /* mulsi_const9 */
637 COSTS_N_INSNS (4), /* muldi */
638 COSTS_N_INSNS (20), /* divsi */
639 COSTS_N_INSNS (20), /* divdi */
640 COSTS_N_INSNS (3), /* fp */
641 COSTS_N_INSNS (3), /* dmul */
642 COSTS_N_INSNS (18), /* sdiv */
643 COSTS_N_INSNS (32), /* ddiv */
644 32, /* cache line size */
/* Instruction costs on PPC604e processors.  */
struct processor_costs ppc604e_cost = {
653 COSTS_N_INSNS (2), /* mulsi */
654 COSTS_N_INSNS (2), /* mulsi_const */
655 COSTS_N_INSNS (2), /* mulsi_const9 */
656 COSTS_N_INSNS (2), /* muldi */
657 COSTS_N_INSNS (20), /* divsi */
658 COSTS_N_INSNS (20), /* divdi */
659 COSTS_N_INSNS (3), /* fp */
660 COSTS_N_INSNS (3), /* dmul */
661 COSTS_N_INSNS (18), /* sdiv */
662 COSTS_N_INSNS (32), /* ddiv */
663 32, /* cache line size */
/* Instruction costs on PPC620 processors.  */
struct processor_costs ppc620_cost = {
672 COSTS_N_INSNS (5), /* mulsi */
673 COSTS_N_INSNS (4), /* mulsi_const */
674 COSTS_N_INSNS (3), /* mulsi_const9 */
675 COSTS_N_INSNS (7), /* muldi */
676 COSTS_N_INSNS (21), /* divsi */
677 COSTS_N_INSNS (37), /* divdi */
678 COSTS_N_INSNS (3), /* fp */
679 COSTS_N_INSNS (3), /* dmul */
680 COSTS_N_INSNS (18), /* sdiv */
681 COSTS_N_INSNS (32), /* ddiv */
682 128, /* cache line size */
/* Instruction costs on PPC630 processors.  */
struct processor_costs ppc630_cost = {
691 COSTS_N_INSNS (5), /* mulsi */
692 COSTS_N_INSNS (4), /* mulsi_const */
693 COSTS_N_INSNS (3), /* mulsi_const9 */
694 COSTS_N_INSNS (7), /* muldi */
695 COSTS_N_INSNS (21), /* divsi */
696 COSTS_N_INSNS (37), /* divdi */
697 COSTS_N_INSNS (3), /* fp */
698 COSTS_N_INSNS (3), /* dmul */
699 COSTS_N_INSNS (17), /* sdiv */
700 COSTS_N_INSNS (21), /* ddiv */
701 128, /* cache line size */
/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
struct processor_costs ppccell_cost = {
711 COSTS_N_INSNS (9/2)+2, /* mulsi */
712 COSTS_N_INSNS (6/2), /* mulsi_const */
713 COSTS_N_INSNS (6/2), /* mulsi_const9 */
714 COSTS_N_INSNS (15/2)+2, /* muldi */
715 COSTS_N_INSNS (38/2), /* divsi */
716 COSTS_N_INSNS (70/2), /* divdi */
717 COSTS_N_INSNS (10/2), /* fp */
718 COSTS_N_INSNS (10/2), /* dmul */
719 COSTS_N_INSNS (74/2), /* sdiv */
720 COSTS_N_INSNS (74/2), /* ddiv */
721 128, /* cache line size */
/* Instruction costs on PPC750 and PPC7400 processors.  */
struct processor_costs ppc750_cost = {
730 COSTS_N_INSNS (5), /* mulsi */
731 COSTS_N_INSNS (3), /* mulsi_const */
732 COSTS_N_INSNS (2), /* mulsi_const9 */
733 COSTS_N_INSNS (5), /* muldi */
734 COSTS_N_INSNS (17), /* divsi */
735 COSTS_N_INSNS (17), /* divdi */
736 COSTS_N_INSNS (3), /* fp */
737 COSTS_N_INSNS (3), /* dmul */
738 COSTS_N_INSNS (17), /* sdiv */
739 COSTS_N_INSNS (31), /* ddiv */
740 32, /* cache line size */
/* Instruction costs on PPC7450 processors.  */
struct processor_costs ppc7450_cost = {
749 COSTS_N_INSNS (4), /* mulsi */
750 COSTS_N_INSNS (3), /* mulsi_const */
751 COSTS_N_INSNS (3), /* mulsi_const9 */
752 COSTS_N_INSNS (4), /* muldi */
753 COSTS_N_INSNS (23), /* divsi */
754 COSTS_N_INSNS (23), /* divdi */
755 COSTS_N_INSNS (5), /* fp */
756 COSTS_N_INSNS (5), /* dmul */
757 COSTS_N_INSNS (21), /* sdiv */
758 COSTS_N_INSNS (35), /* ddiv */
759 32, /* cache line size */
/* Instruction costs on PPC8540 processors.  */
struct processor_costs ppc8540_cost = {
768 COSTS_N_INSNS (4), /* mulsi */
769 COSTS_N_INSNS (4), /* mulsi_const */
770 COSTS_N_INSNS (4), /* mulsi_const9 */
771 COSTS_N_INSNS (4), /* muldi */
772 COSTS_N_INSNS (19), /* divsi */
773 COSTS_N_INSNS (19), /* divdi */
774 COSTS_N_INSNS (4), /* fp */
775 COSTS_N_INSNS (4), /* dmul */
776 COSTS_N_INSNS (29), /* sdiv */
777 COSTS_N_INSNS (29), /* ddiv */
778 32, /* cache line size */
  1, /* prefetch streams */
/* Instruction costs on E300C2 and E300C3 cores.  */
struct processor_costs ppce300c2c3_cost = {
787 COSTS_N_INSNS (4), /* mulsi */
788 COSTS_N_INSNS (4), /* mulsi_const */
789 COSTS_N_INSNS (4), /* mulsi_const9 */
790 COSTS_N_INSNS (4), /* muldi */
791 COSTS_N_INSNS (19), /* divsi */
792 COSTS_N_INSNS (19), /* divdi */
793 COSTS_N_INSNS (3), /* fp */
794 COSTS_N_INSNS (4), /* dmul */
795 COSTS_N_INSNS (18), /* sdiv */
796 COSTS_N_INSNS (33), /* ddiv */
  1, /* prefetch streams */
/* Instruction costs on PPCE500MC processors.  */
struct processor_costs ppce500mc_cost = {
806 COSTS_N_INSNS (4), /* mulsi */
807 COSTS_N_INSNS (4), /* mulsi_const */
808 COSTS_N_INSNS (4), /* mulsi_const9 */
809 COSTS_N_INSNS (4), /* muldi */
810 COSTS_N_INSNS (14), /* divsi */
811 COSTS_N_INSNS (14), /* divdi */
812 COSTS_N_INSNS (8), /* fp */
813 COSTS_N_INSNS (10), /* dmul */
814 COSTS_N_INSNS (36), /* sdiv */
815 COSTS_N_INSNS (66), /* ddiv */
816 64, /* cache line size */
  1, /* prefetch streams */
/* Instruction costs on PPCE500MC64 processors.  */
struct processor_costs ppce500mc64_cost = {
825 COSTS_N_INSNS (4), /* mulsi */
826 COSTS_N_INSNS (4), /* mulsi_const */
827 COSTS_N_INSNS (4), /* mulsi_const9 */
828 COSTS_N_INSNS (4), /* muldi */
829 COSTS_N_INSNS (14), /* divsi */
830 COSTS_N_INSNS (14), /* divdi */
831 COSTS_N_INSNS (4), /* fp */
832 COSTS_N_INSNS (10), /* dmul */
833 COSTS_N_INSNS (36), /* sdiv */
834 COSTS_N_INSNS (66), /* ddiv */
835 64, /* cache line size */
  1, /* prefetch streams */
/* Instruction costs on PPCE5500 processors.  */
struct processor_costs ppce5500_cost = {
844 COSTS_N_INSNS (5), /* mulsi */
845 COSTS_N_INSNS (5), /* mulsi_const */
846 COSTS_N_INSNS (4), /* mulsi_const9 */
847 COSTS_N_INSNS (5), /* muldi */
848 COSTS_N_INSNS (14), /* divsi */
849 COSTS_N_INSNS (14), /* divdi */
850 COSTS_N_INSNS (7), /* fp */
851 COSTS_N_INSNS (10), /* dmul */
852 COSTS_N_INSNS (36), /* sdiv */
853 COSTS_N_INSNS (66), /* ddiv */
854 64, /* cache line size */
  1, /* prefetch streams */
/* Instruction costs on PPCE6500 processors.  */
struct processor_costs ppce6500_cost = {
863 COSTS_N_INSNS (5), /* mulsi */
864 COSTS_N_INSNS (5), /* mulsi_const */
865 COSTS_N_INSNS (4), /* mulsi_const9 */
866 COSTS_N_INSNS (5), /* muldi */
867 COSTS_N_INSNS (14), /* divsi */
868 COSTS_N_INSNS (14), /* divdi */
869 COSTS_N_INSNS (7), /* fp */
870 COSTS_N_INSNS (10), /* dmul */
871 COSTS_N_INSNS (36), /* sdiv */
872 COSTS_N_INSNS (66), /* ddiv */
873 64, /* cache line size */
  1, /* prefetch streams */
/* Instruction costs on AppliedMicro Titan processors.  */
struct processor_costs titan_cost = {
882 COSTS_N_INSNS (5), /* mulsi */
883 COSTS_N_INSNS (5), /* mulsi_const */
884 COSTS_N_INSNS (5), /* mulsi_const9 */
885 COSTS_N_INSNS (5), /* muldi */
886 COSTS_N_INSNS (18), /* divsi */
887 COSTS_N_INSNS (18), /* divdi */
888 COSTS_N_INSNS (10), /* fp */
889 COSTS_N_INSNS (10), /* dmul */
890 COSTS_N_INSNS (46), /* sdiv */
891 COSTS_N_INSNS (72), /* ddiv */
892 32, /* cache line size */
  1, /* prefetch streams */
/* Instruction costs on POWER4 and POWER5 processors.  */
struct processor_costs power4_cost = {
901 COSTS_N_INSNS (3), /* mulsi */
902 COSTS_N_INSNS (2), /* mulsi_const */
903 COSTS_N_INSNS (2), /* mulsi_const9 */
904 COSTS_N_INSNS (4), /* muldi */
905 COSTS_N_INSNS (18), /* divsi */
906 COSTS_N_INSNS (34), /* divdi */
907 COSTS_N_INSNS (3), /* fp */
908 COSTS_N_INSNS (3), /* dmul */
909 COSTS_N_INSNS (17), /* sdiv */
910 COSTS_N_INSNS (17), /* ddiv */
911 128, /* cache line size */
  8, /* prefetch streams */
/* Instruction costs on POWER6 processors.  */
struct processor_costs power6_cost = {
920 COSTS_N_INSNS (8), /* mulsi */
921 COSTS_N_INSNS (8), /* mulsi_const */
922 COSTS_N_INSNS (8), /* mulsi_const9 */
923 COSTS_N_INSNS (8), /* muldi */
924 COSTS_N_INSNS (22), /* divsi */
925 COSTS_N_INSNS (28), /* divdi */
926 COSTS_N_INSNS (3), /* fp */
927 COSTS_N_INSNS (3), /* dmul */
928 COSTS_N_INSNS (13), /* sdiv */
929 COSTS_N_INSNS (16), /* ddiv */
930 128, /* cache line size */
933 16, /* prefetch streams */
/* Instruction costs on POWER7 processors.  */
struct processor_costs power7_cost = {
939 COSTS_N_INSNS (2), /* mulsi */
940 COSTS_N_INSNS (2), /* mulsi_const */
941 COSTS_N_INSNS (2), /* mulsi_const9 */
942 COSTS_N_INSNS (2), /* muldi */
943 COSTS_N_INSNS (18), /* divsi */
944 COSTS_N_INSNS (34), /* divdi */
945 COSTS_N_INSNS (3), /* fp */
946 COSTS_N_INSNS (3), /* dmul */
947 COSTS_N_INSNS (13), /* sdiv */
948 COSTS_N_INSNS (16), /* ddiv */
949 128, /* cache line size */
952 12, /* prefetch streams */
/* Instruction costs on POWER8 processors.  */
struct processor_costs power8_cost = {
958 COSTS_N_INSNS (3), /* mulsi */
959 COSTS_N_INSNS (3), /* mulsi_const */
960 COSTS_N_INSNS (3), /* mulsi_const9 */
961 COSTS_N_INSNS (3), /* muldi */
962 COSTS_N_INSNS (19), /* divsi */
963 COSTS_N_INSNS (35), /* divdi */
964 COSTS_N_INSNS (3), /* fp */
965 COSTS_N_INSNS (3), /* dmul */
966 COSTS_N_INSNS (14), /* sdiv */
967 COSTS_N_INSNS (17), /* ddiv */
968 128, /* cache line size */
971 12, /* prefetch streams */
/* Instruction costs on POWER A2 processors.  */
struct processor_costs ppca2_cost = {
977 COSTS_N_INSNS (16), /* mulsi */
978 COSTS_N_INSNS (16), /* mulsi_const */
979 COSTS_N_INSNS (16), /* mulsi_const9 */
980 COSTS_N_INSNS (16), /* muldi */
981 COSTS_N_INSNS (22), /* divsi */
982 COSTS_N_INSNS (28), /* divdi */
983 COSTS_N_INSNS (3), /* fp */
984 COSTS_N_INSNS (3), /* dmul */
985 COSTS_N_INSNS (59), /* sdiv */
986 COSTS_N_INSNS (72), /* ddiv */
990 16, /* prefetch streams */
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },
struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
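
/* Illustrative sketch (not built): the RS6000_BUILTIN_* macros above follow
   the usual X-macro pattern.  rs6000-builtin.def contains one macro
   invocation per builtin, so each re-inclusion of that file under a new set
   of macro definitions materializes a different table.  The names in this
   sketch are hypothetical, and a real use would define all of the
   RS6000_BUILTIN_* flavors before including the .def file, as is done
   above.  */
#if 0
#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) NAME,
static const char *const example_builtin_names[] = {
#include "rs6000-builtin.def"
};
#undef RS6000_BUILTIN_1
#endif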
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);

static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, enum machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static bool is_microcoded_insn (rtx_insn *);
static bool is_nonpipeline_insn (rtx_insn *);
static bool is_cracked_insn (rtx_insn *);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx_insn *, rtx_insn *);
static bool insn_terminates_group_p (rtx_insn *, enum group_termination);
static bool insn_must_be_first_in_group (rtx_insn *);
static bool insn_must_be_last_in_group (rtx_insn *);
static void altivec_init_builtins (void);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
				   enum machine_mode, enum machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static void htm_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       bool, bool);
static void macho_branch_islands (void);
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
						   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							    enum machine_mode,
							    rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							    enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    enum machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						  enum reg_class,
						  enum machine_mode);
static bool rs6000_cannot_change_mode_class (enum machine_mode,
					     enum machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
						   enum machine_mode,
						   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     enum machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    enum machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
					     enum machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  enum machine_mode,
					  secondary_reload_info *,
					  bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY((for_user)) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

struct toc_hasher : ggc_hasher<toc_hash_struct *>
{
  static hashval_t hash (toc_hash_struct *);
  static bool equal (toc_hash_struct *, toc_hash_struct *);
};

static GTY (()) hash_table<toc_hasher> *toc_hash_table;
/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY((for_user)) builtin_hash_struct
{
  tree type;			/* builtin function type.  */
  enum machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

struct builtin_hasher : ggc_hasher<builtin_hash_struct *>
{
  static hashval_t hash (builtin_hash_struct *);
  static bool equal (builtin_hash_struct *, builtin_hash_struct *);
};

static GTY (()) hash_table<builtin_hasher> *builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr", "ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      /* SPE registers.  */
      "spe_acc", "spefscr",
      /* Soft frame pointer.  */
      /* HTM SPR registers.  */
      "tfhar", "tfiar", "texasr",
      /* SPE High registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31"
};
#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",   "lr",  "ctr",   "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr",
  /* SPE High registers.  */
  "%rh0",  "%rh1",  "%rh2",  "%rh3",  "%rh4",  "%rh5",  "%rh6",  "%rh7",
  "%rh8",  "%rh9", "%rh10", "%rh11", "%rh12", "%rh13", "%rh14", "%rh15",
  "%rh16", "%rh17", "%rh18", "%rh19", "%rh20", "%rh21", "%rh22", "%rh23",
  "%rh24", "%rh25", "%rh26", "%rh27", "%rh28", "%rh29", "%rh30", "%rh31"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",    1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",   0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct",  0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,         0, 0, false, false, false, NULL, false }
};
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
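
/* Illustrative sketch (not built): accumulating a VRSAVE mask for a range of
   AltiVec registers with ALTIVEC_REG_BIT.  The helper name and register
   range are hypothetical, but the bit layout (v0 = most significant bit)
   matches the macro above; e.g. v20..v31 sets the 12 least-significant
   bits (0xfff).  */
#if 0
static unsigned int
altivec_sketch_mask_for_range (int first_vr, int last_vr)
{
  unsigned int mask = 0;
  int r;

  for (r = first_vr; r <= last_vr; r++)
    mask |= ALTIVEC_REG_BIT (r);

  return mask;
}
#endif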
1311 /* Initialize the GCC target structure. */
1312 #undef TARGET_ATTRIBUTE_TABLE
1313 #define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
1314 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
1315 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
1316 #undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
1317 #define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p
1319 #undef TARGET_ASM_ALIGNED_DI_OP
1320 #define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP
/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif
/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer
#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif
1357 #undef TARGET_SET_UP_BY_PROLOGUE
1358 #define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue
1360 #undef TARGET_HAVE_TLS
1361 #define TARGET_HAVE_TLS HAVE_AS_TLS
1363 #undef TARGET_CANNOT_FORCE_CONST_MEM
1364 #define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem
1366 #undef TARGET_DELEGITIMIZE_ADDRESS
1367 #define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address
1369 #undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
1370 #define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p
1372 #undef TARGET_ASM_FUNCTION_PROLOGUE
1373 #define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
1374 #undef TARGET_ASM_FUNCTION_EPILOGUE
1375 #define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue
1377 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
1378 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra
1380 #undef TARGET_LEGITIMIZE_ADDRESS
1381 #define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address
1383 #undef TARGET_SCHED_VARIABLE_ISSUE
1384 #define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue
1386 #undef TARGET_SCHED_ISSUE_RATE
1387 #define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
1388 #undef TARGET_SCHED_ADJUST_COST
1389 #define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
1390 #undef TARGET_SCHED_ADJUST_PRIORITY
1391 #define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
1392 #undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
1393 #define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
1394 #undef TARGET_SCHED_INIT
1395 #define TARGET_SCHED_INIT rs6000_sched_init
1396 #undef TARGET_SCHED_FINISH
1397 #define TARGET_SCHED_FINISH rs6000_sched_finish
1398 #undef TARGET_SCHED_REORDER
1399 #define TARGET_SCHED_REORDER rs6000_sched_reorder
1400 #undef TARGET_SCHED_REORDER2
1401 #define TARGET_SCHED_REORDER2 rs6000_sched_reorder2
1403 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1404 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead
1406 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
1407 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard
1409 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
1410 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
1411 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
1412 #define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
1413 #undef TARGET_SCHED_SET_SCHED_CONTEXT
1414 #define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
1415 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
1416 #define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1418 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1419 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1420 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1421 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1422 rs6000_builtin_support_vector_misalignment
1423 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1424 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1425 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1426 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1427 rs6000_builtin_vectorization_cost
1428 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1429 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1430 rs6000_preferred_simd_mode
1431 #undef TARGET_VECTORIZE_INIT_COST
1432 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1433 #undef TARGET_VECTORIZE_ADD_STMT_COST
1434 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1435 #undef TARGET_VECTORIZE_FINISH_COST
1436 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1437 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1438 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1440 #undef TARGET_INIT_BUILTINS
1441 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1442 #undef TARGET_BUILTIN_DECL
1443 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1445 #undef TARGET_EXPAND_BUILTIN
1446 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1448 #undef TARGET_MANGLE_TYPE
1449 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1451 #undef TARGET_INIT_LIBFUNCS
1452 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1455 #undef TARGET_BINDS_LOCAL_P
1456 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1459 #undef TARGET_MS_BITFIELD_LAYOUT_P
1460 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1462 #undef TARGET_ASM_OUTPUT_MI_THUNK
1463 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1465 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1466 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1468 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1469 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1471 #undef TARGET_REGISTER_MOVE_COST
1472 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1473 #undef TARGET_MEMORY_MOVE_COST
1474 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1475 #undef TARGET_RTX_COSTS
1476 #define TARGET_RTX_COSTS rs6000_rtx_costs
1477 #undef TARGET_ADDRESS_COST
1478 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1480 #undef TARGET_DWARF_REGISTER_SPAN
1481 #define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span
1483 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1484 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1486 #undef TARGET_MEMBER_TYPE_FORCES_BLK
1487 #define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk
1489 /* On rs6000, function arguments are promoted, as are function return
1491 #undef TARGET_PROMOTE_FUNCTION_MODE
1492 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
1494 #undef TARGET_RETURN_IN_MEMORY
1495 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1497 #undef TARGET_RETURN_IN_MSB
1498 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1500 #undef TARGET_SETUP_INCOMING_VARARGS
1501 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1503 /* Always strict argument naming on rs6000. */
1504 #undef TARGET_STRICT_ARGUMENT_NAMING
1505 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1506 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1507 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1508 #undef TARGET_SPLIT_COMPLEX_ARG
1509 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1510 #undef TARGET_MUST_PASS_IN_STACK
1511 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1512 #undef TARGET_PASS_BY_REFERENCE
1513 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1514 #undef TARGET_ARG_PARTIAL_BYTES
1515 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1516 #undef TARGET_FUNCTION_ARG_ADVANCE
1517 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1518 #undef TARGET_FUNCTION_ARG
1519 #define TARGET_FUNCTION_ARG rs6000_function_arg
1520 #undef TARGET_FUNCTION_ARG_BOUNDARY
1521 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1523 #undef TARGET_BUILD_BUILTIN_VA_LIST
1524 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1526 #undef TARGET_EXPAND_BUILTIN_VA_START
1527 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1529 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1530 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1532 #undef TARGET_EH_RETURN_FILTER_MODE
1533 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1535 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1536 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1538 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1539 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1541 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1542 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1544 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1545 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1547 #undef TARGET_OPTION_OVERRIDE
1548 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1550 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1551 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1552 rs6000_builtin_vectorized_function
1555 #undef TARGET_STACK_PROTECT_FAIL
1556 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true
1571 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1572 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
1582 #undef TARGET_MIN_ANCHOR_OFFSET
1583 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
1584 #undef TARGET_MAX_ANCHOR_OFFSET
1585 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
1586 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1587 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1588 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1589 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1591 #undef TARGET_BUILTIN_RECIPROCAL
1592 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1594 #undef TARGET_EXPAND_TO_RTL_HOOK
1595 #define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot
1597 #undef TARGET_INSTANTIATE_DECLS
1598 #define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls
1600 #undef TARGET_SECONDARY_RELOAD
1601 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1603 #undef TARGET_LEGITIMATE_ADDRESS_P
1604 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1606 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1607 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
#undef TARGET_LRA_P
#define TARGET_LRA_P rs6000_lra_p
1612 #undef TARGET_CAN_ELIMINATE
1613 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1615 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1616 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1618 #undef TARGET_TRAMPOLINE_INIT
1619 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1621 #undef TARGET_FUNCTION_VALUE
1622 #define TARGET_FUNCTION_VALUE rs6000_function_value
1624 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1625 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1627 #undef TARGET_OPTION_SAVE
1628 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1630 #undef TARGET_OPTION_RESTORE
1631 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1633 #undef TARGET_OPTION_PRINT
1634 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1636 #undef TARGET_CAN_INLINE_P
1637 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1639 #undef TARGET_SET_CURRENT_FUNCTION
1640 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1642 #undef TARGET_LEGITIMATE_CONSTANT_P
1643 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1645 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1646 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1648 #undef TARGET_CAN_USE_DOLOOP_P
1649 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
/* Processor table.  */
struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const HOST_WIDE_INT target_enable;	/* Target flags to enable.  */
};

static struct rs6000_ptt const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};

/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */
static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (! strcmp (name, processor_target_table[i].name))
	  return (int)i;
    }

  return -1;
}
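
/* Illustrative usage (not built): mapping a -mcpu=/-mtune= string to an
   index into processor_target_table.  The variable name below is
   hypothetical; the real option handling (including the error path for an
   unknown name) lives in the option override code later in this file.  */
#if 0
  int cpu_index = rs6000_cpu_name_lookup ("power8");
  if (cpu_index >= 0)
    rs6000_cpu = processor_target_table[cpu_index].processor;
#endif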
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   For the SPE, GPRs are 64 bits but only 32 bits are visible in
   scalar instructions.  The upper 32 bits are only available to the
   SIMD instructions.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */

static int
rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
{
  unsigned HOST_WIDE_INT reg_size;

  /* TF/TD modes are special in that they always take 2 registers.  */
  if (FP_REGNO_P (regno))
    reg_size = ((VECTOR_MEM_VSX_P (mode) && mode != TDmode && mode != TFmode)
		? UNITS_PER_VSX_WORD
		: UNITS_PER_FP_WORD);

  else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    reg_size = UNITS_PER_SPE_WORD;

  else if (ALTIVEC_REGNO_P (regno))
    reg_size = UNITS_PER_ALTIVEC_WORD;

  /* The value returned for SCmode in the E500 double case is 2 for
     ABI compatibility; storing an SCmode value in a single register
     would require function_arg and rs6000_spe_function_arg to handle
     SCmode so as to pass the value correctly in a pair of
     registers.  */
  else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
	   && !DECIMAL_FLOAT_MODE_P (mode))
    reg_size = UNITS_PER_FP_WORD;

  else
    reg_size = UNITS_PER_WORD;

  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
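
/* Worked example (illustrative, assuming a 4-byte UNITS_PER_WORD and an
   8-byte UNITS_PER_FP_WORD): a 16-byte vector value held in 4-byte GPRs
   needs (16 + 4 - 1) / 4 = 4 registers, while an 8-byte DFmode value in an
   FPR needs (8 + 8 - 1) / 8 = 1 register.  */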
/* Value is 1 if hard register REGNO can hold a value of machine-mode
   MODE.  */

static int
rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
{
  int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;

  /* PTImode can only go in GPRs.  Quad word memory operations require even/odd
     register combinations, and use PTImode where we need to deal with quad
     word memory operations.  Don't allow quad words in the argument or frame
     pointer registers, just registers 0..31.  */
  if (mode == PTImode)
    return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
	    && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
	    && ((regno & 1) == 0));

  /* VSX registers that overlap the FPR registers are larger than for non-VSX
     implementations.  Don't allow an item to be split between a FP register
     and an Altivec register.  Allow TImode in all VSX registers if the user
     asked for it.  */
  if (TARGET_VSX && VSX_REGNO_P (regno)
      && (VECTOR_MEM_VSX_P (mode)
	  || reg_addr[mode].scalar_in_vmx_p
	  || (TARGET_VSX_TIMODE && mode == TImode)
	  || (TARGET_VADDUQM && mode == V1TImode)))
    {
      if (FP_REGNO_P (regno))
	return FP_REGNO_P (last_regno);

      if (ALTIVEC_REGNO_P (regno))
	{
	  if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
	    return 0;

	  return ALTIVEC_REGNO_P (last_regno);
	}
    }

  /* The GPRs can hold any mode, but values bigger than one register
     cannot go past R31.  */
  if (INT_REGNO_P (regno))
    return INT_REGNO_P (last_regno);

  /* The float registers (except for VSX vector modes) can only hold floating
     modes and DImode.  */
  if (FP_REGNO_P (regno))
    {
      if (SCALAR_FLOAT_MODE_P (mode)
	  && (mode != TDmode || (regno % 2) == 0)
	  && FP_REGNO_P (last_regno))
	return 1;

      if (GET_MODE_CLASS (mode) == MODE_INT
	  && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
	return 1;

      if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
	  && PAIRED_VECTOR_MODE (mode))
	return 1;

      return 0;
    }

  /* The CR register can only hold CC modes.  */
  if (CR_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;

  if (CA_REGNO_P (regno))
    return mode == Pmode || mode == SImode;

  /* AltiVec only in AltiVec registers.  */
  if (ALTIVEC_REGNO_P (regno))
    return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
	    || mode == V1TImode);

  /* ...but GPRs can hold SIMD data on the SPE in one register.  */
  if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    return 1;

  /* We cannot put non-VSX TImode or PTImode anywhere except general registers,
     and it must be able to fit within the register set.  */

  return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
}
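
/* Illustrative note (not part of the original source): the PTImode test above
   means a quad-word integer is only accepted starting at an even-numbered GPR
   whose pair also lies in r0..r31, and the TDmode test means a 16-byte
   decimal float is only accepted in an even-numbered FP register pair.  */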
/* Print interesting facts about registers.  */
static void
rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
{
  int r, m;

  for (r = first_regno; r <= last_regno; ++r)
    {
      const char *comma = "";
      int len;

      if (first_regno == last_regno)
	fprintf (stderr, "%s:\t", reg_name);
      else
	fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);

      len = 8;
      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
	  {
	    if (len > 70)
	      {
		fprintf (stderr, ",\n\t");
		len = 8;
		comma = "";
	      }

	    if (rs6000_hard_regno_nregs[m][r] > 1)
	      len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
			      rs6000_hard_regno_nregs[m][r]);
	    else
	      len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));

	    comma = ", ";
	  }

      if (call_used_regs[r])
	{
	  if (len > 70)
	    {
	      fprintf (stderr, ",\n\t");
	      len = 8;
	      comma = "";
	    }

	  len += fprintf (stderr, "%s%s", comma, "call-used");
	  comma = ", ";
	}

      if (fixed_regs[r])
	{
	  if (len > 70)
	    {
	      fprintf (stderr, ",\n\t");
	      len = 8;
	      comma = "";
	    }

	  len += fprintf (stderr, "%s%s", comma, "fixed");
	  comma = ", ";
	}

      if (len > 70)
	{
	  fprintf (stderr, ",\n\t");
	  comma = "";
	}

      len += fprintf (stderr, "%sreg-class = %s", comma,
		      reg_class_names[(int)rs6000_regno_regclass[r]]);
      comma = ", ";

      if (len > 70)
	{
	  fprintf (stderr, ",\n\t");
	  comma = "";
	}

      fprintf (stderr, "%sregno = %d\n", comma, r);
    }
}
/* Map enum rs6000_vector to string.  */
static const char *
rs6000_debug_vector_unit (enum rs6000_vector v)
{
  const char *ret;

  switch (v)
    {
    case VECTOR_NONE:      ret = "none";      break;
    case VECTOR_ALTIVEC:   ret = "altivec";   break;
    case VECTOR_VSX:       ret = "vsx";       break;
    case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
    case VECTOR_PAIRED:    ret = "paired";    break;
    case VECTOR_SPE:       ret = "spe";       break;
    case VECTOR_OTHER:     ret = "other";     break;
    default:               ret = "unknown";   break;
    }

  return ret;
}
/* Print the address masks in a human readable fashion.  */
static void
rs6000_debug_print_mode (ssize_t m)
{
  ssize_t rc;

  fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
  for (rc = 0; rc < N_RELOAD_REG; rc++)
    {
      addr_mask_type mask = reg_addr[m].addr_mask[rc];
      fprintf (stderr,
	       " %s: %c%c%c%c%c%c",
	       reload_reg_map[rc].name,
	       (mask & RELOAD_REG_VALID)      != 0 ? 'v' : ' ',
	       (mask & RELOAD_REG_MULTIPLE)   != 0 ? 'm' : ' ',
	       (mask & RELOAD_REG_INDEXED)    != 0 ? 'i' : ' ',
	       (mask & RELOAD_REG_OFFSET)     != 0 ? 'o' : ' ',
	       (mask & RELOAD_REG_PRE_INCDEC) != 0 ? '+' : ' ',
	       (mask & RELOAD_REG_PRE_MODIFY) != 0 ? '+' : ' ');
    }

  if (rs6000_vector_unit[m] != VECTOR_NONE
      || rs6000_vector_mem[m] != VECTOR_NONE
      || (reg_addr[m].reload_store != CODE_FOR_nothing)
      || (reg_addr[m].reload_load != CODE_FOR_nothing)
      || reg_addr[m].scalar_in_vmx_p)
    fprintf (stderr,
	     "  Vector-arith=%-10s Vector-mem=%-10s Reload=%c%c Upper=%c",
	     rs6000_debug_vector_unit (rs6000_vector_unit[m]),
	     rs6000_debug_vector_unit (rs6000_vector_mem[m]),
	     (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
	     (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*',
	     (reg_addr[m].scalar_in_vmx_p) ? 'y' : 'n');

  fputs ("\n", stderr);
}
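
/* Illustrative note (not part of the original source): each reload register
   class is printed as a string of flag characters taken from the mask above:
   'v' = the mode is valid in that class, 'm' = it needs multiple registers,
   'i' = REG+REG (indexed) addressing, 'o' = REG+OFFSET addressing, and the
   two '+' columns are PRE_INC/PRE_DEC and PRE_MODIFY support.  */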
#define DEBUG_FMT_ID "%-32s= "
#define DEBUG_FMT_D   DEBUG_FMT_ID "%d\n"
#define DEBUG_FMT_WX  DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
#define DEBUG_FMT_S   DEBUG_FMT_ID "%s\n"
/* Print various interesting information with -mdebug=reg.  */
static void
rs6000_debug_reg_global (void)
{
  static const char *const tf[2] = { "false", "true" };
  const char *nl = (const char *)0;
  int m;
  size_t m1, m2, v;
  char costly_num[20];
  char nop_num[20];
  char flags_buffer[40];
  const char *costly_str;
  const char *nop_str;
  const char *trace_str;
  const char *abi_str;
  const char *cmodel_str;
  struct cl_target_option cl_opts;

  /* Modes we want tieable information on.  */
  static const enum machine_mode print_tieable_modes[] = {
    /* ...  */
  };

  /* Virtual regs we are interested in.  */
  static const struct {
    int regno;			/* register number.  */
    const char *name;		/* register name.  */
  } virtual_regs[] = {
    { STACK_POINTER_REGNUM,			"stack pointer:" },
    { TOC_REGNUM,				"toc:          " },
    { STATIC_CHAIN_REGNUM,			"static chain: " },
    { RS6000_PIC_OFFSET_TABLE_REGNUM,		"pic offset:   " },
    { HARD_FRAME_POINTER_REGNUM,		"hard frame:   " },
    { ARG_POINTER_REGNUM,			"arg pointer:  " },
    { FRAME_POINTER_REGNUM,			"frame pointer:" },
    { FIRST_PSEUDO_REGISTER,			"first pseudo: " },
    { FIRST_VIRTUAL_REGISTER,			"first virtual:" },
    { VIRTUAL_INCOMING_ARGS_REGNUM,		"incoming_args:" },
    { VIRTUAL_STACK_VARS_REGNUM,		"stack_vars:   " },
    { VIRTUAL_STACK_DYNAMIC_REGNUM,		"stack_dynamic:" },
    { VIRTUAL_OUTGOING_ARGS_REGNUM,		"outgoing_args:" },
    { VIRTUAL_CFA_REGNUM,			"cfa (frame):  " },
    { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM,	"stack boundary:" },
    { LAST_VIRTUAL_REGISTER,			"last virtual: " },
  };

  fputs ("\nHard register information:\n", stderr);
  rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
  rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
  rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
			  LAST_ALTIVEC_REGNO,
			  "vs");
  rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
  rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
  rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
  rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
  rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
  rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
  rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
  rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");

  fputs ("\nVirtual/stack/frame registers:\n", stderr);
  for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
    fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name,
	     virtual_regs[v].regno);
  fprintf (stderr,
	   "\n"
	   "d  reg_class = %s\n"
	   "f  reg_class = %s\n"
	   "v  reg_class = %s\n"
	   "wa reg_class = %s\n"
	   "wd reg_class = %s\n"
	   "wf reg_class = %s\n"
	   "wg reg_class = %s\n"
	   "wh reg_class = %s\n"
	   "wi reg_class = %s\n"
	   "wj reg_class = %s\n"
	   "wk reg_class = %s\n"
	   "wl reg_class = %s\n"
	   "wm reg_class = %s\n"
	   "wr reg_class = %s\n"
	   "ws reg_class = %s\n"
	   "wt reg_class = %s\n"
	   "wu reg_class = %s\n"
	   "wv reg_class = %s\n"
	   "ww reg_class = %s\n"
	   "wx reg_class = %s\n"
	   "wy reg_class = %s\n"
	   "wz reg_class = %s\n"
	   "\n",
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]]);
  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    rs6000_debug_print_mode (m);

  fputs ("\n", stderr);

  for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
    {
      enum machine_mode mode1 = print_tieable_modes[m1];
      bool first_time = true;

      nl = (const char *)0;
      for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
	{
	  enum machine_mode mode2 = print_tieable_modes[m2];
	  if (mode1 != mode2 && MODES_TIEABLE_P (mode1, mode2))
	    {
	      if (first_time)
		{
		  fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
		  nl = "\n";
		  first_time = false;
		}

	      fprintf (stderr, " %s", GET_MODE_NAME (mode2));
	    }
	}

      if (!first_time)
	fputs ("\n", stderr);
    }

  if (nl)
    fputs (nl, stderr);

  if (rs6000_recip_control)
    {
      fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_recip_bits[m])
	  fprintf (stderr,
		   "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
		   GET_MODE_NAME (m),
		   (RS6000_RECIP_AUTO_RE_P (m)
		    ? "auto"
		    : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
		   (RS6000_RECIP_AUTO_RSQRTE_P (m)
		    ? "auto"
		    : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));

      fputs ("\n", stderr);
    }
  if (rs6000_cpu_index >= 0)
    {
      const char *name = processor_target_table[rs6000_cpu_index].name;
      HOST_WIDE_INT flags
	= processor_target_table[rs6000_cpu_index].target_enable;

      sprintf (flags_buffer, "-mcpu=%s flags", name);
      rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
    }
  else
    fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");

  if (rs6000_tune_index >= 0)
    {
      const char *name = processor_target_table[rs6000_tune_index].name;
      HOST_WIDE_INT flags
	= processor_target_table[rs6000_tune_index].target_enable;

      sprintf (flags_buffer, "-mtune=%s flags", name);
      rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
    }
  else
    fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");

  cl_target_option_save (&cl_opts, &global_options);
  rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
			    rs6000_isa_flags);

  rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
			    rs6000_isa_flags_explicit);

  rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
				rs6000_builtin_mask);

  rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);

  fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
	   OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
  switch (rs6000_sched_costly_dep)
    {
    case max_dep_latency:
      costly_str = "max_dep_latency";
      break;

    case no_dep_costly:
      costly_str = "no_dep_costly";
      break;

    case all_deps_costly:
      costly_str = "all_deps_costly";
      break;

    case true_store_to_load_dep_costly:
      costly_str = "true_store_to_load_dep_costly";
      break;

    case store_to_load_dep_costly:
      costly_str = "store_to_load_dep_costly";
      break;

    default:
      costly_str = costly_num;
      sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
      break;
    }

  fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);

  switch (rs6000_sched_insert_nops)
    {
    case sched_finish_regroup_exact:
      nop_str = "sched_finish_regroup_exact";
      break;

    case sched_finish_pad_groups:
      nop_str = "sched_finish_pad_groups";
      break;

    case sched_finish_none:
      nop_str = "sched_finish_none";
      break;

    default:
      nop_str = nop_num;
      sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
      break;
    }

  fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);

  switch (rs6000_sdata)
    {
    case SDATA_NONE:
      break;

    case SDATA_DATA:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
      break;

    case SDATA_SYSV:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
      break;

    case SDATA_EABI:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
      break;
    }
  switch (rs6000_traceback)
    {
    case traceback_default:	trace_str = "default";	break;
    case traceback_none:	trace_str = "none";	break;
    case traceback_part:	trace_str = "part";	break;
    case traceback_full:	trace_str = "full";	break;
    default:			trace_str = "unknown";	break;
    }

  fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);

  switch (rs6000_current_cmodel)
    {
    case CMODEL_SMALL:	cmodel_str = "small";	break;
    case CMODEL_MEDIUM:	cmodel_str = "medium";	break;
    case CMODEL_LARGE:	cmodel_str = "large";	break;
    default:		cmodel_str = "unknown";	break;
    }

  fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);

  switch (rs6000_current_abi)
    {
    case ABI_NONE:	abi_str = "none";	break;
    case ABI_AIX:	abi_str = "aix";	break;
    case ABI_ELFv2:	abi_str = "ELFv2";	break;
    case ABI_V4:	abi_str = "V4";		break;
    case ABI_DARWIN:	abi_str = "darwin";	break;
    default:		abi_str = "unknown";	break;
    }

  fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
  if (rs6000_altivec_abi)
    fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");

  if (rs6000_spe_abi)
    fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");

  if (rs6000_darwin64_abi)
    fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");

  if (rs6000_float_gprs)
    fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");

  fprintf (stderr, DEBUG_FMT_S, "fprs",
	   (TARGET_FPRS ? "true" : "false"));

  fprintf (stderr, DEBUG_FMT_S, "single_float",
	   (TARGET_SINGLE_FLOAT ? "true" : "false"));

  fprintf (stderr, DEBUG_FMT_S, "double_float",
	   (TARGET_DOUBLE_FLOAT ? "true" : "false"));

  fprintf (stderr, DEBUG_FMT_S, "soft_float",
	   (TARGET_SOFT_FLOAT ? "true" : "false"));

  fprintf (stderr, DEBUG_FMT_S, "e500_single",
	   (TARGET_E500_SINGLE ? "true" : "false"));

  fprintf (stderr, DEBUG_FMT_S, "e500_double",
	   (TARGET_E500_DOUBLE ? "true" : "false"));

  if (TARGET_LINK_STACK)
    fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");

  if (targetm.lra_p ())
    fprintf (stderr, DEBUG_FMT_S, "lra", "true");

  if (TARGET_P8_FUSION)
    fprintf (stderr, DEBUG_FMT_S, "p8 fusion",
	     (TARGET_P8_FUSION_SIGN) ? "zero+sign" : "zero");

  fprintf (stderr, DEBUG_FMT_S, "plt-format",
	   TARGET_SECURE_PLT ? "secure" : "bss");
  fprintf (stderr, DEBUG_FMT_S, "struct-return",
	   aix_struct_return ? "aix" : "sysv");
  fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
  fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
  fprintf (stderr, DEBUG_FMT_S, "align_branch",
	   tf[!!rs6000_align_branch_targets]);
  fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
  fprintf (stderr, DEBUG_FMT_D, "long_double_size",
	   rs6000_long_double_type_size);
  fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
	   (int)rs6000_sched_restricted_insns_priority);
  fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
	   (int)END_BUILTINS);
  fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
	   (int)RS6000_BUILTIN_COUNT);

  if (TARGET_VSX)
    fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
	     (int)VECTOR_ELEMENT_SCALAR_64BIT);
}
/* Update the addr mask bits in reg_addr to help secondary reload and the
   legitimate address support figure out the appropriate addressing to
   use.  */

static void
rs6000_setup_reg_addr_masks (void)
{
  ssize_t rc, reg, m, nregs;
  addr_mask_type any_addr_mask, addr_mask;

  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    {
      enum machine_mode m2 = (enum machine_mode)m;

      /* SDmode is special in that we want to access it only via REG+REG
	 addressing on power7 and above, since we want to use the LFIWZX and
	 STFIWZX instructions to load it.  */
      bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);

      any_addr_mask = 0;
      for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
	{
	  addr_mask = 0;
	  reg = reload_reg_map[rc].reg;

	  /* Can mode values go in the GPR/FPR/Altivec registers?  */
	  if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
	    {
	      nregs = rs6000_hard_regno_nregs[m][reg];
	      addr_mask |= RELOAD_REG_VALID;

	      /* Indicate if the mode takes more than 1 physical register.  If
		 it takes a single register, indicate it can do REG+REG
		 addressing.  */
	      if (nregs > 1 || m == BLKmode)
		addr_mask |= RELOAD_REG_MULTIPLE;
	      else
		addr_mask |= RELOAD_REG_INDEXED;

	      /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
		 addressing.  Restrict addressing on SPE for 64-bit types
		 because of the SUBREG hackery used to address 64-bit floats in
		 '32-bit' GPRs.  To simplify secondary reload, don't allow
		 update forms on scalar floating point types that can go in the
		 upper registers.  */
	      if (TARGET_UPDATE
		  && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
		  && GET_MODE_SIZE (m2) <= 8
		  && !VECTOR_MODE_P (m2)
		  && !COMPLEX_MODE_P (m2)
		  && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (m2) == 8)
		  && !reg_addr[m2].scalar_in_vmx_p)
		{
		  addr_mask |= RELOAD_REG_PRE_INCDEC;

		  /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
		     we don't allow PRE_MODIFY for some multi-register
		     operations.  */
		  switch (m)
		    {
		    default:
		      addr_mask |= RELOAD_REG_PRE_MODIFY;
		      break;

		    case DImode:
		      if (TARGET_POWERPC64)
			addr_mask |= RELOAD_REG_PRE_MODIFY;
		      break;

		    case DFmode:
		    case DDmode:
		      addr_mask |= RELOAD_REG_PRE_MODIFY;
		      break;
		    }
		}
	    }

	  /* GPR and FPR registers can do REG+OFFSET addressing, except
	     possibly for SDmode.  */
	  if ((addr_mask != 0) && !indexed_only_p
	      && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR))
	    addr_mask |= RELOAD_REG_OFFSET;

	  reg_addr[m].addr_mask[rc] = addr_mask;
	  any_addr_mask |= addr_mask;
	}

      reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
    }
}
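
/* Illustrative note (not part of the original source): after this routine
   runs, reg_addr[DFmode].addr_mask[RELOAD_REG_FPR] would typically contain
   RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET (plus the update
   forms when they are permitted), while the RELOAD_REG_ANY entry holds the
   union of the GPR, FPR and Altivec masks for that mode.  */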
/* Initialize the various global tables that are based on register size.  */
static void
rs6000_init_hard_regno_mode_ok (bool global_init_p)
{
  ssize_t r, m, c;
  int align64;
  int align32;

  /* Precalculate REGNO_REG_CLASS.  */
  rs6000_regno_regclass[0] = GENERAL_REGS;
  for (r = 1; r < 32; ++r)
    rs6000_regno_regclass[r] = BASE_REGS;

  for (r = 32; r < 64; ++r)
    rs6000_regno_regclass[r] = FLOAT_REGS;

  for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
    rs6000_regno_regclass[r] = NO_REGS;

  for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
    rs6000_regno_regclass[r] = ALTIVEC_REGS;

  rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
  for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
    rs6000_regno_regclass[r] = CR_REGS;

  rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
  rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
  rs6000_regno_regclass[CA_REGNO] = NO_REGS;
  rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
  rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
  rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
  rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
  /* Precalculate register class to simpler reload register class.  We don't
     need all of the register classes that are combinations of different
     classes, just the simple ones that have constraint letters.  */
  for (c = 0; c < N_REG_CLASSES; c++)
    reg_class_to_reg_type[c] = NO_REG_TYPE;

  reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
  reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
  reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
  reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
  reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
  reg_class_to_reg_type[(int)SPE_ACC_REGS] = SPE_ACC_TYPE;
  reg_class_to_reg_type[(int)SPEFSCR_REGS] = SPEFSCR_REG_TYPE;

  if (TARGET_VSX)
    {
      reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
      reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
    }
  else
    {
      reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
      reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
    }

  /* Precalculate the valid memory formats as well as the vector information;
     this must be set up before the rs6000_hard_regno_nregs_internal calls
     below.  */
  gcc_assert ((int)VECTOR_NONE == 0);
  memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
  memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));

  gcc_assert ((int)CODE_FOR_nothing == 0);
  memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));

  gcc_assert ((int)NO_REGS == 0);
  memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
  /* The VSX hardware allows native alignment for vectors, but
     TARGET_VSX_ALIGN_128 controls whether the compiler believes it can use
     native alignment or still uses 128-bit alignment.  */
  if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
    {
      align64 = 64;
      align32 = 32;
    }
  else
    {
      align64 = 128;
      align32 = 128;
    }

  /* V2DF mode, VSX only.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
      rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
      rs6000_vector_align[V2DFmode] = align64;
    }

  /* V4SF mode, either VSX or Altivec.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
      rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
      rs6000_vector_align[V4SFmode] = align32;
    }
  else if (TARGET_ALTIVEC)
    {
      rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
      rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
      rs6000_vector_align[V4SFmode] = align32;
    }

  /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
     and stores.  */
  if (TARGET_ALTIVEC)
    {
      rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
      rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
      rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
      rs6000_vector_align[V4SImode] = align32;
      rs6000_vector_align[V8HImode] = align32;
      rs6000_vector_align[V16QImode] = align32;

      if (TARGET_VSX)
	{
	  rs6000_vector_mem[V4SImode] = VECTOR_VSX;
	  rs6000_vector_mem[V8HImode] = VECTOR_VSX;
	  rs6000_vector_mem[V16QImode] = VECTOR_VSX;
	}
      else
	{
	  rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
	  rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
	  rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
	}
    }

  /* V2DImode, full mode depends on ISA 2.07 vector mode.  Allow under VSX to
     do insert/splat/extract.  Altivec doesn't have 64-bit integer support.  */
  if (TARGET_VSX)
    {
      rs6000_vector_mem[V2DImode] = VECTOR_VSX;
      rs6000_vector_unit[V2DImode]
	= (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
      rs6000_vector_align[V2DImode] = align64;

      rs6000_vector_mem[V1TImode] = VECTOR_VSX;
      rs6000_vector_unit[V1TImode]
	= (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
      rs6000_vector_align[V1TImode] = 128;
    }

  /* DFmode, see if we want to use the VSX unit.  */
  if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
    {
      rs6000_vector_unit[DFmode] = VECTOR_VSX;
      rs6000_vector_mem[DFmode]
	= (TARGET_UPPER_REGS_DF ? VECTOR_VSX : VECTOR_NONE);
      rs6000_vector_align[DFmode] = align64;
    }

  /* Allow TImode in VSX register and set the VSX memory macros.  */
  if (TARGET_VSX && TARGET_VSX_TIMODE)
    {
      rs6000_vector_mem[TImode] = VECTOR_VSX;
      rs6000_vector_align[TImode] = align64;
    }

  /* TODO add SPE and paired floating point vector support.  */
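
  /* Illustrative note (not part of the original source): rs6000_vector_unit[]
     records which unit performs arithmetic for a mode and rs6000_vector_mem[]
     records which unit performs its loads and stores, so a pure-Altivec
     target ends up with rs6000_vector_unit[V4SFmode] == VECTOR_ALTIVEC while
     a VSX target gets VECTOR_VSX in both arrays for that mode.  */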
  /* Register class constraints for the constraints that depend on compile
     switches.  When the VSX code was added, different constraints were added
     based on the type (DFmode, V2DFmode, V4SFmode).  For the vector types, all
     of the VSX registers are used.  The register classes for scalar floating
     point types are set based on whether we allow that type into the upper
     (Altivec) registers.  GCC has register classes to target the Altivec
     registers for load/store operations, to select using a VSX memory
     operation instead of the traditional floating point operation.  The
     constraints are:

	d  - Register class to use with traditional DFmode instructions.
	f  - Register class to use with traditional SFmode instructions.
	v  - Altivec register.
	wa - Any VSX register.
	wc - Reserved to represent individual CR bits (used in LLVM).
	wd - Preferred register class for V2DFmode.
	wf - Preferred register class for V4SFmode.
	wg - Float register for power6x move insns.
	wh - FP register for direct move instructions.
	wi - FP or VSX register to hold 64-bit integers for VSX insns.
	wj - FP or VSX register to hold 64-bit integers for direct moves.
	wk - FP or VSX register to hold 64-bit doubles for direct moves.
	wl - Float register if we can do 32-bit signed int loads.
	wm - VSX register for ISA 2.07 direct move operations.
	wn - always NO_REGS.
	wr - GPR if 64-bit mode is permitted.
	ws - Register class to do ISA 2.06 DF operations.
	wt - VSX register for TImode in VSX registers.
	wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
	wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
	ww - Register class to do SF conversions in with VSX operations.
	wx - Float register if we can do 32-bit int stores.
	wy - Register class to do ISA 2.07 SF operations.
	wz - Float register if we can do 32-bit unsigned int loads.  */
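
  /* Illustrative note (not part of the original source): these letters are
     what the machine description uses in insn constraints; for example an
     operand written as "=wa" in a pattern may be allocated to any VSX
     register once rs6000_constraints[RS6000_CONSTRAINT_wa] has been set to
     VSX_REGS below, and stays NO_REGS (rejecting the alternative) when VSX is
     not enabled.  */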
  if (TARGET_HARD_FLOAT && TARGET_FPRS)
    rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;	/* SFmode  */

  if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
    rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;	/* DFmode  */

  if (TARGET_VSX)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;	/* V2DFmode  */
      rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;	/* V4SFmode  */
      rs6000_constraints[RS6000_CONSTRAINT_wi] = FLOAT_REGS;	/* DImode  */

      if (TARGET_VSX_TIMODE)
	rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS;	/* TImode  */

      if (TARGET_UPPER_REGS_DF)					/* DFmode  */
	{
	  rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS;
	  rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;
	}
      else
	rs6000_constraints[RS6000_CONSTRAINT_ws] = FLOAT_REGS;
    }

  /* Add conditional constraints based on various options, to allow us to
     collapse multiple insn patterns.  */
  if (TARGET_ALTIVEC)
    rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;

  if (TARGET_MFPGPR)						/* DFmode  */
    rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;

  if (TARGET_LFIWAX)
    rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS;	/* DImode  */

  if (TARGET_DIRECT_MOVE)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wj]			/* DImode  */
	= rs6000_constraints[RS6000_CONSTRAINT_wi];
      rs6000_constraints[RS6000_CONSTRAINT_wk]			/* DFmode  */
	= rs6000_constraints[RS6000_CONSTRAINT_ws];
      rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
    }

  if (TARGET_POWERPC64)
    rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;

  if (TARGET_P8_VECTOR && TARGET_UPPER_REGS_SF)			/* SFmode  */
    {
      rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
    }
  else if (TARGET_P8_VECTOR)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wy] = FLOAT_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
    }
  else if (TARGET_VSX)
    rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;

  if (TARGET_STFIWX)
    rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS;	/* DImode  */

  if (TARGET_LFIWZX)
    rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS;	/* DImode  */
  /* Set up the reload helper and direct move functions.  */
  if (TARGET_VSX || TARGET_ALTIVEC)
    {
      if (TARGET_64BIT)
	{
	  reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
	  reg_addr[V16QImode].reload_load  = CODE_FOR_reload_v16qi_di_load;
	  reg_addr[V8HImode].reload_store  = CODE_FOR_reload_v8hi_di_store;
	  reg_addr[V8HImode].reload_load   = CODE_FOR_reload_v8hi_di_load;
	  reg_addr[V4SImode].reload_store  = CODE_FOR_reload_v4si_di_store;
	  reg_addr[V4SImode].reload_load   = CODE_FOR_reload_v4si_di_load;
	  reg_addr[V2DImode].reload_store  = CODE_FOR_reload_v2di_di_store;
	  reg_addr[V2DImode].reload_load   = CODE_FOR_reload_v2di_di_load;
	  reg_addr[V1TImode].reload_store  = CODE_FOR_reload_v1ti_di_store;
	  reg_addr[V1TImode].reload_load   = CODE_FOR_reload_v1ti_di_load;
	  reg_addr[V4SFmode].reload_store  = CODE_FOR_reload_v4sf_di_store;
	  reg_addr[V4SFmode].reload_load   = CODE_FOR_reload_v4sf_di_load;
	  reg_addr[V2DFmode].reload_store  = CODE_FOR_reload_v2df_di_store;
	  reg_addr[V2DFmode].reload_load   = CODE_FOR_reload_v2df_di_load;
	  if (TARGET_VSX && TARGET_UPPER_REGS_DF)
	    {
	      reg_addr[DFmode].reload_store    = CODE_FOR_reload_df_di_store;
	      reg_addr[DFmode].reload_load     = CODE_FOR_reload_df_di_load;
	      reg_addr[DFmode].scalar_in_vmx_p = true;
	      reg_addr[DDmode].reload_store    = CODE_FOR_reload_dd_di_store;
	      reg_addr[DDmode].reload_load     = CODE_FOR_reload_dd_di_load;
	    }
	  if (TARGET_P8_VECTOR)
	    {
	      reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
	      reg_addr[SFmode].reload_load  = CODE_FOR_reload_sf_di_load;
	      reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
	      reg_addr[SDmode].reload_load  = CODE_FOR_reload_sd_di_load;
	      if (TARGET_UPPER_REGS_SF)
		reg_addr[SFmode].scalar_in_vmx_p = true;
	    }
	  if (TARGET_VSX_TIMODE)
	    {
	      reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
	      reg_addr[TImode].reload_load  = CODE_FOR_reload_ti_di_load;
	    }
	  if (TARGET_DIRECT_MOVE)
	    {
	      if (TARGET_POWERPC64)
		{
		  reg_addr[TImode].reload_gpr_vsx    = CODE_FOR_reload_gpr_from_vsxti;
		  reg_addr[V1TImode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv1ti;
		  reg_addr[V2DFmode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv2df;
		  reg_addr[V2DImode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv2di;
		  reg_addr[V4SFmode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv4sf;
		  reg_addr[V4SImode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv4si;
		  reg_addr[V8HImode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv8hi;
		  reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
		  reg_addr[SFmode].reload_gpr_vsx    = CODE_FOR_reload_gpr_from_vsxsf;

		  reg_addr[TImode].reload_vsx_gpr    = CODE_FOR_reload_vsx_from_gprti;
		  reg_addr[V1TImode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv1ti;
		  reg_addr[V2DFmode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv2df;
		  reg_addr[V2DImode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv2di;
		  reg_addr[V4SFmode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv4sf;
		  reg_addr[V4SImode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv4si;
		  reg_addr[V8HImode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv8hi;
		  reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
		  reg_addr[SFmode].reload_vsx_gpr    = CODE_FOR_reload_vsx_from_gprsf;
		}
	      else
		{
		  reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
		  reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
		  reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
		}
	    }
[V16QImode
].reload_store
= CODE_FOR_reload_v16qi_si_store
;
2820 reg_addr
[V16QImode
].reload_load
= CODE_FOR_reload_v16qi_si_load
;
2821 reg_addr
[V8HImode
].reload_store
= CODE_FOR_reload_v8hi_si_store
;
2822 reg_addr
[V8HImode
].reload_load
= CODE_FOR_reload_v8hi_si_load
;
2823 reg_addr
[V4SImode
].reload_store
= CODE_FOR_reload_v4si_si_store
;
2824 reg_addr
[V4SImode
].reload_load
= CODE_FOR_reload_v4si_si_load
;
2825 reg_addr
[V2DImode
].reload_store
= CODE_FOR_reload_v2di_si_store
;
2826 reg_addr
[V2DImode
].reload_load
= CODE_FOR_reload_v2di_si_load
;
2827 reg_addr
[V1TImode
].reload_store
= CODE_FOR_reload_v1ti_si_store
;
2828 reg_addr
[V1TImode
].reload_load
= CODE_FOR_reload_v1ti_si_load
;
2829 reg_addr
[V4SFmode
].reload_store
= CODE_FOR_reload_v4sf_si_store
;
2830 reg_addr
[V4SFmode
].reload_load
= CODE_FOR_reload_v4sf_si_load
;
2831 reg_addr
[V2DFmode
].reload_store
= CODE_FOR_reload_v2df_si_store
;
2832 reg_addr
[V2DFmode
].reload_load
= CODE_FOR_reload_v2df_si_load
;
2833 if (TARGET_VSX
&& TARGET_UPPER_REGS_DF
)
2835 reg_addr
[DFmode
].reload_store
= CODE_FOR_reload_df_si_store
;
2836 reg_addr
[DFmode
].reload_load
= CODE_FOR_reload_df_si_load
;
2837 reg_addr
[DFmode
].scalar_in_vmx_p
= true;
2838 reg_addr
[DDmode
].reload_store
= CODE_FOR_reload_dd_si_store
;
2839 reg_addr
[DDmode
].reload_load
= CODE_FOR_reload_dd_si_load
;
2841 if (TARGET_P8_VECTOR
)
2843 reg_addr
[SFmode
].reload_store
= CODE_FOR_reload_sf_si_store
;
2844 reg_addr
[SFmode
].reload_load
= CODE_FOR_reload_sf_si_load
;
2845 reg_addr
[SDmode
].reload_store
= CODE_FOR_reload_sd_si_store
;
2846 reg_addr
[SDmode
].reload_load
= CODE_FOR_reload_sd_si_load
;
2847 if (TARGET_UPPER_REGS_SF
)
2848 reg_addr
[SFmode
].scalar_in_vmx_p
= true;
2850 if (TARGET_VSX_TIMODE
)
2852 reg_addr
[TImode
].reload_store
= CODE_FOR_reload_ti_si_store
;
2853 reg_addr
[TImode
].reload_load
= CODE_FOR_reload_ti_si_load
;
  /* Precalculate HARD_REGNO_NREGS.  */
  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      rs6000_hard_regno_nregs[m][r]
	= rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);

  /* Precalculate HARD_REGNO_MODE_OK.  */
  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
	rs6000_hard_regno_mode_ok_p[m][r] = true;

  /* Precalculate CLASS_MAX_NREGS sizes.  */
  for (c = 0; c < LIM_REG_CLASSES; ++c)
    {
      int reg_size;

      if (TARGET_VSX && VSX_REG_CLASS_P (c))
	reg_size = UNITS_PER_VSX_WORD;

      else if (c == ALTIVEC_REGS)
	reg_size = UNITS_PER_ALTIVEC_WORD;

      else if (c == FLOAT_REGS)
	reg_size = UNITS_PER_FP_WORD;

      else
	reg_size = UNITS_PER_WORD;

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	{
	  enum machine_mode m2 = (enum machine_mode)m;
	  int reg_size2 = reg_size;

	  /* TFmode/TDmode always take 2 registers, even in VSX.  */
	  if (TARGET_VSX && VSX_REG_CLASS_P (c)
	      && (m == TDmode || m == TFmode))
	    reg_size2 = UNITS_PER_FP_WORD;

	  rs6000_class_max_nregs[m][c]
	    = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
	}
    }

  if (TARGET_E500_DOUBLE)
    rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
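
  /* Illustrative note (not part of the original source): with the loop above,
     rs6000_class_max_nregs[TFmode][VSX_REGS] becomes (16 + 8 - 1) / 8 = 2
     because TFmode is forced to the 8-byte FP word size, whereas
     rs6000_class_max_nregs[V2DFmode][VSX_REGS] is (16 + 16 - 1) / 16 = 1.  */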
  /* Calculate which modes to automatically generate code to use the
     reciprocal divide and square root instructions for.  In the future,
     possibly automatically generate the instructions even if the user did not
     specify -mrecip.  The older machines' double-precision reciprocal sqrt
     estimate is not accurate enough.  */
  memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
  if (TARGET_FRES)
    rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (TARGET_FRE)
    rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;

  if (TARGET_FRSQRTES)
    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (TARGET_FRSQRTE)
    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;

  if (rs6000_recip_control)
    {
      if (!flag_finite_math_only)
	warning (0, "-mrecip requires -ffinite-math or -ffast-math");
      if (flag_trapping_math)
	warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
      if (!flag_reciprocal_math)
	warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
      if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
	{
	  if (RS6000_RECIP_HAVE_RE_P (SFmode)
	      && (rs6000_recip_control & RECIP_SF_DIV) != 0)
	    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (DFmode)
	      && (rs6000_recip_control & RECIP_DF_DIV) != 0)
	    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
	      && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
	    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
	      && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
	    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
	      && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
	    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
	      && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
	    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
	      && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
	    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
	      && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
	    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
	}
    }
  /* Update the addr mask bits in reg_addr to help secondary reload and the
     legitimate address support figure out the appropriate addressing to
     use.  */
  rs6000_setup_reg_addr_masks ();

  if (global_init_p || TARGET_DEBUG_TARGET)
    {
      if (TARGET_DEBUG_REG)
	rs6000_debug_reg_global ();

      if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
	fprintf (stderr,
		 "SImode variable mult cost       = %d\n"
		 "SImode constant mult cost       = %d\n"
		 "SImode short constant mult cost = %d\n"
		 "DImode multiplication cost      = %d\n"
		 "SImode division cost            = %d\n"
		 "DImode division cost            = %d\n"
		 "Simple fp operation cost        = %d\n"
		 "DFmode multiplication cost      = %d\n"
		 "SFmode division cost            = %d\n"
		 "DFmode division cost            = %d\n"
		 "cache line size                 = %d\n"
		 "l1 cache size                   = %d\n"
		 "l2 cache size                   = %d\n"
		 "simultaneous prefetches         = %d\n"
		 "\n",
		 rs6000_cost->mulsi,
		 rs6000_cost->mulsi_const,
		 rs6000_cost->mulsi_const9,
		 rs6000_cost->muldi,
		 rs6000_cost->divsi,
		 rs6000_cost->divdi,
		 rs6000_cost->fp,
		 rs6000_cost->dmul,
		 rs6000_cost->sdiv,
		 rs6000_cost->ddiv,
		 rs6000_cost->cache_line_size,
		 rs6000_cost->l1_cache_size,
		 rs6000_cost->l2_cache_size,
		 rs6000_cost->simultaneous_prefetches);
    }
}
/* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS.  */
static void
darwin_rs6000_override_options (void)
{
  /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
     off.  */
  rs6000_altivec_abi = 1;
  TARGET_ALTIVEC_VRSAVE = 1;
  rs6000_current_abi = ABI_DARWIN;

  if (DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    darwin_one_byte_bool = 1;

  if (TARGET_64BIT && ! TARGET_POWERPC64)
    {
      rs6000_isa_flags |= OPTION_MASK_POWERPC64;
      warning (0, "-m64 requires PowerPC64 architecture, enabling");
    }
  if (flag_mkernel)
    {
      rs6000_default_long_calls = 1;
      rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
    }

  /* Make -m64 imply -maltivec.  Darwin's 64-bit ABI includes
     Altivec.  */
  if (!flag_mkernel && !flag_apple_kext
      && TARGET_64BIT
      && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
    rs6000_isa_flags |= OPTION_MASK_ALTIVEC;

  /* Unless the user (not the configurer) has explicitly overridden
     it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
     G4 unless targeting the kernel.  */
  if (!flag_mkernel
      && !flag_apple_kext
      && strverscmp (darwin_macosx_version_min, "10.5") >= 0
      && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
      && ! global_options_set.x_rs6000_cpu_index)
    rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
}
/* If not otherwise specified by a target, make 'long double' equivalent to
   'double'.  */

#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
#endif
/* Return the builtin mask of the various options used that could affect which
   builtins were used.  In the past we used target_flags, but we've run out of
   bits, and some options like SPE and PAIRED are no longer in
   target_flags.  */

HOST_WIDE_INT
rs6000_builtin_mask_calculate (void)
{
  return (((TARGET_ALTIVEC)		    ? RS6000_BTM_ALTIVEC    : 0)
	  | ((TARGET_VSX)		    ? RS6000_BTM_VSX	    : 0)
	  | ((TARGET_SPE)		    ? RS6000_BTM_SPE	    : 0)
	  | ((TARGET_PAIRED_FLOAT)	    ? RS6000_BTM_PAIRED	    : 0)
	  | ((TARGET_FRE)		    ? RS6000_BTM_FRE	    : 0)
	  | ((TARGET_FRES)		    ? RS6000_BTM_FRES	    : 0)
	  | ((TARGET_FRSQRTE)		    ? RS6000_BTM_FRSQRTE    : 0)
	  | ((TARGET_FRSQRTES)		    ? RS6000_BTM_FRSQRTES   : 0)
	  | ((TARGET_POPCNTD)		    ? RS6000_BTM_POPCNTD    : 0)
	  | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL	    : 0)
	  | ((TARGET_P8_VECTOR)		    ? RS6000_BTM_P8_VECTOR  : 0)
	  | ((TARGET_CRYPTO)		    ? RS6000_BTM_CRYPTO	    : 0)
	  | ((TARGET_HTM)		    ? RS6000_BTM_HTM	    : 0)
	  | ((TARGET_DFP)		    ? RS6000_BTM_DFP	    : 0)
	  | ((TARGET_HARD_FLOAT)	    ? RS6000_BTM_HARD_FLOAT : 0)
	  | ((TARGET_LONG_DOUBLE_128)	    ? RS6000_BTM_LDBL128    : 0));
}
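
/* Illustrative note (not part of the original source): a target compiled with
   -maltivec -mvsx and hard float, but without the ISA 2.07 extensions, would
   get RS6000_BTM_ALTIVEC | RS6000_BTM_VSX | RS6000_BTM_HARD_FLOAT (plus
   whichever FRE/FRES/FRSQRTE/FRSQRTES bits its cpu provides) from this
   function.  */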
/* Override command line options.  Mostly we process the processor type and
   sometimes adjust other TARGET_ options.  */

static bool
rs6000_option_override_internal (bool global_init_p)
{
  bool ret = true;
  bool have_cpu = false;

  /* The default cpu requested at configure time, if any.  */
  const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;

  HOST_WIDE_INT set_masks;
  int cpu_index, tune_index;
  struct cl_target_option *main_target_opt
    = ((global_init_p || target_option_default_node == NULL)
       ? NULL : TREE_TARGET_OPTION (target_option_default_node));

  /* Remember the explicit arguments.  */
  if (global_init_p)
    rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;

  /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
     library functions, so warn about it.  The flag may be useful for
     performance studies from time to time though, so don't disable it
     entirely.  */
  if (global_options_set.x_rs6000_alignment_flags
      && rs6000_alignment_flags == MASK_ALIGN_POWER
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    warning (0, "-malign-power is not supported for 64-bit Darwin;"
	     " it is incompatible with the installed C and C++ libraries");

  /* Numerous experiments show that IRA-based loop pressure
     calculation works better for RTL loop invariant motion on targets
     with enough (>= 32) registers.  It is an expensive optimization.
     So it is on only for peak performance.  */
  if (optimize >= 3 && global_init_p
      && !global_options_set.x_flag_ira_loop_pressure)
    flag_ira_loop_pressure = 1;

  /* Set the pointer size.  */
  if (TARGET_64BIT)
    {
      rs6000_pmode = (int)DImode;
      rs6000_pointer_size = 64;
    }
  else
    {
      rs6000_pmode = (int)SImode;
      rs6000_pointer_size = 32;
    }
  /* Some OSs don't support saving the high part of 64-bit registers on context
     switch.  Other OSs don't support saving Altivec registers.  On those OSs,
     we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
     if the user wants either, the user must explicitly specify them and we
     won't interfere with the user's specification.  */

  set_masks = POWERPC_MASKS;
#ifdef OS_MISSING_POWERPC64
  if (OS_MISSING_POWERPC64)
    set_masks &= ~OPTION_MASK_POWERPC64;
#endif
#ifdef OS_MISSING_ALTIVEC
  if (OS_MISSING_ALTIVEC)
    set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX);
#endif

  /* Don't override by the processor default if given explicitly.  */
  set_masks &= ~rs6000_isa_flags_explicit;

  /* Process the -mcpu=<xxx> and -mtune=<xxx> argument.  If the user changed
     the cpu in a target attribute or pragma, but did not specify a tuning
     option, use the cpu for the tuning option rather than the option specified
     with -mtune on the command line.  Process a '--with-cpu' configuration
     request as an implicit --cpu.  */
  if (rs6000_cpu_index >= 0)
    {
      cpu_index = rs6000_cpu_index;
      have_cpu = true;
    }
  else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
    {
      rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
      have_cpu = true;
    }
  else if (implicit_cpu)
    {
      rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
      have_cpu = true;
    }
  else
    {
      const char *default_cpu = (TARGET_POWERPC64 ? "powerpc64" : "powerpc");
      rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
      have_cpu = false;
    }

  gcc_assert (cpu_index >= 0);

  /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
     compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
     with those from the cpu, except for options that were explicitly set.  If
     we don't have a cpu, do not override the target bits set in
     TARGET_DEFAULT.  */
  if (have_cpu)
    {
      rs6000_isa_flags &= ~set_masks;
      rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
			   & set_masks);
    }
  else
    rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
			 & ~rs6000_isa_flags_explicit);

  /* If no -mcpu=<xxx>, inherit any default options that were cleared via
     POWERPC_MASKS.  Originally, TARGET_DEFAULT was used to initialize
     target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook.  When we switched
     to using rs6000_isa_flags, we need to do the initialization here.  */
  if (!have_cpu)
    rs6000_isa_flags |= (TARGET_DEFAULT & ~rs6000_isa_flags_explicit);

  if (rs6000_tune_index >= 0)
    tune_index = rs6000_tune_index;
  else if (have_cpu)
    rs6000_tune_index = tune_index = cpu_index;
  else
    {
      size_t i;
      enum processor_type tune_proc
	= (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);

      tune_index = -1;
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (processor_target_table[i].processor == tune_proc)
	  {
	    rs6000_tune_index = tune_index = i;
	    break;
	  }
    }

  gcc_assert (tune_index >= 0);
  rs6000_cpu = processor_target_table[tune_index].processor;
  /* Pick defaults for SPE related control flags.  Do this early to make sure
     that the TARGET_ macros are representative ASAP.  */
  {
    int spe_capable_cpu =
      (rs6000_cpu == PROCESSOR_PPC8540
       || rs6000_cpu == PROCESSOR_PPC8548);

    if (!global_options_set.x_rs6000_spe_abi)
      rs6000_spe_abi = spe_capable_cpu;

    if (!global_options_set.x_rs6000_spe)
      rs6000_spe = spe_capable_cpu;

    if (!global_options_set.x_rs6000_float_gprs)
      rs6000_float_gprs =
	(rs6000_cpu == PROCESSOR_PPC8540 ? 1
	 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
	 : 0);
  }

  if (global_options_set.x_rs6000_spe_abi
      && rs6000_spe_abi
      && !TARGET_SPE_ABI)
    error ("not configured for SPE ABI");

  if (global_options_set.x_rs6000_spe
      && rs6000_spe
      && !TARGET_SPE)
    error ("not configured for SPE instruction set");

  if (main_target_opt != NULL
      && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
	  || (main_target_opt->x_rs6000_spe != rs6000_spe)
	  || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
    error ("target attribute or pragma changes SPE ABI");

  if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
      || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
      || rs6000_cpu == PROCESSOR_PPCE5500)
    {
      if (TARGET_ALTIVEC)
	error ("AltiVec not supported in this target");
      if (TARGET_SPE)
	error ("SPE not supported in this target");
    }
  if (rs6000_cpu == PROCESSOR_PPCE6500)
    {
      if (TARGET_SPE)
	error ("SPE not supported in this target");
    }
  /* Disable Cell microcode if we are optimizing for the Cell
     and not optimizing for size.  */
  if (rs6000_gen_cell_microcode == -1)
    rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
				  && !optimize_size);

  /* If we are optimizing big endian systems for space and it's OK to
     use instructions that would be microcoded on the Cell, use the
     load/store multiple and string instructions.  */
  if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
    rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
						      | OPTION_MASK_STRING);

  /* Don't allow -mmultiple or -mstring on little endian systems
     unless the cpu is a 750, because the hardware doesn't support the
     instructions used in little endian mode, and causes an alignment
     trap.  The 750 does not cause an alignment trap (except when the
     target is unaligned).  */

  if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
    {
      if (TARGET_MULTIPLE)
	{
	  rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
	    warning (0, "-mmultiple is not supported on little endian systems");
	}

      if (TARGET_STRING)
	{
	  rs6000_isa_flags &= ~OPTION_MASK_STRING;
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
	    warning (0, "-mstring is not supported on little endian systems");
	}
    }

  /* If little-endian, default to -mstrict-align on older processors.
     Testing for htm matches power8 and later.  */
  if (!BYTES_BIG_ENDIAN
      && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
    rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;

  /* -maltivec={le,be} implies -maltivec.  */
  if (rs6000_altivec_element_order != 0)
    rs6000_isa_flags |= OPTION_MASK_ALTIVEC;

  /* Disallow -maltivec=le in big endian mode for now.  This is not
     known to be useful for anyone.  */
  if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
    {
      warning (0, N_("-maltivec=le not allowed for big-endian targets"));
      rs6000_altivec_element_order = 0;
    }
  /* Add some warnings for VSX.  */
  if (TARGET_VSX)
    {
      const char *msg = NULL;

      if (!TARGET_HARD_FLOAT || !TARGET_FPRS
	  || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
	{
	  if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
	    msg = N_("-mvsx requires hardware floating point");
	  else
	    {
	      rs6000_isa_flags &= ~ OPTION_MASK_VSX;
	      rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
	    }
	}
      else if (TARGET_PAIRED_FLOAT)
	msg = N_("-mvsx and -mpaired are incompatible");
      else if (TARGET_AVOID_XFORM > 0)
	msg = N_("-mvsx needs indexed addressing");
      else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
				   & OPTION_MASK_ALTIVEC))
	{
	  if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
	    msg = N_("-mvsx and -mno-altivec are incompatible");
	  else
	    msg = N_("-mno-altivec disables vsx");
	}

      if (msg)
	{
	  warning (0, msg);
	  rs6000_isa_flags &= ~ OPTION_MASK_VSX;
	  rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
	}
    }

  /* If hard-float/altivec/vsx were explicitly turned off then don't allow
     the -mcpu setting to enable options that conflict.  */
  if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
      && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
				       | OPTION_MASK_ALTIVEC
				       | OPTION_MASK_VSX)) != 0)
    rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
			   | OPTION_MASK_DIRECT_MOVE)
			  & ~rs6000_isa_flags_explicit);

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
  /* For the newer switches (vsx, dfp, etc.) set some of the older options,
     unless the user explicitly used the -mno-<option> to disable the code.  */
  if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
    rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~rs6000_isa_flags_explicit);
  else if (TARGET_VSX)
    rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~rs6000_isa_flags_explicit);
  else if (TARGET_POPCNTD)
    rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
  else if (TARGET_DFP)
    rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~rs6000_isa_flags_explicit);
  else if (TARGET_CMPB)
    rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
  else if (TARGET_FPRND)
    rs6000_isa_flags |= (ISA_2_4_MASKS & ~rs6000_isa_flags_explicit);
  else if (TARGET_POPCNTB)
    rs6000_isa_flags |= (ISA_2_2_MASKS & ~rs6000_isa_flags_explicit);
  else if (TARGET_ALTIVEC)
    rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~rs6000_isa_flags_explicit);

  if (TARGET_CRYPTO && !TARGET_ALTIVEC)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
	error ("-mcrypto requires -maltivec");
      rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
    }

  if (TARGET_DIRECT_MOVE && !TARGET_VSX)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
	error ("-mdirect-move requires -mvsx");
      rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
    }

  if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
	error ("-mpower8-vector requires -maltivec");
      rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
    }

  if (TARGET_P8_VECTOR && !TARGET_VSX)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
	error ("-mpower8-vector requires -mvsx");
      rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
    }

  if (TARGET_VSX_TIMODE && !TARGET_VSX)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE)
	error ("-mvsx-timode requires -mvsx");
      rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
    }

  if (TARGET_DFP && !TARGET_HARD_FLOAT)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
	error ("-mhard-dfp requires -mhard-float");
      rs6000_isa_flags &= ~OPTION_MASK_DFP;
    }
  /* The quad memory instructions only work in 64-bit mode.  In 32-bit mode,
     silently turn off quad memory mode.  */
  if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
	warning (0, N_("-mquad-memory requires 64-bit mode"));

      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
	warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));

      rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
			    | OPTION_MASK_QUAD_MEMORY_ATOMIC);
    }

  /* Non-atomic quad memory load/store is disabled for little endian, since
     the words are reversed, but atomic operations can still be done by
     swapping the words.  */
  if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
	warning (0, N_("-mquad-memory is not available in little endian mode"));

      rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
    }

  /* Assume that if the user asked for normal quad memory instructions, they
     want the atomic versions as well, unless they explicitly told us not to
     use quad word atomic instructions.  */
  if (TARGET_QUAD_MEMORY
      && !TARGET_QUAD_MEMORY_ATOMIC
      && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
    rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;

  /* Enable power8 fusion if we are tuning for power8, even if we aren't
     generating power8 instructions.  */
  if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
    rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
			 & OPTION_MASK_P8_FUSION);

  /* Power8 does not fuse sign extended loads with the addis.  If we are
     optimizing at high levels for speed, convert a sign extended load into a
     zero extending load, and an explicit sign extension.  */
  if (TARGET_P8_FUSION
      && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
      && optimize_function_for_speed_p (cfun)
      && optimize >= 3)
    rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
  /* E500mc does "better" if we inline more aggressively.  Respect the
     user's opinion, though.  */
  if (rs6000_block_move_inline_limit == 0
      && (rs6000_cpu == PROCESSOR_PPCE500MC
          || rs6000_cpu == PROCESSOR_PPCE500MC64
          || rs6000_cpu == PROCESSOR_PPCE5500
          || rs6000_cpu == PROCESSOR_PPCE6500))
    rs6000_block_move_inline_limit = 128;

  /* store_one_arg depends on expand_block_move to handle at least the
     size of reg_parm_stack_space.  */
  if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
    rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
  /* If the appropriate debug option is enabled, replace the target hooks
     with debug versions that call the real version and then print
     debugging information.  */
  if (TARGET_DEBUG_COST)
    {
      targetm.rtx_costs = rs6000_debug_rtx_costs;
      targetm.address_cost = rs6000_debug_address_cost;
      targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
    }

  if (TARGET_DEBUG_ADDR)
    {
      targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
      targetm.legitimize_address = rs6000_debug_legitimize_address;
      rs6000_secondary_reload_class_ptr
        = rs6000_debug_secondary_reload_class;
      rs6000_secondary_memory_needed_ptr
        = rs6000_debug_secondary_memory_needed;
      rs6000_cannot_change_mode_class_ptr
        = rs6000_debug_cannot_change_mode_class;
      rs6000_preferred_reload_class_ptr
        = rs6000_debug_preferred_reload_class;
      rs6000_legitimize_reload_address_ptr
        = rs6000_debug_legitimize_reload_address;
      rs6000_mode_dependent_address_ptr
        = rs6000_debug_mode_dependent_address;
    }
  if (rs6000_veclibabi_name)
    {
      if (strcmp (rs6000_veclibabi_name, "mass") == 0)
        rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
      else
        error ("unknown vectorization library ABI type (%s) for "
               "-mveclibabi= switch", rs6000_veclibabi_name);
    }
  if (!global_options_set.x_rs6000_long_double_type_size)
    {
      if (main_target_opt != NULL
          && (main_target_opt->x_rs6000_long_double_type_size
              != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
        error ("target attribute or pragma changes long double size");
      else
        rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
    }

#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
  if (!global_options_set.x_rs6000_ieeequad)
    rs6000_ieeequad = 1;
#endif
  /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
     target attribute or pragma which automatically enables both options,
     unless the altivec ABI was set.  This is set by default for 64-bit, but
     not for 32-bit.  */
  if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
    rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC)
                          & ~rs6000_isa_flags_explicit);

  /* Enable Altivec ABI for AIX -maltivec.  */
  if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
        error ("target attribute or pragma changes AltiVec ABI");
      else
        rs6000_altivec_abi = 1;
    }
  /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux.  For
     PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI.  It can
     be explicitly overridden in either case.  */
  if (!global_options_set.x_rs6000_altivec_abi
      && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
    {
      if (main_target_opt != NULL &&
          !main_target_opt->x_rs6000_altivec_abi)
        error ("target attribute or pragma changes AltiVec ABI");
      else
        rs6000_altivec_abi = 1;
    }
  /* Set the Darwin64 ABI as default for 64-bit Darwin.
     So far, the only darwin64 targets are also MACH-O.  */
  if (TARGET_MACHO
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
        error ("target attribute or pragma changes darwin64 ABI");
      else
        {
          rs6000_darwin64_abi = 1;
          /* Default to natural alignment, for better performance.  */
          rs6000_alignment_flags = MASK_ALIGN_NATURAL;
        }
    }
  /* Place FP constants in the constant pool instead of TOC
     if section anchors are enabled.  */
  if (flag_section_anchors
      && !global_options_set.x_TARGET_NO_FP_IN_TOC)
    TARGET_NO_FP_IN_TOC = 1;

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif

#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
  SUBSUBTARGET_OVERRIDE_OPTIONS;
#endif

#ifdef SUB3TARGET_OVERRIDE_OPTIONS
  SUB3TARGET_OVERRIDE_OPTIONS;
#endif

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
  /* For the E500 family of cores, reset the single/double FP flags to let us
     check that they remain constant across attributes or pragmas.  Also,
     clear a possible request for string instructions, not supported and which
     we might have silently queried above for -Os.

     For other families, clear ISEL in case it was set implicitly.  */
  switch (rs6000_cpu)
    {
    case PROCESSOR_PPC8540:
    case PROCESSOR_PPC8548:
    case PROCESSOR_PPCE500MC:
    case PROCESSOR_PPCE500MC64:
    case PROCESSOR_PPCE5500:
    case PROCESSOR_PPCE6500:
      rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
      rs6000_double_float = TARGET_E500_DOUBLE;
      rs6000_isa_flags &= ~OPTION_MASK_STRING;
      break;

    default:
      if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
        rs6000_isa_flags &= ~OPTION_MASK_ISEL;
      break;
    }
  if (main_target_opt)
    {
      if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
        error ("target attribute or pragma changes single precision floating "
               "point");
      if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
        error ("target attribute or pragma changes double precision floating "
               "point");
    }

  /* Detect invalid option combinations with E500.  */
  CHECK_E500_OPTIONS;
  rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
                        && rs6000_cpu != PROCESSOR_POWER5
                        && rs6000_cpu != PROCESSOR_POWER6
                        && rs6000_cpu != PROCESSOR_POWER7
                        && rs6000_cpu != PROCESSOR_POWER8
                        && rs6000_cpu != PROCESSOR_PPCA2
                        && rs6000_cpu != PROCESSOR_CELL
                        && rs6000_cpu != PROCESSOR_PPC476);
  rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
                         || rs6000_cpu == PROCESSOR_POWER5
                         || rs6000_cpu == PROCESSOR_POWER7
                         || rs6000_cpu == PROCESSOR_POWER8);
  rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
                                 || rs6000_cpu == PROCESSOR_POWER5
                                 || rs6000_cpu == PROCESSOR_POWER6
                                 || rs6000_cpu == PROCESSOR_POWER7
                                 || rs6000_cpu == PROCESSOR_POWER8
                                 || rs6000_cpu == PROCESSOR_PPCE500MC
                                 || rs6000_cpu == PROCESSOR_PPCE500MC64
                                 || rs6000_cpu == PROCESSOR_PPCE5500
                                 || rs6000_cpu == PROCESSOR_PPCE6500);

  /* Allow debug switches to override the above settings.  These are set to -1
     in rs6000.opt to indicate the user hasn't directly set the switch.  */
  if (TARGET_ALWAYS_HINT >= 0)
    rs6000_always_hint = TARGET_ALWAYS_HINT;

  if (TARGET_SCHED_GROUPS >= 0)
    rs6000_sched_groups = TARGET_SCHED_GROUPS;

  if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
    rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;

  rs6000_sched_restricted_insns_priority
    = (rs6000_sched_groups ? 1 : 0);
  /* Handle -msched-costly-dep option.  */
  rs6000_sched_costly_dep
    = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);

  if (rs6000_sched_costly_dep_str)
    {
      if (! strcmp (rs6000_sched_costly_dep_str, "no"))
        rs6000_sched_costly_dep = no_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
        rs6000_sched_costly_dep = all_deps_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
        rs6000_sched_costly_dep = true_store_to_load_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
        rs6000_sched_costly_dep = store_to_load_dep_costly;
      else
        rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
                                   atoi (rs6000_sched_costly_dep_str));
    }

  /* Handle -minsert-sched-nops option.  */
  rs6000_sched_insert_nops
    = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);

  if (rs6000_sched_insert_nops_str)
    {
      if (! strcmp (rs6000_sched_insert_nops_str, "no"))
        rs6000_sched_insert_nops = sched_finish_none;
      else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
        rs6000_sched_insert_nops = sched_finish_pad_groups;
      else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
        rs6000_sched_insert_nops = sched_finish_regroup_exact;
      else
        rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
                                    atoi (rs6000_sched_insert_nops_str));
    }
#ifdef TARGET_REGNAMES
  /* If the user desires alternate register names, copy in the
     alternate names now.  */
  if (TARGET_REGNAMES)
    memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
#endif

  /* Set aix_struct_return last, after the ABI is determined.
     If -maix-struct-return or -msvr4-struct-return was explicitly
     used, don't override with the ABI default.  */
  if (!global_options_set.x_aix_struct_return)
    aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);

  /* IBM XL compiler defaults to unsigned bitfields.  */
  if (TARGET_XL_COMPAT)
    flag_signed_bitfields = 0;

  if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
    REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
  ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);

  /* We can only guarantee the availability of DI pseudo-ops when
     assembling for 64-bit targets.  */
  if (!TARGET_64BIT)
    {
      targetm.asm_out.aligned_op.di = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }
  /* Set branch target alignment, if not optimizing for size.  */
  if (!optimize_size)
    {
      /* Cell wants to be aligned 8byte for dual issue.  Titan wants to be
         aligned 8byte to avoid misprediction by the branch predictor.  */
      if (rs6000_cpu == PROCESSOR_TITAN
          || rs6000_cpu == PROCESSOR_CELL)
        {
          if (align_functions <= 0)
            align_functions = 8;
          if (align_jumps <= 0)
            align_jumps = 8;
          if (align_loops <= 0)
            align_loops = 8;
        }
      if (rs6000_align_branch_targets)
        {
          if (align_functions <= 0)
            align_functions = 16;
          if (align_jumps <= 0)
            align_jumps = 16;
          if (align_loops <= 0)
            {
              can_override_loop_align = 1;
              align_loops = 16;
            }
        }
      if (align_jumps_max_skip <= 0)
        align_jumps_max_skip = 15;
      if (align_loops_max_skip <= 0)
        align_loops_max_skip = 15;
    }
  /* Arrange to save and restore machine status around nested functions.  */
  init_machine_status = rs6000_init_machine_status;

  /* We should always be splitting complex arguments, but we can't break
     Linux and Darwin ABIs at the moment.  For now, only AIX is fixed.  */
  if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
    targetm.calls.split_complex_arg = NULL;
  /* Initialize rs6000_cost with the appropriate target costs.  */
  if (optimize_size)
    rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
  else
    switch (rs6000_cpu)
      {
      case PROCESSOR_RS64A:
        rs6000_cost = &rs64a_cost;
        break;

      case PROCESSOR_MPCCORE:
        rs6000_cost = &mpccore_cost;
        break;

      case PROCESSOR_PPC403:
        rs6000_cost = &ppc403_cost;
        break;

      case PROCESSOR_PPC405:
        rs6000_cost = &ppc405_cost;
        break;

      case PROCESSOR_PPC440:
        rs6000_cost = &ppc440_cost;
        break;

      case PROCESSOR_PPC476:
        rs6000_cost = &ppc476_cost;
        break;

      case PROCESSOR_PPC601:
        rs6000_cost = &ppc601_cost;
        break;

      case PROCESSOR_PPC603:
        rs6000_cost = &ppc603_cost;
        break;

      case PROCESSOR_PPC604:
        rs6000_cost = &ppc604_cost;
        break;

      case PROCESSOR_PPC604e:
        rs6000_cost = &ppc604e_cost;
        break;

      case PROCESSOR_PPC620:
        rs6000_cost = &ppc620_cost;
        break;

      case PROCESSOR_PPC630:
        rs6000_cost = &ppc630_cost;
        break;

      case PROCESSOR_CELL:
        rs6000_cost = &ppccell_cost;
        break;

      case PROCESSOR_PPC750:
      case PROCESSOR_PPC7400:
        rs6000_cost = &ppc750_cost;
        break;

      case PROCESSOR_PPC7450:
        rs6000_cost = &ppc7450_cost;
        break;

      case PROCESSOR_PPC8540:
      case PROCESSOR_PPC8548:
        rs6000_cost = &ppc8540_cost;
        break;

      case PROCESSOR_PPCE300C2:
      case PROCESSOR_PPCE300C3:
        rs6000_cost = &ppce300c2c3_cost;
        break;

      case PROCESSOR_PPCE500MC:
        rs6000_cost = &ppce500mc_cost;
        break;

      case PROCESSOR_PPCE500MC64:
        rs6000_cost = &ppce500mc64_cost;
        break;

      case PROCESSOR_PPCE5500:
        rs6000_cost = &ppce5500_cost;
        break;

      case PROCESSOR_PPCE6500:
        rs6000_cost = &ppce6500_cost;
        break;

      case PROCESSOR_TITAN:
        rs6000_cost = &titan_cost;
        break;

      case PROCESSOR_POWER4:
      case PROCESSOR_POWER5:
        rs6000_cost = &power4_cost;
        break;

      case PROCESSOR_POWER6:
        rs6000_cost = &power6_cost;
        break;

      case PROCESSOR_POWER7:
        rs6000_cost = &power7_cost;
        break;

      case PROCESSOR_POWER8:
        rs6000_cost = &power8_cost;
        break;

      case PROCESSOR_PPCA2:
        rs6000_cost = &ppca2_cost;
        break;

      default:
        gcc_unreachable ();
      }
  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
                         rs6000_cost->simultaneous_prefetches,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
                         rs6000_cost->cache_line_size,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
                         global_options.x_param_values,
                         global_options_set.x_param_values);

  /* Increase loop peeling limits based on performance analysis.  */
  maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
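  /* Illustrative note (not from the original source): maybe_set_param_value
     only changes a --param the user has not set explicitly.  So with
     -mcpu=power7, rs6000_cost points at power7_cost and PARAM_L1_CACHE_SIZE
     picks up that table's l1_cache_size, while an explicit
     "--param l1-cache-size=..." on the command line is left untouched.  */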
  /* If using typedef char *va_list, signal that
     __builtin_va_start (&ap, 0) can be optimized to
     ap = __builtin_next_arg (0).  */
  if (DEFAULT_ABI != ABI_V4)
    targetm.expand_builtin_va_start = NULL;

  /* Set up single/double float flags.
     If TARGET_HARD_FLOAT is set, but neither single nor double is set,
     then set both flags.  */
  if (TARGET_HARD_FLOAT && TARGET_FPRS
      && rs6000_single_float == 0 && rs6000_double_float == 0)
    rs6000_single_float = rs6000_double_float = 1;

  /* If not explicitly specified via option, decide whether to generate indexed
     load/store instructions.  */
  if (TARGET_AVOID_XFORM == -1)
    /* Avoid indexed addressing when targeting Power6 in order to avoid the
       DERAT mispredict penalty.  However the LVE and STVE altivec instructions
       need indexed accesses and the type used is the scalar type of the element
       being loaded or stored.  */
    TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
                          && !TARGET_ALTIVEC);
  /* Set the -mrecip options.  */
  if (rs6000_recip_name)
    {
      char *p = ASTRDUP (rs6000_recip_name);
      char *q;
      unsigned int mask, i;
      bool invert;

      while ((q = strtok (p, ",")) != NULL)
        {
          p = NULL;
          if (*q == '!')
            {
              invert = true;
              q++;
            }
          else
            invert = false;

          if (!strcmp (q, "default"))
            mask = ((TARGET_RECIP_PRECISION)
                    ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
          else
            {
              for (i = 0; i < ARRAY_SIZE (recip_options); i++)
                if (!strcmp (q, recip_options[i].string))
                  {
                    mask = recip_options[i].mask;
                    break;
                  }

              if (i == ARRAY_SIZE (recip_options))
                {
                  error ("unknown option for -mrecip=%s", q);
                  invert = false;
                  mask = 0;
                }
            }

          if (invert)
            rs6000_recip_control &= ~mask;
          else
            rs6000_recip_control |= mask;
        }
    }
  /* Set the builtin mask of the various options used that could affect which
     builtins were used.  In the past we used target_flags, but we've run out
     of bits, and some options like SPE and PAIRED are no longer in
     target_flags.  */
  rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
    {
      fprintf (stderr,
               "new builtin mask = " HOST_WIDE_INT_PRINT_HEX ", ",
               rs6000_builtin_mask);
      rs6000_print_builtin_options (stderr, 0, NULL, rs6000_builtin_mask);
    }

  /* Initialize all of the registers.  */
  rs6000_init_hard_regno_mode_ok (global_init_p);

  /* Save the initial options in case the user does function specific options.  */
  target_option_default_node = target_option_current_node
    = build_target_option_node (&global_options);

  /* If not explicitly specified via option, decide whether to generate the
     extra blr's required to preserve the link stack on some cpus (eg, 476).  */
  if (TARGET_LINK_STACK == -1)
    SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
/* Implement TARGET_OPTION_OVERRIDE.  On the RS/6000 this is used to
   define the target cpu type.  */

static void
rs6000_option_override (void)
{
  (void) rs6000_option_override_internal (true);

  /* Register machine-specific passes.  This needs to be done at start-up.
     It's convenient to do it here (like i386 does).  */
  opt_pass *pass_analyze_swaps = make_pass_analyze_swaps (g);

  static struct register_pass_info analyze_swaps_info
    = { pass_analyze_swaps, "cse1", 1, PASS_POS_INSERT_BEFORE };

  register_pass (&analyze_swaps_info);
}
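/* Illustrative note (not from the original source): in the register_pass_info
   initializer above, "cse1" names the reference pass, 1 selects which instance
   of that pass to match, and PASS_POS_INSERT_BEFORE asks for the new pass to
   run just before it, so the swap-analysis pass executes once per function
   ahead of the first CSE pass.  */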
/* Implement targetm.vectorize.builtin_mask_for_load.  */

static tree
rs6000_builtin_mask_for_load (void)
{
  if (TARGET_ALTIVEC || TARGET_VSX)
    return altivec_builtin_mask_for_load;
  else
    return 0;
}
/* Implement LOOP_ALIGN.  */

int
rs6000_loop_align (rtx label)
{
  basic_block bb;
  int ninsns;

  /* Don't override loop alignment if -falign-loops was specified.  */
  if (!can_override_loop_align)
    return align_loops_log;

  bb = BLOCK_FOR_INSN (label);
  ninsns = num_loop_insns (bb->loop_father);

  /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default.  */
  if (ninsns > 4 && ninsns <= 8
      && (rs6000_cpu == PROCESSOR_POWER4
          || rs6000_cpu == PROCESSOR_POWER5
          || rs6000_cpu == PROCESSOR_POWER6
          || rs6000_cpu == PROCESSOR_POWER7
          || rs6000_cpu == PROCESSOR_POWER8))
    return 5;
  else
    return align_loops_log;
}

/* Implement TARGET_LOOP_ALIGN_MAX_SKIP.  */

static int
rs6000_loop_align_max_skip (rtx_insn *label)
{
  return (1 << rs6000_loop_align (label)) - 1;
}
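/* Worked example (illustrative): when rs6000_loop_align returns 5, i.e. a
   2**5 = 32-byte alignment for a small loop on a Power4..Power8 core, the
   maximum number of padding bytes the aligner may skip is
   (1 << 5) - 1 = 31.  */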
/* Return true iff a data reference of TYPE can reach vector alignment (16)
   after applying N iterations.  This routine does not determine how many
   iterations are required to reach the desired alignment.  */
4162 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED
, bool is_packed
)
4169 if (rs6000_alignment_flags
== MASK_ALIGN_NATURAL
)
4172 if (rs6000_alignment_flags
== MASK_ALIGN_POWER
)
4182 /* Assuming that all other types are naturally aligned. CHECKME! */
4187 /* Return true if the vector misalignment factor is supported by the
4190 rs6000_builtin_support_vector_misalignment (enum machine_mode mode
,
4197 /* Return if movmisalign pattern is not supported for this mode. */
4198 if (optab_handler (movmisalign_optab
, mode
) == CODE_FOR_nothing
)
4201 if (misalignment
== -1)
4203 /* Misalignment factor is unknown at compile time but we know
4204 it's word aligned. */
4205 if (rs6000_vector_alignment_reachable (type
, is_packed
))
4207 int element_size
= TREE_INT_CST_LOW (TYPE_SIZE (type
));
4209 if (element_size
== 64 || element_size
== 32)
4216 /* VSX supports word-aligned vector. */
4217 if (misalignment
% 4 == 0)
4223 /* Implement targetm.vectorize.builtin_vectorization_cost. */
4225 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost
,
4226 tree vectype
, int misalign
)
4231 switch (type_of_cost
)
4241 case cond_branch_not_taken
:
4250 case vec_promote_demote
:
4256 case cond_branch_taken
:
4259 case unaligned_load
:
4260 if (TARGET_VSX
&& TARGET_ALLOW_MOVMISALIGN
)
4262 elements
= TYPE_VECTOR_SUBPARTS (vectype
);
4264 /* Double word aligned. */
4272 /* Double word aligned. */
4276 /* Unknown misalignment. */
4289 /* Misaligned loads are not supported. */
4294 case unaligned_store
:
4295 if (TARGET_VSX
&& TARGET_ALLOW_MOVMISALIGN
)
4297 elements
= TYPE_VECTOR_SUBPARTS (vectype
);
4299 /* Double word aligned. */
4307 /* Double word aligned. */
4311 /* Unknown misalignment. */
4324 /* Misaligned stores are not supported. */
4330 elements
= TYPE_VECTOR_SUBPARTS (vectype
);
4331 elem_type
= TREE_TYPE (vectype
);
4332 /* 32-bit vectors loaded into registers are stored as double
4333 precision, so we need n/2 converts in addition to the usual
4334 n/2 merges to construct a vector of short floats from them. */
4335 if (SCALAR_FLOAT_TYPE_P (elem_type
)
4336 && TYPE_PRECISION (elem_type
) == 32)
4337 return elements
+ 1;
4339 return elements
/ 2 + 1;
4346 /* Implement targetm.vectorize.preferred_simd_mode. */
4348 static enum machine_mode
4349 rs6000_preferred_simd_mode (enum machine_mode mode
)
4358 if (TARGET_ALTIVEC
|| TARGET_VSX
)
4384 if (TARGET_PAIRED_FLOAT
typedef struct _rs6000_cost_data
{
  struct loop *loop_info;
  unsigned cost[3];
} rs6000_cost_data;
/* Test for likely overcommitment of vector hardware resources.  If a
   loop iteration is relatively large, and too large a percentage of
   instructions in the loop are vectorized, the cost model may not
   adequately reflect delays from unavailable vector resources.
   Penalize the loop body cost for this case.  */

static void
rs6000_density_test (rs6000_cost_data *data)
{
  const int DENSITY_PCT_THRESHOLD = 85;
  const int DENSITY_SIZE_THRESHOLD = 70;
  const int DENSITY_PENALTY = 10;
  struct loop *loop = data->loop_info;
  basic_block *bbs = get_loop_body (loop);
  int nbbs = loop->num_nodes;
  int vec_cost = data->cost[vect_body], not_vec_cost = 0;
  int i, density_pct;

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_IN_PATTERN_P (stmt_info))
            not_vec_cost++;
        }
    }

  free (bbs);
  density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);

  if (density_pct > DENSITY_PCT_THRESHOLD
      && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
    {
      data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "density %d%%, cost %d exceeds threshold, penalizing "
                         "loop body cost by %d%%", density_pct,
                         vec_cost + not_vec_cost, DENSITY_PENALTY);
    }
}
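/* Worked example (illustrative): with vec_cost = 90 and not_vec_cost = 5,
   density_pct = (90 * 100) / 95 = 94, which exceeds the 85% threshold, and
   the total cost of 95 exceeds the size threshold of 70, so the vectorized
   body cost is raised from 90 to 90 * 110 / 100 = 99.  */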
/* Implement targetm.vectorize.init_cost.  */

static void *
rs6000_init_cost (struct loop *loop_info)
{
  rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
  data->loop_info = loop_info;
  data->cost[vect_prologue] = 0;
  data->cost[vect_body] = 0;
  data->cost[vect_epilogue] = 0;
  return data;
}
/* Implement targetm.vectorize.add_stmt_cost.  */

static unsigned
rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
                      struct _stmt_vec_info *stmt_info, int misalign,
                      enum vect_cost_model_location where)
{
  rs6000_cost_data *cost_data = (rs6000_cost_data *) data;
  unsigned retval = 0;

  if (flag_vect_cost_model)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
                                                         misalign);
      /* Statements in an inner loop relative to the loop being
         vectorized are weighted more heavily.  The value here is
         arbitrary and could potentially be improved with analysis.  */
      if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
        count *= 50;  /* FIXME.  */

      retval = (unsigned) (count * stmt_cost);
      cost_data->cost[where] += retval;
    }

  return retval;
}
/* Implement targetm.vectorize.finish_cost.  */

static void
rs6000_finish_cost (void *data, unsigned *prologue_cost,
                    unsigned *body_cost, unsigned *epilogue_cost)
{
  rs6000_cost_data *cost_data = (rs6000_cost_data *) data;

  if (cost_data->loop_info)
    rs6000_density_test (cost_data);

  *prologue_cost = cost_data->cost[vect_prologue];
  *body_cost = cost_data->cost[vect_body];
  *epilogue_cost = cost_data->cost[vect_epilogue];
}
/* Implement targetm.vectorize.destroy_cost_data.  */

static void
rs6000_destroy_cost_data (void *data)
{
  free (data);
}
4510 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
4511 library with vectorized intrinsics. */
4514 rs6000_builtin_vectorized_libmass (tree fndecl
, tree type_out
, tree type_in
)
4517 const char *suffix
= NULL
;
4518 tree fntype
, new_fndecl
, bdecl
= NULL_TREE
;
4521 enum machine_mode el_mode
, in_mode
;
4524 /* Libmass is suitable for unsafe math only as it does not correctly support
4525 parts of IEEE with the required precision such as denormals. Only support
4526 it if we have VSX to use the simd d2 or f4 functions.
4527 XXX: Add variable length support. */
4528 if (!flag_unsafe_math_optimizations
|| !TARGET_VSX
)
4531 el_mode
= TYPE_MODE (TREE_TYPE (type_out
));
4532 n
= TYPE_VECTOR_SUBPARTS (type_out
);
4533 in_mode
= TYPE_MODE (TREE_TYPE (type_in
));
4534 in_n
= TYPE_VECTOR_SUBPARTS (type_in
);
4535 if (el_mode
!= in_mode
4539 if (DECL_BUILT_IN_CLASS (fndecl
) == BUILT_IN_NORMAL
)
4541 enum built_in_function fn
= DECL_FUNCTION_CODE (fndecl
);
4544 case BUILT_IN_ATAN2
:
4545 case BUILT_IN_HYPOT
:
4551 case BUILT_IN_ACOSH
:
4553 case BUILT_IN_ASINH
:
4555 case BUILT_IN_ATANH
:
4563 case BUILT_IN_EXPM1
:
4564 case BUILT_IN_LGAMMA
:
4565 case BUILT_IN_LOG10
:
4566 case BUILT_IN_LOG1P
:
4574 bdecl
= builtin_decl_implicit (fn
);
4575 suffix
= "d2"; /* pow -> powd2 */
4576 if (el_mode
!= DFmode
4582 case BUILT_IN_ATAN2F
:
4583 case BUILT_IN_HYPOTF
:
4588 case BUILT_IN_ACOSF
:
4589 case BUILT_IN_ACOSHF
:
4590 case BUILT_IN_ASINF
:
4591 case BUILT_IN_ASINHF
:
4592 case BUILT_IN_ATANF
:
4593 case BUILT_IN_ATANHF
:
4594 case BUILT_IN_CBRTF
:
4596 case BUILT_IN_COSHF
:
4598 case BUILT_IN_ERFCF
:
4599 case BUILT_IN_EXP2F
:
4601 case BUILT_IN_EXPM1F
:
4602 case BUILT_IN_LGAMMAF
:
4603 case BUILT_IN_LOG10F
:
4604 case BUILT_IN_LOG1PF
:
4605 case BUILT_IN_LOG2F
:
4608 case BUILT_IN_SINHF
:
4609 case BUILT_IN_SQRTF
:
4611 case BUILT_IN_TANHF
:
4612 bdecl
= builtin_decl_implicit (fn
);
4613 suffix
= "4"; /* powf -> powf4 */
4614 if (el_mode
!= SFmode
4627 gcc_assert (suffix
!= NULL
);
4628 bname
= IDENTIFIER_POINTER (DECL_NAME (bdecl
));
4632 strcpy (name
, bname
+ sizeof ("__builtin_") - 1);
4633 strcat (name
, suffix
);
4636 fntype
= build_function_type_list (type_out
, type_in
, NULL
);
4637 else if (n_args
== 2)
4638 fntype
= build_function_type_list (type_out
, type_in
, type_in
, NULL
);
4642 /* Build a function declaration for the vectorized function. */
4643 new_fndecl
= build_decl (BUILTINS_LOCATION
,
4644 FUNCTION_DECL
, get_identifier (name
), fntype
);
4645 TREE_PUBLIC (new_fndecl
) = 1;
4646 DECL_EXTERNAL (new_fndecl
) = 1;
4647 DECL_IS_NOVOPS (new_fndecl
) = 1;
4648 TREE_READONLY (new_fndecl
) = 1;
4653 /* Returns a function decl for a vectorized version of the builtin function
4654 with builtin function code FN and the result vector type TYPE, or NULL_TREE
4655 if it is not available. */
4658 rs6000_builtin_vectorized_function (tree fndecl
, tree type_out
,
4661 enum machine_mode in_mode
, out_mode
;
4664 if (TARGET_DEBUG_BUILTIN
)
4665 fprintf (stderr
, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
4666 IDENTIFIER_POINTER (DECL_NAME (fndecl
)),
4667 GET_MODE_NAME (TYPE_MODE (type_out
)),
4668 GET_MODE_NAME (TYPE_MODE (type_in
)));
4670 if (TREE_CODE (type_out
) != VECTOR_TYPE
4671 || TREE_CODE (type_in
) != VECTOR_TYPE
4672 || !TARGET_VECTORIZE_BUILTINS
)
4675 out_mode
= TYPE_MODE (TREE_TYPE (type_out
));
4676 out_n
= TYPE_VECTOR_SUBPARTS (type_out
);
4677 in_mode
= TYPE_MODE (TREE_TYPE (type_in
));
4678 in_n
= TYPE_VECTOR_SUBPARTS (type_in
);
4680 if (DECL_BUILT_IN_CLASS (fndecl
) == BUILT_IN_NORMAL
)
4682 enum built_in_function fn
= DECL_FUNCTION_CODE (fndecl
);
4685 case BUILT_IN_CLZIMAX
:
4686 case BUILT_IN_CLZLL
:
4689 if (TARGET_P8_VECTOR
&& in_mode
== out_mode
&& out_n
== in_n
)
4691 if (out_mode
== QImode
&& out_n
== 16)
4692 return rs6000_builtin_decls
[P8V_BUILTIN_VCLZB
];
4693 else if (out_mode
== HImode
&& out_n
== 8)
4694 return rs6000_builtin_decls
[P8V_BUILTIN_VCLZH
];
4695 else if (out_mode
== SImode
&& out_n
== 4)
4696 return rs6000_builtin_decls
[P8V_BUILTIN_VCLZW
];
4697 else if (out_mode
== DImode
&& out_n
== 2)
4698 return rs6000_builtin_decls
[P8V_BUILTIN_VCLZD
];
4701 case BUILT_IN_COPYSIGN
:
4702 if (VECTOR_UNIT_VSX_P (V2DFmode
)
4703 && out_mode
== DFmode
&& out_n
== 2
4704 && in_mode
== DFmode
&& in_n
== 2)
4705 return rs6000_builtin_decls
[VSX_BUILTIN_CPSGNDP
];
4707 case BUILT_IN_COPYSIGNF
:
4708 if (out_mode
!= SFmode
|| out_n
!= 4
4709 || in_mode
!= SFmode
|| in_n
!= 4)
4711 if (VECTOR_UNIT_VSX_P (V4SFmode
))
4712 return rs6000_builtin_decls
[VSX_BUILTIN_CPSGNSP
];
4713 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode
))
4714 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_COPYSIGN_V4SF
];
4716 case BUILT_IN_POPCOUNTIMAX
:
4717 case BUILT_IN_POPCOUNTLL
:
4718 case BUILT_IN_POPCOUNTL
:
4719 case BUILT_IN_POPCOUNT
:
4720 if (TARGET_P8_VECTOR
&& in_mode
== out_mode
&& out_n
== in_n
)
4722 if (out_mode
== QImode
&& out_n
== 16)
4723 return rs6000_builtin_decls
[P8V_BUILTIN_VPOPCNTB
];
4724 else if (out_mode
== HImode
&& out_n
== 8)
4725 return rs6000_builtin_decls
[P8V_BUILTIN_VPOPCNTH
];
4726 else if (out_mode
== SImode
&& out_n
== 4)
4727 return rs6000_builtin_decls
[P8V_BUILTIN_VPOPCNTW
];
4728 else if (out_mode
== DImode
&& out_n
== 2)
4729 return rs6000_builtin_decls
[P8V_BUILTIN_VPOPCNTD
];
4733 if (VECTOR_UNIT_VSX_P (V2DFmode
)
4734 && out_mode
== DFmode
&& out_n
== 2
4735 && in_mode
== DFmode
&& in_n
== 2)
4736 return rs6000_builtin_decls
[VSX_BUILTIN_XVSQRTDP
];
4738 case BUILT_IN_SQRTF
:
4739 if (VECTOR_UNIT_VSX_P (V4SFmode
)
4740 && out_mode
== SFmode
&& out_n
== 4
4741 && in_mode
== SFmode
&& in_n
== 4)
4742 return rs6000_builtin_decls
[VSX_BUILTIN_XVSQRTSP
];
4745 if (VECTOR_UNIT_VSX_P (V2DFmode
)
4746 && out_mode
== DFmode
&& out_n
== 2
4747 && in_mode
== DFmode
&& in_n
== 2)
4748 return rs6000_builtin_decls
[VSX_BUILTIN_XVRDPIP
];
4750 case BUILT_IN_CEILF
:
4751 if (out_mode
!= SFmode
|| out_n
!= 4
4752 || in_mode
!= SFmode
|| in_n
!= 4)
4754 if (VECTOR_UNIT_VSX_P (V4SFmode
))
4755 return rs6000_builtin_decls
[VSX_BUILTIN_XVRSPIP
];
4756 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode
))
4757 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRFIP
];
4759 case BUILT_IN_FLOOR
:
4760 if (VECTOR_UNIT_VSX_P (V2DFmode
)
4761 && out_mode
== DFmode
&& out_n
== 2
4762 && in_mode
== DFmode
&& in_n
== 2)
4763 return rs6000_builtin_decls
[VSX_BUILTIN_XVRDPIM
];
4765 case BUILT_IN_FLOORF
:
4766 if (out_mode
!= SFmode
|| out_n
!= 4
4767 || in_mode
!= SFmode
|| in_n
!= 4)
4769 if (VECTOR_UNIT_VSX_P (V4SFmode
))
4770 return rs6000_builtin_decls
[VSX_BUILTIN_XVRSPIM
];
4771 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode
))
4772 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRFIM
];
4775 if (VECTOR_UNIT_VSX_P (V2DFmode
)
4776 && out_mode
== DFmode
&& out_n
== 2
4777 && in_mode
== DFmode
&& in_n
== 2)
4778 return rs6000_builtin_decls
[VSX_BUILTIN_XVMADDDP
];
4781 if (VECTOR_UNIT_VSX_P (V4SFmode
)
4782 && out_mode
== SFmode
&& out_n
== 4
4783 && in_mode
== SFmode
&& in_n
== 4)
4784 return rs6000_builtin_decls
[VSX_BUILTIN_XVMADDSP
];
4785 else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode
)
4786 && out_mode
== SFmode
&& out_n
== 4
4787 && in_mode
== SFmode
&& in_n
== 4)
4788 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VMADDFP
];
4790 case BUILT_IN_TRUNC
:
4791 if (VECTOR_UNIT_VSX_P (V2DFmode
)
4792 && out_mode
== DFmode
&& out_n
== 2
4793 && in_mode
== DFmode
&& in_n
== 2)
4794 return rs6000_builtin_decls
[VSX_BUILTIN_XVRDPIZ
];
4796 case BUILT_IN_TRUNCF
:
4797 if (out_mode
!= SFmode
|| out_n
!= 4
4798 || in_mode
!= SFmode
|| in_n
!= 4)
4800 if (VECTOR_UNIT_VSX_P (V4SFmode
))
4801 return rs6000_builtin_decls
[VSX_BUILTIN_XVRSPIZ
];
4802 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode
))
4803 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRFIZ
];
4805 case BUILT_IN_NEARBYINT
:
4806 if (VECTOR_UNIT_VSX_P (V2DFmode
)
4807 && flag_unsafe_math_optimizations
4808 && out_mode
== DFmode
&& out_n
== 2
4809 && in_mode
== DFmode
&& in_n
== 2)
4810 return rs6000_builtin_decls
[VSX_BUILTIN_XVRDPI
];
4812 case BUILT_IN_NEARBYINTF
:
4813 if (VECTOR_UNIT_VSX_P (V4SFmode
)
4814 && flag_unsafe_math_optimizations
4815 && out_mode
== SFmode
&& out_n
== 4
4816 && in_mode
== SFmode
&& in_n
== 4)
4817 return rs6000_builtin_decls
[VSX_BUILTIN_XVRSPI
];
4820 if (VECTOR_UNIT_VSX_P (V2DFmode
)
4821 && !flag_trapping_math
4822 && out_mode
== DFmode
&& out_n
== 2
4823 && in_mode
== DFmode
&& in_n
== 2)
4824 return rs6000_builtin_decls
[VSX_BUILTIN_XVRDPIC
];
4826 case BUILT_IN_RINTF
:
4827 if (VECTOR_UNIT_VSX_P (V4SFmode
)
4828 && !flag_trapping_math
4829 && out_mode
== SFmode
&& out_n
== 4
4830 && in_mode
== SFmode
&& in_n
== 4)
4831 return rs6000_builtin_decls
[VSX_BUILTIN_XVRSPIC
];
4838 else if (DECL_BUILT_IN_CLASS (fndecl
) == BUILT_IN_MD
)
4840 enum rs6000_builtins fn
4841 = (enum rs6000_builtins
)DECL_FUNCTION_CODE (fndecl
);
4844 case RS6000_BUILTIN_RSQRTF
:
4845 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode
)
4846 && out_mode
== SFmode
&& out_n
== 4
4847 && in_mode
== SFmode
&& in_n
== 4)
4848 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRSQRTFP
];
4850 case RS6000_BUILTIN_RSQRT
:
4851 if (VECTOR_UNIT_VSX_P (V2DFmode
)
4852 && out_mode
== DFmode
&& out_n
== 2
4853 && in_mode
== DFmode
&& in_n
== 2)
4854 return rs6000_builtin_decls
[VSX_BUILTIN_RSQRT_2DF
];
4856 case RS6000_BUILTIN_RECIPF
:
4857 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode
)
4858 && out_mode
== SFmode
&& out_n
== 4
4859 && in_mode
== SFmode
&& in_n
== 4)
4860 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRECIPFP
];
4862 case RS6000_BUILTIN_RECIP
:
4863 if (VECTOR_UNIT_VSX_P (V2DFmode
)
4864 && out_mode
== DFmode
&& out_n
== 2
4865 && in_mode
== DFmode
&& in_n
== 2)
4866 return rs6000_builtin_decls
[VSX_BUILTIN_RECIP_V2DF
];
4873 /* Generate calls to libmass if appropriate. */
4874 if (rs6000_veclib_handler
)
4875 return rs6000_veclib_handler (fndecl
, type_out
, type_in
);
4880 /* Default CPU string for rs6000*_file_start functions. */
4881 static const char *rs6000_default_cpu
;
4883 /* Do anything needed at the start of the asm file. */
4886 rs6000_file_start (void)
4889 const char *start
= buffer
;
4890 FILE *file
= asm_out_file
;
4892 rs6000_default_cpu
= TARGET_CPU_DEFAULT
;
4894 default_file_start ();
4896 if (flag_verbose_asm
)
4898 sprintf (buffer
, "\n%s rs6000/powerpc options:", ASM_COMMENT_START
);
4900 if (rs6000_default_cpu
!= 0 && rs6000_default_cpu
[0] != '\0')
4902 fprintf (file
, "%s --with-cpu=%s", start
, rs6000_default_cpu
);
4906 if (global_options_set
.x_rs6000_cpu_index
)
4908 fprintf (file
, "%s -mcpu=%s", start
,
4909 processor_target_table
[rs6000_cpu_index
].name
);
4913 if (global_options_set
.x_rs6000_tune_index
)
4915 fprintf (file
, "%s -mtune=%s", start
,
4916 processor_target_table
[rs6000_tune_index
].name
);
4920 if (PPC405_ERRATUM77
)
4922 fprintf (file
, "%s PPC405CR_ERRATUM77", start
);
4926 #ifdef USING_ELFOS_H
4927 switch (rs6000_sdata
)
4929 case SDATA_NONE
: fprintf (file
, "%s -msdata=none", start
); start
= ""; break;
4930 case SDATA_DATA
: fprintf (file
, "%s -msdata=data", start
); start
= ""; break;
4931 case SDATA_SYSV
: fprintf (file
, "%s -msdata=sysv", start
); start
= ""; break;
4932 case SDATA_EABI
: fprintf (file
, "%s -msdata=eabi", start
); start
= ""; break;
4935 if (rs6000_sdata
&& g_switch_value
)
4937 fprintf (file
, "%s -G %d", start
,
4947 if (DEFAULT_ABI
== ABI_ELFv2
)
4948 fprintf (file
, "\t.abiversion 2\n");
4950 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
4951 || (TARGET_ELF
&& flag_pic
== 2))
4953 switch_to_section (toc_section
);
4954 switch_to_section (text_section
);
4959 /* Return nonzero if this function is known to have a null epilogue. */
4962 direct_return (void)
4964 if (reload_completed
)
4966 rs6000_stack_t
*info
= rs6000_stack_info ();
4968 if (info
->first_gp_reg_save
== 32
4969 && info
->first_fp_reg_save
== 64
4970 && info
->first_altivec_reg_save
== LAST_ALTIVEC_REGNO
+ 1
4971 && ! info
->lr_save_p
4972 && ! info
->cr_save_p
4973 && info
->vrsave_mask
== 0
4981 /* Return the number of instructions it takes to form a constant in an
4982 integer register. */
4985 num_insns_constant_wide (HOST_WIDE_INT value
)
4987 /* signed constant loadable with addi */
4988 if ((unsigned HOST_WIDE_INT
) (value
+ 0x8000) < 0x10000)
4991 /* constant loadable with addis */
4992 else if ((value
& 0xffff) == 0
4993 && (value
>> 31 == -1 || value
>> 31 == 0))
4996 else if (TARGET_POWERPC64
)
4998 HOST_WIDE_INT low
= ((value
& 0xffffffff) ^ 0x80000000) - 0x80000000;
4999 HOST_WIDE_INT high
= value
>> 31;
5001 if (high
== 0 || high
== -1)
5007 return num_insns_constant_wide (high
) + 1;
5009 return num_insns_constant_wide (low
) + 1;
5011 return (num_insns_constant_wide (high
)
5012 + num_insns_constant_wide (low
) + 1);
5020 num_insns_constant (rtx op
, enum machine_mode mode
)
5022 HOST_WIDE_INT low
, high
;
5024 switch (GET_CODE (op
))
5027 if ((INTVAL (op
) >> 31) != 0 && (INTVAL (op
) >> 31) != -1
5028 && mask64_operand (op
, mode
))
5031 return num_insns_constant_wide (INTVAL (op
));
5033 case CONST_WIDE_INT
:
5036 int ins
= CONST_WIDE_INT_NUNITS (op
) - 1;
5037 for (i
= 0; i
< CONST_WIDE_INT_NUNITS (op
); i
++)
5038 ins
+= num_insns_constant_wide (CONST_WIDE_INT_ELT (op
, i
));
5043 if (mode
== SFmode
|| mode
== SDmode
)
5048 REAL_VALUE_FROM_CONST_DOUBLE (rv
, op
);
5049 if (DECIMAL_FLOAT_MODE_P (mode
))
5050 REAL_VALUE_TO_TARGET_DECIMAL32 (rv
, l
);
5052 REAL_VALUE_TO_TARGET_SINGLE (rv
, l
);
5053 return num_insns_constant_wide ((HOST_WIDE_INT
) l
);
5059 REAL_VALUE_FROM_CONST_DOUBLE (rv
, op
);
5060 if (DECIMAL_FLOAT_MODE_P (mode
))
5061 REAL_VALUE_TO_TARGET_DECIMAL64 (rv
, l
);
5063 REAL_VALUE_TO_TARGET_DOUBLE (rv
, l
);
5064 high
= l
[WORDS_BIG_ENDIAN
== 0];
5065 low
= l
[WORDS_BIG_ENDIAN
!= 0];
5068 return (num_insns_constant_wide (low
)
5069 + num_insns_constant_wide (high
));
5072 if ((high
== 0 && low
>= 0)
5073 || (high
== -1 && low
< 0))
5074 return num_insns_constant_wide (low
);
5076 else if (mask64_operand (op
, mode
))
5080 return num_insns_constant_wide (high
) + 1;
5083 return (num_insns_constant_wide (high
)
5084 + num_insns_constant_wide (low
) + 1);
5092 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5093 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5094 corresponding element of the vector, but for V4SFmode and V2SFmode,
5095 the corresponding "float" is interpreted as an SImode integer. */
5098 const_vector_elt_as_int (rtx op
, unsigned int elt
)
5102 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5103 gcc_assert (GET_MODE (op
) != V2DImode
5104 && GET_MODE (op
) != V2DFmode
);
5106 tmp
= CONST_VECTOR_ELT (op
, elt
);
5107 if (GET_MODE (op
) == V4SFmode
5108 || GET_MODE (op
) == V2SFmode
)
5109 tmp
= gen_lowpart (SImode
, tmp
);
5110 return INTVAL (tmp
);
5113 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5114 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5115 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5116 all items are set to the same value and contain COPIES replicas of the
5117 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
5118 operand and the others are set to the value of the operand's msb. */
5121 vspltis_constant (rtx op
, unsigned step
, unsigned copies
)
5123 enum machine_mode mode
= GET_MODE (op
);
5124 enum machine_mode inner
= GET_MODE_INNER (mode
);
5132 HOST_WIDE_INT splat_val
;
5133 HOST_WIDE_INT msb_val
;
5135 if (mode
== V2DImode
|| mode
== V2DFmode
|| mode
== V1TImode
)
5138 nunits
= GET_MODE_NUNITS (mode
);
5139 bitsize
= GET_MODE_BITSIZE (inner
);
5140 mask
= GET_MODE_MASK (inner
);
5142 val
= const_vector_elt_as_int (op
, BYTES_BIG_ENDIAN
? nunits
- 1 : 0);
5144 msb_val
= val
>= 0 ? 0 : -1;
5146 /* Construct the value to be splatted, if possible. If not, return 0. */
5147 for (i
= 2; i
<= copies
; i
*= 2)
5149 HOST_WIDE_INT small_val
;
5151 small_val
= splat_val
>> bitsize
;
5153 if (splat_val
!= ((small_val
<< bitsize
) | (small_val
& mask
)))
5155 splat_val
= small_val
;
5158 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
5159 if (EASY_VECTOR_15 (splat_val
))
  /* Also check if we can splat, and then add the result to itself.  Do so if
     the value is positive, or if the splat instruction is using OP's mode;
     for splat_val < 0, the splat and the add should use the same mode.  */
5165 else if (EASY_VECTOR_15_ADD_SELF (splat_val
)
5166 && (splat_val
>= 0 || (step
== 1 && copies
== 1)))
  /* Also check if we are loading up the most significant bit, which can be
     done by loading up -1 and shifting the value left by -1.  */
5171 else if (EASY_VECTOR_MSB (splat_val
, inner
))
5177 /* Check if VAL is present in every STEP-th element, and the
5178 other elements are filled with its most significant bit. */
5179 for (i
= 1; i
< nunits
; ++i
)
5181 HOST_WIDE_INT desired_val
;
5182 unsigned elt
= BYTES_BIG_ENDIAN
? nunits
- 1 - i
: i
;
5183 if ((i
& (step
- 1)) == 0)
5186 desired_val
= msb_val
;
5188 if (desired_val
!= const_vector_elt_as_int (op
, elt
))
5196 /* Return true if OP is of the given MODE and can be synthesized
5197 with a vspltisb, vspltish or vspltisw. */
5200 easy_altivec_constant (rtx op
, enum machine_mode mode
)
5202 unsigned step
, copies
;
5204 if (mode
== VOIDmode
)
5205 mode
= GET_MODE (op
);
5206 else if (mode
!= GET_MODE (op
))
5209 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
5211 if (mode
== V2DFmode
)
5212 return zero_constant (op
, mode
);
5214 else if (mode
== V2DImode
)
5216 if (GET_CODE (CONST_VECTOR_ELT (op
, 0)) != CONST_INT
5217 || GET_CODE (CONST_VECTOR_ELT (op
, 1)) != CONST_INT
)
5220 if (zero_constant (op
, mode
))
5223 if (INTVAL (CONST_VECTOR_ELT (op
, 0)) == -1
5224 && INTVAL (CONST_VECTOR_ELT (op
, 1)) == -1)
5230 /* V1TImode is a special container for TImode. Ignore for now. */
5231 else if (mode
== V1TImode
)
5234 /* Start with a vspltisw. */
5235 step
= GET_MODE_NUNITS (mode
) / 4;
5238 if (vspltis_constant (op
, step
, copies
))
5241 /* Then try with a vspltish. */
5247 if (vspltis_constant (op
, step
, copies
))
5250 /* And finally a vspltisb. */
5256 if (vspltis_constant (op
, step
, copies
))
5262 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
5263 result is OP. Abort if it is not possible. */
5266 gen_easy_altivec_constant (rtx op
)
5268 enum machine_mode mode
= GET_MODE (op
);
5269 int nunits
= GET_MODE_NUNITS (mode
);
5270 rtx val
= CONST_VECTOR_ELT (op
, BYTES_BIG_ENDIAN
? nunits
- 1 : 0);
5271 unsigned step
= nunits
/ 4;
5272 unsigned copies
= 1;
5274 /* Start with a vspltisw. */
5275 if (vspltis_constant (op
, step
, copies
))
5276 return gen_rtx_VEC_DUPLICATE (V4SImode
, gen_lowpart (SImode
, val
));
5278 /* Then try with a vspltish. */
5284 if (vspltis_constant (op
, step
, copies
))
5285 return gen_rtx_VEC_DUPLICATE (V8HImode
, gen_lowpart (HImode
, val
));
5287 /* And finally a vspltisb. */
5293 if (vspltis_constant (op
, step
, copies
))
5294 return gen_rtx_VEC_DUPLICATE (V16QImode
, gen_lowpart (QImode
, val
));
5300 output_vec_const_move (rtx
*operands
)
5303 enum machine_mode mode
;
5308 mode
= GET_MODE (dest
);
5312 if (zero_constant (vec
, mode
))
5313 return "xxlxor %x0,%x0,%x0";
5315 if ((mode
== V2DImode
|| mode
== V1TImode
)
5316 && INTVAL (CONST_VECTOR_ELT (vec
, 0)) == -1
5317 && INTVAL (CONST_VECTOR_ELT (vec
, 1)) == -1)
5318 return "vspltisw %0,-1";
5324 if (zero_constant (vec
, mode
))
5325 return "vxor %0,%0,%0";
5327 splat_vec
= gen_easy_altivec_constant (vec
);
5328 gcc_assert (GET_CODE (splat_vec
) == VEC_DUPLICATE
);
5329 operands
[1] = XEXP (splat_vec
, 0);
5330 if (!EASY_VECTOR_15 (INTVAL (operands
[1])))
5333 switch (GET_MODE (splat_vec
))
5336 return "vspltisw %0,%1";
5339 return "vspltish %0,%1";
5342 return "vspltisb %0,%1";
5349 gcc_assert (TARGET_SPE
);
5351 /* Vector constant 0 is handled as a splitter of V2SI, and in the
5352 pattern of V1DI, V4HI, and V2SF.
5354 FIXME: We should probably return # and add post reload
5355 splitters for these, but this way is so easy ;-). */
5356 cst
= INTVAL (CONST_VECTOR_ELT (vec
, 0));
5357 cst2
= INTVAL (CONST_VECTOR_ELT (vec
, 1));
5358 operands
[1] = CONST_VECTOR_ELT (vec
, 0);
5359 operands
[2] = CONST_VECTOR_ELT (vec
, 1);
5361 return "li %0,%1\n\tevmergelo %0,%0,%0";
5362 else if (WORDS_BIG_ENDIAN
)
5363 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
5365 return "li %0,%2\n\tevmergelo %0,%0,%0\n\tli %0,%1";
5368 /* Initialize TARGET of vector PAIRED to VALS. */
5371 paired_expand_vector_init (rtx target
, rtx vals
)
5373 enum machine_mode mode
= GET_MODE (target
);
5374 int n_elts
= GET_MODE_NUNITS (mode
);
5376 rtx x
, new_rtx
, tmp
, constant_op
, op1
, op2
;
5379 for (i
= 0; i
< n_elts
; ++i
)
5381 x
= XVECEXP (vals
, 0, i
);
5382 if (!(CONST_SCALAR_INT_P (x
) || CONST_DOUBLE_P (x
) || CONST_FIXED_P (x
)))
5387 /* Load from constant pool. */
5388 emit_move_insn (target
, gen_rtx_CONST_VECTOR (mode
, XVEC (vals
, 0)));
5394 /* The vector is initialized only with non-constants. */
5395 new_rtx
= gen_rtx_VEC_CONCAT (V2SFmode
, XVECEXP (vals
, 0, 0),
5396 XVECEXP (vals
, 0, 1));
5398 emit_move_insn (target
, new_rtx
);
5402 /* One field is non-constant and the other one is a constant. Load the
5403 constant from the constant pool and use ps_merge instruction to
5404 construct the whole vector. */
5405 op1
= XVECEXP (vals
, 0, 0);
5406 op2
= XVECEXP (vals
, 0, 1);
5408 constant_op
= (CONSTANT_P (op1
)) ? op1
: op2
;
5410 tmp
= gen_reg_rtx (GET_MODE (constant_op
));
5411 emit_move_insn (tmp
, constant_op
);
5413 if (CONSTANT_P (op1
))
5414 new_rtx
= gen_rtx_VEC_CONCAT (V2SFmode
, tmp
, op2
);
5416 new_rtx
= gen_rtx_VEC_CONCAT (V2SFmode
, op1
, tmp
);
5418 emit_move_insn (target
, new_rtx
);
5422 paired_expand_vector_move (rtx operands
[])
5424 rtx op0
= operands
[0], op1
= operands
[1];
5426 emit_move_insn (op0
, op1
);
5429 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
5430 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
5431 operands for the relation operation COND. This is a recursive
5435 paired_emit_vector_compare (enum rtx_code rcode
,
5436 rtx dest
, rtx op0
, rtx op1
,
5437 rtx cc_op0
, rtx cc_op1
)
5439 rtx tmp
= gen_reg_rtx (V2SFmode
);
5442 gcc_assert (TARGET_PAIRED_FLOAT
);
5443 gcc_assert (GET_MODE (op0
) == GET_MODE (op1
));
5449 paired_emit_vector_compare (GE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
5453 emit_insn (gen_subv2sf3 (tmp
, cc_op0
, cc_op1
));
5454 emit_insn (gen_selv2sf4 (dest
, tmp
, op0
, op1
, CONST0_RTX (SFmode
)));
5458 paired_emit_vector_compare (GE
, dest
, op0
, op1
, cc_op1
, cc_op0
);
5461 paired_emit_vector_compare (LE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
5464 tmp1
= gen_reg_rtx (V2SFmode
);
5465 max
= gen_reg_rtx (V2SFmode
);
5466 min
= gen_reg_rtx (V2SFmode
);
5467 gen_reg_rtx (V2SFmode
);
5469 emit_insn (gen_subv2sf3 (tmp
, cc_op0
, cc_op1
));
5470 emit_insn (gen_selv2sf4
5471 (max
, tmp
, cc_op0
, cc_op1
, CONST0_RTX (SFmode
)));
5472 emit_insn (gen_subv2sf3 (tmp
, cc_op1
, cc_op0
));
5473 emit_insn (gen_selv2sf4
5474 (min
, tmp
, cc_op0
, cc_op1
, CONST0_RTX (SFmode
)));
5475 emit_insn (gen_subv2sf3 (tmp1
, min
, max
));
5476 emit_insn (gen_selv2sf4 (dest
, tmp1
, op0
, op1
, CONST0_RTX (SFmode
)));
5479 paired_emit_vector_compare (EQ
, dest
, op1
, op0
, cc_op0
, cc_op1
);
5482 paired_emit_vector_compare (LE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
5485 paired_emit_vector_compare (LT
, dest
, op1
, op0
, cc_op0
, cc_op1
);
5488 paired_emit_vector_compare (GE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
5491 paired_emit_vector_compare (GT
, dest
, op1
, op0
, cc_op0
, cc_op1
);
5500 /* Emit vector conditional expression.
5501 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
5502 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
5505 paired_emit_vector_cond_expr (rtx dest
, rtx op1
, rtx op2
,
5506 rtx cond
, rtx cc_op0
, rtx cc_op1
)
5508 enum rtx_code rcode
= GET_CODE (cond
);
5510 if (!TARGET_PAIRED_FLOAT
)
5513 paired_emit_vector_compare (rcode
, dest
, op1
, op2
, cc_op0
, cc_op1
);
5518 /* Initialize vector TARGET to VALS. */
5521 rs6000_expand_vector_init (rtx target
, rtx vals
)
5523 enum machine_mode mode
= GET_MODE (target
);
5524 enum machine_mode inner_mode
= GET_MODE_INNER (mode
);
5525 int n_elts
= GET_MODE_NUNITS (mode
);
5526 int n_var
= 0, one_var
= -1;
5527 bool all_same
= true, all_const_zero
= true;
5531 for (i
= 0; i
< n_elts
; ++i
)
5533 x
= XVECEXP (vals
, 0, i
);
5534 if (!(CONST_SCALAR_INT_P (x
) || CONST_DOUBLE_P (x
) || CONST_FIXED_P (x
)))
5535 ++n_var
, one_var
= i
;
5536 else if (x
!= CONST0_RTX (inner_mode
))
5537 all_const_zero
= false;
5539 if (i
> 0 && !rtx_equal_p (x
, XVECEXP (vals
, 0, 0)))
5545 rtx const_vec
= gen_rtx_CONST_VECTOR (mode
, XVEC (vals
, 0));
5546 bool int_vector_p
= (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
);
5547 if ((int_vector_p
|| TARGET_VSX
) && all_const_zero
)
5549 /* Zero register. */
5550 emit_insn (gen_rtx_SET (VOIDmode
, target
,
5551 gen_rtx_XOR (mode
, target
, target
)));
5554 else if (int_vector_p
&& easy_vector_constant (const_vec
, mode
))
5556 /* Splat immediate. */
5557 emit_insn (gen_rtx_SET (VOIDmode
, target
, const_vec
));
5562 /* Load from constant pool. */
5563 emit_move_insn (target
, const_vec
);
5568 /* Double word values on VSX can use xxpermdi or lxvdsx. */
5569 if (VECTOR_MEM_VSX_P (mode
) && (mode
== V2DFmode
|| mode
== V2DImode
))
5571 rtx op0
= XVECEXP (vals
, 0, 0);
5572 rtx op1
= XVECEXP (vals
, 0, 1);
5575 if (!MEM_P (op0
) && !REG_P (op0
))
5576 op0
= force_reg (inner_mode
, op0
);
5577 if (mode
== V2DFmode
)
5578 emit_insn (gen_vsx_splat_v2df (target
, op0
));
5580 emit_insn (gen_vsx_splat_v2di (target
, op0
));
5584 op0
= force_reg (inner_mode
, op0
);
5585 op1
= force_reg (inner_mode
, op1
);
5586 if (mode
== V2DFmode
)
5587 emit_insn (gen_vsx_concat_v2df (target
, op0
, op1
));
5589 emit_insn (gen_vsx_concat_v2di (target
, op0
, op1
));
5594 /* With single precision floating point on VSX, know that internally single
5595 precision is actually represented as a double, and either make 2 V2DF
5596 vectors, and convert these vectors to single precision, or do one
5597 conversion, and splat the result to the other elements. */
5598 if (mode
== V4SFmode
&& VECTOR_MEM_VSX_P (mode
))
5602 rtx freg
= gen_reg_rtx (V4SFmode
);
5603 rtx sreg
= force_reg (SFmode
, XVECEXP (vals
, 0, 0));
5604 rtx cvt
= ((TARGET_XSCVDPSPN
)
5605 ? gen_vsx_xscvdpspn_scalar (freg
, sreg
)
5606 : gen_vsx_xscvdpsp_scalar (freg
, sreg
));
5609 emit_insn (gen_vsx_xxspltw_v4sf_direct (target
, freg
, const0_rtx
));
5613 rtx dbl_even
= gen_reg_rtx (V2DFmode
);
5614 rtx dbl_odd
= gen_reg_rtx (V2DFmode
);
5615 rtx flt_even
= gen_reg_rtx (V4SFmode
);
5616 rtx flt_odd
= gen_reg_rtx (V4SFmode
);
5617 rtx op0
= force_reg (SFmode
, XVECEXP (vals
, 0, 0));
5618 rtx op1
= force_reg (SFmode
, XVECEXP (vals
, 0, 1));
5619 rtx op2
= force_reg (SFmode
, XVECEXP (vals
, 0, 2));
5620 rtx op3
= force_reg (SFmode
, XVECEXP (vals
, 0, 3));
5622 emit_insn (gen_vsx_concat_v2sf (dbl_even
, op0
, op1
));
5623 emit_insn (gen_vsx_concat_v2sf (dbl_odd
, op2
, op3
));
5624 emit_insn (gen_vsx_xvcvdpsp (flt_even
, dbl_even
));
5625 emit_insn (gen_vsx_xvcvdpsp (flt_odd
, dbl_odd
));
5626 rs6000_expand_extract_even (target
, flt_even
, flt_odd
);
5631 /* Store value to stack temp. Load vector element. Splat. However, splat
5632 of 64-bit items is not supported on Altivec. */
5633 if (all_same
&& GET_MODE_SIZE (inner_mode
) <= 4)
5635 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (inner_mode
));
5636 emit_move_insn (adjust_address_nv (mem
, inner_mode
, 0),
5637 XVECEXP (vals
, 0, 0));
5638 x
= gen_rtx_UNSPEC (VOIDmode
,
5639 gen_rtvec (1, const0_rtx
), UNSPEC_LVE
);
5640 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
5642 gen_rtx_SET (VOIDmode
,
5645 x
= gen_rtx_VEC_SELECT (inner_mode
, target
,
5646 gen_rtx_PARALLEL (VOIDmode
,
5647 gen_rtvec (1, const0_rtx
)));
5648 emit_insn (gen_rtx_SET (VOIDmode
, target
,
5649 gen_rtx_VEC_DUPLICATE (mode
, x
)));
5653 /* One field is non-constant. Load constant then overwrite
5657 rtx copy
= copy_rtx (vals
);
5659 /* Load constant part of vector, substitute neighboring value for
5661 XVECEXP (copy
, 0, one_var
) = XVECEXP (vals
, 0, (one_var
+ 1) % n_elts
);
5662 rs6000_expand_vector_init (target
, copy
);
5664 /* Insert variable. */
5665 rs6000_expand_vector_set (target
, XVECEXP (vals
, 0, one_var
), one_var
);
5669 /* Construct the vector in memory one field at a time
5670 and load the whole vector. */
5671 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (mode
));
5672 for (i
= 0; i
< n_elts
; i
++)
5673 emit_move_insn (adjust_address_nv (mem
, inner_mode
,
5674 i
* GET_MODE_SIZE (inner_mode
)),
5675 XVECEXP (vals
, 0, i
));
5676 emit_move_insn (target
, mem
);
5679 /* Set field ELT of TARGET to VAL. */
5682 rs6000_expand_vector_set (rtx target
, rtx val
, int elt
)
5684 enum machine_mode mode
= GET_MODE (target
);
5685 enum machine_mode inner_mode
= GET_MODE_INNER (mode
);
5686 rtx reg
= gen_reg_rtx (mode
);
5688 int width
= GET_MODE_SIZE (inner_mode
);
5691 if (VECTOR_MEM_VSX_P (mode
) && (mode
== V2DFmode
|| mode
== V2DImode
))
5693 rtx (*set_func
) (rtx
, rtx
, rtx
, rtx
)
5694 = ((mode
== V2DFmode
) ? gen_vsx_set_v2df
: gen_vsx_set_v2di
);
5695 emit_insn (set_func (target
, target
, val
, GEN_INT (elt
)));
5699 /* Simplify setting single element vectors like V1TImode. */
5700 if (GET_MODE_SIZE (mode
) == GET_MODE_SIZE (inner_mode
) && elt
== 0)
5702 emit_move_insn (target
, gen_lowpart (mode
, val
));
5706 /* Load single variable value. */
5707 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (inner_mode
));
5708 emit_move_insn (adjust_address_nv (mem
, inner_mode
, 0), val
);
5709 x
= gen_rtx_UNSPEC (VOIDmode
,
5710 gen_rtvec (1, const0_rtx
), UNSPEC_LVE
);
5711 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
5713 gen_rtx_SET (VOIDmode
,
5717 /* Linear sequence. */
5718 mask
= gen_rtx_PARALLEL (V16QImode
, rtvec_alloc (16));
5719 for (i
= 0; i
< 16; ++i
)
5720 XVECEXP (mask
, 0, i
) = GEN_INT (i
);
5722 /* Set permute mask to insert element into target. */
5723 for (i
= 0; i
< width
; ++i
)
5724 XVECEXP (mask
, 0, elt
*width
+ i
)
5725 = GEN_INT (i
+ 0x10);
5726 x
= gen_rtx_CONST_VECTOR (V16QImode
, XVEC (mask
, 0));
5728 if (BYTES_BIG_ENDIAN
)
5729 x
= gen_rtx_UNSPEC (mode
,
5730 gen_rtvec (3, target
, reg
,
5731 force_reg (V16QImode
, x
)),
5735 /* Invert selector. We prefer to generate VNAND on P8 so
5736 that future fusion opportunities can kick in, but must
5737 generate VNOR elsewhere. */
5738 rtx notx
= gen_rtx_NOT (V16QImode
, force_reg (V16QImode
, x
));
5739 rtx iorx
= (TARGET_P8_VECTOR
5740 ? gen_rtx_IOR (V16QImode
, notx
, notx
)
5741 : gen_rtx_AND (V16QImode
, notx
, notx
));
5742 rtx tmp
= gen_reg_rtx (V16QImode
);
5743 emit_insn (gen_rtx_SET (VOIDmode
, tmp
, iorx
));
5745 /* Permute with operands reversed and adjusted selector. */
5746 x
= gen_rtx_UNSPEC (mode
, gen_rtvec (3, reg
, target
, tmp
),
5750 emit_insn (gen_rtx_SET (VOIDmode
, target
, x
));
5753 /* Extract field ELT from VEC into TARGET. */
5756 rs6000_expand_vector_extract (rtx target
, rtx vec
, int elt
)
5758 enum machine_mode mode
= GET_MODE (vec
);
5759 enum machine_mode inner_mode
= GET_MODE_INNER (mode
);
5762 if (VECTOR_MEM_VSX_P (mode
))
5769 gcc_assert (elt
== 0 && inner_mode
== TImode
);
5770 emit_move_insn (target
, gen_lowpart (TImode
, vec
));
5773 emit_insn (gen_vsx_extract_v2df (target
, vec
, GEN_INT (elt
)));
5776 emit_insn (gen_vsx_extract_v2di (target
, vec
, GEN_INT (elt
)));
5779 emit_insn (gen_vsx_extract_v4sf (target
, vec
, GEN_INT (elt
)));
5784 /* Allocate mode-sized buffer. */
5785 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (mode
));
5787 emit_move_insn (mem
, vec
);
5789 /* Add offset to field within buffer matching vector element. */
5790 mem
= adjust_address_nv (mem
, inner_mode
, elt
* GET_MODE_SIZE (inner_mode
));
5792 emit_move_insn (target
, adjust_address_nv (mem
, inner_mode
, 0));

/* Generates shifts and masks for a pair of rldicl or rldicr insns to
   implement ANDing by the mask IN.  */
void
build_mask64_2_operands (rtx in, rtx *out)
{
  unsigned HOST_WIDE_INT c, lsb, m1, m2;
  int shift;

  gcc_assert (GET_CODE (in) == CONST_INT);

  c = INTVAL (in);
  if (c & 1)
    {
      /* Assume c initially something like 0x00fff000000fffff.  The idea
         is to rotate the word so that the middle ^^^^^^ group of zeros
         is at the MS end and can be cleared with an rldicl mask.  We then
         rotate back and clear off the MS    ^^ group of zeros with a
         second rldicl.  */
      c = ~c;                   /*   c == 0xff000ffffff00000 */
      lsb = c & -c;             /* lsb == 0x0000000000100000 */
      m1 = -lsb;                /*  m1 == 0xfffffffffff00000 */
      c = ~c;                   /*   c == 0x00fff000000fffff */
      c &= -lsb;                /*   c == 0x00fff00000000000 */
      lsb = c & -c;             /* lsb == 0x0000100000000000 */
      c = ~c;                   /*   c == 0xff000fffffffffff */
      c &= -lsb;                /*   c == 0xff00000000000000 */
      shift = 0;
      while ((lsb >>= 1) != 0)
        shift++;                /* shift == 44 on exit from loop */
      m1 <<= 64 - shift;        /*  m1 == 0xffffff0000000000 */
      m1 = ~m1;                 /*  m1 == 0x000000ffffffffff */
      m2 = ~c;                  /*  m2 == 0x00ffffffffffffff */
    }
  else
    {
      /* Assume c initially something like 0xff000f0000000000.  The idea
         is to rotate the word so that the     ^^^  middle group of zeros
         is at the LS end and can be cleared with an rldicr mask.  We then
         rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
         a second rldicr.  */
      lsb = c & -c;             /* lsb == 0x0000010000000000 */
      m2 = -lsb;                /*  m2 == 0xffffff0000000000 */
      c = ~c;                   /*   c == 0x00fff0ffffffffff */
      c &= -lsb;                /*   c == 0x00fff00000000000 */
      lsb = c & -c;             /* lsb == 0x0000100000000000 */
      c = ~c;                   /*   c == 0xff000fffffffffff */
      c &= -lsb;                /*   c == 0xff00000000000000 */
      shift = 0;
      while ((lsb >>= 1) != 0)
        shift++;                /* shift == 44 on exit from loop */
      m1 = ~c;                  /*  m1 == 0x00ffffffffffffff */
      m1 >>= shift;             /*  m1 == 0x0000000000000fff */
      m1 = ~m1;                 /*  m1 == 0xfffffffffffff000 */
    }

  /* Note that when we only have two 0->1 and 1->0 transitions, one of the
     masks will be all 1's.  We are guaranteed more than one transition.  */
  out[0] = GEN_INT (64 - shift);
  out[1] = GEN_INT (m1);
  out[2] = GEN_INT (shift);
  out[3] = GEN_INT (m2);
}
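
/* Illustrative note: the AND splitters in rs6000.md consume these four
   values roughly as

     dest = (src  rotated left by INTVAL (out[0])) & INTVAL (out[1]);
     dest = (dest rotated left by INTVAL (out[2])) & INTVAL (out[3]);

   i.e. each half of the pair becomes one rldicl/rldicr, and rotating by
   64 - shift and then by shift brings the value back to its original
   position.  */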

/* Return TRUE if OP is an invalid SUBREG operation on the e500.  */

bool
invalid_e500_subreg (rtx op, enum machine_mode mode)
{
  if (TARGET_E500_DOUBLE)
    {
      /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
         subreg:TI and reg:TF.  Decimal float modes are like integer
         modes (only low part of each register used) for this
         purpose.  */
      if (GET_CODE (op) == SUBREG
          && (mode == SImode || mode == DImode || mode == TImode
              || mode == DDmode || mode == TDmode || mode == PTImode)
          && REG_P (SUBREG_REG (op))
          && (GET_MODE (SUBREG_REG (op)) == DFmode
              || GET_MODE (SUBREG_REG (op)) == TFmode))
        return true;

      /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
         reg:TI.  */
      if (GET_CODE (op) == SUBREG
          && (mode == DFmode || mode == TFmode)
          && REG_P (SUBREG_REG (op))
          && (GET_MODE (SUBREG_REG (op)) == DImode
              || GET_MODE (SUBREG_REG (op)) == TImode
              || GET_MODE (SUBREG_REG (op)) == PTImode
              || GET_MODE (SUBREG_REG (op)) == DDmode
              || GET_MODE (SUBREG_REG (op)) == TDmode))
        return true;
    }

  if (TARGET_SPE
      && GET_CODE (op) == SUBREG
      && mode == SImode
      && REG_P (SUBREG_REG (op))
      && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
    return true;

  return false;
}

/* Return alignment of TYPE.  Existing alignment is ALIGN.  HOW
   selects whether the alignment is abi mandated, optional, or
   both abi and optional alignment.  */

unsigned int
rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
{
  if (how != align_opt)
    {
      if (TREE_CODE (type) == VECTOR_TYPE)
        {
          if ((TARGET_SPE && SPE_VECTOR_MODE (TYPE_MODE (type)))
              || (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type))))
            {
              if (align < 64)
                align = 64;
            }
          else if (align < 128)
            align = 128;
        }
      else if (TARGET_E500_DOUBLE
               && TREE_CODE (type) == REAL_TYPE
               && TYPE_MODE (type) == DFmode)
        {
          if (align < 64)
            align = 64;
        }
    }

  if (how != align_abi)
    {
      if (TREE_CODE (type) == ARRAY_TYPE
          && TYPE_MODE (TREE_TYPE (type)) == QImode)
        {
          if (align < BITS_PER_WORD)
            align = BITS_PER_WORD;
        }
    }

  return align;
}
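
/* For example, an AltiVec "vector int" object is raised to 128-bit
   alignment as ABI-mandated alignment (the first block above), while the
   bump of char arrays to word alignment is only an optional optimization
   (the second block), letting string and block copies move whole words at
   a time.  */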

/* Previous GCC releases forced all vector types to have 16-byte alignment.  */

bool
rs6000_special_adjust_field_align_p (tree field, unsigned int computed)
{
  if (TARGET_ALTIVEC && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
    {
      if (computed != 128)
        {
          static bool warned;
          if (!warned && warn_psabi)
            {
              warned = true;
              inform (input_location,
                      "the layout of aggregates containing vectors with"
                      " %d-byte alignment has changed in GCC 5",
                      computed / BITS_PER_UNIT);
            }
        }
      /* In current GCC there is no special case.  */
      return false;
    }

  return false;
}

/* AIX increases natural record alignment to doubleword if the first
   field is an FP double while the FP fields remain word aligned.  */

unsigned int
rs6000_special_round_type_align (tree type, unsigned int computed,
                                 unsigned int specified)
{
  unsigned int align = MAX (computed, specified);
  tree field = TYPE_FIELDS (type);

  /* Skip all non field decls */
  while (field != NULL && TREE_CODE (field) != FIELD_DECL)
    field = DECL_CHAIN (field);

  if (field != NULL && field != type)
    {
      type = TREE_TYPE (field);
      while (TREE_CODE (type) == ARRAY_TYPE)
        type = TREE_TYPE (type);

      if (type != error_mark_node && TYPE_MODE (type) == DFmode)
        align = MAX (align, 64);
    }

  return align;
}
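
/* For example, under this AIX "power" alignment rule

     struct { double d; int i; };

   is given 8-byte alignment (and hence a size of 16), whereas a double
   that is not the first member only forces word alignment on the
   enclosing struct.  */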

/* Darwin increases record alignment to the natural alignment of
   the first field.  */

unsigned int
darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
                                        unsigned int specified)
{
  unsigned int align = MAX (computed, specified);

  if (TYPE_PACKED (type))
    return align;

  /* Find the first field, looking down into aggregates.  */
  do {
    tree field = TYPE_FIELDS (type);
    /* Skip all non field decls */
    while (field != NULL && TREE_CODE (field) != FIELD_DECL)
      field = DECL_CHAIN (field);
    if (! field)
      break;
    /* A packed field does not contribute any extra alignment.  */
    if (DECL_PACKED (field))
      return align;
    type = TREE_TYPE (field);
    while (TREE_CODE (type) == ARRAY_TYPE)
      type = TREE_TYPE (type);
  } while (AGGREGATE_TYPE_P (type));

  if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
    align = MAX (align, TYPE_ALIGN (type));

  return align;
}

/* Return 1 for an operand in small memory on V.4/eabi.  */

int
small_data_operand (rtx op ATTRIBUTE_UNUSED,
                    enum machine_mode mode ATTRIBUTE_UNUSED)
{
#if TARGET_ELF
  rtx sym_ref;

  if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
    return 0;

  if (DEFAULT_ABI != ABI_V4)
    return 0;

  /* Vector and float memory instructions have a limited offset on the
     SPE, so using a vector or float variable directly as an operand is
     not useful.  */
  if (TARGET_SPE
      && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
    return 0;

  if (GET_CODE (op) == SYMBOL_REF)
    sym_ref = op;

  else if (GET_CODE (op) != CONST
           || GET_CODE (XEXP (op, 0)) != PLUS
           || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
           || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
    return 0;

  else
    {
      rtx sum = XEXP (op, 0);
      HOST_WIDE_INT summand;

      /* We have to be careful here, because it is the referenced address
         that must be 32k from _SDA_BASE_, not just the symbol.  */
      summand = INTVAL (XEXP (sum, 1));
      if (summand < 0 || summand > g_switch_value)
        return 0;

      sym_ref = XEXP (sum, 0);
    }

  return SYMBOL_REF_SMALL_P (sym_ref);
#else
  return 0;
#endif
}
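
/* Operands accepted here live in the small-data sections and are reached
   through a signed 16-bit offset from the small-data base _SDA_BASE_
   (kept in r13 under the V.4 ABI) using small-data relocations; the -G
   option (g_switch_value) decides which objects are small enough to be
   placed there.  */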

/* Return true if either operand is a general purpose register.  */

bool
gpr_or_gpr_p (rtx op0, rtx op1)
{
  return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
          || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
}

/* Return true if this is a move direct operation between GPR registers and
   floating point/VSX registers.  */

bool
direct_move_p (rtx op0, rtx op1)
{
  int regno0, regno1;

  if (!REG_P (op0) || !REG_P (op1))
    return false;

  if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
    return false;

  regno0 = REGNO (op0);
  regno1 = REGNO (op1);
  if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
    return false;

  if (INT_REGNO_P (regno0))
    return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);

  else if (INT_REGNO_P (regno1))
    {
      if (TARGET_MFPGPR && FP_REGNO_P (regno0))
        return true;

      else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
        return true;
    }

  return false;
}
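
/* Illustrative note: TARGET_DIRECT_MOVE corresponds to the power8
   mtvsrd/mfvsrd instructions, which can reach any VSX register, while the
   older TARGET_MFPGPR path (power6x mffgpr/mftgpr) only moves between
   GPRs and the traditional FP registers, hence the narrower FP_REGNO_P
   test above.  */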

/* Return true if this is a load or store quad operation.  This function does
   not handle the atomic quad memory instructions.  */

bool
quad_load_store_p (rtx op0, rtx op1)
{
  bool ret;

  if (!TARGET_QUAD_MEMORY)
    ret = false;

  else if (REG_P (op0) && MEM_P (op1))
    ret = (quad_int_reg_operand (op0, GET_MODE (op0))
           && quad_memory_operand (op1, GET_MODE (op1))
           && !reg_overlap_mentioned_p (op0, op1));

  else if (MEM_P (op0) && REG_P (op1))
    ret = (quad_memory_operand (op0, GET_MODE (op0))
           && quad_int_reg_operand (op1, GET_MODE (op1)));

  else
    ret = false;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\n========== quad_load_store, return %s\n",
               ret ? "true" : "false");
      debug_rtx (gen_rtx_SET (VOIDmode, op0, op1));
    }

  return ret;
}
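
/* Illustrative note: this matches the non-atomic lq/stq forms, which need
   an even/odd GPR pair (quad_int_reg_operand) and a DQ-form address whose
   offset is a multiple of 16 (quad_memory_operand); the overlap check
   keeps lq from loading over its own address register.  */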

/* Given an address, return a constant offset term if one exists.  */

static rtx
address_offset (rtx op)
{
  if (GET_CODE (op) == PRE_INC
      || GET_CODE (op) == PRE_DEC)
    op = XEXP (op, 0);
  else if (GET_CODE (op) == PRE_MODIFY
           || GET_CODE (op) == LO_SUM)
    op = XEXP (op, 1);

  if (GET_CODE (op) == CONST)
    op = XEXP (op, 0);

  if (GET_CODE (op) == PLUS)
    op = XEXP (op, 1);

  if (CONST_INT_P (op))
    return op;

  return NULL_RTX;
}

/* Return true if the MEM operand is a memory operand suitable for use
   with a (full width, possibly multiple) gpr load/store.  On
   powerpc64 this means the offset must be divisible by 4.
   Implements 'Y' constraint.

   Accept direct, indexed, offset, lo_sum and tocref.  Since this is
   a constraint function we know the operand has satisfied a suitable
   memory predicate.  Also accept some odd rtl generated by reload
   (see rs6000_legitimize_reload_address for various forms).  It is
   important that reload rtl be accepted by appropriate constraints
   but not by the operand predicate.

   Offsetting a lo_sum should not be allowed, except where we know by
   alignment that a 32k boundary is not crossed, but see the ???
   comment in rs6000_legitimize_reload_address.  Note that by
   "offsetting" here we mean a further offset to access parts of the
   MEM.  It's fine to have a lo_sum where the inner address is offset
   from a sym, since the same sym+offset will appear in the high part
   of the address calculation.  */

bool
mem_operand_gpr (rtx op, enum machine_mode mode)
{
  unsigned HOST_WIDE_INT offset;
  int extra;
  rtx addr = XEXP (op, 0);

  op = address_offset (addr);
  if (op == NULL_RTX)
    return true;

  offset = INTVAL (op);
  if (TARGET_POWERPC64 && (offset & 3) != 0)
    return false;

  extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
  if (extra < 0)
    extra = 0;

  if (GET_CODE (addr) == LO_SUM)
    /* For lo_sum addresses, we must allow any offset except one that
       causes a wrap, so test only the low 16 bits.  */
    offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;

  return offset + 0x8000 < 0x10000u - extra;
}
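
/* For example, on powerpc64 an 8-byte access at (plus (reg) (const_int 6))
   is rejected here because ld/std are DS-form instructions whose
   displacement must be a multiple of 4, while (const_int 8) passes; the
   "extra" term additionally keeps multi-register accesses from running
   past the 16-bit displacement range.  */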

/* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p.  */

static bool
reg_offset_addressing_ok_p (enum machine_mode mode)
{
  switch (mode)
    {
    case V16QImode:
    case V8HImode:
    case V4SFmode:
    case V4SImode:
    case V2DFmode:
    case V2DImode:
    case V1TImode:
    case TImode:
      /* AltiVec/VSX vector modes.  Only reg+reg addressing is valid.  While
         TImode is not a vector mode, if we want to use the VSX registers to
         move it around, we need to restrict ourselves to reg+reg
         addressing.  */
      if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
        return false;
      break;

    case V4HImode:
    case V2SImode:
    case V1DImode:
    case V2SFmode:
      /* Paired vector modes.  Only reg+reg addressing is valid.  */
      if (TARGET_PAIRED_FLOAT)
        return false;
      break;

    case SDmode:
      /* If we can do direct load/stores of SDmode, restrict it to reg+reg
         addressing for the LFIWZX and STFIWX instructions.  */
      if (TARGET_NO_SDMODE_STACK)
        return false;
      break;

    default:
      break;
    }

  return true;
}

static bool
virtual_stack_registers_memory_p (rtx op)
{
  int regnum;

  if (GET_CODE (op) == REG)
    regnum = REGNO (op);

  else if (GET_CODE (op) == PLUS
           && GET_CODE (XEXP (op, 0)) == REG
           && GET_CODE (XEXP (op, 1)) == CONST_INT)
    regnum = REGNO (XEXP (op, 0));

  else
    return false;

  return (regnum >= FIRST_VIRTUAL_REGISTER
          && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
}
6293 /* Return true if a MODE sized memory accesses to OP plus OFFSET
6294 is known to not straddle a 32k boundary. */
6297 offsettable_ok_by_alignment (rtx op
, HOST_WIDE_INT offset
,
6298 enum machine_mode mode
)
6301 unsigned HOST_WIDE_INT dsize
, dalign
, lsb
, mask
;
6303 if (GET_CODE (op
) != SYMBOL_REF
)
6306 dsize
= GET_MODE_SIZE (mode
);
6307 decl
= SYMBOL_REF_DECL (op
);
6313 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
6314 replacing memory addresses with an anchor plus offset. We
6315 could find the decl by rummaging around in the block->objects
6316 VEC for the given offset but that seems like too much work. */
6317 dalign
= BITS_PER_UNIT
;
6318 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op
)
6319 && SYMBOL_REF_ANCHOR_P (op
)
6320 && SYMBOL_REF_BLOCK (op
) != NULL
)
6322 struct object_block
*block
= SYMBOL_REF_BLOCK (op
);
6324 dalign
= block
->alignment
;
6325 offset
+= SYMBOL_REF_BLOCK_OFFSET (op
);
6327 else if (CONSTANT_POOL_ADDRESS_P (op
))
6329 /* It would be nice to have get_pool_align().. */
6330 enum machine_mode cmode
= get_pool_mode (op
);
6332 dalign
= GET_MODE_ALIGNMENT (cmode
);
6335 else if (DECL_P (decl
))
6337 dalign
= DECL_ALIGN (decl
);
6341 /* Allow BLKmode when the entire object is known to not
6342 cross a 32k boundary. */
6343 if (!DECL_SIZE_UNIT (decl
))
6346 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl
)))
6349 dsize
= tree_to_uhwi (DECL_SIZE_UNIT (decl
));
6353 return dalign
/ BITS_PER_UNIT
>= dsize
;
6358 type
= TREE_TYPE (decl
);
6360 dalign
= TYPE_ALIGN (type
);
6361 if (CONSTANT_CLASS_P (decl
))
6362 dalign
= CONSTANT_ALIGNMENT (decl
, dalign
);
6364 dalign
= DATA_ALIGNMENT (decl
, dalign
);
6368 /* BLKmode, check the entire object. */
6369 if (TREE_CODE (decl
) == STRING_CST
)
6370 dsize
= TREE_STRING_LENGTH (decl
);
6371 else if (TYPE_SIZE_UNIT (type
)
6372 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type
)))
6373 dsize
= tree_to_uhwi (TYPE_SIZE_UNIT (type
));
6379 return dalign
/ BITS_PER_UNIT
>= dsize
;
6383 /* Find how many bits of the alignment we know for this access. */
6384 mask
= dalign
/ BITS_PER_UNIT
- 1;
6385 lsb
= offset
& -offset
;
6389 return dalign
>= dsize
;
6393 constant_pool_expr_p (rtx op
)
6397 split_const (op
, &base
, &offset
);
6398 return (GET_CODE (base
) == SYMBOL_REF
6399 && CONSTANT_POOL_ADDRESS_P (base
)
6400 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base
), Pmode
));
6403 static const_rtx tocrel_base
, tocrel_offset
;
6405 /* Return true if OP is a toc pointer relative address (the output
6406 of create_TOC_reference). If STRICT, do not match high part or
6407 non-split -mcmodel=large/medium toc pointer relative addresses. */
6410 toc_relative_expr_p (const_rtx op
, bool strict
)
6415 if (TARGET_CMODEL
!= CMODEL_SMALL
)
6417 /* Only match the low part. */
6418 if (GET_CODE (op
) == LO_SUM
6419 && REG_P (XEXP (op
, 0))
6420 && INT_REG_OK_FOR_BASE_P (XEXP (op
, 0), strict
))
6427 tocrel_offset
= const0_rtx
;
6428 if (GET_CODE (op
) == PLUS
&& add_cint_operand (XEXP (op
, 1), GET_MODE (op
)))
6430 tocrel_base
= XEXP (op
, 0);
6431 tocrel_offset
= XEXP (op
, 1);
6434 return (GET_CODE (tocrel_base
) == UNSPEC
6435 && XINT (tocrel_base
, 1) == UNSPEC_TOCREL
);
6438 /* Return true if X is a constant pool address, and also for cmodel=medium
6439 if X is a toc-relative address known to be offsettable within MODE. */
6442 legitimate_constant_pool_address_p (const_rtx x
, enum machine_mode mode
,
6445 return (toc_relative_expr_p (x
, strict
)
6446 && (TARGET_CMODEL
!= CMODEL_MEDIUM
6447 || constant_pool_expr_p (XVECEXP (tocrel_base
, 0, 0))
6449 || offsettable_ok_by_alignment (XVECEXP (tocrel_base
, 0, 0),
6450 INTVAL (tocrel_offset
), mode
)));
6454 legitimate_small_data_p (enum machine_mode mode
, rtx x
)
6456 return (DEFAULT_ABI
== ABI_V4
6457 && !flag_pic
&& !TARGET_TOC
6458 && (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == CONST
)
6459 && small_data_operand (x
, mode
));
6462 /* SPE offset addressing is limited to 5-bits worth of double words. */
6463 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
6466 rs6000_legitimate_offset_address_p (enum machine_mode mode
, rtx x
,
6467 bool strict
, bool worst_case
)
6469 unsigned HOST_WIDE_INT offset
;
6472 if (GET_CODE (x
) != PLUS
)
6474 if (!REG_P (XEXP (x
, 0)))
6476 if (!INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), strict
))
6478 if (!reg_offset_addressing_ok_p (mode
))
6479 return virtual_stack_registers_memory_p (x
);
6480 if (legitimate_constant_pool_address_p (x
, mode
, strict
|| lra_in_progress
))
6482 if (GET_CODE (XEXP (x
, 1)) != CONST_INT
)
6485 offset
= INTVAL (XEXP (x
, 1));
6493 /* SPE vector modes. */
6494 return SPE_CONST_OFFSET_OK (offset
);
6499 /* On e500v2, we may have:
6501 (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
6503 Which gets addressed with evldd instructions. */
6504 if (TARGET_E500_DOUBLE
)
6505 return SPE_CONST_OFFSET_OK (offset
);
6507 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
6509 if (VECTOR_MEM_VSX_P (mode
))
6514 if (!TARGET_POWERPC64
)
6516 else if (offset
& 3)
6521 if (TARGET_E500_DOUBLE
)
6522 return (SPE_CONST_OFFSET_OK (offset
)
6523 && SPE_CONST_OFFSET_OK (offset
+ 8));
6532 if (!TARGET_POWERPC64
)
6534 else if (offset
& 3)
6543 return offset
< 0x10000 - extra
;
6547 legitimate_indexed_address_p (rtx x
, int strict
)
6551 if (GET_CODE (x
) != PLUS
)
6557 /* Recognize the rtl generated by reload which we know will later be
6558 replaced with proper base and index regs. */
6560 && reload_in_progress
6561 && (REG_P (op0
) || GET_CODE (op0
) == PLUS
)
6565 return (REG_P (op0
) && REG_P (op1
)
6566 && ((INT_REG_OK_FOR_BASE_P (op0
, strict
)
6567 && INT_REG_OK_FOR_INDEX_P (op1
, strict
))
6568 || (INT_REG_OK_FOR_BASE_P (op1
, strict
)
6569 && INT_REG_OK_FOR_INDEX_P (op0
, strict
))));
6573 avoiding_indexed_address_p (enum machine_mode mode
)
6575 /* Avoid indexed addressing for modes that have non-indexed
6576 load/store instruction forms. */
6577 return (TARGET_AVOID_XFORM
&& VECTOR_MEM_NONE_P (mode
));
6581 legitimate_indirect_address_p (rtx x
, int strict
)
6583 return GET_CODE (x
) == REG
&& INT_REG_OK_FOR_BASE_P (x
, strict
);
6587 macho_lo_sum_memory_operand (rtx x
, enum machine_mode mode
)
6589 if (!TARGET_MACHO
|| !flag_pic
6590 || mode
!= SImode
|| GET_CODE (x
) != MEM
)
6594 if (GET_CODE (x
) != LO_SUM
)
6596 if (GET_CODE (XEXP (x
, 0)) != REG
)
6598 if (!INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), 0))
6602 return CONSTANT_P (x
);
6606 legitimate_lo_sum_address_p (enum machine_mode mode
, rtx x
, int strict
)
6608 if (GET_CODE (x
) != LO_SUM
)
6610 if (GET_CODE (XEXP (x
, 0)) != REG
)
6612 if (!INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), strict
))
6614 /* Restrict addressing for DI because of our SUBREG hackery. */
6615 if (TARGET_E500_DOUBLE
&& GET_MODE_SIZE (mode
) > UNITS_PER_WORD
)
6619 if (TARGET_ELF
|| TARGET_MACHO
)
6623 if (DEFAULT_ABI
== ABI_V4
&& flag_pic
)
6625 /* LRA don't use LEGITIMIZE_RELOAD_ADDRESS as it usually calls
6626 push_reload from reload pass code. LEGITIMIZE_RELOAD_ADDRESS
6627 recognizes some LO_SUM addresses as valid although this
6628 function says opposite. In most cases, LRA through different
6629 transformations can generate correct code for address reloads.
6630 It can not manage only some LO_SUM cases. So we need to add
6631 code analogous to one in rs6000_legitimize_reload_address for
6632 LOW_SUM here saying that some addresses are still valid. */
6633 large_toc_ok
= (lra_in_progress
&& TARGET_CMODEL
!= CMODEL_SMALL
6634 && small_toc_ref (x
, VOIDmode
));
6635 if (TARGET_TOC
&& ! large_toc_ok
)
6637 if (GET_MODE_NUNITS (mode
) != 1)
6639 if (GET_MODE_SIZE (mode
) > UNITS_PER_WORD
6640 && !(/* ??? Assume floating point reg based on mode? */
6641 TARGET_HARD_FLOAT
&& TARGET_FPRS
&& TARGET_DOUBLE_FLOAT
6642 && (mode
== DFmode
|| mode
== DDmode
)))
6645 return CONSTANT_P (x
) || large_toc_ok
;
6652 /* Try machine-dependent ways of modifying an illegitimate address
6653 to be legitimate. If we find one, return the new, valid address.
6654 This is used from only one place: `memory_address' in explow.c.
6656 OLDX is the address as it was before break_out_memory_refs was
6657 called. In some cases it is useful to look at this to decide what
6660 It is always safe for this function to do nothing. It exists to
6661 recognize opportunities to optimize the output.
6663 On RS/6000, first check for the sum of a register with a constant
6664 integer that is out of range. If so, generate code to add the
6665 constant with the low-order 16 bits masked to the register and force
6666 this result into another register (this can be done with `cau').
6667 Then generate an address of REG+(CONST&0xffff), allowing for the
6668 possibility of bit 16 being a one.
6670 Then check for the sum of a register and something not constant, try to
6671 load the other things into a register and return the sum. */
6674 rs6000_legitimize_address (rtx x
, rtx oldx ATTRIBUTE_UNUSED
,
6675 enum machine_mode mode
)
6679 if (!reg_offset_addressing_ok_p (mode
))
6681 if (virtual_stack_registers_memory_p (x
))
6684 /* In theory we should not be seeing addresses of the form reg+0,
6685 but just in case it is generated, optimize it away. */
6686 if (GET_CODE (x
) == PLUS
&& XEXP (x
, 1) == const0_rtx
)
6687 return force_reg (Pmode
, XEXP (x
, 0));
6689 /* For TImode with load/store quad, restrict addresses to just a single
6690 pointer, so it works with both GPRs and VSX registers. */
6691 /* Make sure both operands are registers. */
6692 else if (GET_CODE (x
) == PLUS
6693 && (mode
!= TImode
|| !TARGET_QUAD_MEMORY
))
6694 return gen_rtx_PLUS (Pmode
,
6695 force_reg (Pmode
, XEXP (x
, 0)),
6696 force_reg (Pmode
, XEXP (x
, 1)));
6698 return force_reg (Pmode
, x
);
6700 if (GET_CODE (x
) == SYMBOL_REF
)
6702 enum tls_model model
= SYMBOL_REF_TLS_MODEL (x
);
6704 return rs6000_legitimize_tls_address (x
, model
);
6714 /* As in legitimate_offset_address_p we do not assume
6715 worst-case. The mode here is just a hint as to the registers
6716 used. A TImode is usually in gprs, but may actually be in
6717 fprs. Leave worst-case scenario for reload to handle via
6718 insn constraints. PTImode is only GPRs. */
6725 if (GET_CODE (x
) == PLUS
6726 && GET_CODE (XEXP (x
, 0)) == REG
6727 && GET_CODE (XEXP (x
, 1)) == CONST_INT
6728 && ((unsigned HOST_WIDE_INT
) (INTVAL (XEXP (x
, 1)) + 0x8000)
6730 && !(SPE_VECTOR_MODE (mode
)
6731 || (TARGET_E500_DOUBLE
&& GET_MODE_SIZE (mode
) > UNITS_PER_WORD
)))
6733 HOST_WIDE_INT high_int
, low_int
;
6735 low_int
= ((INTVAL (XEXP (x
, 1)) & 0xffff) ^ 0x8000) - 0x8000;
6736 if (low_int
>= 0x8000 - extra
)
6738 high_int
= INTVAL (XEXP (x
, 1)) - low_int
;
6739 sum
= force_operand (gen_rtx_PLUS (Pmode
, XEXP (x
, 0),
6740 GEN_INT (high_int
)), 0);
6741 return plus_constant (Pmode
, sum
, low_int
);
6743 else if (GET_CODE (x
) == PLUS
6744 && GET_CODE (XEXP (x
, 0)) == REG
6745 && GET_CODE (XEXP (x
, 1)) != CONST_INT
6746 && GET_MODE_NUNITS (mode
) == 1
6747 && (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
6748 || (/* ??? Assume floating point reg based on mode? */
6749 (TARGET_HARD_FLOAT
&& TARGET_FPRS
&& TARGET_DOUBLE_FLOAT
)
6750 && (mode
== DFmode
|| mode
== DDmode
)))
6751 && !avoiding_indexed_address_p (mode
))
6753 return gen_rtx_PLUS (Pmode
, XEXP (x
, 0),
6754 force_reg (Pmode
, force_operand (XEXP (x
, 1), 0)));
6756 else if (SPE_VECTOR_MODE (mode
)
6757 || (TARGET_E500_DOUBLE
&& GET_MODE_SIZE (mode
) > UNITS_PER_WORD
))
6761 /* We accept [reg + reg] and [reg + OFFSET]. */
6763 if (GET_CODE (x
) == PLUS
)
6765 rtx op1
= XEXP (x
, 0);
6766 rtx op2
= XEXP (x
, 1);
6769 op1
= force_reg (Pmode
, op1
);
6771 if (GET_CODE (op2
) != REG
6772 && (GET_CODE (op2
) != CONST_INT
6773 || !SPE_CONST_OFFSET_OK (INTVAL (op2
))
6774 || (GET_MODE_SIZE (mode
) > 8
6775 && !SPE_CONST_OFFSET_OK (INTVAL (op2
) + 8))))
6776 op2
= force_reg (Pmode
, op2
);
6778 /* We can't always do [reg + reg] for these, because [reg +
6779 reg + offset] is not a legitimate addressing mode. */
6780 y
= gen_rtx_PLUS (Pmode
, op1
, op2
);
6782 if ((GET_MODE_SIZE (mode
) > 8 || mode
== DDmode
) && REG_P (op2
))
6783 return force_reg (Pmode
, y
);
6788 return force_reg (Pmode
, x
);
6790 else if ((TARGET_ELF
6792 || !MACHO_DYNAMIC_NO_PIC_P
6798 && GET_CODE (x
) != CONST_INT
6799 && GET_CODE (x
) != CONST_WIDE_INT
6800 && GET_CODE (x
) != CONST_DOUBLE
6802 && GET_MODE_NUNITS (mode
) == 1
6803 && (GET_MODE_SIZE (mode
) <= UNITS_PER_WORD
6804 || (/* ??? Assume floating point reg based on mode? */
6805 (TARGET_HARD_FLOAT
&& TARGET_FPRS
&& TARGET_DOUBLE_FLOAT
)
6806 && (mode
== DFmode
|| mode
== DDmode
))))
6808 rtx reg
= gen_reg_rtx (Pmode
);
6810 emit_insn (gen_elf_high (reg
, x
));
6812 emit_insn (gen_macho_high (reg
, x
));
6813 return gen_rtx_LO_SUM (Pmode
, reg
, x
);
6816 && GET_CODE (x
) == SYMBOL_REF
6817 && constant_pool_expr_p (x
)
6818 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x
), Pmode
))
6819 return create_TOC_reference (x
, NULL_RTX
);
6824 /* Debug version of rs6000_legitimize_address. */
6826 rs6000_debug_legitimize_address (rtx x
, rtx oldx
, enum machine_mode mode
)
6832 ret
= rs6000_legitimize_address (x
, oldx
, mode
);
6833 insns
= get_insns ();
6839 "\nrs6000_legitimize_address: mode %s, old code %s, "
6840 "new code %s, modified\n",
6841 GET_MODE_NAME (mode
), GET_RTX_NAME (GET_CODE (x
)),
6842 GET_RTX_NAME (GET_CODE (ret
)));
6844 fprintf (stderr
, "Original address:\n");
6847 fprintf (stderr
, "oldx:\n");
6850 fprintf (stderr
, "New address:\n");
6855 fprintf (stderr
, "Insns added:\n");
6856 debug_rtx_list (insns
, 20);
6862 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
6863 GET_MODE_NAME (mode
), GET_RTX_NAME (GET_CODE (x
)));
6874 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6875 We need to emit DTP-relative relocations. */
6877 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx
) ATTRIBUTE_UNUSED
;
6879 rs6000_output_dwarf_dtprel (FILE *file
, int size
, rtx x
)
6884 fputs ("\t.long\t", file
);
6887 fputs (DOUBLE_INT_ASM_OP
, file
);
6892 output_addr_const (file
, x
);
6893 fputs ("@dtprel+0x8000", file
);
6896 /* Return true if X is a symbol that refers to real (rather than emulated)
6900 rs6000_real_tls_symbol_ref_p (rtx x
)
6902 return (GET_CODE (x
) == SYMBOL_REF
6903 && SYMBOL_REF_TLS_MODEL (x
) >= TLS_MODEL_REAL
);
6906 /* In the name of slightly smaller debug output, and to cater to
6907 general assembler lossage, recognize various UNSPEC sequences
6908 and turn them back into a direct symbol reference. */
6911 rs6000_delegitimize_address (rtx orig_x
)
6915 orig_x
= delegitimize_mem_from_attrs (orig_x
);
6921 if (TARGET_CMODEL
!= CMODEL_SMALL
6922 && GET_CODE (y
) == LO_SUM
)
6926 if (GET_CODE (y
) == PLUS
6927 && GET_MODE (y
) == Pmode
6928 && CONST_INT_P (XEXP (y
, 1)))
6930 offset
= XEXP (y
, 1);
6934 if (GET_CODE (y
) == UNSPEC
6935 && XINT (y
, 1) == UNSPEC_TOCREL
)
6937 #ifdef ENABLE_CHECKING
6938 if (REG_P (XVECEXP (y
, 0, 1))
6939 && REGNO (XVECEXP (y
, 0, 1)) == TOC_REGISTER
)
6943 else if (GET_CODE (XVECEXP (y
, 0, 1)) == DEBUG_EXPR
)
6945 /* Weirdness alert. df_note_compute can replace r2 with a
6946 debug_expr when this unspec is in a debug_insn.
6947 Seen in gcc.dg/pr51957-1.c */
6955 y
= XVECEXP (y
, 0, 0);
6958 /* Do not associate thread-local symbols with the original
6959 constant pool symbol. */
6961 && GET_CODE (y
) == SYMBOL_REF
6962 && CONSTANT_POOL_ADDRESS_P (y
)
6963 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y
)))
6967 if (offset
!= NULL_RTX
)
6968 y
= gen_rtx_PLUS (Pmode
, y
, offset
);
6969 if (!MEM_P (orig_x
))
6972 return replace_equiv_address_nv (orig_x
, y
);
6976 && GET_CODE (orig_x
) == LO_SUM
6977 && GET_CODE (XEXP (orig_x
, 1)) == CONST
)
6979 y
= XEXP (XEXP (orig_x
, 1), 0);
6980 if (GET_CODE (y
) == UNSPEC
6981 && XINT (y
, 1) == UNSPEC_MACHOPIC_OFFSET
)
6982 return XVECEXP (y
, 0, 0);
6988 /* Return true if X shouldn't be emitted into the debug info.
6989 The linker doesn't like .toc section references from
6990 .debug_* sections, so reject .toc section symbols. */
6993 rs6000_const_not_ok_for_debug_p (rtx x
)
6995 if (GET_CODE (x
) == SYMBOL_REF
6996 && CONSTANT_POOL_ADDRESS_P (x
))
6998 rtx c
= get_pool_constant (x
);
6999 enum machine_mode cmode
= get_pool_mode (x
);
7000 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c
, cmode
))
7007 /* Construct the SYMBOL_REF for the tls_get_addr function. */
7009 static GTY(()) rtx rs6000_tls_symbol
;
7011 rs6000_tls_get_addr (void)
7013 if (!rs6000_tls_symbol
)
7014 rs6000_tls_symbol
= init_one_libfunc ("__tls_get_addr");
7016 return rs6000_tls_symbol
;
7019 /* Construct the SYMBOL_REF for TLS GOT references. */
7021 static GTY(()) rtx rs6000_got_symbol
;
7023 rs6000_got_sym (void)
7025 if (!rs6000_got_symbol
)
7027 rs6000_got_symbol
= gen_rtx_SYMBOL_REF (Pmode
, "_GLOBAL_OFFSET_TABLE_");
7028 SYMBOL_REF_FLAGS (rs6000_got_symbol
) |= SYMBOL_FLAG_LOCAL
;
7029 SYMBOL_REF_FLAGS (rs6000_got_symbol
) |= SYMBOL_FLAG_EXTERNAL
;
7032 return rs6000_got_symbol
;
7035 /* AIX Thread-Local Address support. */
7038 rs6000_legitimize_tls_address_aix (rtx addr
, enum tls_model model
)
7040 rtx sym
, mem
, tocref
, tlsreg
, tmpreg
, dest
, tlsaddr
;
7044 name
= XSTR (addr
, 0);
7045 /* Append TLS CSECT qualifier, unless the symbol already is qualified
7046 or the symbol will be in TLS private data section. */
7047 if (name
[strlen (name
) - 1] != ']'
7048 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr
))
7049 || bss_initializer_p (SYMBOL_REF_DECL (addr
))))
7051 tlsname
= XALLOCAVEC (char, strlen (name
) + 4);
7052 strcpy (tlsname
, name
);
7054 bss_initializer_p (SYMBOL_REF_DECL (addr
)) ? "[UL]" : "[TL]");
7055 tlsaddr
= copy_rtx (addr
);
7056 XSTR (tlsaddr
, 0) = ggc_strdup (tlsname
);
7061 /* Place addr into TOC constant pool. */
7062 sym
= force_const_mem (GET_MODE (tlsaddr
), tlsaddr
);
7064 /* Output the TOC entry and create the MEM referencing the value. */
7065 if (constant_pool_expr_p (XEXP (sym
, 0))
7066 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym
, 0)), Pmode
))
7068 tocref
= create_TOC_reference (XEXP (sym
, 0), NULL_RTX
);
7069 mem
= gen_const_mem (Pmode
, tocref
);
7070 set_mem_alias_set (mem
, get_TOC_alias_set ());
7075 /* Use global-dynamic for local-dynamic. */
7076 if (model
== TLS_MODEL_GLOBAL_DYNAMIC
7077 || model
== TLS_MODEL_LOCAL_DYNAMIC
)
7079 /* Create new TOC reference for @m symbol. */
7080 name
= XSTR (XVECEXP (XEXP (mem
, 0), 0, 0), 0);
7081 tlsname
= XALLOCAVEC (char, strlen (name
) + 1);
7082 strcpy (tlsname
, "*LCM");
7083 strcat (tlsname
, name
+ 3);
7084 rtx modaddr
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (tlsname
));
7085 SYMBOL_REF_FLAGS (modaddr
) |= SYMBOL_FLAG_LOCAL
;
7086 tocref
= create_TOC_reference (modaddr
, NULL_RTX
);
7087 rtx modmem
= gen_const_mem (Pmode
, tocref
);
7088 set_mem_alias_set (modmem
, get_TOC_alias_set ());
7090 rtx modreg
= gen_reg_rtx (Pmode
);
7091 emit_insn (gen_rtx_SET (VOIDmode
, modreg
, modmem
));
7093 tmpreg
= gen_reg_rtx (Pmode
);
7094 emit_insn (gen_rtx_SET (VOIDmode
, tmpreg
, mem
));
7096 dest
= gen_reg_rtx (Pmode
);
7098 emit_insn (gen_tls_get_addrsi (dest
, modreg
, tmpreg
));
7100 emit_insn (gen_tls_get_addrdi (dest
, modreg
, tmpreg
));
7103 /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13. */
7104 else if (TARGET_32BIT
)
7106 tlsreg
= gen_reg_rtx (SImode
);
7107 emit_insn (gen_tls_get_tpointer (tlsreg
));
7110 tlsreg
= gen_rtx_REG (DImode
, 13);
7112 /* Load the TOC value into temporary register. */
7113 tmpreg
= gen_reg_rtx (Pmode
);
7114 emit_insn (gen_rtx_SET (VOIDmode
, tmpreg
, mem
));
7115 set_unique_reg_note (get_last_insn (), REG_EQUAL
,
7116 gen_rtx_MINUS (Pmode
, addr
, tlsreg
));
7118 /* Add TOC symbol value to TLS pointer. */
7119 dest
= force_reg (Pmode
, gen_rtx_PLUS (Pmode
, tmpreg
, tlsreg
));
7124 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
7125 this (thread-local) address. */
7128 rs6000_legitimize_tls_address (rtx addr
, enum tls_model model
)
7133 return rs6000_legitimize_tls_address_aix (addr
, model
);
7135 dest
= gen_reg_rtx (Pmode
);
7136 if (model
== TLS_MODEL_LOCAL_EXEC
&& rs6000_tls_size
== 16)
7142 tlsreg
= gen_rtx_REG (Pmode
, 13);
7143 insn
= gen_tls_tprel_64 (dest
, tlsreg
, addr
);
7147 tlsreg
= gen_rtx_REG (Pmode
, 2);
7148 insn
= gen_tls_tprel_32 (dest
, tlsreg
, addr
);
7152 else if (model
== TLS_MODEL_LOCAL_EXEC
&& rs6000_tls_size
== 32)
7156 tmp
= gen_reg_rtx (Pmode
);
7159 tlsreg
= gen_rtx_REG (Pmode
, 13);
7160 insn
= gen_tls_tprel_ha_64 (tmp
, tlsreg
, addr
);
7164 tlsreg
= gen_rtx_REG (Pmode
, 2);
7165 insn
= gen_tls_tprel_ha_32 (tmp
, tlsreg
, addr
);
7169 insn
= gen_tls_tprel_lo_64 (dest
, tmp
, addr
);
7171 insn
= gen_tls_tprel_lo_32 (dest
, tmp
, addr
);
7176 rtx r3
, got
, tga
, tmp1
, tmp2
, call_insn
;
7178 /* We currently use relocations like @got@tlsgd for tls, which
7179 means the linker will handle allocation of tls entries, placing
7180 them in the .got section. So use a pointer to the .got section,
7181 not one to secondary TOC sections used by 64-bit -mminimal-toc,
7182 or to secondary GOT sections used by 32-bit -fPIC. */
7184 got
= gen_rtx_REG (Pmode
, 2);
7188 got
= gen_rtx_REG (Pmode
, RS6000_PIC_OFFSET_TABLE_REGNUM
);
7191 rtx gsym
= rs6000_got_sym ();
7192 got
= gen_reg_rtx (Pmode
);
7194 rs6000_emit_move (got
, gsym
, Pmode
);
7199 tmp1
= gen_reg_rtx (Pmode
);
7200 tmp2
= gen_reg_rtx (Pmode
);
7201 mem
= gen_const_mem (Pmode
, tmp1
);
7202 lab
= gen_label_rtx ();
7203 emit_insn (gen_load_toc_v4_PIC_1b (gsym
, lab
));
7204 emit_move_insn (tmp1
, gen_rtx_REG (Pmode
, LR_REGNO
));
7205 if (TARGET_LINK_STACK
)
7206 emit_insn (gen_addsi3 (tmp1
, tmp1
, GEN_INT (4)));
7207 emit_move_insn (tmp2
, mem
);
7208 last
= emit_insn (gen_addsi3 (got
, tmp1
, tmp2
));
7209 set_unique_reg_note (last
, REG_EQUAL
, gsym
);
7214 if (model
== TLS_MODEL_GLOBAL_DYNAMIC
)
7216 tga
= rs6000_tls_get_addr ();
7217 emit_library_call_value (tga
, dest
, LCT_CONST
, Pmode
,
7218 1, const0_rtx
, Pmode
);
7220 r3
= gen_rtx_REG (Pmode
, 3);
7221 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
7224 insn
= gen_tls_gd_aix64 (r3
, got
, addr
, tga
, const0_rtx
);
7226 insn
= gen_tls_gd_aix32 (r3
, got
, addr
, tga
, const0_rtx
);
7228 else if (DEFAULT_ABI
== ABI_V4
)
7229 insn
= gen_tls_gd_sysvsi (r3
, got
, addr
, tga
, const0_rtx
);
7232 call_insn
= last_call_insn ();
7233 PATTERN (call_insn
) = insn
;
7234 if (DEFAULT_ABI
== ABI_V4
&& TARGET_SECURE_PLT
&& flag_pic
)
7235 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn
),
7236 pic_offset_table_rtx
);
7238 else if (model
== TLS_MODEL_LOCAL_DYNAMIC
)
7240 tga
= rs6000_tls_get_addr ();
7241 tmp1
= gen_reg_rtx (Pmode
);
7242 emit_library_call_value (tga
, tmp1
, LCT_CONST
, Pmode
,
7243 1, const0_rtx
, Pmode
);
7245 r3
= gen_rtx_REG (Pmode
, 3);
7246 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
7249 insn
= gen_tls_ld_aix64 (r3
, got
, tga
, const0_rtx
);
7251 insn
= gen_tls_ld_aix32 (r3
, got
, tga
, const0_rtx
);
7253 else if (DEFAULT_ABI
== ABI_V4
)
7254 insn
= gen_tls_ld_sysvsi (r3
, got
, tga
, const0_rtx
);
7257 call_insn
= last_call_insn ();
7258 PATTERN (call_insn
) = insn
;
7259 if (DEFAULT_ABI
== ABI_V4
&& TARGET_SECURE_PLT
&& flag_pic
)
7260 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn
),
7261 pic_offset_table_rtx
);
7263 if (rs6000_tls_size
== 16)
7266 insn
= gen_tls_dtprel_64 (dest
, tmp1
, addr
);
7268 insn
= gen_tls_dtprel_32 (dest
, tmp1
, addr
);
7270 else if (rs6000_tls_size
== 32)
7272 tmp2
= gen_reg_rtx (Pmode
);
7274 insn
= gen_tls_dtprel_ha_64 (tmp2
, tmp1
, addr
);
7276 insn
= gen_tls_dtprel_ha_32 (tmp2
, tmp1
, addr
);
7279 insn
= gen_tls_dtprel_lo_64 (dest
, tmp2
, addr
);
7281 insn
= gen_tls_dtprel_lo_32 (dest
, tmp2
, addr
);
7285 tmp2
= gen_reg_rtx (Pmode
);
7287 insn
= gen_tls_got_dtprel_64 (tmp2
, got
, addr
);
7289 insn
= gen_tls_got_dtprel_32 (tmp2
, got
, addr
);
7291 insn
= gen_rtx_SET (Pmode
, dest
,
7292 gen_rtx_PLUS (Pmode
, tmp2
, tmp1
));
7298 /* IE, or 64-bit offset LE. */
7299 tmp2
= gen_reg_rtx (Pmode
);
7301 insn
= gen_tls_got_tprel_64 (tmp2
, got
, addr
);
7303 insn
= gen_tls_got_tprel_32 (tmp2
, got
, addr
);
7306 insn
= gen_tls_tls_64 (dest
, tmp2
, addr
);
7308 insn
= gen_tls_tls_32 (dest
, tmp2
, addr
);
7316 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
7319 rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED
, rtx x
)
7321 if (GET_CODE (x
) == HIGH
7322 && GET_CODE (XEXP (x
, 0)) == UNSPEC
)
7325 /* A TLS symbol in the TOC cannot contain a sum. */
7326 if (GET_CODE (x
) == CONST
7327 && GET_CODE (XEXP (x
, 0)) == PLUS
7328 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == SYMBOL_REF
7329 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x
, 0), 0)) != 0)
7332 /* Do not place an ELF TLS symbol in the constant pool. */
7333 return TARGET_ELF
&& tls_referenced_p (x
);
7336 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
7337 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
7338 can be addressed relative to the toc pointer. */
7341 use_toc_relative_ref (rtx sym
)
7343 return ((constant_pool_expr_p (sym
)
7344 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym
),
7345 get_pool_mode (sym
)))
7346 || (TARGET_CMODEL
== CMODEL_MEDIUM
7347 && SYMBOL_REF_LOCAL_P (sym
)));
7350 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
7351 replace the input X, or the original X if no replacement is called for.
7352 The output parameter *WIN is 1 if the calling macro should goto WIN,
7355 For RS/6000, we wish to handle large displacements off a base
7356 register by splitting the addend across an addiu/addis and the mem insn.
7357 This cuts number of extra insns needed from 3 to 1.
7359 On Darwin, we use this to generate code for floating point constants.
7360 A movsf_low is generated so we wind up with 2 instructions rather than 3.
7361 The Darwin code is inside #if TARGET_MACHO because only then are the
7362 machopic_* functions defined. */
7364 rs6000_legitimize_reload_address (rtx x
, enum machine_mode mode
,
7365 int opnum
, int type
,
7366 int ind_levels ATTRIBUTE_UNUSED
, int *win
)
7368 bool reg_offset_p
= reg_offset_addressing_ok_p (mode
);
7370 /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
7371 DFmode/DImode MEM. */
7374 && ((mode
== DFmode
&& recog_data
.operand_mode
[0] == V2DFmode
)
7375 || (mode
== DImode
&& recog_data
.operand_mode
[0] == V2DImode
)))
7376 reg_offset_p
= false;
7378 /* We must recognize output that we have already generated ourselves. */
7379 if (GET_CODE (x
) == PLUS
7380 && GET_CODE (XEXP (x
, 0)) == PLUS
7381 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
7382 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
7383 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
7385 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
7386 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
7387 opnum
, (enum reload_type
) type
);
7392 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
7393 if (GET_CODE (x
) == LO_SUM
7394 && GET_CODE (XEXP (x
, 0)) == HIGH
)
7396 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
7397 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
7398 opnum
, (enum reload_type
) type
);
7404 if (DEFAULT_ABI
== ABI_DARWIN
&& flag_pic
7405 && GET_CODE (x
) == LO_SUM
7406 && GET_CODE (XEXP (x
, 0)) == PLUS
7407 && XEXP (XEXP (x
, 0), 0) == pic_offset_table_rtx
7408 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == HIGH
7409 && XEXP (XEXP (XEXP (x
, 0), 1), 0) == XEXP (x
, 1)
7410 && machopic_operand_p (XEXP (x
, 1)))
7412 /* Result of previous invocation of this function on Darwin
7413 floating point constant. */
7414 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
7415 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
7416 opnum
, (enum reload_type
) type
);
7422 if (TARGET_CMODEL
!= CMODEL_SMALL
7424 && small_toc_ref (x
, VOIDmode
))
7426 rtx hi
= gen_rtx_HIGH (Pmode
, copy_rtx (x
));
7427 x
= gen_rtx_LO_SUM (Pmode
, hi
, x
);
7428 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
7429 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
7430 opnum
, (enum reload_type
) type
);
7435 if (GET_CODE (x
) == PLUS
7436 && GET_CODE (XEXP (x
, 0)) == REG
7437 && REGNO (XEXP (x
, 0)) < FIRST_PSEUDO_REGISTER
7438 && INT_REG_OK_FOR_BASE_P (XEXP (x
, 0), 1)
7439 && GET_CODE (XEXP (x
, 1)) == CONST_INT
7441 && !SPE_VECTOR_MODE (mode
)
7442 && !(TARGET_E500_DOUBLE
&& GET_MODE_SIZE (mode
) > UNITS_PER_WORD
)
7443 && (!VECTOR_MODE_P (mode
) || VECTOR_MEM_NONE_P (mode
)))
7445 HOST_WIDE_INT val
= INTVAL (XEXP (x
, 1));
7446 HOST_WIDE_INT low
= ((val
& 0xffff) ^ 0x8000) - 0x8000;
7448 = (((val
- low
) & 0xffffffff) ^ 0x80000000) - 0x80000000;
7450 /* Check for 32-bit overflow. */
7451 if (high
+ low
!= val
)
7457 /* Reload the high part into a base reg; leave the low part
7458 in the mem directly. */
7460 x
= gen_rtx_PLUS (GET_MODE (x
),
7461 gen_rtx_PLUS (GET_MODE (x
), XEXP (x
, 0),
7465 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
7466 BASE_REG_CLASS
, GET_MODE (x
), VOIDmode
, 0, 0,
7467 opnum
, (enum reload_type
) type
);
7472 if (GET_CODE (x
) == SYMBOL_REF
7474 && (!VECTOR_MODE_P (mode
) || VECTOR_MEM_NONE_P (mode
))
7475 && !SPE_VECTOR_MODE (mode
)
7477 && DEFAULT_ABI
== ABI_DARWIN
7478 && (flag_pic
|| MACHO_DYNAMIC_NO_PIC_P
)
7479 && machopic_symbol_defined_p (x
)
7481 && DEFAULT_ABI
== ABI_V4
7484 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
7485 The same goes for DImode without 64-bit gprs and DFmode and DDmode
7487 ??? Assume floating point reg based on mode? This assumption is
7488 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
7489 where reload ends up doing a DFmode load of a constant from
7490 mem using two gprs. Unfortunately, at this point reload
7491 hasn't yet selected regs so poking around in reload data
7492 won't help and even if we could figure out the regs reliably,
7493 we'd still want to allow this transformation when the mem is
7494 naturally aligned. Since we say the address is good here, we
7495 can't disable offsets from LO_SUMs in mem_operand_gpr.
7496 FIXME: Allow offset from lo_sum for other modes too, when
7497 mem is sufficiently aligned. */
7500 && (mode
!= TImode
|| !TARGET_VSX_TIMODE
)
7502 && (mode
!= DImode
|| TARGET_POWERPC64
)
7503 && ((mode
!= DFmode
&& mode
!= DDmode
) || TARGET_POWERPC64
7504 || (TARGET_HARD_FLOAT
&& TARGET_FPRS
&& TARGET_DOUBLE_FLOAT
)))
7509 rtx offset
= machopic_gen_offset (x
);
7510 x
= gen_rtx_LO_SUM (GET_MODE (x
),
7511 gen_rtx_PLUS (Pmode
, pic_offset_table_rtx
,
7512 gen_rtx_HIGH (Pmode
, offset
)), offset
);
7516 x
= gen_rtx_LO_SUM (GET_MODE (x
),
7517 gen_rtx_HIGH (Pmode
, x
), x
);
7519 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
7520 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
7521 opnum
, (enum reload_type
) type
);
7526 /* Reload an offset address wrapped by an AND that represents the
7527 masking of the lower bits. Strip the outer AND and let reload
7528 convert the offset address into an indirect address. For VSX,
7529 force reload to create the address with an AND in a separate
7530 register, because we can't guarantee an altivec register will
7532 if (VECTOR_MEM_ALTIVEC_P (mode
)
7533 && GET_CODE (x
) == AND
7534 && GET_CODE (XEXP (x
, 0)) == PLUS
7535 && GET_CODE (XEXP (XEXP (x
, 0), 0)) == REG
7536 && GET_CODE (XEXP (XEXP (x
, 0), 1)) == CONST_INT
7537 && GET_CODE (XEXP (x
, 1)) == CONST_INT
7538 && INTVAL (XEXP (x
, 1)) == -16)
7547 && GET_CODE (x
) == SYMBOL_REF
7548 && use_toc_relative_ref (x
))
7550 x
= create_TOC_reference (x
, NULL_RTX
);
7551 if (TARGET_CMODEL
!= CMODEL_SMALL
)
7552 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
7553 BASE_REG_CLASS
, Pmode
, VOIDmode
, 0, 0,
7554 opnum
, (enum reload_type
) type
);
7562 /* Debug version of rs6000_legitimize_reload_address. */
7564 rs6000_debug_legitimize_reload_address (rtx x
, enum machine_mode mode
,
7565 int opnum
, int type
,
7566 int ind_levels
, int *win
)
7568 rtx ret
= rs6000_legitimize_reload_address (x
, mode
, opnum
, type
,
7571 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
7572 "type = %d, ind_levels = %d, win = %d, original addr:\n",
7573 GET_MODE_NAME (mode
), opnum
, type
, ind_levels
, *win
);
7577 fprintf (stderr
, "Same address returned\n");
7579 fprintf (stderr
, "NULL returned\n");
7582 fprintf (stderr
, "New address:\n");
7589 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
7590 that is a valid memory address for an instruction.
7591 The MODE argument is the machine mode for the MEM expression
7592 that wants to use this address.
7594 On the RS/6000, there are four valid address: a SYMBOL_REF that
7595 refers to a constant pool entry of an address (or the sum of it
7596 plus a constant), a short (16-bit signed) constant plus a register,
7597 the sum of two registers, or a register indirect, possibly with an
7598 auto-increment. For DFmode, DDmode and DImode with a constant plus
7599 register, we must ensure that both words are addressable or PowerPC64
7600 with offset word aligned.
7602 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
7603 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
7604 because adjacent memory cells are accessed by adding word-sized offsets
7605 during assembly output. */
7607 rs6000_legitimate_address_p (enum machine_mode mode
, rtx x
, bool reg_ok_strict
)
7609 bool reg_offset_p
= reg_offset_addressing_ok_p (mode
);
7611 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
7612 if (VECTOR_MEM_ALTIVEC_P (mode
)
7613 && GET_CODE (x
) == AND
7614 && GET_CODE (XEXP (x
, 1)) == CONST_INT
7615 && INTVAL (XEXP (x
, 1)) == -16)
7618 if (TARGET_ELF
&& RS6000_SYMBOL_REF_TLS_P (x
))
7620 if (legitimate_indirect_address_p (x
, reg_ok_strict
))
7623 && (GET_CODE (x
) == PRE_INC
|| GET_CODE (x
) == PRE_DEC
)
7624 && mode_supports_pre_incdec_p (mode
)
7625 && legitimate_indirect_address_p (XEXP (x
, 0), reg_ok_strict
))
7627 if (virtual_stack_registers_memory_p (x
))
7629 if (reg_offset_p
&& legitimate_small_data_p (mode
, x
))
7632 && legitimate_constant_pool_address_p (x
, mode
,
7633 reg_ok_strict
|| lra_in_progress
))
7635 /* For TImode, if we have load/store quad and TImode in VSX registers, only
7636 allow register indirect addresses. This will allow the values to go in
7637 either GPRs or VSX registers without reloading. The vector types would
7638 tend to go into VSX registers, so we allow REG+REG, while TImode seems
7639 somewhat split, in that some uses are GPR based, and some VSX based. */
7640 if (mode
== TImode
&& TARGET_QUAD_MEMORY
&& TARGET_VSX_TIMODE
)
7642 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
7645 && GET_CODE (x
) == PLUS
7646 && GET_CODE (XEXP (x
, 0)) == REG
7647 && (XEXP (x
, 0) == virtual_stack_vars_rtx
7648 || XEXP (x
, 0) == arg_pointer_rtx
)
7649 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
7651 if (rs6000_legitimate_offset_address_p (mode
, x
, reg_ok_strict
, false))
7655 && ((TARGET_HARD_FLOAT
&& TARGET_FPRS
&& TARGET_DOUBLE_FLOAT
)
7657 || (mode
!= DFmode
&& mode
!= DDmode
)
7658 || (TARGET_E500_DOUBLE
&& mode
!= DDmode
))
7659 && (TARGET_POWERPC64
|| mode
!= DImode
)
7660 && (mode
!= TImode
|| VECTOR_MEM_VSX_P (TImode
))
7662 && !avoiding_indexed_address_p (mode
)
7663 && legitimate_indexed_address_p (x
, reg_ok_strict
))
7665 if (TARGET_UPDATE
&& GET_CODE (x
) == PRE_MODIFY
7666 && mode_supports_pre_modify_p (mode
)
7667 && legitimate_indirect_address_p (XEXP (x
, 0), reg_ok_strict
)
7668 && (rs6000_legitimate_offset_address_p (mode
, XEXP (x
, 1),
7669 reg_ok_strict
, false)
7670 || (!avoiding_indexed_address_p (mode
)
7671 && legitimate_indexed_address_p (XEXP (x
, 1), reg_ok_strict
)))
7672 && rtx_equal_p (XEXP (XEXP (x
, 1), 0), XEXP (x
, 0)))
7674 if (reg_offset_p
&& legitimate_lo_sum_address_p (mode
, x
, reg_ok_strict
))
7679 /* Debug version of rs6000_legitimate_address_p. */
7681 rs6000_debug_legitimate_address_p (enum machine_mode mode
, rtx x
,
7684 bool ret
= rs6000_legitimate_address_p (mode
, x
, reg_ok_strict
);
7686 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
7687 "strict = %d, reload = %s, code = %s\n",
7688 ret
? "true" : "false",
7689 GET_MODE_NAME (mode
),
7693 : (reload_in_progress
? "progress" : "before")),
7694 GET_RTX_NAME (GET_CODE (x
)));
7700 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
7703 rs6000_mode_dependent_address_p (const_rtx addr
,
7704 addr_space_t as ATTRIBUTE_UNUSED
)
7706 return rs6000_mode_dependent_address_ptr (addr
);
7709 /* Go to LABEL if ADDR (a legitimate address expression)
7710 has an effect that depends on the machine mode it is used for.
7712 On the RS/6000 this is true of all integral offsets (since AltiVec
7713 and VSX modes don't allow them) or is a pre-increment or decrement.
7715 ??? Except that due to conceptual problems in offsettable_address_p
7716 we can't really report the problems of integral offsets. So leave
7717 this assuming that the adjustable offset must be valid for the
7718 sub-words of a TFmode operand, which is what we had before. */
7721 rs6000_mode_dependent_address (const_rtx addr
)
7723 switch (GET_CODE (addr
))
7726 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
7727 is considered a legitimate address before reload, so there
7728 are no offset restrictions in that case. Note that this
7729 condition is safe in strict mode because any address involving
7730 virtual_stack_vars_rtx or arg_pointer_rtx would already have
7731 been rejected as illegitimate. */
7732 if (XEXP (addr
, 0) != virtual_stack_vars_rtx
7733 && XEXP (addr
, 0) != arg_pointer_rtx
7734 && GET_CODE (XEXP (addr
, 1)) == CONST_INT
)
7736 unsigned HOST_WIDE_INT val
= INTVAL (XEXP (addr
, 1));
7737 return val
+ 0x8000 >= 0x10000 - (TARGET_POWERPC64
? 8 : 12);
7742 /* Anything in the constant pool is sufficiently aligned that
7743 all bytes have the same high part address. */
7744 return !legitimate_constant_pool_address_p (addr
, QImode
, false);
7746 /* Auto-increment cases are now treated generically in recog.c. */
7748 return TARGET_UPDATE
;
7750 /* AND is only allowed in Altivec loads. */
7761 /* Debug version of rs6000_mode_dependent_address. */
7763 rs6000_debug_mode_dependent_address (const_rtx addr
)
7765 bool ret
= rs6000_mode_dependent_address (addr
);
7767 fprintf (stderr
, "\nrs6000_mode_dependent_address: ret = %s\n",
7768 ret
? "true" : "false");
7774 /* Implement FIND_BASE_TERM. */
7777 rs6000_find_base_term (rtx op
)
7782 if (GET_CODE (base
) == CONST
)
7783 base
= XEXP (base
, 0);
7784 if (GET_CODE (base
) == PLUS
)
7785 base
= XEXP (base
, 0);
7786 if (GET_CODE (base
) == UNSPEC
)
7787 switch (XINT (base
, 1))
7790 case UNSPEC_MACHOPIC_OFFSET
:
7791 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
7792 for aliasing purposes. */
7793 return XVECEXP (base
, 0, 0);
7799 /* More elaborate version of recog's offsettable_memref_p predicate
7800 that works around the ??? note of rs6000_mode_dependent_address.
7801 In particular it accepts
7803 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
7805 in 32-bit mode, that the recog predicate rejects. */
7808 rs6000_offsettable_memref_p (rtx op
, enum machine_mode reg_mode
)
7815 /* First mimic offsettable_memref_p. */
7816 if (offsettable_address_p (true, GET_MODE (op
), XEXP (op
, 0)))
7819 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
7820 the latter predicate knows nothing about the mode of the memory
7821 reference and, therefore, assumes that it is the largest supported
7822 mode (TFmode). As a consequence, legitimate offsettable memory
7823 references are rejected. rs6000_legitimate_offset_address_p contains
7824 the correct logic for the PLUS case of rs6000_mode_dependent_address,
7825 at least with a little bit of help here given that we know the
7826 actual registers used. */
7827 worst_case
= ((TARGET_POWERPC64
&& GET_MODE_CLASS (reg_mode
) == MODE_INT
)
7828 || GET_MODE_SIZE (reg_mode
) == 4);
7829 return rs6000_legitimate_offset_address_p (GET_MODE (op
), XEXP (op
, 0),
7833 /* Change register usage conditional on target flags. */
7835 rs6000_conditional_register_usage (void)
7839 if (TARGET_DEBUG_TARGET
)
7840 fprintf (stderr
, "rs6000_conditional_register_usage called\n");
7842 /* Set MQ register fixed (already call_used) so that it will not be
7846 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
7848 fixed_regs
[13] = call_used_regs
[13]
7849 = call_really_used_regs
[13] = 1;
7851 /* Conditionally disable FPRs. */
7852 if (TARGET_SOFT_FLOAT
|| !TARGET_FPRS
)
7853 for (i
= 32; i
< 64; i
++)
7854 fixed_regs
[i
] = call_used_regs
[i
]
7855 = call_really_used_regs
[i
] = 1;
7857 /* The TOC register is not killed across calls in a way that is
7858 visible to the compiler. */
7859 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
7860 call_really_used_regs
[2] = 0;
7862 if (DEFAULT_ABI
== ABI_V4
7863 && PIC_OFFSET_TABLE_REGNUM
!= INVALID_REGNUM
7865 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
7867 if (DEFAULT_ABI
== ABI_V4
7868 && PIC_OFFSET_TABLE_REGNUM
!= INVALID_REGNUM
7870 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
7871 = call_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
7872 = call_really_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
7874 if (DEFAULT_ABI
== ABI_DARWIN
7875 && PIC_OFFSET_TABLE_REGNUM
!= INVALID_REGNUM
)
7876 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
7877 = call_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
7878 = call_really_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
7880 if (TARGET_TOC
&& TARGET_MINIMAL_TOC
)
7881 fixed_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
]
7882 = call_used_regs
[RS6000_PIC_OFFSET_TABLE_REGNUM
] = 1;
7886 global_regs
[SPEFSCR_REGNO
] = 1;
7887 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
7888 registers in prologues and epilogues. We no longer use r14
7889 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
7890 pool for link-compatibility with older versions of GCC. Once
7891 "old" code has died out, we can return r14 to the allocation
7894 = call_used_regs
[14]
7895 = call_really_used_regs
[14] = 1;
7898 if (!TARGET_ALTIVEC
&& !TARGET_VSX
)
7900 for (i
= FIRST_ALTIVEC_REGNO
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
7901 fixed_regs
[i
] = call_used_regs
[i
] = call_really_used_regs
[i
] = 1;
7902 call_really_used_regs
[VRSAVE_REGNO
] = 1;
7905 if (TARGET_ALTIVEC
|| TARGET_VSX
)
7906 global_regs
[VSCR_REGNO
] = 1;
7908 if (TARGET_ALTIVEC_ABI
)
7910 for (i
= FIRST_ALTIVEC_REGNO
; i
< FIRST_ALTIVEC_REGNO
+ 20; ++i
)
7911 call_used_regs
[i
] = call_really_used_regs
[i
] = 1;
7913 /* AIX reserves VR20:31 in non-extended ABI mode. */
7915 for (i
= FIRST_ALTIVEC_REGNO
+ 20; i
< FIRST_ALTIVEC_REGNO
+ 32; ++i
)
7916 fixed_regs
[i
] = call_used_regs
[i
] = call_really_used_regs
[i
] = 1;
7921 /* Output insns to set DEST equal to the constant SOURCE as a series of
7922 lis, ori and shl instructions and return TRUE. */
7925 rs6000_emit_set_const (rtx dest
, rtx source
)
7927 enum machine_mode mode
= GET_MODE (dest
);
7932 gcc_checking_assert (CONST_INT_P (source
));
7933 c
= INTVAL (source
);
7938 emit_insn (gen_rtx_SET (VOIDmode
, dest
, source
));
7942 temp
= !can_create_pseudo_p () ? dest
: gen_reg_rtx (SImode
);
7944 emit_insn (gen_rtx_SET (VOIDmode
, copy_rtx (temp
),
7945 GEN_INT (c
& ~(HOST_WIDE_INT
) 0xffff)));
7946 emit_insn (gen_rtx_SET (VOIDmode
, dest
,
7947 gen_rtx_IOR (SImode
, copy_rtx (temp
),
7948 GEN_INT (c
& 0xffff))));
7952 if (!TARGET_POWERPC64
)
7956 hi
= operand_subword_force (copy_rtx (dest
), WORDS_BIG_ENDIAN
== 0,
7958 lo
= operand_subword_force (dest
, WORDS_BIG_ENDIAN
!= 0,
7960 emit_move_insn (hi
, GEN_INT (c
>> 32));
7961 c
= ((c
& 0xffffffff) ^ 0x80000000) - 0x80000000;
7962 emit_move_insn (lo
, GEN_INT (c
));
7965 rs6000_emit_set_long_const (dest
, c
);
7972 insn
= get_last_insn ();
7973 set
= single_set (insn
);
7974 if (! CONSTANT_P (SET_SRC (set
)))
7975 set_unique_reg_note (insn
, REG_EQUAL
, GEN_INT (c
));
/* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
   Output insns to set DEST equal to the constant C as a series of
   lis, ori and shl instructions.  */

static void
rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
{
  rtx temp;
  HOST_WIDE_INT ud1, ud2, ud3, ud4;

  ud1 = c & 0xffff;
  c = c >> 16;
  ud2 = c & 0xffff;
  c = c >> 16;
  ud3 = c & 0xffff;
  c = c >> 16;
  ud4 = c & 0xffff;

  if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
      || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
    emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));

  else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
	   || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
		      GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
      if (ud1 != 0)
	emit_move_insn (dest,
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud1)));
    }
  else if (ud3 == 0 && ud4 == 0)
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      gcc_assert (ud2 & 0x8000);
      emit_move_insn (copy_rtx (temp),
		      GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
      if (ud1 != 0)
	emit_move_insn (copy_rtx (temp),
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud1)));
      emit_move_insn (dest,
		      gen_rtx_ZERO_EXTEND (DImode,
					   gen_lowpart (SImode,
							copy_rtx (temp))));
    }
  else if ((ud4 == 0xffff && (ud3 & 0x8000))
	   || (ud4 == 0 && ! (ud3 & 0x8000)))
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      emit_move_insn (copy_rtx (temp),
		      GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
      if (ud2 != 0)
	emit_move_insn (copy_rtx (temp),
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud2)));
      emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
		      gen_rtx_ASHIFT (DImode, copy_rtx (temp),
				      GEN_INT (16)));
      if (ud1 != 0)
	emit_move_insn (dest,
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud1)));
    }
  else
    {
      temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);

      emit_move_insn (copy_rtx (temp),
		      GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
      if (ud3 != 0)
	emit_move_insn (copy_rtx (temp),
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud3)));

      emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
		      gen_rtx_ASHIFT (DImode, copy_rtx (temp),
				      GEN_INT (32)));
      if (ud2 != 0)
	emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud2 << 16)));
      if (ud1 != 0)
	emit_move_insn (dest,
			gen_rtx_IOR (DImode, copy_rtx (temp),
				     GEN_INT (ud1)));
    }
}
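
/* Illustrative sketch (standalone, kept out of the build by the #if 0
   guard): how a 64-bit constant decomposes into the 16-bit chunks
   ud1..ud4 used above, and how the ((x ^ 0x8000) - 0x8000) idiom
   sign-extends a 16-bit chunk so a single sign-extending load
   immediate can materialize it.  The function name and sample
   constants below are hypothetical, not part of rs6000.c.  */
#if 0
#include <stdio.h>
#include <stdint.h>

static void
show_chunks (int64_t c)
{
  uint64_t u = (uint64_t) c;
  unsigned ud1 = u & 0xffff;
  unsigned ud2 = (u >> 16) & 0xffff;
  unsigned ud3 = (u >> 32) & 0xffff;
  unsigned ud4 = (u >> 48) & 0xffff;

  /* Sign-extend the low chunk exactly as the code above does.  */
  int64_t lo_sext = ((int64_t) ud1 ^ 0x8000) - 0x8000;

  printf ("%016llx -> ud4=%04x ud3=%04x ud2=%04x ud1=%04x (ud1 sext: %lld)\n",
	  (unsigned long long) u, ud4, ud3, ud2, ud1, (long long) lo_sext);
}

int
main (void)
{
  show_chunks (0x12345678deadbeefLL);	/* needs the full lis/ori/shift chain */
  show_chunks (-2);			/* ud2..ud4 all 0xffff: one insn      */
  return 0;
}
#endif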
/* Helper for the following.  Get rid of [r+r] memory refs
   in cases where it won't work (TImode, TFmode, TDmode, PTImode).  */

static void
rs6000_eliminate_indexed_memrefs (rtx operands[2])
{
  if (reload_in_progress)
    return;

  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (XEXP (operands[0], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
					       GET_MODE (operands[0]), false))
    operands[0]
      = replace_equiv_address (operands[0],
			       copy_addr_to_reg (XEXP (operands[0], 0)));

  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
					       GET_MODE (operands[1]), false))
    operands[1]
      = replace_equiv_address (operands[1],
			       copy_addr_to_reg (XEXP (operands[1], 0)));
}
/* Generate a vector of constants to permute MODE for a little-endian
   storage operation by swapping the two halves of a vector.  */

rtvec
rs6000_const_vec (enum machine_mode mode)
{
  int i, subparts;
  rtvec v;

  switch (mode)
    {
    case V1TImode:
      subparts = 1;
      break;
    case V2DFmode:
    case V2DImode:
      subparts = 2;
      break;
    case V4SFmode:
    case V4SImode:
      subparts = 4;
      break;
    case V8HImode:
      subparts = 8;
      break;
    case V16QImode:
      subparts = 16;
      break;
    default:
      gcc_unreachable ();
    }

  v = rtvec_alloc (subparts);

  for (i = 0; i < subparts / 2; ++i)
    RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
  for (i = subparts / 2; i < subparts; ++i)
    RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);

  return v;
}
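
/* Illustrative sketch (standalone, not compiled here): the permutation
   built by rs6000_const_vec simply exchanges the two halves of the
   vector, sending element i to i +/- subparts/2.  The names below are
   hypothetical.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int subparts = 4;		/* e.g. a V4SImode vector */
  int perm[16];
  int i;

  for (i = 0; i < subparts / 2; ++i)
    perm[i] = i + subparts / 2;
  for (i = subparts / 2; i < subparts; ++i)
    perm[i] = i - subparts / 2;

  for (i = 0; i < subparts; ++i)
    printf ("result element %d takes source element %d\n", i, perm[i]);
  /* Prints 2, 3, 0, 1: the two halves swap places.  */
  return 0;
}
#endif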
/* Generate a permute rtx that represents an lxvd2x, stxvd2x, or xxpermdi
   for a VSX load or store operation.  */

rtx
rs6000_gen_le_vsx_permute (rtx source, enum machine_mode mode)
{
  rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
  return gen_rtx_VEC_SELECT (mode, source, par);
}
/* Emit a little-endian load from vector memory location SOURCE to VSX
   register DEST in mode MODE.  The load is done with two permuting
   insns that represent an lxvd2x and xxpermdi.  */

static void
rs6000_emit_le_vsx_load (rtx dest, rtx source, enum machine_mode mode)
{
  rtx tmp, permute_mem, permute_reg;

  /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
     V1TImode).  */
  if (mode == TImode || mode == V1TImode)
    {
      mode = V2DImode;
      dest = gen_lowpart (V2DImode, dest);
      source = adjust_address (source, V2DImode, 0);
    }

  tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
  permute_mem = rs6000_gen_le_vsx_permute (source, mode);
  permute_reg = rs6000_gen_le_vsx_permute (tmp, mode);
  emit_insn (gen_rtx_SET (VOIDmode, tmp, permute_mem));
  emit_insn (gen_rtx_SET (VOIDmode, dest, permute_reg));
}
/* Emit a little-endian store to vector memory location DEST from VSX
   register SOURCE in mode MODE.  The store is done with two permuting
   insns that represent an xxpermdi and an stxvd2x.  */

static void
rs6000_emit_le_vsx_store (rtx dest, rtx source, enum machine_mode mode)
{
  rtx tmp, permute_src, permute_tmp;

  /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
     V1TImode).  */
  if (mode == TImode || mode == V1TImode)
    {
      mode = V2DImode;
      dest = adjust_address (dest, V2DImode, 0);
      source = gen_lowpart (V2DImode, source);
    }

  tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
  permute_src = rs6000_gen_le_vsx_permute (source, mode);
  permute_tmp = rs6000_gen_le_vsx_permute (tmp, mode);
  emit_insn (gen_rtx_SET (VOIDmode, tmp, permute_src));
  emit_insn (gen_rtx_SET (VOIDmode, dest, permute_tmp));
}
/* Emit a sequence representing a little-endian VSX load or store,
   moving data from SOURCE to DEST in mode MODE.  This is done
   separately from rs6000_emit_move to ensure it is called only
   during expand.  LE VSX loads and stores introduced later are
   handled with a split.  The expand-time RTL generation allows
   us to optimize away redundant pairs of register-permutes.  */

void
rs6000_emit_le_vsx_move (rtx dest, rtx source, enum machine_mode mode)
{
  gcc_assert (!BYTES_BIG_ENDIAN
	      && VECTOR_MEM_VSX_P (mode)
	      && !gpr_or_gpr_p (dest, source)
	      && (MEM_P (source) ^ MEM_P (dest)));

  if (MEM_P (source))
    {
      gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
      rs6000_emit_le_vsx_load (dest, source, mode);
    }
  else
    {
      if (!REG_P (source))
	source = force_reg (mode, source);
      rs6000_emit_le_vsx_store (dest, source, mode);
    }
}
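
/* Illustrative sketch (standalone, not compiled here): why the
   two-permute sequences above are correct.  On little-endian, an
   lxvd2x-style access presents the two doublewords of a vector in
   swapped order; the following xxpermdi-style swap restores the
   expected order, and two consecutive swaps cancel, which is what lets
   expand-time RTL generation delete redundant permute pairs.  Names
   here are hypothetical.  */
#if 0
#include <stdio.h>
#include <stdint.h>

static void
swap_doublewords (uint64_t v[2])
{
  uint64_t t = v[0];
  v[0] = v[1];
  v[1] = t;
}

int
main (void)
{
  uint64_t mem[2] = { 0x1111111111111111ULL, 0x2222222222222222ULL };
  uint64_t reg[2];

  /* "lxvd2x": the register sees the doublewords swapped.  */
  reg[0] = mem[1];
  reg[1] = mem[0];

  /* "xxpermdi": swap again, recovering the in-memory order.  */
  swap_doublewords (reg);

  printf ("reg = { %016llx, %016llx }\n",
	  (unsigned long long) reg[0], (unsigned long long) reg[1]);
  return 0;
}
#endif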
8225 /* Emit a move from SOURCE to DEST in mode MODE. */
8227 rs6000_emit_move (rtx dest
, rtx source
, enum machine_mode mode
)
8231 operands
[1] = source
;
8233 if (TARGET_DEBUG_ADDR
)
8236 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
8237 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
8238 GET_MODE_NAME (mode
),
8241 can_create_pseudo_p ());
8243 fprintf (stderr
, "source:\n");
8247 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
8248 if (CONST_WIDE_INT_P (operands
[1])
8249 && GET_MODE_BITSIZE (mode
) <= HOST_BITS_PER_WIDE_INT
)
8251 /* This should be fixed with the introduction of CONST_WIDE_INT. */
8255 /* Check if GCC is setting up a block move that will end up using FP
8256 registers as temporaries. We must make sure this is acceptable. */
8257 if (GET_CODE (operands
[0]) == MEM
8258 && GET_CODE (operands
[1]) == MEM
8260 && (SLOW_UNALIGNED_ACCESS (DImode
, MEM_ALIGN (operands
[0]))
8261 || SLOW_UNALIGNED_ACCESS (DImode
, MEM_ALIGN (operands
[1])))
8262 && ! (SLOW_UNALIGNED_ACCESS (SImode
, (MEM_ALIGN (operands
[0]) > 32
8263 ? 32 : MEM_ALIGN (operands
[0])))
8264 || SLOW_UNALIGNED_ACCESS (SImode
, (MEM_ALIGN (operands
[1]) > 32
8266 : MEM_ALIGN (operands
[1]))))
8267 && ! MEM_VOLATILE_P (operands
[0])
8268 && ! MEM_VOLATILE_P (operands
[1]))
8270 emit_move_insn (adjust_address (operands
[0], SImode
, 0),
8271 adjust_address (operands
[1], SImode
, 0));
8272 emit_move_insn (adjust_address (copy_rtx (operands
[0]), SImode
, 4),
8273 adjust_address (copy_rtx (operands
[1]), SImode
, 4));
8277 if (can_create_pseudo_p () && GET_CODE (operands
[0]) == MEM
8278 && !gpc_reg_operand (operands
[1], mode
))
8279 operands
[1] = force_reg (mode
, operands
[1]);
8281 /* Recognize the case where operand[1] is a reference to thread-local
8282 data and load its address to a register. */
8283 if (tls_referenced_p (operands
[1]))
8285 enum tls_model model
;
8286 rtx tmp
= operands
[1];
8289 if (GET_CODE (tmp
) == CONST
&& GET_CODE (XEXP (tmp
, 0)) == PLUS
)
8291 addend
= XEXP (XEXP (tmp
, 0), 1);
8292 tmp
= XEXP (XEXP (tmp
, 0), 0);
8295 gcc_assert (GET_CODE (tmp
) == SYMBOL_REF
);
8296 model
= SYMBOL_REF_TLS_MODEL (tmp
);
8297 gcc_assert (model
!= 0);
8299 tmp
= rs6000_legitimize_tls_address (tmp
, model
);
8302 tmp
= gen_rtx_PLUS (mode
, tmp
, addend
);
8303 tmp
= force_operand (tmp
, operands
[0]);
8308 /* Handle the case where reload calls us with an invalid address. */
8309 if (reload_in_progress
&& mode
== Pmode
8310 && (! general_operand (operands
[1], mode
)
8311 || ! nonimmediate_operand (operands
[0], mode
)))
8314 /* 128-bit constant floating-point values on Darwin should really be
8315 loaded as two parts. */
8316 if (!TARGET_IEEEQUAD
&& TARGET_LONG_DOUBLE_128
8317 && mode
== TFmode
&& GET_CODE (operands
[1]) == CONST_DOUBLE
)
8319 rs6000_emit_move (simplify_gen_subreg (DFmode
, operands
[0], mode
, 0),
8320 simplify_gen_subreg (DFmode
, operands
[1], mode
, 0),
8322 rs6000_emit_move (simplify_gen_subreg (DFmode
, operands
[0], mode
,
8323 GET_MODE_SIZE (DFmode
)),
8324 simplify_gen_subreg (DFmode
, operands
[1], mode
,
8325 GET_MODE_SIZE (DFmode
)),
8330 if (reload_in_progress
&& cfun
->machine
->sdmode_stack_slot
!= NULL_RTX
)
8331 cfun
->machine
->sdmode_stack_slot
=
8332 eliminate_regs (cfun
->machine
->sdmode_stack_slot
, VOIDmode
, NULL_RTX
);
8335 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
8336 p1:SD) if p1 is not of floating point class and p0 is spilled as
8337 we can have no analogous movsd_store for this. */
8338 if (lra_in_progress
&& mode
== DDmode
8339 && REG_P (operands
[0]) && REGNO (operands
[0]) >= FIRST_PSEUDO_REGISTER
8340 && reg_preferred_class (REGNO (operands
[0])) == NO_REGS
8341 && GET_CODE (operands
[1]) == SUBREG
&& REG_P (SUBREG_REG (operands
[1]))
8342 && GET_MODE (SUBREG_REG (operands
[1])) == SDmode
)
8345 int regno
= REGNO (SUBREG_REG (operands
[1]));
8347 if (regno
>= FIRST_PSEUDO_REGISTER
)
8349 cl
= reg_preferred_class (regno
);
8350 regno
= cl
== NO_REGS
? -1 : ira_class_hard_regs
[cl
][1];
8352 if (regno
>= 0 && ! FP_REGNO_P (regno
))
8355 operands
[0] = gen_lowpart_SUBREG (SDmode
, operands
[0]);
8356 operands
[1] = SUBREG_REG (operands
[1]);
8361 && REG_P (operands
[0]) && REGNO (operands
[0]) >= FIRST_PSEUDO_REGISTER
8362 && reg_preferred_class (REGNO (operands
[0])) == NO_REGS
8363 && (REG_P (operands
[1])
8364 || (GET_CODE (operands
[1]) == SUBREG
8365 && REG_P (SUBREG_REG (operands
[1])))))
8367 int regno
= REGNO (GET_CODE (operands
[1]) == SUBREG
8368 ? SUBREG_REG (operands
[1]) : operands
[1]);
8371 if (regno
>= FIRST_PSEUDO_REGISTER
)
8373 cl
= reg_preferred_class (regno
);
8374 gcc_assert (cl
!= NO_REGS
);
8375 regno
= ira_class_hard_regs
[cl
][0];
8377 if (FP_REGNO_P (regno
))
8379 if (GET_MODE (operands
[0]) != DDmode
)
8380 operands
[0] = gen_rtx_SUBREG (DDmode
, operands
[0], 0);
8381 emit_insn (gen_movsd_store (operands
[0], operands
[1]));
8383 else if (INT_REGNO_P (regno
))
8384 emit_insn (gen_movsd_hardfloat (operands
[0], operands
[1]));
8389 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
8390 p:DD)) if p0 is not of floating point class and p1 is spilled as
8391 we can have no analogous movsd_load for this. */
8392 if (lra_in_progress
&& mode
== DDmode
8393 && GET_CODE (operands
[0]) == SUBREG
&& REG_P (SUBREG_REG (operands
[0]))
8394 && GET_MODE (SUBREG_REG (operands
[0])) == SDmode
8395 && REG_P (operands
[1]) && REGNO (operands
[1]) >= FIRST_PSEUDO_REGISTER
8396 && reg_preferred_class (REGNO (operands
[1])) == NO_REGS
)
8399 int regno
= REGNO (SUBREG_REG (operands
[0]));
8401 if (regno
>= FIRST_PSEUDO_REGISTER
)
8403 cl
= reg_preferred_class (regno
);
8404 regno
= cl
== NO_REGS
? -1 : ira_class_hard_regs
[cl
][0];
8406 if (regno
>= 0 && ! FP_REGNO_P (regno
))
8409 operands
[0] = SUBREG_REG (operands
[0]);
8410 operands
[1] = gen_lowpart_SUBREG (SDmode
, operands
[1]);
8415 && (REG_P (operands
[0])
8416 || (GET_CODE (operands
[0]) == SUBREG
8417 && REG_P (SUBREG_REG (operands
[0]))))
8418 && REG_P (operands
[1]) && REGNO (operands
[1]) >= FIRST_PSEUDO_REGISTER
8419 && reg_preferred_class (REGNO (operands
[1])) == NO_REGS
)
8421 int regno
= REGNO (GET_CODE (operands
[0]) == SUBREG
8422 ? SUBREG_REG (operands
[0]) : operands
[0]);
8425 if (regno
>= FIRST_PSEUDO_REGISTER
)
8427 cl
= reg_preferred_class (regno
);
8428 gcc_assert (cl
!= NO_REGS
);
8429 regno
= ira_class_hard_regs
[cl
][0];
8431 if (FP_REGNO_P (regno
))
8433 if (GET_MODE (operands
[1]) != DDmode
)
8434 operands
[1] = gen_rtx_SUBREG (DDmode
, operands
[1], 0);
8435 emit_insn (gen_movsd_load (operands
[0], operands
[1]));
8437 else if (INT_REGNO_P (regno
))
8438 emit_insn (gen_movsd_hardfloat (operands
[0], operands
[1]));
8444 if (reload_in_progress
8446 && cfun
->machine
->sdmode_stack_slot
!= NULL_RTX
8447 && MEM_P (operands
[0])
8448 && rtx_equal_p (operands
[0], cfun
->machine
->sdmode_stack_slot
)
8449 && REG_P (operands
[1]))
8451 if (FP_REGNO_P (REGNO (operands
[1])))
8453 rtx mem
= adjust_address_nv (operands
[0], DDmode
, 0);
8454 mem
= eliminate_regs (mem
, VOIDmode
, NULL_RTX
);
8455 emit_insn (gen_movsd_store (mem
, operands
[1]));
8457 else if (INT_REGNO_P (REGNO (operands
[1])))
8459 rtx mem
= operands
[0];
8460 if (BYTES_BIG_ENDIAN
)
8461 mem
= adjust_address_nv (mem
, mode
, 4);
8462 mem
= eliminate_regs (mem
, VOIDmode
, NULL_RTX
);
8463 emit_insn (gen_movsd_hardfloat (mem
, operands
[1]));
8469 if (reload_in_progress
8471 && REG_P (operands
[0])
8472 && MEM_P (operands
[1])
8473 && cfun
->machine
->sdmode_stack_slot
!= NULL_RTX
8474 && rtx_equal_p (operands
[1], cfun
->machine
->sdmode_stack_slot
))
8476 if (FP_REGNO_P (REGNO (operands
[0])))
8478 rtx mem
= adjust_address_nv (operands
[1], DDmode
, 0);
8479 mem
= eliminate_regs (mem
, VOIDmode
, NULL_RTX
);
8480 emit_insn (gen_movsd_load (operands
[0], mem
));
8482 else if (INT_REGNO_P (REGNO (operands
[0])))
8484 rtx mem
= operands
[1];
8485 if (BYTES_BIG_ENDIAN
)
8486 mem
= adjust_address_nv (mem
, mode
, 4);
8487 mem
= eliminate_regs (mem
, VOIDmode
, NULL_RTX
);
8488 emit_insn (gen_movsd_hardfloat (operands
[0], mem
));
8495 /* FIXME: In the long term, this switch statement should go away
8496 and be replaced by a sequence of tests based on things like
8502 if (CONSTANT_P (operands
[1])
8503 && GET_CODE (operands
[1]) != CONST_INT
)
8504 operands
[1] = force_const_mem (mode
, operands
[1]);
8509 rs6000_eliminate_indexed_memrefs (operands
);
8516 if (CONSTANT_P (operands
[1])
8517 && ! easy_fp_constant (operands
[1], mode
))
8518 operands
[1] = force_const_mem (mode
, operands
[1]);
8532 if (CONSTANT_P (operands
[1])
8533 && !easy_vector_constant (operands
[1], mode
))
8534 operands
[1] = force_const_mem (mode
, operands
[1]);
8539 /* Use default pattern for address of ELF small data */
8542 && DEFAULT_ABI
== ABI_V4
8543 && (GET_CODE (operands
[1]) == SYMBOL_REF
8544 || GET_CODE (operands
[1]) == CONST
)
8545 && small_data_operand (operands
[1], mode
))
8547 emit_insn (gen_rtx_SET (VOIDmode
, operands
[0], operands
[1]));
8551 if (DEFAULT_ABI
== ABI_V4
8552 && mode
== Pmode
&& mode
== SImode
8553 && flag_pic
== 1 && got_operand (operands
[1], mode
))
8555 emit_insn (gen_movsi_got (operands
[0], operands
[1]));
8559 if ((TARGET_ELF
|| DEFAULT_ABI
== ABI_DARWIN
)
8563 && CONSTANT_P (operands
[1])
8564 && GET_CODE (operands
[1]) != HIGH
8565 && GET_CODE (operands
[1]) != CONST_INT
)
8567 rtx target
= (!can_create_pseudo_p ()
8569 : gen_reg_rtx (mode
));
8571 /* If this is a function address on -mcall-aixdesc,
8572 convert it to the address of the descriptor. */
8573 if (DEFAULT_ABI
== ABI_AIX
8574 && GET_CODE (operands
[1]) == SYMBOL_REF
8575 && XSTR (operands
[1], 0)[0] == '.')
8577 const char *name
= XSTR (operands
[1], 0);
8579 while (*name
== '.')
8581 new_ref
= gen_rtx_SYMBOL_REF (Pmode
, name
);
8582 CONSTANT_POOL_ADDRESS_P (new_ref
)
8583 = CONSTANT_POOL_ADDRESS_P (operands
[1]);
8584 SYMBOL_REF_FLAGS (new_ref
) = SYMBOL_REF_FLAGS (operands
[1]);
8585 SYMBOL_REF_USED (new_ref
) = SYMBOL_REF_USED (operands
[1]);
8586 SYMBOL_REF_DATA (new_ref
) = SYMBOL_REF_DATA (operands
[1]);
8587 operands
[1] = new_ref
;
8590 if (DEFAULT_ABI
== ABI_DARWIN
)
8593 if (MACHO_DYNAMIC_NO_PIC_P
)
8595 /* Take care of any required data indirection. */
8596 operands
[1] = rs6000_machopic_legitimize_pic_address (
8597 operands
[1], mode
, operands
[0]);
8598 if (operands
[0] != operands
[1])
8599 emit_insn (gen_rtx_SET (VOIDmode
,
8600 operands
[0], operands
[1]));
8604 emit_insn (gen_macho_high (target
, operands
[1]));
8605 emit_insn (gen_macho_low (operands
[0], target
, operands
[1]));
8609 emit_insn (gen_elf_high (target
, operands
[1]));
8610 emit_insn (gen_elf_low (operands
[0], target
, operands
[1]));
8614 /* If this is a SYMBOL_REF that refers to a constant pool entry,
8615 and we have put it in the TOC, we just need to make a TOC-relative
8618 && GET_CODE (operands
[1]) == SYMBOL_REF
8619 && use_toc_relative_ref (operands
[1]))
8620 operands
[1] = create_TOC_reference (operands
[1], operands
[0]);
8621 else if (mode
== Pmode
8622 && CONSTANT_P (operands
[1])
8623 && GET_CODE (operands
[1]) != HIGH
8624 && ((GET_CODE (operands
[1]) != CONST_INT
8625 && ! easy_fp_constant (operands
[1], mode
))
8626 || (GET_CODE (operands
[1]) == CONST_INT
8627 && (num_insns_constant (operands
[1], mode
)
8628 > (TARGET_CMODEL
!= CMODEL_SMALL
? 3 : 2)))
8629 || (GET_CODE (operands
[0]) == REG
8630 && FP_REGNO_P (REGNO (operands
[0]))))
8631 && !toc_relative_expr_p (operands
[1], false)
8632 && (TARGET_CMODEL
== CMODEL_SMALL
8633 || can_create_pseudo_p ()
8634 || (REG_P (operands
[0])
8635 && INT_REG_OK_FOR_BASE_P (operands
[0], true))))
8639 /* Darwin uses a special PIC legitimizer. */
8640 if (DEFAULT_ABI
== ABI_DARWIN
&& MACHOPIC_INDIRECT
)
8643 rs6000_machopic_legitimize_pic_address (operands
[1], mode
,
8645 if (operands
[0] != operands
[1])
8646 emit_insn (gen_rtx_SET (VOIDmode
, operands
[0], operands
[1]));
8651 /* If we are to limit the number of things we put in the TOC and
8652 this is a symbol plus a constant we can add in one insn,
8653 just put the symbol in the TOC and add the constant. Don't do
8654 this if reload is in progress. */
8655 if (GET_CODE (operands
[1]) == CONST
8656 && TARGET_NO_SUM_IN_TOC
&& ! reload_in_progress
8657 && GET_CODE (XEXP (operands
[1], 0)) == PLUS
8658 && add_operand (XEXP (XEXP (operands
[1], 0), 1), mode
)
8659 && (GET_CODE (XEXP (XEXP (operands
[1], 0), 0)) == LABEL_REF
8660 || GET_CODE (XEXP (XEXP (operands
[1], 0), 0)) == SYMBOL_REF
)
8661 && ! side_effects_p (operands
[0]))
8664 force_const_mem (mode
, XEXP (XEXP (operands
[1], 0), 0));
8665 rtx other
= XEXP (XEXP (operands
[1], 0), 1);
8667 sym
= force_reg (mode
, sym
);
8668 emit_insn (gen_add3_insn (operands
[0], sym
, other
));
8672 operands
[1] = force_const_mem (mode
, operands
[1]);
8675 && GET_CODE (XEXP (operands
[1], 0)) == SYMBOL_REF
8676 && constant_pool_expr_p (XEXP (operands
[1], 0))
8677 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
8678 get_pool_constant (XEXP (operands
[1], 0)),
8679 get_pool_mode (XEXP (operands
[1], 0))))
8681 rtx tocref
= create_TOC_reference (XEXP (operands
[1], 0),
8683 operands
[1] = gen_const_mem (mode
, tocref
);
8684 set_mem_alias_set (operands
[1], get_TOC_alias_set ());
8690 if (!VECTOR_MEM_VSX_P (TImode
))
8691 rs6000_eliminate_indexed_memrefs (operands
);
8695 rs6000_eliminate_indexed_memrefs (operands
);
8699 fatal_insn ("bad move", gen_rtx_SET (VOIDmode
, dest
, source
));
8702 /* Above, we may have called force_const_mem which may have returned
8703 an invalid address. If we can, fix this up; otherwise, reload will
8704 have to deal with it. */
8705 if (GET_CODE (operands
[1]) == MEM
&& ! reload_in_progress
)
8706 operands
[1] = validize_mem (operands
[1]);
8709 emit_insn (gen_rtx_SET (VOIDmode
, operands
[0], operands
[1]));
/* Return true if a structure, union or array containing FIELD should be
   accessed using `BLKMODE'.

   For the SPE, simd types are V2SI, and gcc can be tempted to put the
   entire thing in a DI and use subregs to access the internals.
   store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
   back-end.  Because a single GPR can hold a V2SI, but not a DI, the
   best thing to do is set structs to BLKmode and avoid Severe Tire
   Damage.

   On e500 v2, DF and DI modes suffer from the same anomaly.  DF can
   fit into 1, whereas DI still needs two.  */

static bool
rs6000_member_type_forces_blk (const_tree field, enum machine_mode mode)
{
  return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
	  || (TARGET_E500_DOUBLE && mode == DFmode));
}

/* Nonzero if we can use a floating-point register to pass this arg.  */
#define USE_FP_FOR_ARG_P(CUM,MODE)		\
  (SCALAR_FLOAT_MODE_P (MODE)			\
   && (CUM)->fregno <= FP_ARG_MAX_REG		\
   && TARGET_HARD_FLOAT && TARGET_FPRS)

/* Nonzero if we can use an AltiVec register to pass this arg.  */
#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED)		\
  (ALTIVEC_OR_VSX_VECTOR_MODE (MODE)			\
   && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG		\
   && TARGET_ALTIVEC_ABI				\
   && (NAMED))
/* Walk down the type tree of TYPE counting consecutive base elements.
   If *MODEP is VOIDmode, then set it to the first valid floating point
   or vector type.  If a non-floating point or vector type is found, or
   if a floating point or vector type that doesn't match a non-VOIDmode
   *MODEP is found, then return -1, otherwise return the count in the
   sub-tree.  */
8753 rs6000_aggregate_candidate (const_tree type
, enum machine_mode
*modep
)
8755 enum machine_mode mode
;
8758 switch (TREE_CODE (type
))
8761 mode
= TYPE_MODE (type
);
8762 if (!SCALAR_FLOAT_MODE_P (mode
))
8765 if (*modep
== VOIDmode
)
8774 mode
= TYPE_MODE (TREE_TYPE (type
));
8775 if (!SCALAR_FLOAT_MODE_P (mode
))
8778 if (*modep
== VOIDmode
)
8787 if (!TARGET_ALTIVEC_ABI
|| !TARGET_ALTIVEC
)
8790 /* Use V4SImode as representative of all 128-bit vector types. */
8791 size
= int_size_in_bytes (type
);
8801 if (*modep
== VOIDmode
)
8804 /* Vector modes are considered to be opaque: two vectors are
8805 equivalent for the purposes of being homogeneous aggregates
8806 if they are the same size. */
8815 tree index
= TYPE_DOMAIN (type
);
8817 /* Can't handle incomplete types nor sizes that are not
8819 if (!COMPLETE_TYPE_P (type
)
8820 || TREE_CODE (TYPE_SIZE (type
)) != INTEGER_CST
)
8823 count
= rs6000_aggregate_candidate (TREE_TYPE (type
), modep
);
8826 || !TYPE_MAX_VALUE (index
)
8827 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index
))
8828 || !TYPE_MIN_VALUE (index
)
8829 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index
))
8833 count
*= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index
))
8834 - tree_to_uhwi (TYPE_MIN_VALUE (index
)));
8836 /* There must be no padding. */
8837 if (wi::ne_p (TYPE_SIZE (type
), count
* GET_MODE_BITSIZE (*modep
)))
8849 /* Can't handle incomplete types nor sizes that are not
8851 if (!COMPLETE_TYPE_P (type
)
8852 || TREE_CODE (TYPE_SIZE (type
)) != INTEGER_CST
)
8855 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
8857 if (TREE_CODE (field
) != FIELD_DECL
)
8860 sub_count
= rs6000_aggregate_candidate (TREE_TYPE (field
), modep
);
8866 /* There must be no padding. */
8867 if (wi::ne_p (TYPE_SIZE (type
), count
* GET_MODE_BITSIZE (*modep
)))
8874 case QUAL_UNION_TYPE
:
8876 /* These aren't very interesting except in a degenerate case. */
8881 /* Can't handle incomplete types nor sizes that are not
8883 if (!COMPLETE_TYPE_P (type
)
8884 || TREE_CODE (TYPE_SIZE (type
)) != INTEGER_CST
)
8887 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
8889 if (TREE_CODE (field
) != FIELD_DECL
)
8892 sub_count
= rs6000_aggregate_candidate (TREE_TYPE (field
), modep
);
8895 count
= count
> sub_count
? count
: sub_count
;
8898 /* There must be no padding. */
8899 if (wi::ne_p (TYPE_SIZE (type
), count
* GET_MODE_BITSIZE (*modep
)))
/* If an argument, whose type is described by TYPE and MODE, is a homogeneous
   float or vector aggregate that shall be passed in FP/vector registers
   according to the ELFv2 ABI, return the homogeneous element mode in
   *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.

   Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE.  */

static bool
rs6000_discover_homogeneous_aggregate (enum machine_mode mode, const_tree type,
				       enum machine_mode *elt_mode,
				       int *n_elts)
{
  /* Note that we do not accept complex types at the top level as
     homogeneous aggregates; these types are handled via the
     targetm.calls.split_complex_arg mechanism.  Complex types
     can be elements of homogeneous aggregates, however.  */
  if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
    {
      enum machine_mode field_mode = VOIDmode;
      int field_count = rs6000_aggregate_candidate (type, &field_mode);

      if (field_count > 0)
	{
	  int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
			(GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);

	  /* The ELFv2 ABI allows homogeneous aggregates to occupy
	     up to AGGR_ARG_NUM_REG registers.  */
	  if (field_count * n_regs <= AGGR_ARG_NUM_REG)
	    {
	      if (elt_mode)
		*elt_mode = field_mode;
	      if (n_elts)
		*n_elts = field_count;
	      return true;
	    }
	}
    }

  if (elt_mode)
    *elt_mode = mode;
  if (n_elts)
    *n_elts = 1;
  return false;
}
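
/* Illustrative sketch (standalone, not compiled here): the ELFv2
   register-budget check above in miniature.  Each scalar float element
   needs (size + 7) >> 3 FP registers, and the aggregate qualifies only
   if field_count * n_regs stays within the register limit; the limit
   of 8 is assumed here for the example, and the names are
   hypothetical.  */
#if 0
#include <stdio.h>

#define EXAMPLE_AGGR_ARG_NUM_REG 8

static int
homogeneous_fits (int field_count, int elt_size_bytes)
{
  int n_regs = (elt_size_bytes + 7) >> 3;	/* FP regs per element */
  return field_count * n_regs <= EXAMPLE_AGGR_ARG_NUM_REG;
}

int
main (void)
{
  printf ("struct of 4 doubles: %d\n", homogeneous_fits (4, 8));   /* 1 */
  printf ("struct of 12 floats: %d\n", homogeneous_fits (12, 4));  /* 0 */
  return 0;
}
#endif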
/* Return a nonzero value to say to return the function value in
   memory, just as large structures are always returned.  TYPE will be
   the data type of the value, and FNTYPE will be the type of the
   function doing the returning, or @code{NULL} for libcalls.

   The AIX ABI for the RS/6000 specifies that all structures are
   returned in memory.  The Darwin ABI does the same.

   For the Darwin 64 Bit ABI, a function result can be returned in
   registers or in memory, depending on the size of the return data
   type.  If it is returned in registers, the value occupies the same
   registers as it would if it were the first and only function
   argument.  Otherwise, the function places its result in memory at
   the location pointed to by GPR3.

   The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
   but a draft put them in memory, and GCC used to implement the draft
   instead of the final standard.  Therefore, aix_struct_return
   controls this instead of DEFAULT_ABI; V.4 targets needing backward
   compatibility can change DRAFT_V4_STRUCT_RET to override the
   default, and -m switches get the final word.  See
   rs6000_option_override_internal for more details.

   The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
   long double support is enabled.  These values are returned in memory.

   int_size_in_bytes returns -1 for variable size objects, which go in
   memory always.  The cast to unsigned makes -1 > 8.  */
8988 rs6000_return_in_memory (const_tree type
, const_tree fntype ATTRIBUTE_UNUSED
)
8990 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
8992 && rs6000_darwin64_abi
8993 && TREE_CODE (type
) == RECORD_TYPE
8994 && int_size_in_bytes (type
) > 0)
8996 CUMULATIVE_ARGS valcum
;
9000 valcum
.fregno
= FP_ARG_MIN_REG
;
9001 valcum
.vregno
= ALTIVEC_ARG_MIN_REG
;
9002 /* Do a trial code generation as if this were going to be passed
9003 as an argument; if any part goes in memory, we return NULL. */
9004 valret
= rs6000_darwin64_record_arg (&valcum
, type
, true, true);
9007 /* Otherwise fall through to more conventional ABI rules. */
9010 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers */
9011 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type
), type
,
9015 /* The ELFv2 ABI returns aggregates up to 16B in registers */
9016 if (DEFAULT_ABI
== ABI_ELFv2
&& AGGREGATE_TYPE_P (type
)
9017 && (unsigned HOST_WIDE_INT
) int_size_in_bytes (type
) <= 16)
9020 if (AGGREGATE_TYPE_P (type
)
9021 && (aix_struct_return
9022 || (unsigned HOST_WIDE_INT
) int_size_in_bytes (type
) > 8))
9025 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
9026 modes only exist for GCC vector types if -maltivec. */
9027 if (TARGET_32BIT
&& !TARGET_ALTIVEC_ABI
9028 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type
)))
9031 /* Return synthetic vectors in memory. */
9032 if (TREE_CODE (type
) == VECTOR_TYPE
9033 && int_size_in_bytes (type
) > (TARGET_ALTIVEC_ABI
? 16 : 8))
9035 static bool warned_for_return_big_vectors
= false;
9036 if (!warned_for_return_big_vectors
)
9038 warning (0, "GCC vector returned by reference: "
9039 "non-standard ABI extension with no compatibility guarantee");
9040 warned_for_return_big_vectors
= true;
9045 if (DEFAULT_ABI
== ABI_V4
&& TARGET_IEEEQUAD
&& TYPE_MODE (type
) == TFmode
)
/* Specify whether values returned in registers should be at the most
   significant end of a register.  We want aggregates returned by
   value to match the way aggregates are passed to functions.  */

static bool
rs6000_return_in_msb (const_tree valtype)
{
  return (DEFAULT_ABI == ABI_ELFv2
	  && BYTES_BIG_ENDIAN
	  && AGGREGATE_TYPE_P (valtype)
	  && FUNCTION_ARG_PADDING (TYPE_MODE (valtype), valtype) == upward);
}
#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Return TRUE if a call to function FNDECL may be one that
   potentially affects the function calling ABI of the object file.  */

static bool
call_ABI_of_interest (tree fndecl)
{
  if (symtab->state == EXPANSION)
    {
      struct cgraph_node *c_node;

      /* Libcalls are always interesting.  */
      if (fndecl == NULL_TREE)
	return true;

      /* Any call to an external function is interesting.  */
      if (DECL_EXTERNAL (fndecl))
	return true;

      /* Interesting functions that we are emitting in this object file.  */
      c_node = cgraph_node::get (fndecl);
      c_node = c_node->ultimate_alias_target ();
      return !c_node->only_called_directly_p ();
    }

  return false;
}
#endif
9092 /* Initialize a variable CUM of type CUMULATIVE_ARGS
9093 for a call to a function whose data type is FNTYPE.
9094 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
9096 For incoming args we set the number of arguments in the prototype large
9097 so we never return a PARALLEL. */
9100 init_cumulative_args (CUMULATIVE_ARGS
*cum
, tree fntype
,
9101 rtx libname ATTRIBUTE_UNUSED
, int incoming
,
9102 int libcall
, int n_named_args
,
9103 tree fndecl ATTRIBUTE_UNUSED
,
9104 enum machine_mode return_mode ATTRIBUTE_UNUSED
)
9106 static CUMULATIVE_ARGS zero_cumulative
;
9108 *cum
= zero_cumulative
;
9110 cum
->fregno
= FP_ARG_MIN_REG
;
9111 cum
->vregno
= ALTIVEC_ARG_MIN_REG
;
9112 cum
->prototype
= (fntype
&& prototype_p (fntype
));
9113 cum
->call_cookie
= ((DEFAULT_ABI
== ABI_V4
&& libcall
)
9114 ? CALL_LIBCALL
: CALL_NORMAL
);
9115 cum
->sysv_gregno
= GP_ARG_MIN_REG
;
9116 cum
->stdarg
= stdarg_p (fntype
);
9118 cum
->nargs_prototype
= 0;
9119 if (incoming
|| cum
->prototype
)
9120 cum
->nargs_prototype
= n_named_args
;
9122 /* Check for a longcall attribute. */
9123 if ((!fntype
&& rs6000_default_long_calls
)
9125 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype
))
9126 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype
))))
9127 cum
->call_cookie
|= CALL_LONG
;
9129 if (TARGET_DEBUG_ARG
)
9131 fprintf (stderr
, "\ninit_cumulative_args:");
9134 tree ret_type
= TREE_TYPE (fntype
);
9135 fprintf (stderr
, " ret code = %s,",
9136 get_tree_code_name (TREE_CODE (ret_type
)));
9139 if (cum
->call_cookie
& CALL_LONG
)
9140 fprintf (stderr
, " longcall,");
9142 fprintf (stderr
, " proto = %d, nargs = %d\n",
9143 cum
->prototype
, cum
->nargs_prototype
);
9146 #ifdef HAVE_AS_GNU_ATTRIBUTE
9147 if (DEFAULT_ABI
== ABI_V4
)
9149 cum
->escapes
= call_ABI_of_interest (fndecl
);
9156 return_type
= TREE_TYPE (fntype
);
9157 return_mode
= TYPE_MODE (return_type
);
9160 return_type
= lang_hooks
.types
.type_for_mode (return_mode
, 0);
9162 if (return_type
!= NULL
)
9164 if (TREE_CODE (return_type
) == RECORD_TYPE
9165 && TYPE_TRANSPARENT_AGGR (return_type
))
9167 return_type
= TREE_TYPE (first_field (return_type
));
9168 return_mode
= TYPE_MODE (return_type
);
9170 if (AGGREGATE_TYPE_P (return_type
)
9171 && ((unsigned HOST_WIDE_INT
) int_size_in_bytes (return_type
)
9173 rs6000_returns_struct
= true;
9175 if (SCALAR_FLOAT_MODE_P (return_mode
))
9176 rs6000_passes_float
= true;
9177 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode
)
9178 || SPE_VECTOR_MODE (return_mode
))
9179 rs6000_passes_vector
= true;
9186 && TARGET_ALTIVEC_ABI
9187 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype
))))
9189 error ("cannot return value in vector register because"
9190 " altivec instructions are disabled, use -maltivec"
/* Return true if TYPE must be passed on the stack and not in registers.  */

static bool
rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
{
  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
    return must_pass_in_stack_var_size (mode, type);
  else
    return must_pass_in_stack_var_size_or_pad (mode, type);
}
/* If defined, a C expression which determines whether, and in which
   direction, to pad out an argument with extra space.  The value
   should be of type `enum direction': either `upward' to pad above
   the argument, `downward' to pad below, or `none' to inhibit
   padding.

   For the AIX ABI structs are always stored left shifted in their
   argument slot.  */

enum direction
function_arg_padding (enum machine_mode mode, const_tree type)
{
#ifndef AGGREGATE_PADDING_FIXED
#define AGGREGATE_PADDING_FIXED 0
#endif
#ifndef AGGREGATES_PAD_UPWARD_ALWAYS
#define AGGREGATES_PAD_UPWARD_ALWAYS 0
#endif

  if (!AGGREGATE_PADDING_FIXED)
    {
      /* GCC used to pass structures of the same size as integer types as
	 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
	 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
	 passed padded downward, except that -mstrict-align further
	 muddied the water in that multi-component structures of 2 and 4
	 bytes in size were passed padded upward.

	 The following arranges for best compatibility with previous
	 versions of gcc, but removes the -mstrict-align dependency.  */
      if (BYTES_BIG_ENDIAN)
	{
	  HOST_WIDE_INT size = 0;

	  if (mode == BLKmode)
	    {
	      if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
		size = int_size_in_bytes (type);
	    }
	  else
	    size = GET_MODE_SIZE (mode);

	  if (size == 1 || size == 2 || size == 4)
	    return downward;
	}
      return upward;
    }

  if (AGGREGATES_PAD_UPWARD_ALWAYS)
    {
      if (type != 0 && AGGREGATE_TYPE_P (type))
	return upward;
    }

  /* Fall back to the default.  */
  return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
}
9264 /* If defined, a C expression that gives the alignment boundary, in bits,
9265 of an argument with the specified mode and type. If it is not defined,
9266 PARM_BOUNDARY is used for all arguments.
9268 V.4 wants long longs and doubles to be double word aligned. Just
9269 testing the mode size is a boneheaded way to do this as it means
9270 that other types such as complex int are also double word aligned.
9271 However, we're stuck with this because changing the ABI might break
9272 existing library interfaces.
9274 Doubleword align SPE vectors.
9275 Quadword align Altivec/VSX vectors.
9276 Quadword align large synthetic vector types. */
9279 rs6000_function_arg_boundary (enum machine_mode mode
, const_tree type
)
9281 enum machine_mode elt_mode
;
9284 rs6000_discover_homogeneous_aggregate (mode
, type
, &elt_mode
, &n_elts
);
9286 if (DEFAULT_ABI
== ABI_V4
9287 && (GET_MODE_SIZE (mode
) == 8
9288 || (TARGET_HARD_FLOAT
9290 && (mode
== TFmode
|| mode
== TDmode
))))
9292 else if (SPE_VECTOR_MODE (mode
)
9293 || (type
&& TREE_CODE (type
) == VECTOR_TYPE
9294 && int_size_in_bytes (type
) >= 8
9295 && int_size_in_bytes (type
) < 16))
9297 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode
)
9298 || (type
&& TREE_CODE (type
) == VECTOR_TYPE
9299 && int_size_in_bytes (type
) >= 16))
9302 /* Aggregate types that need > 8 byte alignment are quadword-aligned
9303 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
9304 -mcompat-align-parm is used. */
9305 if (((DEFAULT_ABI
== ABI_AIX
&& !rs6000_compat_align_parm
)
9306 || DEFAULT_ABI
== ABI_ELFv2
)
9307 && type
&& TYPE_ALIGN (type
) > 64)
9309 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
9310 or homogeneous float/vector aggregates here. We already handled
9311 vector aggregates above, but still need to check for float here. */
9312 bool aggregate_p
= (AGGREGATE_TYPE_P (type
)
9313 && !SCALAR_FLOAT_MODE_P (elt_mode
));
9315 /* We used to check for BLKmode instead of the above aggregate type
9316 check. Warn when this results in any difference to the ABI. */
9317 if (aggregate_p
!= (mode
== BLKmode
))
9320 if (!warned
&& warn_psabi
)
9323 inform (input_location
,
9324 "the ABI of passing aggregates with %d-byte alignment"
9325 " has changed in GCC 5",
9326 (int) TYPE_ALIGN (type
) / BITS_PER_UNIT
);
9334 /* Similar for the Darwin64 ABI. Note that for historical reasons we
9335 implement the "aggregate type" check as a BLKmode check here; this
9336 means certain aggregate types are in fact not aligned. */
9337 if (TARGET_MACHO
&& rs6000_darwin64_abi
9339 && type
&& TYPE_ALIGN (type
) > 64)
9342 return PARM_BOUNDARY
;
/* The offset in words to the start of the parameter save area.  */

static unsigned int
rs6000_parm_offset (void)
{
  return (DEFAULT_ABI == ABI_V4 ? 2
	  : DEFAULT_ABI == ABI_ELFv2 ? 4
	  : 6);
}

/* For a function parm of MODE and TYPE, return the starting word in
   the parameter area.  NWORDS of the parameter area are already used.  */

static unsigned int
rs6000_parm_start (enum machine_mode mode, const_tree type,
		   unsigned int nwords)
{
  unsigned int align;

  align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
  return nwords + (-(rs6000_parm_offset () + nwords) & align);
}

/* Compute the size (in words) of a function argument.  */

static unsigned long
rs6000_arg_size (enum machine_mode mode, const_tree type)
{
  unsigned long size;

  if (mode != BLKmode)
    size = GET_MODE_SIZE (mode);
  else
    size = int_size_in_bytes (type);

  if (TARGET_32BIT)
    return (size + 3) >> 2;
  else
    return (size + 7) >> 3;
}
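
/* Illustrative sketch (standalone, not compiled here): the two rounding
   computations above.  rs6000_parm_start rounds the absolute word
   position (offset + nwords) up to the argument's alignment and then
   converts back to a count relative to the save area; rs6000_arg_size
   rounds the byte size up to whole words.  Names and sample numbers
   below are hypothetical.  */
#if 0
#include <stdio.h>

static unsigned int
parm_start_example (unsigned int offset, unsigned int nwords,
		    unsigned int boundary_words)
{
  unsigned int align = boundary_words - 1;
  return nwords + (-(offset + nwords) & align);
}

static unsigned long
arg_size_example (unsigned long size_bytes, int is_64bit)
{
  return is_64bit ? (size_bytes + 7) >> 3 : (size_bytes + 3) >> 2;
}

int
main (void)
{
  /* A 4-word area offset, 3 words already used, doubleword alignment:
     word 7 is odd, so the argument starts one word later, at word 8.  */
  printf ("start word: %u\n", parm_start_example (4, 3, 2));	/* 4 */

  /* A 20-byte argument occupies 3 doublewords in 64-bit mode.  */
  printf ("size in words: %lu\n", arg_size_example (20, 1));	/* 3 */
  return 0;
}
#endif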
9386 /* Use this to flush pending int fields. */
9389 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS
*cum
,
9390 HOST_WIDE_INT bitpos
, int final
)
9392 unsigned int startbit
, endbit
;
9393 int intregs
, intoffset
;
9394 enum machine_mode mode
;
9396 /* Handle the situations where a float is taking up the first half
9397 of the GPR, and the other half is empty (typically due to
9398 alignment restrictions). We can detect this by a 8-byte-aligned
9399 int field, or by seeing that this is the final flush for this
9400 argument. Count the word and continue on. */
9401 if (cum
->floats_in_gpr
== 1
9402 && (cum
->intoffset
% 64 == 0
9403 || (cum
->intoffset
== -1 && final
)))
9406 cum
->floats_in_gpr
= 0;
9409 if (cum
->intoffset
== -1)
9412 intoffset
= cum
->intoffset
;
9413 cum
->intoffset
= -1;
9414 cum
->floats_in_gpr
= 0;
9416 if (intoffset
% BITS_PER_WORD
!= 0)
9418 mode
= mode_for_size (BITS_PER_WORD
- intoffset
% BITS_PER_WORD
,
9420 if (mode
== BLKmode
)
9422 /* We couldn't find an appropriate mode, which happens,
9423 e.g., in packed structs when there are 3 bytes to load.
9424 Back intoffset back to the beginning of the word in this
9426 intoffset
= intoffset
& -BITS_PER_WORD
;
9430 startbit
= intoffset
& -BITS_PER_WORD
;
9431 endbit
= (bitpos
+ BITS_PER_WORD
- 1) & -BITS_PER_WORD
;
9432 intregs
= (endbit
- startbit
) / BITS_PER_WORD
;
9433 cum
->words
+= intregs
;
9434 /* words should be unsigned. */
9435 if ((unsigned)cum
->words
< (endbit
/BITS_PER_WORD
))
9437 int pad
= (endbit
/BITS_PER_WORD
) - cum
->words
;
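
/* Illustrative sketch (standalone, not compiled here): the start/end
   rounding used by the flush above.  A run of integer fields from bit
   "intoffset" up to bit "bitpos" is widened to whole 64-bit words on
   both ends, and the widened range gives the number of GPRs consumed.
   Names are hypothetical.  */
#if 0
#include <stdio.h>

#define EXAMPLE_BITS_PER_WORD 64

static int
int_regs_for_run (int intoffset, int bitpos)
{
  int startbit = intoffset & -EXAMPLE_BITS_PER_WORD;
  int endbit = (bitpos + EXAMPLE_BITS_PER_WORD - 1) & -EXAMPLE_BITS_PER_WORD;
  return (endbit - startbit) / EXAMPLE_BITS_PER_WORD;
}

int
main (void)
{
  printf ("%d\n", int_regs_for_run (0, 32));	/* 1: half a word still uses a GPR */
  printf ("%d\n", int_regs_for_run (32, 96));	/* 2: the run straddles a word boundary */
  return 0;
}
#endif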
9442 /* The darwin64 ABI calls for us to recurse down through structs,
9443 looking for elements passed in registers. Unfortunately, we have
9444 to track int register count here also because of misalignments
9445 in powerpc alignment mode. */
9448 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS
*cum
,
9450 HOST_WIDE_INT startbitpos
)
9454 for (f
= TYPE_FIELDS (type
); f
; f
= DECL_CHAIN (f
))
9455 if (TREE_CODE (f
) == FIELD_DECL
)
9457 HOST_WIDE_INT bitpos
= startbitpos
;
9458 tree ftype
= TREE_TYPE (f
);
9459 enum machine_mode mode
;
9460 if (ftype
== error_mark_node
)
9462 mode
= TYPE_MODE (ftype
);
9464 if (DECL_SIZE (f
) != 0
9465 && tree_fits_uhwi_p (bit_position (f
)))
9466 bitpos
+= int_bit_position (f
);
9468 /* ??? FIXME: else assume zero offset. */
9470 if (TREE_CODE (ftype
) == RECORD_TYPE
)
9471 rs6000_darwin64_record_arg_advance_recurse (cum
, ftype
, bitpos
);
9472 else if (USE_FP_FOR_ARG_P (cum
, mode
))
9474 unsigned n_fpregs
= (GET_MODE_SIZE (mode
) + 7) >> 3;
9475 rs6000_darwin64_record_arg_advance_flush (cum
, bitpos
, 0);
9476 cum
->fregno
+= n_fpregs
;
9477 /* Single-precision floats present a special problem for
9478 us, because they are smaller than an 8-byte GPR, and so
9479 the structure-packing rules combined with the standard
9480 varargs behavior mean that we want to pack float/float
9481 and float/int combinations into a single register's
9482 space. This is complicated by the arg advance flushing,
9483 which works on arbitrarily large groups of int-type
9487 if (cum
->floats_in_gpr
== 1)
9489 /* Two floats in a word; count the word and reset
9492 cum
->floats_in_gpr
= 0;
9494 else if (bitpos
% 64 == 0)
9496 /* A float at the beginning of an 8-byte word;
9497 count it and put off adjusting cum->words until
9498 we see if a arg advance flush is going to do it
9500 cum
->floats_in_gpr
++;
9504 /* The float is at the end of a word, preceded
9505 by integer fields, so the arg advance flush
9506 just above has already set cum->words and
9507 everything is taken care of. */
9511 cum
->words
+= n_fpregs
;
9513 else if (USE_ALTIVEC_FOR_ARG_P (cum
, mode
, 1))
9515 rs6000_darwin64_record_arg_advance_flush (cum
, bitpos
, 0);
9519 else if (cum
->intoffset
== -1)
9520 cum
->intoffset
= bitpos
;
/* Check for an item that needs to be considered specially under the darwin 64
   bit ABI.  These are record types where the mode is BLK or the structure is
   8 bytes in size.  */
static int
rs6000_darwin64_struct_check_p (enum machine_mode mode, const_tree type)
{
  return rs6000_darwin64_abi
	 && ((mode == BLKmode
	      && TREE_CODE (type) == RECORD_TYPE
	      && int_size_in_bytes (type) > 0)
	  || (type && TREE_CODE (type) == RECORD_TYPE
	      && int_size_in_bytes (type) == 8)) ? 1 : 0;
}
9538 /* Update the data in CUM to advance over an argument
9539 of mode MODE and data type TYPE.
9540 (TYPE is null for libcalls where that information may not be available.)
9542 Note that for args passed by reference, function_arg will be called
9543 with MODE and TYPE set to that of the pointer to the arg, not the arg
9547 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS
*cum
, enum machine_mode mode
,
9548 const_tree type
, bool named
, int depth
)
9550 enum machine_mode elt_mode
;
9553 rs6000_discover_homogeneous_aggregate (mode
, type
, &elt_mode
, &n_elts
);
9555 /* Only tick off an argument if we're not recursing. */
9557 cum
->nargs_prototype
--;
9559 #ifdef HAVE_AS_GNU_ATTRIBUTE
9560 if (DEFAULT_ABI
== ABI_V4
9563 if (SCALAR_FLOAT_MODE_P (mode
))
9564 rs6000_passes_float
= true;
9565 else if (named
&& ALTIVEC_OR_VSX_VECTOR_MODE (mode
))
9566 rs6000_passes_vector
= true;
9567 else if (SPE_VECTOR_MODE (mode
)
9569 && cum
->sysv_gregno
<= GP_ARG_MAX_REG
)
9570 rs6000_passes_vector
= true;
9574 if (TARGET_ALTIVEC_ABI
9575 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode
)
9576 || (type
&& TREE_CODE (type
) == VECTOR_TYPE
9577 && int_size_in_bytes (type
) == 16)))
9581 if (USE_ALTIVEC_FOR_ARG_P (cum
, elt_mode
, named
))
9583 cum
->vregno
+= n_elts
;
9585 if (!TARGET_ALTIVEC
)
9586 error ("cannot pass argument in vector register because"
9587 " altivec instructions are disabled, use -maltivec"
9590 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
9591 even if it is going to be passed in a vector register.
9592 Darwin does the same for variable-argument functions. */
9593 if (((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
9595 || (cum
->stdarg
&& DEFAULT_ABI
!= ABI_V4
))
9605 /* Vector parameters must be 16-byte aligned. In 32-bit
9606 mode this means we need to take into account the offset
9607 to the parameter save area. In 64-bit mode, they just
9608 have to start on an even word, since the parameter save
9609 area is 16-byte aligned. */
9611 align
= -(rs6000_parm_offset () + cum
->words
) & 3;
9613 align
= cum
->words
& 1;
9614 cum
->words
+= align
+ rs6000_arg_size (mode
, type
);
9616 if (TARGET_DEBUG_ARG
)
9618 fprintf (stderr
, "function_adv: words = %2d, align=%d, ",
9620 fprintf (stderr
, "nargs = %4d, proto = %d, mode = %4s\n",
9621 cum
->nargs_prototype
, cum
->prototype
,
9622 GET_MODE_NAME (mode
));
9626 else if (TARGET_SPE_ABI
&& TARGET_SPE
&& SPE_VECTOR_MODE (mode
)
9628 && cum
->sysv_gregno
<= GP_ARG_MAX_REG
)
9631 else if (TARGET_MACHO
&& rs6000_darwin64_struct_check_p (mode
, type
))
9633 int size
= int_size_in_bytes (type
);
9634 /* Variable sized types have size == -1 and are
9635 treated as if consisting entirely of ints.
9636 Pad to 16 byte boundary if needed. */
9637 if (TYPE_ALIGN (type
) >= 2 * BITS_PER_WORD
9638 && (cum
->words
% 2) != 0)
9640 /* For varargs, we can just go up by the size of the struct. */
9642 cum
->words
+= (size
+ 7) / 8;
9645 /* It is tempting to say int register count just goes up by
9646 sizeof(type)/8, but this is wrong in a case such as
9647 { int; double; int; } [powerpc alignment]. We have to
9648 grovel through the fields for these too. */
9650 cum
->floats_in_gpr
= 0;
9651 rs6000_darwin64_record_arg_advance_recurse (cum
, type
, 0);
9652 rs6000_darwin64_record_arg_advance_flush (cum
,
9653 size
* BITS_PER_UNIT
, 1);
9655 if (TARGET_DEBUG_ARG
)
9657 fprintf (stderr
, "function_adv: words = %2d, align=%d, size=%d",
9658 cum
->words
, TYPE_ALIGN (type
), size
);
9660 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
9661 cum
->nargs_prototype
, cum
->prototype
,
9662 GET_MODE_NAME (mode
));
9665 else if (DEFAULT_ABI
== ABI_V4
)
9667 if (TARGET_HARD_FLOAT
&& TARGET_FPRS
9668 && ((TARGET_SINGLE_FLOAT
&& mode
== SFmode
)
9669 || (TARGET_DOUBLE_FLOAT
&& mode
== DFmode
)
9670 || (mode
== TFmode
&& !TARGET_IEEEQUAD
)
9671 || mode
== SDmode
|| mode
== DDmode
|| mode
== TDmode
))
9673 /* _Decimal128 must use an even/odd register pair. This assumes
9674 that the register number is odd when fregno is odd. */
9675 if (mode
== TDmode
&& (cum
->fregno
% 2) == 1)
9678 if (cum
->fregno
+ (mode
== TFmode
|| mode
== TDmode
? 1 : 0)
9679 <= FP_ARG_V4_MAX_REG
)
9680 cum
->fregno
+= (GET_MODE_SIZE (mode
) + 7) >> 3;
9683 cum
->fregno
= FP_ARG_V4_MAX_REG
+ 1;
9684 if (mode
== DFmode
|| mode
== TFmode
9685 || mode
== DDmode
|| mode
== TDmode
)
9686 cum
->words
+= cum
->words
& 1;
9687 cum
->words
+= rs6000_arg_size (mode
, type
);
9692 int n_words
= rs6000_arg_size (mode
, type
);
9693 int gregno
= cum
->sysv_gregno
;
9695 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
9696 (r7,r8) or (r9,r10). As does any other 2 word item such
9697 as complex int due to a historical mistake. */
9699 gregno
+= (1 - gregno
) & 1;
9701 /* Multi-reg args are not split between registers and stack. */
9702 if (gregno
+ n_words
- 1 > GP_ARG_MAX_REG
)
9704 /* Long long and SPE vectors are aligned on the stack.
9705 So are other 2 word items such as complex int due to
9706 a historical mistake. */
9708 cum
->words
+= cum
->words
& 1;
9709 cum
->words
+= n_words
;
9712 /* Note: continuing to accumulate gregno past when we've started
9713 spilling to the stack indicates the fact that we've started
9714 spilling to the stack to expand_builtin_saveregs. */
9715 cum
->sysv_gregno
= gregno
+ n_words
;
9718 if (TARGET_DEBUG_ARG
)
9720 fprintf (stderr
, "function_adv: words = %2d, fregno = %2d, ",
9721 cum
->words
, cum
->fregno
);
9722 fprintf (stderr
, "gregno = %2d, nargs = %4d, proto = %d, ",
9723 cum
->sysv_gregno
, cum
->nargs_prototype
, cum
->prototype
);
9724 fprintf (stderr
, "mode = %4s, named = %d\n",
9725 GET_MODE_NAME (mode
), named
);
9730 int n_words
= rs6000_arg_size (mode
, type
);
9731 int start_words
= cum
->words
;
9732 int align_words
= rs6000_parm_start (mode
, type
, start_words
);
9734 cum
->words
= align_words
+ n_words
;
9736 if (SCALAR_FLOAT_MODE_P (elt_mode
)
9737 && TARGET_HARD_FLOAT
&& TARGET_FPRS
)
9739 /* _Decimal128 must be passed in an even/odd float register pair.
9740 This assumes that the register number is odd when fregno is
9742 if (elt_mode
== TDmode
&& (cum
->fregno
% 2) == 1)
9744 cum
->fregno
+= n_elts
* ((GET_MODE_SIZE (elt_mode
) + 7) >> 3);
9747 if (TARGET_DEBUG_ARG
)
9749 fprintf (stderr
, "function_adv: words = %2d, fregno = %2d, ",
9750 cum
->words
, cum
->fregno
);
9751 fprintf (stderr
, "nargs = %4d, proto = %d, mode = %4s, ",
9752 cum
->nargs_prototype
, cum
->prototype
, GET_MODE_NAME (mode
));
9753 fprintf (stderr
, "named = %d, align = %d, depth = %d\n",
9754 named
, align_words
- start_words
, depth
);
static void
rs6000_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
			     const_tree type, bool named)
{
  rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
				 0);
}
static rtx
spe_build_register_parallel (enum machine_mode mode, int gregno)
{
  rtx r1, r3, r5, r7;

  switch (mode)
    {
    case DFmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));

    case DCmode:
    case TFmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      r3 = gen_rtx_REG (DImode, gregno + 2);
      r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
      return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));

    case TCmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      r3 = gen_rtx_REG (DImode, gregno + 2);
      r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
      r5 = gen_rtx_REG (DImode, gregno + 4);
      r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
      r7 = gen_rtx_REG (DImode, gregno + 6);
      r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
      return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));

    default:
      gcc_unreachable ();
    }
}
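
/* Illustrative sketch (standalone, not compiled here): the (register
   number, byte offset) pairs built above.  Each 64-bit piece of an
   E500 value advances the GPR number by two (a 64-bit piece spans a
   pair in the 32-bit register numbering) and lands 8 bytes further
   into the value.  The sample numbers are hypothetical.  */
#if 0
#include <stdio.h>

int
main (void)
{
  int gregno = 5;	/* first GPR assigned to the value */
  int pieces = 4;	/* e.g. a TCmode value: four 8-byte pieces */
  int i;

  for (i = 0; i < pieces; i++)
    printf ("piece %d: r%d, byte offset %d\n", i, gregno + 2 * i, 8 * i);
  return 0;
}
#endif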
9803 /* Determine where to put a SIMD argument on the SPE. */
9805 rs6000_spe_function_arg (const CUMULATIVE_ARGS
*cum
, enum machine_mode mode
,
9808 int gregno
= cum
->sysv_gregno
;
9810 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
9811 are passed and returned in a pair of GPRs for ABI compatibility. */
9812 if (TARGET_E500_DOUBLE
&& (mode
== DFmode
|| mode
== TFmode
9813 || mode
== DCmode
|| mode
== TCmode
))
9815 int n_words
= rs6000_arg_size (mode
, type
);
9817 /* Doubles go in an odd/even register pair (r5/r6, etc). */
9819 gregno
+= (1 - gregno
) & 1;
9821 /* Multi-reg args are not split between registers and stack. */
9822 if (gregno
+ n_words
- 1 > GP_ARG_MAX_REG
)
9825 return spe_build_register_parallel (mode
, gregno
);
9829 int n_words
= rs6000_arg_size (mode
, type
);
9831 /* SPE vectors are put in odd registers. */
9832 if (n_words
== 2 && (gregno
& 1) == 0)
9835 if (gregno
+ n_words
- 1 <= GP_ARG_MAX_REG
)
9838 enum machine_mode m
= SImode
;
9840 r1
= gen_rtx_REG (m
, gregno
);
9841 r1
= gen_rtx_EXPR_LIST (m
, r1
, const0_rtx
);
9842 r2
= gen_rtx_REG (m
, gregno
+ 1);
9843 r2
= gen_rtx_EXPR_LIST (m
, r2
, GEN_INT (4));
9844 return gen_rtx_PARALLEL (mode
, gen_rtvec (2, r1
, r2
));
9851 if (gregno
<= GP_ARG_MAX_REG
)
9852 return gen_rtx_REG (mode
, gregno
);
9858 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
9859 structure between cum->intoffset and bitpos to integer registers. */
9862 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS
*cum
,
9863 HOST_WIDE_INT bitpos
, rtx rvec
[], int *k
)
9865 enum machine_mode mode
;
9867 unsigned int startbit
, endbit
;
9868 int this_regno
, intregs
, intoffset
;
9871 if (cum
->intoffset
== -1)
9874 intoffset
= cum
->intoffset
;
9875 cum
->intoffset
= -1;
9877 /* If this is the trailing part of a word, try to only load that
9878 much into the register. Otherwise load the whole register. Note
9879 that in the latter case we may pick up unwanted bits. It's not a
9880 problem at the moment but may wish to revisit. */
9882 if (intoffset
% BITS_PER_WORD
!= 0)
9884 mode
= mode_for_size (BITS_PER_WORD
- intoffset
% BITS_PER_WORD
,
9886 if (mode
== BLKmode
)
9888 /* We couldn't find an appropriate mode, which happens,
9889 e.g., in packed structs when there are 3 bytes to load.
9890 Back intoffset back to the beginning of the word in this
9892 intoffset
= intoffset
& -BITS_PER_WORD
;
9899 startbit
= intoffset
& -BITS_PER_WORD
;
9900 endbit
= (bitpos
+ BITS_PER_WORD
- 1) & -BITS_PER_WORD
;
9901 intregs
= (endbit
- startbit
) / BITS_PER_WORD
;
9902 this_regno
= cum
->words
+ intoffset
/ BITS_PER_WORD
;
9904 if (intregs
> 0 && intregs
> GP_ARG_NUM_REG
- this_regno
)
9907 intregs
= MIN (intregs
, GP_ARG_NUM_REG
- this_regno
);
9911 intoffset
/= BITS_PER_UNIT
;
9914 regno
= GP_ARG_MIN_REG
+ this_regno
;
9915 reg
= gen_rtx_REG (mode
, regno
);
9917 gen_rtx_EXPR_LIST (VOIDmode
, reg
, GEN_INT (intoffset
));
9920 intoffset
= (intoffset
| (UNITS_PER_WORD
-1)) + 1;
9924 while (intregs
> 0);
9927 /* Recursive workhorse for the following. */
9930 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS
*cum
, const_tree type
,
9931 HOST_WIDE_INT startbitpos
, rtx rvec
[],
9936 for (f
= TYPE_FIELDS (type
); f
; f
= DECL_CHAIN (f
))
9937 if (TREE_CODE (f
) == FIELD_DECL
)
9939 HOST_WIDE_INT bitpos
= startbitpos
;
9940 tree ftype
= TREE_TYPE (f
);
9941 enum machine_mode mode
;
9942 if (ftype
== error_mark_node
)
9944 mode
= TYPE_MODE (ftype
);
9946 if (DECL_SIZE (f
) != 0
9947 && tree_fits_uhwi_p (bit_position (f
)))
9948 bitpos
+= int_bit_position (f
);
9950 /* ??? FIXME: else assume zero offset. */
9952 if (TREE_CODE (ftype
) == RECORD_TYPE
)
9953 rs6000_darwin64_record_arg_recurse (cum
, ftype
, bitpos
, rvec
, k
);
9954 else if (cum
->named
&& USE_FP_FOR_ARG_P (cum
, mode
))
9956 unsigned n_fpreg
= (GET_MODE_SIZE (mode
) + 7) >> 3;
9960 case SCmode
: mode
= SFmode
; break;
9961 case DCmode
: mode
= DFmode
; break;
9962 case TCmode
: mode
= TFmode
; break;
9966 rs6000_darwin64_record_arg_flush (cum
, bitpos
, rvec
, k
);
9967 if (cum
->fregno
+ n_fpreg
> FP_ARG_MAX_REG
+ 1)
9969 gcc_assert (cum
->fregno
== FP_ARG_MAX_REG
9970 && (mode
== TFmode
|| mode
== TDmode
));
9971 /* Long double or _Decimal128 split over regs and memory. */
9972 mode
= DECIMAL_FLOAT_MODE_P (mode
) ? DDmode
: DFmode
;
9976 = gen_rtx_EXPR_LIST (VOIDmode
,
9977 gen_rtx_REG (mode
, cum
->fregno
++),
9978 GEN_INT (bitpos
/ BITS_PER_UNIT
));
9979 if (mode
== TFmode
|| mode
== TDmode
)
9982 else if (cum
->named
&& USE_ALTIVEC_FOR_ARG_P (cum
, mode
, 1))
9984 rs6000_darwin64_record_arg_flush (cum
, bitpos
, rvec
, k
);
9986 = gen_rtx_EXPR_LIST (VOIDmode
,
9987 gen_rtx_REG (mode
, cum
->vregno
++),
9988 GEN_INT (bitpos
/ BITS_PER_UNIT
));
9990 else if (cum
->intoffset
== -1)
9991 cum
->intoffset
= bitpos
;
/* For the darwin64 ABI, we want to construct a PARALLEL consisting of
   the register(s) to be used for each field and subfield of a struct
   being passed by value, along with the offset of where the
   register's value may be found in the block.  FP fields go in FP
   register, vector fields go in vector registers, and everything
   else goes in int registers, packed as in memory.

   This code is also used for function return values.  RETVAL indicates
   whether this is the case.

   Much of this is taken from the SPARC V9 port, which has a similar
   calling convention.  */
static rtx
rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
			    bool named, bool retval)
{
  rtx rvec[FIRST_PSEUDO_REGISTER];
  int k = 1, kbase = 1;
  HOST_WIDE_INT typesize = int_size_in_bytes (type);
  /* This is a copy; modifications are not visible to our caller.  */
  CUMULATIVE_ARGS copy_cum = *orig_cum;
  CUMULATIVE_ARGS *cum = &copy_cum;

  /* Pad to 16 byte boundary if needed.  */
  if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
      && (cum->words % 2) != 0)
    cum->words++;

  cum->intoffset = 0;
  cum->use_stack = 0;
  cum->named = named;

  /* Put entries into rvec[] for individual FP and vector fields, and
     for the chunks of memory that go in int regs.  Note we start at
     element 1; 0 is reserved for an indication of using memory, and
     may or may not be filled in below. */
  rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
  rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);

  /* If any part of the struct went on the stack put all of it there.
     This hack is because the generic code for
     FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
     parts of the struct are not at the beginning.  */
  if (cum->use_stack)
    {
      if (retval)
	return NULL_RTX;    /* doesn't go in registers at all  */
      kbase = 0;
      rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
    }
  if (k > 1 || cum->use_stack)
    return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
  else
    return NULL_RTX;
}
/* Determine where to place an argument in 64-bit mode with 32-bit ABI.  */

static rtx
rs6000_mixed_function_arg (enum machine_mode mode, const_tree type,
			   int align_words)
{
  int n_units;
  int i, k;
  rtx rvec[GP_ARG_NUM_REG + 1];

  if (align_words >= GP_ARG_NUM_REG)
    return NULL_RTX;

  n_units = rs6000_arg_size (mode, type);

  /* Optimize the simple case where the arg fits in one gpr, except in
     the case of BLKmode due to assign_parms assuming that registers are
     BITS_PER_WORD wide.  */
  if (n_units == 0
      || (n_units == 1 && mode != BLKmode))
    return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);

  k = 0;
  if (align_words + n_units > GP_ARG_NUM_REG)
    /* Not all of the arg fits in gprs.  Say that it goes in memory too,
       using a magic NULL_RTX component.
       This is not strictly correct.  Only some of the arg belongs in
       memory, not all of it.  However, the normal scheme using
       function_arg_partial_nregs can result in unusual subregs, eg.
       (subreg:SI (reg:DF) 4), which are not handled well.  The code to
       store the whole arg to memory is often more efficient than code
       to store pieces, and we know that space is available in the right
       place for the whole arg.  */
    rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);

  i = 0;
  do
    {
      rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
      rtx off = GEN_INT (i++ * 4);
      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
    }
  while (++align_words < GP_ARG_NUM_REG && --n_units != 0);

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}
/* We have an argument of MODE and TYPE that goes into FPRs or VRs,
   but must also be copied into the parameter save area starting at
   offset ALIGN_WORDS.  Fill in RVEC with the elements corresponding
   to the GPRs and/or memory.  Return the number of elements used.  */

static int
rs6000_psave_function_arg (enum machine_mode mode, const_tree type,
			   int align_words, rtx *rvec)
{
  int k = 0;

  if (align_words < GP_ARG_NUM_REG)
    {
      int n_words = rs6000_arg_size (mode, type);

      if (align_words + n_words > GP_ARG_NUM_REG
	  || mode == BLKmode
	  || (TARGET_32BIT && TARGET_POWERPC64))
	{
	  /* If this is partially on the stack, then we only
	     include the portion actually in registers here.  */
	  enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
	  int i = 0;

	  if (align_words + n_words > GP_ARG_NUM_REG)
	    /* Not all of the arg fits in gprs.  Say that it goes in memory
	       too, using a magic NULL_RTX component.  Also see comment in
	       rs6000_mixed_function_arg for why the normal
	       function_arg_partial_nregs scheme doesn't work in this case.  */
	    rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);

	  do
	    {
	      rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
	      rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
	      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
	    }
	  while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
	}
      else
	{
	  /* The whole arg fits in gprs.  */
	  rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
	  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
	}
    }
  else
    {
      /* It's entirely in memory.  */
      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
    }

  return k;
}
/* RVEC is a vector of K components of an argument of mode MODE.
   Construct the final function_arg return value from it.  */

static rtx
rs6000_finish_function_arg (enum machine_mode mode, rtx *rvec, int k)
{
  gcc_assert (k >= 1);

  /* Avoid returning a PARALLEL in the trivial cases.  */
  if (k == 1)
    {
      if (XEXP (rvec[0], 0) == NULL_RTX)
	return NULL_RTX;

      if (GET_MODE (XEXP (rvec[0], 0)) == mode)
	return XEXP (rvec[0], 0);
    }

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}
/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.  It is
    not modified in this routine.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On RS/6000 the first eight words of non-FP are normally in registers
   and the rest are pushed.  Under AIX, the first 13 FP args are in registers.
   Under V.4, the first 8 FP args are in registers.

   If this is floating-point and no prototype is specified, we use
   both an FP and integer register (or possibly FP reg and stack).  Library
   functions (when CALL_LIBCALL is set) always have the proper types for args,
   so we can pass the FP value just in one register.  emit_library_function
   doesn't support PARALLEL anyway.

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

static rtx
rs6000_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
		     const_tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  enum rs6000_abi abi = DEFAULT_ABI;
  enum machine_mode elt_mode;
  int n_elts;

  /* Return a marker to indicate whether CR1 needs to set or clear the
     bit that V.4 uses to say fp args were passed in registers.
     Assume that we don't need the marker for software floating point,
     or compiler generated library calls.  */
  if (mode == VOIDmode)
    {
      if (abi == ABI_V4
	  && (cum->call_cookie & CALL_LIBCALL) == 0
	  && (cum->stdarg
	      || (cum->nargs_prototype < 0
		  && (cum->prototype || TARGET_NO_PROTOTYPE))))
	{
	  /* For the SPE, we need to crxor CR6 always.  */
	  if (TARGET_SPE_ABI)
	    return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
	  else if (TARGET_HARD_FLOAT && TARGET_FPRS)
	    return GEN_INT (cum->call_cookie
			    | ((cum->fregno == FP_ARG_MIN_REG)
			       ? CALL_V4_SET_FP_ARGS
			       : CALL_V4_CLEAR_FP_ARGS));
	}

      return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
    }
  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    {
      rtx rslt = rs6000_darwin64_record_arg (cum, type, named,
					     /*retval= */false);
      if (rslt != NULL_RTX)
	return rslt;
      /* Else fall through to usual handling.  */
    }

  if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
    {
      rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
      rtx r, off;
      int i, k = 0;

      /* Do we also need to pass this argument in the parameter
	 save area?  */
      if (TARGET_64BIT && ! cum->prototype)
	{
	  int align_words = (cum->words + 1) & ~1;
	  k = rs6000_psave_function_arg (mode, type, align_words, rvec);
	}

      /* Describe where this argument goes in the vector registers.  */
      for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
	{
	  r = gen_rtx_REG (elt_mode, cum->vregno + i);
	  off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
	  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
	}

      return rs6000_finish_function_arg (mode, rvec, k);
    }
  else if (TARGET_ALTIVEC_ABI
	   && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
	       || (type && TREE_CODE (type) == VECTOR_TYPE
		   && int_size_in_bytes (type) == 16)))
    {
      if (named || abi == ABI_V4)
	return NULL_RTX;
      else
	{
	  /* Vector parameters to varargs functions under AIX or Darwin
	     get passed in memory and possibly also in GPRs.  */
	  int align, align_words, n_words;
	  enum machine_mode part_mode;

	  /* Vector parameters must be 16-byte aligned.  In 32-bit
	     mode this means we need to take into account the offset
	     to the parameter save area.  In 64-bit mode, they just
	     have to start on an even word, since the parameter save
	     area is 16-byte aligned.  */
	  if (TARGET_32BIT)
	    align = -(rs6000_parm_offset () + cum->words) & 3;
	  else
	    align = cum->words & 1;
	  align_words = cum->words + align;

	  /* Out of registers?  Memory, then.  */
	  if (align_words >= GP_ARG_NUM_REG)
	    return NULL_RTX;

	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type, align_words);

	  /* The vector value goes in GPRs.  Only the part of the
	     value in GPRs is reported here.  */
	  part_mode = mode;
	  n_words = rs6000_arg_size (mode, type);
	  if (align_words + n_words > GP_ARG_NUM_REG)
	    /* Fortunately, there are only two possibilities, the value
	       is either wholly in GPRs or half in GPRs and half not.  */
	    part_mode = DImode;

	  return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
	}
    }
  else if (TARGET_SPE_ABI && TARGET_SPE
	   && (SPE_VECTOR_MODE (mode)
	       || (TARGET_E500_DOUBLE && (mode == DFmode
					  || mode == DCmode
					  || mode == TFmode
					  || mode == TCmode))))
    return rs6000_spe_function_arg (cum, mode, type);

  else if (abi == ABI_V4)
    {
      if (TARGET_HARD_FLOAT && TARGET_FPRS
	  && ((TARGET_SINGLE_FLOAT && mode == SFmode)
	      || (TARGET_DOUBLE_FLOAT && mode == DFmode)
	      || (mode == TFmode && !TARGET_IEEEQUAD)
	      || mode == SDmode || mode == DDmode || mode == TDmode))
	{
	  /* _Decimal128 must use an even/odd register pair.  This assumes
	     that the register number is odd when fregno is odd.  */
	  if (mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;

	  if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
	      <= FP_ARG_V4_MAX_REG)
	    return gen_rtx_REG (mode, cum->fregno);
	  else
	    return NULL_RTX;
	}
      else
	{
	  int n_words = rs6000_arg_size (mode, type);
	  int gregno = cum->sysv_gregno;

	  /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
	     (r7,r8) or (r9,r10).  As does any other 2 word item such
	     as complex int due to a historical mistake.  */
	  if (n_words == 2)
	    gregno += (1 - gregno) & 1;

	  /* Multi-reg args are not split between registers and stack.  */
	  if (gregno + n_words - 1 > GP_ARG_MAX_REG)
	    return NULL_RTX;

	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type,
					      gregno - GP_ARG_MIN_REG);
	  return gen_rtx_REG (mode, gregno);
	}
    }
  else
    {
      int align_words = rs6000_parm_start (mode, type, cum->words);

      /* _Decimal128 must be passed in an even/odd float register pair.
	 This assumes that the register number is odd when fregno is odd.  */
      if (elt_mode == TDmode && (cum->fregno % 2) == 1)
	cum->fregno++;

      if (USE_FP_FOR_ARG_P (cum, elt_mode))
	{
	  rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
	  rtx r, off;
	  int i, k = 0;
	  unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
	  int fpr_words;

	  /* Do we also need to pass this argument in the parameter
	     save area?  */
	  if (type && (cum->nargs_prototype <= 0
		       || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
			   && TARGET_XL_COMPAT
			   && align_words >= GP_ARG_NUM_REG)))
	    k = rs6000_psave_function_arg (mode, type, align_words, rvec);

	  /* Describe where this argument goes in the fprs.  */
	  for (i = 0; i < n_elts
		      && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
	    {
	      /* Check if the argument is split over registers and memory.
		 This can only ever happen for long double or _Decimal128;
		 complex types are handled via split_complex_arg.  */
	      enum machine_mode fmode = elt_mode;
	      if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
		{
		  gcc_assert (fmode == TFmode || fmode == TDmode);
		  fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
		}

	      r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
	      off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
	      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
	    }

	  /* If there were not enough FPRs to hold the argument, the rest
	     usually goes into memory.  However, if the current position
	     is still within the register parameter area, a portion may
	     actually have to go into GPRs.

	     Note that it may happen that the portion of the argument
	     passed in the first "half" of the first GPR was already
	     passed in the last FPR as well.

	     For unnamed arguments, we already set up GPRs to cover the
	     whole argument in rs6000_psave_function_arg, so there is
	     nothing further to do at this point.  */
	  fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
	  if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
	      && cum->nargs_prototype > 0)
	    {
	      static bool warned;

	      enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
	      int n_words = rs6000_arg_size (mode, type);

	      align_words += fpr_words;
	      n_words -= fpr_words;

	      do
		{
		  r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
		  off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
		  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
		}
	      while (++align_words < GP_ARG_NUM_REG && --n_words != 0);

	      if (!warned && warn_psabi)
		{
		  warned = true;
		  inform (input_location,
			  "the ABI of passing homogeneous float aggregates"
			  " has changed in GCC 5");
		}
	    }

	  return rs6000_finish_function_arg (mode, rvec, k);
	}
      else if (align_words < GP_ARG_NUM_REG)
	{
	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type, align_words);

	  return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
	}
      else
	return NULL_RTX;
    }
}
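/* Illustrative sketch (not a statement of the exact ABI in every
   configuration): for an AIX/ELFv2 call through an unprototyped
   declaration such as

       double f ();
       ...
       f (1.5);

   the FP case above also calls rs6000_psave_function_arg, so the
   returned PARALLEL names both the parameter-save-area GPR slot and
   the FPR, roughly

       (parallel:DF [(expr_list (reg:DF r3) (const_int 0))
		     (expr_list (reg:DF f1) (const_int 0))])

   letting the callee pick the value up from either place.  */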
/* For an arg passed partly in registers and partly in memory, this is
   the number of bytes passed in registers.  For args passed entirely in
   registers or entirely in memory, zero.  When an arg is described by a
   PARALLEL, perhaps using more than one register type, this function
   returns the number of bytes used by the first element of the PARALLEL.  */

static int
rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
			  tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  bool passed_in_gprs = true;
  int ret = 0;
  int align_words;
  enum machine_mode elt_mode;
  int n_elts;

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  if (DEFAULT_ABI == ABI_V4)
    return 0;

  if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
    {
      /* If we are passing this arg in the fixed parameter save area
	 (gprs or memory) as well as VRs, we do not use the partial
	 bytes mechanism; instead, rs6000_function_arg will return a
	 PARALLEL including a memory element as necessary.  */
      if (TARGET_64BIT && ! cum->prototype)
	return 0;

      /* Otherwise, we pass in VRs only.  Check for partial copies.  */
      passed_in_gprs = false;
      if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
	ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
    }

  /* In this complicated case we just disable the partial_nregs code.  */
  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    return 0;

  align_words = rs6000_parm_start (mode, type, cum->words);

  if (USE_FP_FOR_ARG_P (cum, elt_mode))
    {
      unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;

      /* If we are passing this arg in the fixed parameter save area
	 (gprs or memory) as well as FPRs, we do not use the partial
	 bytes mechanism; instead, rs6000_function_arg will return a
	 PARALLEL including a memory element as necessary.  */
      if (type
	  && (cum->nargs_prototype <= 0
	      || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
		  && TARGET_XL_COMPAT
		  && align_words >= GP_ARG_NUM_REG)))
	return 0;

      /* Otherwise, we pass in FPRs only.  Check for partial copies.  */
      passed_in_gprs = false;
      if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
	{
	  /* Compute number of bytes / words passed in FPRs.  If there
	     is still space available in the register parameter area
	     *after* that amount, a part of the argument will be passed
	     in GPRs.  In that case, the total amount passed in any
	     registers is equal to the amount that would have been passed
	     in GPRs if everything were passed there, so we fall back to
	     the GPR code below to compute the appropriate value.  */
	  int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
		     * MIN (8, GET_MODE_SIZE (elt_mode)));
	  int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);

	  if (align_words + fpr_words < GP_ARG_NUM_REG)
	    passed_in_gprs = true;
	  else
	    ret = fpr;
	}
    }

  if (passed_in_gprs
      && align_words < GP_ARG_NUM_REG
      && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
    ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);

  if (ret != 0 && TARGET_DEBUG_ARG)
    fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);

  return ret;
}
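/* Worked example (illustrative): in 64-bit mode a 24-byte struct that
   arrives when only the last two GPRs (r9, r10) are free occupies those
   two registers plus one stack doubleword, so the function above returns
   (8 - 6) * 8 = 16, the number of bytes that did land in registers.  */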
/* A C expression that indicates when an argument must be passed by
   reference.  If nonzero for an argument, a copy of that argument is
   made in memory and a pointer to the argument is passed instead of
   the argument itself.  The pointer is passed in whatever way is
   appropriate for passing a pointer to that type.

   Under V.4, aggregates and long double are passed by reference.

   As an extension to all 32-bit ABIs, AltiVec vectors are passed by
   reference unless the AltiVec vector extension ABI is in force.

   As an extension to all ABIs, variable sized types are passed by
   reference.  */

static bool
rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
			  enum machine_mode mode, const_tree type,
			  bool named ATTRIBUTE_UNUSED)
{
  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
      return 1;
    }

  if (!type)
    return 0;

  if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
      return 1;
    }

  if (int_size_in_bytes (type) < 0)
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
      return 1;
    }

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
      return 1;
    }

  /* Pass synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_pass_big_vectors = false;
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
      if (!warned_for_pass_big_vectors)
	{
	  warning (0, "GCC vector passed by reference: "
		   "non-standard ABI extension with no compatibility guarantee");
	  warned_for_pass_big_vectors = true;
	}
      return 1;
    }

  return 0;
}
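/* For example (illustrative): under the V.4 ABI a struct parameter such as

       struct big { int a[8]; };
       void f (struct big b);

   is received as a pointer to a caller-made copy, whereas the same code
   built for the AIX or ELFv2 ABIs receives the struct by value, spread
   across registers and/or the parameter save area.  */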
/* Process parameter of type TYPE after ARGS_SO_FAR parameters were
   already processed.  Return true if the parameter must be passed
   (fully or partially) on the stack.  */

static bool
rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
{
  enum machine_mode mode;
  int unsignedp;
  rtx entry_parm;

  /* Catch errors.  */
  if (type == NULL || type == error_mark_node)
    return true;

  /* Handle types with no storage requirement.  */
  if (TYPE_MODE (type) == VOIDmode)
    return false;

  /* Handle complex types.  */
  if (TREE_CODE (type) == COMPLEX_TYPE)
    return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
	    || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));

  /* Handle transparent aggregates.  */
  if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
      && TYPE_TRANSPARENT_AGGR (type))
    type = TREE_TYPE (first_field (type));

  /* See if this arg was passed by invisible reference.  */
  if (pass_by_reference (get_cumulative_args (args_so_far),
			 TYPE_MODE (type), type, true))
    type = build_pointer_type (type);

  /* Find mode as it is passed by the ABI.  */
  unsignedp = TYPE_UNSIGNED (type);
  mode = promote_mode (type, TYPE_MODE (type), &unsignedp);

  /* If we must pass in stack, we need a stack.  */
  if (rs6000_must_pass_in_stack (mode, type))
    return true;

  /* If there is no incoming register, we need a stack.  */
  entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
  if (entry_parm == NULL)
    return true;

  /* Likewise if we need to pass both in registers and on the stack.  */
  if (GET_CODE (entry_parm) == PARALLEL
      && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
    return true;

  /* Also true if we're partially in registers and partially not.  */
  if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
    return true;

  /* Update info on where next arg arrives in registers.  */
  rs6000_function_arg_advance (args_so_far, mode, type, true);
  return false;
}
/* Return true if FUN has no prototype, has a variable argument
   list, or passes any parameter in memory.  */

static bool
rs6000_function_parms_need_stack (tree fun, bool incoming)
{
  tree fntype, result;
  CUMULATIVE_ARGS args_so_far_v;
  cumulative_args_t args_so_far;

  if (!fun)
    /* Must be a libcall, all of which only use reg parms.  */
    return false;

  fntype = fun;
  if (!TYPE_P (fun))
    fntype = TREE_TYPE (fun);

  /* Varargs functions need the parameter save area.  */
  if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
    return true;

  INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
  args_so_far = pack_cumulative_args (&args_so_far_v);

  /* When incoming, we will have been passed the function decl.
     It is necessary to use the decl to handle K&R style functions,
     where TYPE_ARG_TYPES may not be available.  */
  if (incoming)
    {
      gcc_assert (DECL_P (fun));
      result = DECL_RESULT (fun);
    }
  else
    result = TREE_TYPE (fntype);

  if (result && aggregate_value_p (result, fntype))
    {
      if (!TYPE_P (result))
	result = TREE_TYPE (result);
      result = build_pointer_type (result);
      rs6000_parm_needs_stack (args_so_far, result);
    }

  if (incoming)
    {
      tree parm;

      for (parm = DECL_ARGUMENTS (fun);
	   parm && parm != void_list_node;
	   parm = TREE_CHAIN (parm))
	if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
	  return true;
    }
  else
    {
      function_args_iterator args_iter;
      tree arg_type;

      FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
	if (rs6000_parm_needs_stack (args_so_far, arg_type))
	  return true;
    }

  return false;
}
/* Return the size of the REG_PARM_STACK_SPACE area for FUN.  This is
   usually a constant depending on the ABI.  However, in the ELFv2 ABI
   the register parameter area is optional when calling a function that
   has a prototype in scope, has no variable argument list, and passes
   all parameters in registers.  */

static int
rs6000_reg_parm_stack_space (tree fun, bool incoming)
{
  int reg_parm_stack_space;

  switch (DEFAULT_ABI)
    {
    default:
      reg_parm_stack_space = 0;
      break;

    case ABI_AIX:
    case ABI_DARWIN:
      reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
      break;

    case ABI_ELFv2:
      /* ??? Recomputing this every time is a bit expensive.  Is there
	 a place to cache this information?  */
      if (rs6000_function_parms_need_stack (fun, incoming))
	reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
      else
	reg_parm_stack_space = 0;
      break;
    }

  return reg_parm_stack_space;
}
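/* For example (illustrative): with the ELFv2 ABI a call such as

       extern int add (int a, int b);
       ... add (1, 2);

   needs no parameter save area at all, while a call to a variadic
   function like printf still reserves the full 64 bytes (8 doublewords)
   so the callee's va_arg machinery has somewhere to spill r3-r10.  */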
/* Store NREGS consecutive argument registers starting at REGNO into the
   block of memory X.  */

static void
rs6000_move_block_from_reg (int regno, rtx x, int nregs)
{
  int i;
  enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;

  if (nregs == 0)
    return;

  for (i = 0; i < nregs; i++)
    {
      rtx tem = adjust_address_nv (x, reg_mode,
				   i * GET_MODE_SIZE (reg_mode));
      if (reload_completed)
	{
	  if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
	    tem = NULL_RTX;
	  else
	    tem = simplify_gen_subreg (reg_mode, x, BLKmode,
				       i * GET_MODE_SIZE (reg_mode));
	}
      else
	tem = replace_equiv_address (tem, XEXP (tem, 0));

      gcc_assert (tem);

      emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
    }
}
/* Perform any actions needed for a function that is receiving a
   variable number of arguments.

   CUM is as above.

   MODE and TYPE are the mode and type of the current parameter.

   PRETEND_SIZE is a variable that should be set to the amount of stack
   that must be pushed by the prolog to pretend that our caller pushed
   it.

   Normally, this macro will push all remaining incoming registers on the
   stack and set PRETEND_SIZE to the length of the registers pushed.  */

static void
setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
			tree type, int *pretend_size ATTRIBUTE_UNUSED,
			int no_rtl)
{
10837 CUMULATIVE_ARGS next_cum
;
10838 int reg_size
= TARGET_32BIT
? 4 : 8;
10839 rtx save_area
= NULL_RTX
, mem
;
10840 int first_reg_offset
;
10841 alias_set_type set
;
10843 /* Skip the last named argument. */
10844 next_cum
= *get_cumulative_args (cum
);
10845 rs6000_function_arg_advance_1 (&next_cum
, mode
, type
, true, 0);
10847 if (DEFAULT_ABI
== ABI_V4
)
10849 first_reg_offset
= next_cum
.sysv_gregno
- GP_ARG_MIN_REG
;
10853 int gpr_reg_num
= 0, gpr_size
= 0, fpr_size
= 0;
10854 HOST_WIDE_INT offset
= 0;
10856 /* Try to optimize the size of the varargs save area.
10857 The ABI requires that ap.reg_save_area is doubleword
10858 aligned, but we don't need to allocate space for all
10859 the bytes, only those to which we actually will save
10861 if (cfun
->va_list_gpr_size
&& first_reg_offset
< GP_ARG_NUM_REG
)
10862 gpr_reg_num
= GP_ARG_NUM_REG
- first_reg_offset
;
10863 if (TARGET_HARD_FLOAT
&& TARGET_FPRS
10864 && next_cum
.fregno
<= FP_ARG_V4_MAX_REG
10865 && cfun
->va_list_fpr_size
)
10868 fpr_size
= (next_cum
.fregno
- FP_ARG_MIN_REG
)
10869 * UNITS_PER_FP_WORD
;
10870 if (cfun
->va_list_fpr_size
10871 < FP_ARG_V4_MAX_REG
+ 1 - next_cum
.fregno
)
10872 fpr_size
+= cfun
->va_list_fpr_size
* UNITS_PER_FP_WORD
;
10874 fpr_size
+= (FP_ARG_V4_MAX_REG
+ 1 - next_cum
.fregno
)
10875 * UNITS_PER_FP_WORD
;
10879 offset
= -((first_reg_offset
* reg_size
) & ~7);
10880 if (!fpr_size
&& gpr_reg_num
> cfun
->va_list_gpr_size
)
10882 gpr_reg_num
= cfun
->va_list_gpr_size
;
10883 if (reg_size
== 4 && (first_reg_offset
& 1))
10886 gpr_size
= (gpr_reg_num
* reg_size
+ 7) & ~7;
10889 offset
= - (int) (next_cum
.fregno
- FP_ARG_MIN_REG
)
10890 * UNITS_PER_FP_WORD
10891 - (int) (GP_ARG_NUM_REG
* reg_size
);
10893 if (gpr_size
+ fpr_size
)
10896 = assign_stack_local (BLKmode
, gpr_size
+ fpr_size
, 64);
10897 gcc_assert (GET_CODE (reg_save_area
) == MEM
);
10898 reg_save_area
= XEXP (reg_save_area
, 0);
10899 if (GET_CODE (reg_save_area
) == PLUS
)
10901 gcc_assert (XEXP (reg_save_area
, 0)
10902 == virtual_stack_vars_rtx
);
10903 gcc_assert (GET_CODE (XEXP (reg_save_area
, 1)) == CONST_INT
);
10904 offset
+= INTVAL (XEXP (reg_save_area
, 1));
10907 gcc_assert (reg_save_area
== virtual_stack_vars_rtx
);
10910 cfun
->machine
->varargs_save_offset
= offset
;
10911 save_area
= plus_constant (Pmode
, virtual_stack_vars_rtx
, offset
);
10916 first_reg_offset
= next_cum
.words
;
10917 save_area
= virtual_incoming_args_rtx
;
10919 if (targetm
.calls
.must_pass_in_stack (mode
, type
))
10920 first_reg_offset
+= rs6000_arg_size (TYPE_MODE (type
), type
);
10923 set
= get_varargs_alias_set ();
10924 if (! no_rtl
&& first_reg_offset
< GP_ARG_NUM_REG
10925 && cfun
->va_list_gpr_size
)
10927 int n_gpr
, nregs
= GP_ARG_NUM_REG
- first_reg_offset
;
10929 if (va_list_gpr_counter_field
)
10930 /* V4 va_list_gpr_size counts number of registers needed. */
10931 n_gpr
= cfun
->va_list_gpr_size
;
10933 /* char * va_list instead counts number of bytes needed. */
10934 n_gpr
= (cfun
->va_list_gpr_size
+ reg_size
- 1) / reg_size
;
10939 mem
= gen_rtx_MEM (BLKmode
,
10940 plus_constant (Pmode
, save_area
,
10941 first_reg_offset
* reg_size
));
10942 MEM_NOTRAP_P (mem
) = 1;
10943 set_mem_alias_set (mem
, set
);
10944 set_mem_align (mem
, BITS_PER_WORD
);
10946 rs6000_move_block_from_reg (GP_ARG_MIN_REG
+ first_reg_offset
, mem
,
10950 /* Save FP registers if needed. */
10951 if (DEFAULT_ABI
== ABI_V4
10952 && TARGET_HARD_FLOAT
&& TARGET_FPRS
10954 && next_cum
.fregno
<= FP_ARG_V4_MAX_REG
10955 && cfun
->va_list_fpr_size
)
10957 int fregno
= next_cum
.fregno
, nregs
;
10958 rtx cr1
= gen_rtx_REG (CCmode
, CR1_REGNO
);
10959 rtx lab
= gen_label_rtx ();
10960 int off
= (GP_ARG_NUM_REG
* reg_size
) + ((fregno
- FP_ARG_MIN_REG
)
10961 * UNITS_PER_FP_WORD
);
10964 (gen_rtx_SET (VOIDmode
,
10966 gen_rtx_IF_THEN_ELSE (VOIDmode
,
10967 gen_rtx_NE (VOIDmode
, cr1
,
10969 gen_rtx_LABEL_REF (VOIDmode
, lab
),
10973 fregno
<= FP_ARG_V4_MAX_REG
&& nregs
< cfun
->va_list_fpr_size
;
10974 fregno
++, off
+= UNITS_PER_FP_WORD
, nregs
++)
10976 mem
= gen_rtx_MEM ((TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
10978 plus_constant (Pmode
, save_area
, off
));
10979 MEM_NOTRAP_P (mem
) = 1;
10980 set_mem_alias_set (mem
, set
);
10981 set_mem_align (mem
, GET_MODE_ALIGNMENT (
10982 (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
10983 ? DFmode
: SFmode
));
10984 emit_move_insn (mem
, gen_rtx_REG (
10985 (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
10986 ? DFmode
: SFmode
, fregno
));
/* Create the va_list data type.  */

static tree
rs6000_build_builtin_va_list (void)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;

  /* For AIX, prefer 'char *' because that's what the system
     header files like.  */
  if (DEFAULT_ABI != ABI_V4)
    return build_pointer_type (char_type_node);

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
			  get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
		      unsigned_char_type_node);
  f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
		      unsigned_char_type_node);
  /* Give the two bytes of padding a name, so that -Wpadded won't warn on
     every user file.  */
  f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("reserved"), short_unsigned_type_node);
  f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("overflow_arg_area"),
		      ptr_type_node);
  f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("reg_save_area"),
		      ptr_type_node);

  va_list_gpr_counter_field = f_gpr;
  va_list_fpr_counter_field = f_fpr;

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_res) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  DECL_CHAIN (f_gpr) = f_fpr;
  DECL_CHAIN (f_fpr) = f_res;
  DECL_CHAIN (f_res) = f_ovf;
  DECL_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}
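/* The record built above corresponds roughly to the following C
   declaration (a sketch of the V.4 va_list layout; the other ABIs use a
   plain 'char *'):

       typedef struct __va_list_tag
       {
	 unsigned char gpr;		/- gp argument registers consumed -/
	 unsigned char fpr;		/- fp argument registers consumed -/
	 unsigned short reserved;	/- padding, named to quiet -Wpadded -/
	 void *overflow_arg_area;	/- arguments passed on the stack -/
	 void *reg_save_area;		/- block holding saved arg registers -/
       } __va_list_tag;

       typedef __va_list_tag __gnuc_va_list[1];
*/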
11047 /* Implement va_start. */
11050 rs6000_va_start (tree valist
, rtx nextarg
)
11052 HOST_WIDE_INT words
, n_gpr
, n_fpr
;
11053 tree f_gpr
, f_fpr
, f_res
, f_ovf
, f_sav
;
11054 tree gpr
, fpr
, ovf
, sav
, t
;
11056 /* Only SVR4 needs something special. */
11057 if (DEFAULT_ABI
!= ABI_V4
)
11059 std_expand_builtin_va_start (valist
, nextarg
);
11063 f_gpr
= TYPE_FIELDS (TREE_TYPE (va_list_type_node
));
11064 f_fpr
= DECL_CHAIN (f_gpr
);
11065 f_res
= DECL_CHAIN (f_fpr
);
11066 f_ovf
= DECL_CHAIN (f_res
);
11067 f_sav
= DECL_CHAIN (f_ovf
);
11069 valist
= build_simple_mem_ref (valist
);
11070 gpr
= build3 (COMPONENT_REF
, TREE_TYPE (f_gpr
), valist
, f_gpr
, NULL_TREE
);
11071 fpr
= build3 (COMPONENT_REF
, TREE_TYPE (f_fpr
), unshare_expr (valist
),
11073 ovf
= build3 (COMPONENT_REF
, TREE_TYPE (f_ovf
), unshare_expr (valist
),
11075 sav
= build3 (COMPONENT_REF
, TREE_TYPE (f_sav
), unshare_expr (valist
),
11078 /* Count number of gp and fp argument registers used. */
11079 words
= crtl
->args
.info
.words
;
11080 n_gpr
= MIN (crtl
->args
.info
.sysv_gregno
- GP_ARG_MIN_REG
,
11082 n_fpr
= MIN (crtl
->args
.info
.fregno
- FP_ARG_MIN_REG
,
11085 if (TARGET_DEBUG_ARG
)
11086 fprintf (stderr
, "va_start: words = "HOST_WIDE_INT_PRINT_DEC
", n_gpr = "
11087 HOST_WIDE_INT_PRINT_DEC
", n_fpr = "HOST_WIDE_INT_PRINT_DEC
"\n",
11088 words
, n_gpr
, n_fpr
);
11090 if (cfun
->va_list_gpr_size
)
11092 t
= build2 (MODIFY_EXPR
, TREE_TYPE (gpr
), gpr
,
11093 build_int_cst (NULL_TREE
, n_gpr
));
11094 TREE_SIDE_EFFECTS (t
) = 1;
11095 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
11098 if (cfun
->va_list_fpr_size
)
11100 t
= build2 (MODIFY_EXPR
, TREE_TYPE (fpr
), fpr
,
11101 build_int_cst (NULL_TREE
, n_fpr
));
11102 TREE_SIDE_EFFECTS (t
) = 1;
11103 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
11105 #ifdef HAVE_AS_GNU_ATTRIBUTE
11106 if (call_ABI_of_interest (cfun
->decl
))
11107 rs6000_passes_float
= true;
11111 /* Find the overflow area. */
11112 t
= make_tree (TREE_TYPE (ovf
), virtual_incoming_args_rtx
);
11114 t
= fold_build_pointer_plus_hwi (t
, words
* UNITS_PER_WORD
);
11115 t
= build2 (MODIFY_EXPR
, TREE_TYPE (ovf
), ovf
, t
);
11116 TREE_SIDE_EFFECTS (t
) = 1;
11117 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
11119 /* If there were no va_arg invocations, don't set up the register
11121 if (!cfun
->va_list_gpr_size
11122 && !cfun
->va_list_fpr_size
11123 && n_gpr
< GP_ARG_NUM_REG
11124 && n_fpr
< FP_ARG_V4_MAX_REG
)
11127 /* Find the register save area. */
11128 t
= make_tree (TREE_TYPE (sav
), virtual_stack_vars_rtx
);
11129 if (cfun
->machine
->varargs_save_offset
)
11130 t
= fold_build_pointer_plus_hwi (t
, cfun
->machine
->varargs_save_offset
);
11131 t
= build2 (MODIFY_EXPR
, TREE_TYPE (sav
), sav
, t
);
11132 TREE_SIDE_EFFECTS (t
) = 1;
11133 expand_expr (t
, const0_rtx
, VOIDmode
, EXPAND_NORMAL
);
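/* Illustrative example (user-level view of the V.4 expansion above, with
   hypothetical names): in

       void log_ints (const char *fmt, ...)
       {
	 va_list ap;
	 va_start (ap, fmt);
	 ...
       }

   va_start leaves ap->gpr = 1 (only r3 carried FMT), ap->fpr = 0,
   ap->overflow_arg_area pointing at the first stack-passed argument
   slot, and ap->reg_save_area pointing at the block where the prologue
   dumped the remaining argument registers (r4-r10, plus f1-f8 when hard
   float is in use and the caller set the CR1 bit).  */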
11136 /* Implement va_arg. */
11139 rs6000_gimplify_va_arg (tree valist
, tree type
, gimple_seq
*pre_p
,
11140 gimple_seq
*post_p
)
11142 tree f_gpr
, f_fpr
, f_res
, f_ovf
, f_sav
;
11143 tree gpr
, fpr
, ovf
, sav
, reg
, t
, u
;
11144 int size
, rsize
, n_reg
, sav_ofs
, sav_scale
;
11145 tree lab_false
, lab_over
, addr
;
11147 tree ptrtype
= build_pointer_type_for_mode (type
, ptr_mode
, true);
11151 if (pass_by_reference (NULL
, TYPE_MODE (type
), type
, false))
11153 t
= rs6000_gimplify_va_arg (valist
, ptrtype
, pre_p
, post_p
);
11154 return build_va_arg_indirect_ref (t
);
  /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
     earlier version of gcc, with the property that it always applied alignment
     adjustments to the va-args (even for zero-sized types).  The cheapest way
     to deal with this is to replicate the effect of the part of
     std_gimplify_va_arg_expr that carries out the align adjust, for the case
     of zero-sized types.

     We don't need to check for pass-by-reference because of the test above.
     We can return a simplified answer, since we know there's no offset to
     add.  */

  if (((TARGET_MACHO
	&& rs6000_darwin64_abi)
       || DEFAULT_ABI == ABI_ELFv2
       || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
      && integer_zerop (TYPE_SIZE (type)))
    {
11172 unsigned HOST_WIDE_INT align
, boundary
;
11173 tree valist_tmp
= get_initialized_tmp_var (valist
, pre_p
, NULL
);
11174 align
= PARM_BOUNDARY
/ BITS_PER_UNIT
;
11175 boundary
= rs6000_function_arg_boundary (TYPE_MODE (type
), type
);
11176 if (boundary
> MAX_SUPPORTED_STACK_ALIGNMENT
)
11177 boundary
= MAX_SUPPORTED_STACK_ALIGNMENT
;
11178 boundary
/= BITS_PER_UNIT
;
11179 if (boundary
> align
)
11182 /* This updates arg ptr by the amount that would be necessary
11183 to align the zero-sized (but not zero-alignment) item. */
11184 t
= build2 (MODIFY_EXPR
, TREE_TYPE (valist
), valist_tmp
,
11185 fold_build_pointer_plus_hwi (valist_tmp
, boundary
- 1));
11186 gimplify_and_add (t
, pre_p
);
11188 t
= fold_convert (sizetype
, valist_tmp
);
11189 t
= build2 (MODIFY_EXPR
, TREE_TYPE (valist
), valist_tmp
,
11190 fold_convert (TREE_TYPE (valist
),
11191 fold_build2 (BIT_AND_EXPR
, sizetype
, t
,
11192 size_int (-boundary
))));
11193 t
= build2 (MODIFY_EXPR
, TREE_TYPE (valist
), valist
, t
);
11194 gimplify_and_add (t
, pre_p
);
11196 /* Since it is zero-sized there's no increment for the item itself. */
11197 valist_tmp
= fold_convert (build_pointer_type (type
), valist_tmp
);
11198 return build_va_arg_indirect_ref (valist_tmp
);
11201 if (DEFAULT_ABI
!= ABI_V4
)
11203 if (targetm
.calls
.split_complex_arg
&& TREE_CODE (type
) == COMPLEX_TYPE
)
11205 tree elem_type
= TREE_TYPE (type
);
11206 enum machine_mode elem_mode
= TYPE_MODE (elem_type
);
11207 int elem_size
= GET_MODE_SIZE (elem_mode
);
11209 if (elem_size
< UNITS_PER_WORD
)
11211 tree real_part
, imag_part
;
11212 gimple_seq post
= NULL
;
11214 real_part
= rs6000_gimplify_va_arg (valist
, elem_type
, pre_p
,
11216 /* Copy the value into a temporary, lest the formal temporary
11217 be reused out from under us. */
11218 real_part
= get_initialized_tmp_var (real_part
, pre_p
, &post
);
11219 gimple_seq_add_seq (pre_p
, post
);
11221 imag_part
= rs6000_gimplify_va_arg (valist
, elem_type
, pre_p
,
11224 return build2 (COMPLEX_EXPR
, type
, real_part
, imag_part
);
11228 return std_gimplify_va_arg_expr (valist
, type
, pre_p
, post_p
);
11231 f_gpr
= TYPE_FIELDS (TREE_TYPE (va_list_type_node
));
11232 f_fpr
= DECL_CHAIN (f_gpr
);
11233 f_res
= DECL_CHAIN (f_fpr
);
11234 f_ovf
= DECL_CHAIN (f_res
);
11235 f_sav
= DECL_CHAIN (f_ovf
);
11237 valist
= build_va_arg_indirect_ref (valist
);
11238 gpr
= build3 (COMPONENT_REF
, TREE_TYPE (f_gpr
), valist
, f_gpr
, NULL_TREE
);
11239 fpr
= build3 (COMPONENT_REF
, TREE_TYPE (f_fpr
), unshare_expr (valist
),
11241 ovf
= build3 (COMPONENT_REF
, TREE_TYPE (f_ovf
), unshare_expr (valist
),
11243 sav
= build3 (COMPONENT_REF
, TREE_TYPE (f_sav
), unshare_expr (valist
),
11246 size
= int_size_in_bytes (type
);
11247 rsize
= (size
+ 3) / 4;
11250 if (TARGET_HARD_FLOAT
&& TARGET_FPRS
11251 && ((TARGET_SINGLE_FLOAT
&& TYPE_MODE (type
) == SFmode
)
11252 || (TARGET_DOUBLE_FLOAT
11253 && (TYPE_MODE (type
) == DFmode
11254 || TYPE_MODE (type
) == TFmode
11255 || TYPE_MODE (type
) == SDmode
11256 || TYPE_MODE (type
) == DDmode
11257 || TYPE_MODE (type
) == TDmode
))))
11259 /* FP args go in FP registers, if present. */
11261 n_reg
= (size
+ 7) / 8;
11262 sav_ofs
= ((TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
) ? 8 : 4) * 4;
11263 sav_scale
= ((TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
) ? 8 : 4);
11264 if (TYPE_MODE (type
) != SFmode
&& TYPE_MODE (type
) != SDmode
)
11269 /* Otherwise into GP registers. */
11278 /* Pull the value out of the saved registers.... */
11281 addr
= create_tmp_var (ptr_type_node
, "addr");
11283 /* AltiVec vectors never go in registers when -mabi=altivec. */
11284 if (TARGET_ALTIVEC_ABI
&& ALTIVEC_VECTOR_MODE (TYPE_MODE (type
)))
11288 lab_false
= create_artificial_label (input_location
);
11289 lab_over
= create_artificial_label (input_location
);
11291 /* Long long and SPE vectors are aligned in the registers.
11292 As are any other 2 gpr item such as complex int due to a
11293 historical mistake. */
11295 if (n_reg
== 2 && reg
== gpr
)
11298 u
= build2 (BIT_AND_EXPR
, TREE_TYPE (reg
), unshare_expr (reg
),
11299 build_int_cst (TREE_TYPE (reg
), n_reg
- 1));
11300 u
= build2 (POSTINCREMENT_EXPR
, TREE_TYPE (reg
),
11301 unshare_expr (reg
), u
);
11303 /* _Decimal128 is passed in even/odd fpr pairs; the stored
11304 reg number is 0 for f1, so we want to make it odd. */
11305 else if (reg
== fpr
&& TYPE_MODE (type
) == TDmode
)
11307 t
= build2 (BIT_IOR_EXPR
, TREE_TYPE (reg
), unshare_expr (reg
),
11308 build_int_cst (TREE_TYPE (reg
), 1));
11309 u
= build2 (MODIFY_EXPR
, void_type_node
, unshare_expr (reg
), t
);
11312 t
= fold_convert (TREE_TYPE (reg
), size_int (8 - n_reg
+ 1));
11313 t
= build2 (GE_EXPR
, boolean_type_node
, u
, t
);
11314 u
= build1 (GOTO_EXPR
, void_type_node
, lab_false
);
11315 t
= build3 (COND_EXPR
, void_type_node
, t
, u
, NULL_TREE
);
11316 gimplify_and_add (t
, pre_p
);
11320 t
= fold_build_pointer_plus_hwi (sav
, sav_ofs
);
11322 u
= build2 (POSTINCREMENT_EXPR
, TREE_TYPE (reg
), unshare_expr (reg
),
11323 build_int_cst (TREE_TYPE (reg
), n_reg
));
11324 u
= fold_convert (sizetype
, u
);
11325 u
= build2 (MULT_EXPR
, sizetype
, u
, size_int (sav_scale
));
11326 t
= fold_build_pointer_plus (t
, u
);
11328 /* _Decimal32 varargs are located in the second word of the 64-bit
11329 FP register for 32-bit binaries. */
11330 if (!TARGET_POWERPC64
11331 && TARGET_HARD_FLOAT
&& TARGET_FPRS
11332 && TYPE_MODE (type
) == SDmode
)
11333 t
= fold_build_pointer_plus_hwi (t
, size
);
11335 gimplify_assign (addr
, t
, pre_p
);
11337 gimple_seq_add_stmt (pre_p
, gimple_build_goto (lab_over
));
11339 stmt
= gimple_build_label (lab_false
);
11340 gimple_seq_add_stmt (pre_p
, stmt
);
11342 if ((n_reg
== 2 && !regalign
) || n_reg
> 2)
      /* Ensure that we don't find any more args in regs.
	 Alignment has taken care of the special cases.  */
11346 gimplify_assign (reg
, build_int_cst (TREE_TYPE (reg
), 8), pre_p
);
11350 /* ... otherwise out of the overflow area. */
11352 /* Care for on-stack alignment if needed. */
11356 t
= fold_build_pointer_plus_hwi (t
, align
- 1);
11357 t
= build2 (BIT_AND_EXPR
, TREE_TYPE (t
), t
,
11358 build_int_cst (TREE_TYPE (t
), -align
));
11360 gimplify_expr (&t
, pre_p
, NULL
, is_gimple_val
, fb_rvalue
);
11362 gimplify_assign (unshare_expr (addr
), t
, pre_p
);
11364 t
= fold_build_pointer_plus_hwi (t
, size
);
11365 gimplify_assign (unshare_expr (ovf
), t
, pre_p
);
11369 stmt
= gimple_build_label (lab_over
);
11370 gimple_seq_add_stmt (pre_p
, stmt
);
11373 if (STRICT_ALIGNMENT
11374 && (TYPE_ALIGN (type
)
11375 > (unsigned) BITS_PER_UNIT
* (align
< 4 ? 4 : align
)))
11377 /* The value (of type complex double, for example) may not be
11378 aligned in memory in the saved registers, so copy via a
11379 temporary. (This is the same code as used for SPARC.) */
11380 tree tmp
= create_tmp_var (type
, "va_arg_tmp");
11381 tree dest_addr
= build_fold_addr_expr (tmp
);
11383 tree copy
= build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY
),
11384 3, dest_addr
, addr
, size_int (rsize
* 4));
11386 gimplify_and_add (copy
, pre_p
);
  addr = fold_convert (ptrtype, addr);
  return build_va_arg_indirect_ref (addr);
}

/* Define a builtin function NAME of type TYPE with builtin code CODE,
   recording its decl and setting the tree attributes implied by its
   classification.  */

static void
def_builtin (const char *name, tree type, enum rs6000_builtins code)
{
  tree t;
  unsigned classify = rs6000_builtin_info[(int)code].attr;
  const char *attr_string = "";

  gcc_assert (name != NULL);
  gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));

  if (rs6000_builtin_decls[(int)code])
    fatal_error ("internal error: builtin function %s already processed",
		 name);

  rs6000_builtin_decls[(int)code] = t =
    add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);

  /* Set any special attributes.  */
  if ((classify & RS6000_BTC_CONST) != 0)
    {
      /* const function, function only depends on the inputs.  */
      TREE_READONLY (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", const";
    }
  else if ((classify & RS6000_BTC_PURE) != 0)
    {
      /* pure function, function can read global memory, but does not set any
	 external state.  */
      DECL_PURE_P (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", pure";
    }
  else if ((classify & RS6000_BTC_FP) != 0)
    {
      /* Function is a math function.  If rounding mode is on, then treat the
	 function as not reading global memory, but it can have arbitrary side
	 effects.  If it is off, then assume the function is a const function.
	 This mimics the ATTR_MATHFN_FPROUNDING attribute in
	 builtin-attribute.def that is used for the math functions.  */
      TREE_NOTHROW (t) = 1;
      if (flag_rounding_math)
	{
	  DECL_PURE_P (t) = 1;
	  DECL_IS_NOVOPS (t) = 1;
	  attr_string = ", fp, pure";
	}
      else
	{
	  TREE_READONLY (t) = 1;
	  attr_string = ", fp, const";
	}
    }
  else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
    gcc_unreachable ();

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
	     (int)code, name, attr_string);
}
11456 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
11458 #undef RS6000_BUILTIN_1
11459 #undef RS6000_BUILTIN_2
11460 #undef RS6000_BUILTIN_3
11461 #undef RS6000_BUILTIN_A
11462 #undef RS6000_BUILTIN_D
11463 #undef RS6000_BUILTIN_E
11464 #undef RS6000_BUILTIN_H
11465 #undef RS6000_BUILTIN_P
11466 #undef RS6000_BUILTIN_Q
11467 #undef RS6000_BUILTIN_S
11468 #undef RS6000_BUILTIN_X
11470 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11471 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11472 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
11473 { MASK, ICODE, NAME, ENUM },
11475 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11476 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11477 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11478 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11479 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11480 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11481 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11482 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11484 static const struct builtin_description bdesc_3arg
[] =
11486 #include "rs6000-builtin.def"
11489 /* DST operations: void foo (void *, const int, const char). */
11491 #undef RS6000_BUILTIN_1
11492 #undef RS6000_BUILTIN_2
11493 #undef RS6000_BUILTIN_3
11494 #undef RS6000_BUILTIN_A
11495 #undef RS6000_BUILTIN_D
11496 #undef RS6000_BUILTIN_E
11497 #undef RS6000_BUILTIN_H
11498 #undef RS6000_BUILTIN_P
11499 #undef RS6000_BUILTIN_Q
11500 #undef RS6000_BUILTIN_S
11501 #undef RS6000_BUILTIN_X
11503 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11504 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11505 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11506 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11507 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
11508 { MASK, ICODE, NAME, ENUM },
11510 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11511 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11512 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11513 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11514 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11515 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11517 static const struct builtin_description bdesc_dst
[] =
11519 #include "rs6000-builtin.def"
11522 /* Simple binary operations: VECc = foo (VECa, VECb). */
11524 #undef RS6000_BUILTIN_1
11525 #undef RS6000_BUILTIN_2
11526 #undef RS6000_BUILTIN_3
11527 #undef RS6000_BUILTIN_A
11528 #undef RS6000_BUILTIN_D
11529 #undef RS6000_BUILTIN_E
11530 #undef RS6000_BUILTIN_H
11531 #undef RS6000_BUILTIN_P
11532 #undef RS6000_BUILTIN_Q
11533 #undef RS6000_BUILTIN_S
11534 #undef RS6000_BUILTIN_X
11536 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11537 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
11538 { MASK, ICODE, NAME, ENUM },
11540 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11541 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11542 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11543 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11544 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11545 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11546 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11547 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11548 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11550 static const struct builtin_description bdesc_2arg
[] =
11552 #include "rs6000-builtin.def"
11555 #undef RS6000_BUILTIN_1
11556 #undef RS6000_BUILTIN_2
11557 #undef RS6000_BUILTIN_3
11558 #undef RS6000_BUILTIN_A
11559 #undef RS6000_BUILTIN_D
11560 #undef RS6000_BUILTIN_E
11561 #undef RS6000_BUILTIN_H
11562 #undef RS6000_BUILTIN_P
11563 #undef RS6000_BUILTIN_Q
11564 #undef RS6000_BUILTIN_S
11565 #undef RS6000_BUILTIN_X
11567 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11568 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11569 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11570 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11571 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11572 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11573 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11574 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
11575 { MASK, ICODE, NAME, ENUM },
11577 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11578 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11579 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11581 /* AltiVec predicates. */
11583 static const struct builtin_description bdesc_altivec_preds
[] =
11585 #include "rs6000-builtin.def"
11588 /* SPE predicates. */
11589 #undef RS6000_BUILTIN_1
11590 #undef RS6000_BUILTIN_2
11591 #undef RS6000_BUILTIN_3
11592 #undef RS6000_BUILTIN_A
11593 #undef RS6000_BUILTIN_D
11594 #undef RS6000_BUILTIN_E
11595 #undef RS6000_BUILTIN_H
11596 #undef RS6000_BUILTIN_P
11597 #undef RS6000_BUILTIN_Q
11598 #undef RS6000_BUILTIN_S
11599 #undef RS6000_BUILTIN_X
11601 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11602 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11603 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11604 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11605 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11606 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11607 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11608 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11609 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11610 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
11611 { MASK, ICODE, NAME, ENUM },
11613 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11615 static const struct builtin_description bdesc_spe_predicates
[] =
11617 #include "rs6000-builtin.def"
11620 /* SPE evsel predicates. */
11621 #undef RS6000_BUILTIN_1
11622 #undef RS6000_BUILTIN_2
11623 #undef RS6000_BUILTIN_3
11624 #undef RS6000_BUILTIN_A
11625 #undef RS6000_BUILTIN_D
11626 #undef RS6000_BUILTIN_E
11627 #undef RS6000_BUILTIN_H
11628 #undef RS6000_BUILTIN_P
11629 #undef RS6000_BUILTIN_Q
11630 #undef RS6000_BUILTIN_S
11631 #undef RS6000_BUILTIN_X
11633 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11634 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11635 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11636 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11637 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11638 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
11639 { MASK, ICODE, NAME, ENUM },
11641 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11642 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11643 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11644 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11645 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11647 static const struct builtin_description bdesc_spe_evsel
[] =
11649 #include "rs6000-builtin.def"
11652 /* PAIRED predicates. */
11653 #undef RS6000_BUILTIN_1
11654 #undef RS6000_BUILTIN_2
11655 #undef RS6000_BUILTIN_3
11656 #undef RS6000_BUILTIN_A
11657 #undef RS6000_BUILTIN_D
11658 #undef RS6000_BUILTIN_E
11659 #undef RS6000_BUILTIN_H
11660 #undef RS6000_BUILTIN_P
11661 #undef RS6000_BUILTIN_Q
11662 #undef RS6000_BUILTIN_S
11663 #undef RS6000_BUILTIN_X
11665 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11666 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11667 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11668 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11669 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11670 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11671 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11672 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11673 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
11674 { MASK, ICODE, NAME, ENUM },
11676 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11677 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11679 static const struct builtin_description bdesc_paired_preds
[] =
11681 #include "rs6000-builtin.def"
11684 /* ABS* operations. */
11686 #undef RS6000_BUILTIN_1
11687 #undef RS6000_BUILTIN_2
11688 #undef RS6000_BUILTIN_3
11689 #undef RS6000_BUILTIN_A
11690 #undef RS6000_BUILTIN_D
11691 #undef RS6000_BUILTIN_E
11692 #undef RS6000_BUILTIN_H
11693 #undef RS6000_BUILTIN_P
11694 #undef RS6000_BUILTIN_Q
11695 #undef RS6000_BUILTIN_S
11696 #undef RS6000_BUILTIN_X
11698 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11699 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11700 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11701 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
11702 { MASK, ICODE, NAME, ENUM },
11704 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11705 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11706 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11707 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11708 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11709 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11710 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11712 static const struct builtin_description bdesc_abs
[] =
11714 #include "rs6000-builtin.def"
/* Simple unary operations: VECb = foo (unsigned literal) or VECb =
   foo (VECa).  */
11720 #undef RS6000_BUILTIN_1
11721 #undef RS6000_BUILTIN_2
11722 #undef RS6000_BUILTIN_3
11723 #undef RS6000_BUILTIN_A
11724 #undef RS6000_BUILTIN_D
11725 #undef RS6000_BUILTIN_E
11726 #undef RS6000_BUILTIN_H
11727 #undef RS6000_BUILTIN_P
11728 #undef RS6000_BUILTIN_Q
11729 #undef RS6000_BUILTIN_S
11730 #undef RS6000_BUILTIN_X
11732 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
11733 { MASK, ICODE, NAME, ENUM },
11735 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11736 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11737 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11738 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11739 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11740 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11741 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11742 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11743 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11744 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11746 static const struct builtin_description bdesc_1arg
[] =
11748 #include "rs6000-builtin.def"
11751 /* HTM builtins. */
11752 #undef RS6000_BUILTIN_1
11753 #undef RS6000_BUILTIN_2
11754 #undef RS6000_BUILTIN_3
11755 #undef RS6000_BUILTIN_A
11756 #undef RS6000_BUILTIN_D
11757 #undef RS6000_BUILTIN_E
11758 #undef RS6000_BUILTIN_H
11759 #undef RS6000_BUILTIN_P
11760 #undef RS6000_BUILTIN_Q
11761 #undef RS6000_BUILTIN_S
11762 #undef RS6000_BUILTIN_X
11764 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11765 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11766 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11767 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11768 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11769 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11770 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
11771 { MASK, ICODE, NAME, ENUM },
11773 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11774 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11775 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11776 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11778 static const struct builtin_description bdesc_htm
[] =
11780 #include "rs6000-builtin.def"
11783 #undef RS6000_BUILTIN_1
11784 #undef RS6000_BUILTIN_2
11785 #undef RS6000_BUILTIN_3
11786 #undef RS6000_BUILTIN_A
11787 #undef RS6000_BUILTIN_D
11788 #undef RS6000_BUILTIN_E
11789 #undef RS6000_BUILTIN_H
11790 #undef RS6000_BUILTIN_P
11791 #undef RS6000_BUILTIN_Q
11792 #undef RS6000_BUILTIN_S
11794 /* Return true if a builtin function is overloaded. */
11796 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode
)
11798 return (rs6000_builtin_info
[(int)fncode
].attr
& RS6000_BTC_OVERLOADED
) != 0;
11801 /* Expand an expression EXP that calls a builtin without arguments. */
11803 rs6000_expand_zeroop_builtin (enum insn_code icode
, rtx target
)
11806 enum machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
11808 if (icode
== CODE_FOR_nothing
)
11809 /* Builtin not supported on this processor. */
11813 || GET_MODE (target
) != tmode
11814 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
11815 target
= gen_reg_rtx (tmode
);
11817 pat
= GEN_FCN (icode
) (target
);
static rtx
rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[icode].operand[0].mode;
  enum machine_mode mode1 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (GET_CODE (op0) != CONST_INT
      || INTVAL (op0) > 255
      || INTVAL (op0) < 0)
    {
      error ("argument 1 must be an 8-bit field value");
      return const0_rtx;
    }

  if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  return NULL_RTX;
}
static rtx
rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vspltisb
      || icode == CODE_FOR_altivec_vspltish
      || icode == CODE_FOR_altivec_vspltisw
      || icode == CODE_FOR_spe_evsplatfi
      || icode == CODE_FOR_spe_evsplati)
    {
      /* Only allow 5-bit *signed* literals.  */
      if (GET_CODE (op0) != CONST_INT
	  || INTVAL (op0) > 15
	  || INTVAL (op0) < -16)
	{
	  error ("argument 1 must be a 5-bit signed literal");
	  return const0_rtx;
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
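/* Illustrative note (not part of the original source): the 5-bit signed
   literal check above is what rejects an out-of-range splat immediate at
   the source level, e.g. with <altivec.h>:

       vector signed int ok  = vec_splat_s32 (15);   // accepted, fits in -16..15
       vector signed int bad = vec_splat_s32 (99);   // rejected: "argument 1 must
                                                     //  be a 5-bit signed literal"

   vec_splat_s32 reaches this expander through the vspltisw pattern.  */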
static rtx
altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch1, scratch2;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  scratch1 = gen_reg_rtx (mode0);
  scratch2 = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vcfux
      || icode == CODE_FOR_altivec_vcfsx
      || icode == CODE_FOR_altivec_vctsxs
      || icode == CODE_FOR_altivec_vctuxs
      || icode == CODE_FOR_altivec_vspltb
      || icode == CODE_FOR_altivec_vsplth
      || icode == CODE_FOR_altivec_vspltw
      || icode == CODE_FOR_spe_evaddiw
      || icode == CODE_FOR_spe_evldd
      || icode == CODE_FOR_spe_evldh
      || icode == CODE_FOR_spe_evldw
      || icode == CODE_FOR_spe_evlhhesplat
      || icode == CODE_FOR_spe_evlhhossplat
      || icode == CODE_FOR_spe_evlhhousplat
      || icode == CODE_FOR_spe_evlwhe
      || icode == CODE_FOR_spe_evlwhos
      || icode == CODE_FOR_spe_evlwhou
      || icode == CODE_FOR_spe_evlwhsplat
      || icode == CODE_FOR_spe_evlwwsplat
      || icode == CODE_FOR_spe_evrlwi
      || icode == CODE_FOR_spe_evslwi
      || icode == CODE_FOR_spe_evsrwis
      || icode == CODE_FOR_spe_evsubifw
      || icode == CODE_FOR_spe_evsrwiu)
    {
      /* Only allow 5-bit unsigned literals.  */
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~0x1f)
	{
	  error ("argument 2 must be a 5-bit unsigned literal");
	  return const0_rtx;
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
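/* Illustrative note (not part of the original source): the literal check
   above covers builtins such as vec_splat, whose element selector must be
   a compile-time 5-bit unsigned constant:

       vector signed short s = vec_splat (v, 3);   // argument 2 must be a
                                                   // 5-bit unsigned literal

   where v is a vector signed short; this reaches the expander via the
   altivec_vsplth pattern.  */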
static rtx
altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch;
  tree cr6_form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = SImode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int cr6_form_int;

  if (TREE_CODE (cr6_form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_altivec_predicate must be a constant");
      return const0_rtx;
    }
  else
    cr6_form_int = TREE_INT_CST_LOW (cr6_form);

  gcc_assert (mode0 == mode1);

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  /* The vec_any* and vec_all* predicates use the same opcodes for two
     different operations, but the bits in CR6 will be different
     depending on what information we want.  So we have to play tricks
     with CR6 to get the right bits out.

     If you think this is disgusting, look at the specs for the
     AltiVec predicates.  */
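  /* Illustrative note (not part of the original source): at the source
     level the two CR6 readings correspond to the vec_all_* and vec_any_*
     forms in <altivec.h>, e.g.

	 vector signed int a, b;
	 int all_eq = vec_all_eq (a, b);   // nonzero only if every element is equal
	 int any_eq = vec_any_eq (a, b);   // nonzero if at least one element is equal

     Both expand to the same vcmpequw.-style compare; only the CR6 form
     passed as argument 1 of the predicate builtin differs.  */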
  switch (cr6_form_int)
    {
    case 0:
      emit_insn (gen_cr6_test_for_zero (target));
      break;
    case 1:
      emit_insn (gen_cr6_test_for_zero_reverse (target));
      break;
    case 2:
      emit_insn (gen_cr6_test_for_lt (target));
      break;
    case 3:
      emit_insn (gen_cr6_test_for_lt_reverse (target));
      break;
    default:
      error ("argument 1 of __builtin_altivec_predicate is out of range");
      break;
    }

  return target;
}
static rtx
paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = Pmode;
  enum machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  if (op0 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op1);
    }
  else
    {
      op0 = copy_to_mode_reg (mode0, op0);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
    }

  pat = GEN_FCN (icode) (target, addr);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
/* Return a constant vector for use as a little-endian permute control vector
   to reverse the order of elements of the given vector mode.  */
static rtx
swap_selector_for_mode (enum machine_mode mode)
{
  /* These are little endian vectors, so their elements are reversed
     from what you would normally expect for a permute control vector.  */
  unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
  unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
  unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
  unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
  unsigned int *swaparray, i;
  rtx perm[16];

  switch (mode)
    {
    case V2DFmode:
    case V2DImode:
      swaparray = swap2;
      break;
    case V4SFmode:
    case V4SImode:
      swaparray = swap4;
      break;
    case V8HImode:
      swaparray = swap8;
      break;
    case V16QImode:
      swaparray = swap16;
      break;
    default:
      gcc_unreachable ();
    }

  for (i = 0; i < 16; ++i)
    perm[i] = GEN_INT (swaparray[i]);

  return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
}
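/* Illustrative note (not part of the original source): feeding the selector
   returned above to a vperm whose two data inputs are the same register
   reverses the element order, e.g. for V4SImode a register holding
   {a0,a1,a2,a3} becomes {a3,a2,a1,a0}.  This is how the -maltivec=be
   load/store helpers below use it.  */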
/* Generate code for an "lvx", "lvxl", or "lve*x" built-in for a little endian target
   with -maltivec=be specified.  Issue the load followed by an element-reversing
   permute.  */
void
altivec_expand_lvx_be (rtx op0, rtx op1, enum machine_mode mode, unsigned unspec)
{
  rtx tmp = gen_reg_rtx (mode);
  rtx load = gen_rtx_SET (VOIDmode, tmp, op1);
  rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
  rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
  rtx sel = swap_selector_for_mode (mode);
  rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);

  gcc_assert (REG_P (op0));
  emit_insn (par);
  emit_insn (gen_rtx_SET (VOIDmode, op0, vperm));
}
/* Generate code for a "stvx" or "stvxl" built-in for a little endian target
   with -maltivec=be specified.  Issue the store preceded by an element-reversing
   permute.  */
void
altivec_expand_stvx_be (rtx op0, rtx op1, enum machine_mode mode, unsigned unspec)
{
  rtx tmp = gen_reg_rtx (mode);
  rtx store = gen_rtx_SET (VOIDmode, op0, tmp);
  rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
  rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
  rtx sel = swap_selector_for_mode (mode);
  rtx vperm;

  gcc_assert (REG_P (op1));
  vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
  emit_insn (gen_rtx_SET (VOIDmode, tmp, vperm));
  emit_insn (par);
}
/* Generate code for a "stve*x" built-in for a little endian target with -maltivec=be
   specified.  Issue the store preceded by an element-reversing permute.  */
void
altivec_expand_stvex_be (rtx op0, rtx op1, enum machine_mode mode, unsigned unspec)
{
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx tmp = gen_reg_rtx (mode);
  rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
  rtx sel = swap_selector_for_mode (mode);
  rtx vperm;

  gcc_assert (REG_P (op1));
  vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
  emit_insn (gen_rtx_SET (VOIDmode, tmp, vperm));
  emit_insn (gen_rtx_SET (VOIDmode, op0, stvx));
}
static rtx
altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = Pmode;
  enum machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  if (op0 == const0_rtx)
    {
      addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
    }
  else
    {
      op0 = copy_to_mode_reg (mode0, op0);
      addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
    }

  pat = GEN_FCN (icode) (target, addr);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
spe_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat;
  enum machine_mode mode0 = insn_data[icode].operand[0].mode;
  enum machine_mode mode1 = insn_data[icode].operand[1].mode;
  enum machine_mode mode2 = insn_data[icode].operand[2].mode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
    op0 = copy_to_mode_reg (mode2, op0);
  if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
    op1 = copy_to_mode_reg (mode0, op1);
  if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
    op2 = copy_to_mode_reg (mode1, op2);

  pat = GEN_FCN (icode) (op1, op2, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
static rtx
paired_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat, addr;
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode1 = Pmode;
  enum machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
    op0 = copy_to_mode_reg (tmode, op0);

  op2 = copy_to_mode_reg (mode2, op2);

  if (op1 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op2);
    }
  else
    {
      op1 = copy_to_mode_reg (mode1, op1);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
    }

  pat = GEN_FCN (icode) (addr, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
static rtx
altivec_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat, addr;
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode smode = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = Pmode;
  enum machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
    op0 = copy_to_mode_reg (smode, op0);

  op2 = copy_to_mode_reg (mode2, op2);

  if (op1 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op2);
    }
  else
    {
      op1 = copy_to_mode_reg (mode1, op1);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
    }

  pat = GEN_FCN (icode) (addr, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
/* Return the appropriate SPR number associated with the given builtin.  */
static inline HOST_WIDE_INT
htm_spr_num (enum rs6000_builtins code)
{
  if (code == HTM_BUILTIN_GET_TFHAR
      || code == HTM_BUILTIN_SET_TFHAR)
    return TFHAR_SPR;
  else if (code == HTM_BUILTIN_GET_TFIAR
	   || code == HTM_BUILTIN_SET_TFIAR)
    return TFIAR_SPR;
  else if (code == HTM_BUILTIN_GET_TEXASR
	   || code == HTM_BUILTIN_SET_TEXASR)
    return TEXASR_SPR;
  gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
	      || code == HTM_BUILTIN_SET_TEXASRU);
  return TEXASRU_SPR;
}
/* Return the appropriate SPR regno associated with the given builtin.  */
static inline HOST_WIDE_INT
htm_spr_regno (enum rs6000_builtins code)
{
  if (code == HTM_BUILTIN_GET_TFHAR
      || code == HTM_BUILTIN_SET_TFHAR)
    return TFHAR_REGNO;
  else if (code == HTM_BUILTIN_GET_TFIAR
	   || code == HTM_BUILTIN_SET_TFIAR)
    return TFIAR_REGNO;
  gcc_assert (code == HTM_BUILTIN_GET_TEXASR
	      || code == HTM_BUILTIN_SET_TEXASR
	      || code == HTM_BUILTIN_GET_TEXASRU
	      || code == HTM_BUILTIN_SET_TEXASRU);
  return TEXASR_REGNO;
}
/* Return the correct ICODE value depending on whether we are
   setting or reading the HTM SPRs.  */
static inline enum insn_code
rs6000_htm_spr_icode (bool nonvoid)
{
  if (nonvoid)
    return (TARGET_64BIT) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
  else
    return (TARGET_64BIT) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
}
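/* Illustrative note (not part of the original source): these helpers back
   the documented HTM special-purpose-register builtins, roughly

       unsigned long v = __builtin_get_texasr ();   // nonvoid -> mfspr icode
       __builtin_set_texasr (v);                    // void    -> mtspr icode

   assuming -mhtm is in effect.  */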
/* Expand the HTM builtin in EXP and store the result in TARGET.
   Store true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
htm_expand_builtin (tree exp, rtx target, bool * expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
  enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;

  *expandedp = false;

  /* Expand the HTM builtins.  */
  d = bdesc_htm;
  for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
    if (d->code == fcode)
      {
	rtx op[MAX_HTM_OPERANDS], pat;
	int nopnds = 0;
	tree arg;
	call_expr_arg_iterator iter;
	unsigned attr = rs6000_builtin_info[fcode].attr;
	enum insn_code icode = d->icode;

	if (attr & RS6000_BTC_SPR)
	  icode = rs6000_htm_spr_icode (nonvoid);

	if (nonvoid)
	  {
	    enum machine_mode tmode = insn_data[icode].operand[0].mode;
	    if (!target
		|| GET_MODE (target) != tmode
		|| !(*insn_data[icode].operand[0].predicate) (target, tmode))
	      target = gen_reg_rtx (tmode);
	    op[nopnds++] = target;
	  }

	FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
	{
	  const struct insn_operand_data *insn_op;

	  if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
	    return NULL_RTX;

	  insn_op = &insn_data[icode].operand[nopnds];

	  op[nopnds] = expand_normal (arg);

	  if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
	    {
	      if (!strcmp (insn_op->constraint, "n"))
		{
		  int arg_num = (nonvoid) ? nopnds : nopnds + 1;
		  if (!CONST_INT_P (op[nopnds]))
		    error ("argument %d must be an unsigned literal", arg_num);
		  else
		    error ("argument %d is an unsigned literal that is "
			   "out of range", arg_num);
		  return const0_rtx;
		}
	      op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
	    }

	  nopnds++;
	}

	/* Handle the builtins for extended mnemonics.  These accept
	   no arguments, but map to builtins that take arguments.  */
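	/* Illustrative note (not part of the original source): for example,
	   __builtin_tresume () takes no source-level arguments but is
	   expanded here exactly as __builtin_tsr (1) would be, by appending
	   the constant operand below.  */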
	switch (fcode)
	  {
	  case HTM_BUILTIN_TENDALL:  /* Alias for: tend. 1  */
	  case HTM_BUILTIN_TRESUME:  /* Alias for: tsr. 1  */
	    op[nopnds++] = GEN_INT (1);
#ifdef ENABLE_CHECKING
	    attr |= RS6000_BTC_UNARY;
#endif
	    break;
	  case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0  */
	    op[nopnds++] = GEN_INT (0);
#ifdef ENABLE_CHECKING
	    attr |= RS6000_BTC_UNARY;
#endif
	    break;
	  default:
	    break;
	  }

	/* If this builtin accesses SPRs, then pass in the appropriate
	   SPR number and SPR regno as the last two operands.  */
	if (attr & RS6000_BTC_SPR)
	  {
	    op[nopnds++] = gen_rtx_CONST_INT (Pmode, htm_spr_num (fcode));
	    op[nopnds++] = gen_rtx_REG (Pmode, htm_spr_regno (fcode));
	  }

#ifdef ENABLE_CHECKING
	int expected_nopnds = 0;
	if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
	  expected_nopnds = 1;
	else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
	  expected_nopnds = 2;
	else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
	  expected_nopnds = 3;
	if (!(attr & RS6000_BTC_VOID))
	  expected_nopnds += 1;
	if (attr & RS6000_BTC_SPR)
	  expected_nopnds += 2;

	gcc_assert (nopnds == expected_nopnds && nopnds <= MAX_HTM_OPERANDS);
#endif

	switch (nopnds)
	  {
	  case 1:
	    pat = GEN_FCN (icode) (op[0]);
	    break;
	  case 2:
	    pat = GEN_FCN (icode) (op[0], op[1]);
	    break;
	  case 3:
	    pat = GEN_FCN (icode) (op[0], op[1], op[2]);
	    break;
	  case 4:
	    pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
	    break;
	  default:
	    gcc_unreachable ();
	  }
	if (!pat)
	  return NULL_RTX;
	emit_insn (pat);

	*expandedp = true;
	if (nonvoid)
	  return target;
	return const0_rtx;
      }

  return NULL_RTX;
}
static rtx
rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  /* Check and prepare argument depending on the instruction code.

     Note that a switch statement instead of the sequence of tests
     would be incorrect as many of the CODE_FOR values could be
     CODE_FOR_nothing and that would yield multiple alternatives
     with identical values.  We'd never reach here at runtime in
     this case.  */
  if (icode == CODE_FOR_altivec_vsldoi_v4sf
      || icode == CODE_FOR_altivec_vsldoi_v4si
      || icode == CODE_FOR_altivec_vsldoi_v8hi
      || icode == CODE_FOR_altivec_vsldoi_v16qi)
    {
      /* Only allow 4-bit unsigned literals.  */
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0xf)
	{
	  error ("argument 3 must be a 4-bit unsigned literal");
	  return const0_rtx;
	}
    }
  else if (icode == CODE_FOR_vsx_xxpermdi_v2df
	   || icode == CODE_FOR_vsx_xxpermdi_v2di
	   || icode == CODE_FOR_vsx_xxsldwi_v16qi
	   || icode == CODE_FOR_vsx_xxsldwi_v8hi
	   || icode == CODE_FOR_vsx_xxsldwi_v4si
	   || icode == CODE_FOR_vsx_xxsldwi_v4sf
	   || icode == CODE_FOR_vsx_xxsldwi_v2di
	   || icode == CODE_FOR_vsx_xxsldwi_v2df)
    {
      /* Only allow 2-bit unsigned literals.  */
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x3)
	{
	  error ("argument 3 must be a 2-bit unsigned literal");
	  return const0_rtx;
	}
    }
  else if (icode == CODE_FOR_vsx_set_v2df
	   || icode == CODE_FOR_vsx_set_v2di
	   || icode == CODE_FOR_bcdadd
	   || icode == CODE_FOR_bcdadd_lt
	   || icode == CODE_FOR_bcdadd_eq
	   || icode == CODE_FOR_bcdadd_gt
	   || icode == CODE_FOR_bcdsub
	   || icode == CODE_FOR_bcdsub_lt
	   || icode == CODE_FOR_bcdsub_eq
	   || icode == CODE_FOR_bcdsub_gt)
    {
      /* Only allow 1-bit unsigned literals.  */
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x1)
	{
	  error ("argument 3 must be a 1-bit unsigned literal");
	  return const0_rtx;
	}
    }
  else if (icode == CODE_FOR_dfp_ddedpd_dd
	   || icode == CODE_FOR_dfp_ddedpd_td)
    {
      /* Only allow 2-bit unsigned literals where the value is 0 or 2.  */
      if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg0) & ~0x3)
	{
	  error ("argument 1 must be 0 or 2");
	  return const0_rtx;
	}
    }
  else if (icode == CODE_FOR_dfp_denbcd_dd
	   || icode == CODE_FOR_dfp_denbcd_td)
    {
      /* Only allow 1-bit unsigned literals.  */
      if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg0) & ~0x1)
	{
	  error ("argument 1 must be a 1-bit unsigned literal");
	  return const0_rtx;
	}
    }
  else if (icode == CODE_FOR_dfp_dscli_dd
	   || icode == CODE_FOR_dfp_dscli_td
	   || icode == CODE_FOR_dfp_dscri_dd
	   || icode == CODE_FOR_dfp_dscri_td)
    {
      /* Only allow 6-bit unsigned literals.  */
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~0x3f)
	{
	  error ("argument 2 must be a 6-bit unsigned literal");
	  return const0_rtx;
	}
    }
  else if (icode == CODE_FOR_crypto_vshasigmaw
	   || icode == CODE_FOR_crypto_vshasigmad)
    {
      /* Check whether the 2nd and 3rd arguments are integer constants and in
	 range and prepare arguments.  */
      if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
	{
	  error ("argument 2 must be 0 or 1");
	  return const0_rtx;
	}

      if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
	{
	  error ("argument 3 must be in the range 0..15");
	  return const0_rtx;
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);
  if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
    op2 = copy_to_mode_reg (mode2, op2);

  if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
    pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
  else
    pat = GEN_FCN (icode) (target, op0, op1, op2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
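/* Illustrative note (not part of the original source): the 4-bit literal
   case above is what a user hits with vec_sld, whose shift count must be a
   compile-time constant in 0..15:

       vector signed int r = vec_sld (a, b, 3);   // accepted
       vector signed int e = vec_sld (a, b, n);   // rejected if n is not a
                                                  // 4-bit unsigned literal

   (a, b and n stand for suitably typed variables.)  */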
/* Expand the lvx builtins.  */
static rtx
altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0;
  enum machine_mode tmode, mode0;
  rtx pat, op0;
  enum insn_code icode;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
      icode = CODE_FOR_vector_altivec_load_v16qi;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
      icode = CODE_FOR_vector_altivec_load_v8hi;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
      icode = CODE_FOR_vector_altivec_load_v4si;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
      icode = CODE_FOR_vector_altivec_load_v4sf;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
      icode = CODE_FOR_vector_altivec_load_v2df;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
      icode = CODE_FOR_vector_altivec_load_v2di;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
      icode = CODE_FOR_vector_altivec_load_v1ti;
      break;
    default:
      *expandedp = false;
      return NULL_RTX;
    }

  *expandedp = true;

  arg0 = CALL_EXPR_ARG (exp, 0);
  op0 = expand_normal (arg0);
  tmode = insn_data[icode].operand[0].mode;
  mode0 = insn_data[icode].operand[1].mode;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
/* Expand the stvx builtins.  */
static rtx
altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
			   bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1;
  enum machine_mode mode0, mode1;
  rtx pat, op0, op1;
  enum insn_code icode;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
      icode = CODE_FOR_vector_altivec_store_v16qi;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
      icode = CODE_FOR_vector_altivec_store_v8hi;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
      icode = CODE_FOR_vector_altivec_store_v4si;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
      icode = CODE_FOR_vector_altivec_store_v4sf;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
      icode = CODE_FOR_vector_altivec_store_v2df;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
      icode = CODE_FOR_vector_altivec_store_v2di;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
      icode = CODE_FOR_vector_altivec_store_v1ti;
      break;
    default:
      *expandedp = false;
      return NULL_RTX;
    }

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);
  mode0 = insn_data[icode].operand[0].mode;
  mode1 = insn_data[icode].operand[1].mode;

  if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (op0, op1);
  if (pat)
    emit_insn (pat);

  *expandedp = true;
  return NULL_RTX;
}
/* Expand the dst builtins.  */
static rtx
altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
			    bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1, arg2;
  enum machine_mode mode0, mode1;
  rtx pat, op0, op1, op2;
  const struct builtin_description *d;
  size_t i;

  *expandedp = false;

  /* Handle DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    if (d->code == fcode)
      {
	arg0 = CALL_EXPR_ARG (exp, 0);
	arg1 = CALL_EXPR_ARG (exp, 1);
	arg2 = CALL_EXPR_ARG (exp, 2);
	op0 = expand_normal (arg0);
	op1 = expand_normal (arg1);
	op2 = expand_normal (arg2);
	mode0 = insn_data[d->icode].operand[0].mode;
	mode1 = insn_data[d->icode].operand[1].mode;

	/* Invalid arguments, bail out before generating bad rtl.  */
	if (arg0 == error_mark_node
	    || arg1 == error_mark_node
	    || arg2 == error_mark_node)
	  return const0_rtx;

	*expandedp = true;
	if (TREE_CODE (arg2) != INTEGER_CST
	    || TREE_INT_CST_LOW (arg2) & ~0x3)
	  {
	    error ("argument to %qs must be a 2-bit unsigned literal", d->name);
	    return const0_rtx;
	  }

	if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
	  op0 = copy_to_mode_reg (Pmode, op0);
	if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
	  op1 = copy_to_mode_reg (mode1, op1);

	pat = GEN_FCN (d->icode) (op0, op1, op2);
	if (pat != 0)
	  emit_insn (pat);

	return NULL_RTX;
      }

  return NULL_RTX;
}
/* Expand vec_init builtin.  */
static rtx
altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
{
  enum machine_mode tmode = TYPE_MODE (type);
  enum machine_mode inner_mode = GET_MODE_INNER (tmode);
  int i, n_elt = GET_MODE_NUNITS (tmode);

  gcc_assert (VECTOR_MODE_P (tmode));
  gcc_assert (n_elt == call_expr_nargs (exp));

  if (!target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  /* If we have a vector comprised of a single element, such as V1TImode, do
     the initialization directly.  */
  if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
    {
      rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
      emit_move_insn (target, gen_lowpart (tmode, x));
    }
  else
    {
      rtvec v = rtvec_alloc (n_elt);

      for (i = 0; i < n_elt; ++i)
	{
	  rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
	  RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
	}

      rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
    }

  return target;
}
/* Return the integer constant in ARG.  Constrain it to be in the range
   of the subparts of VEC_TYPE; issue an error if not.  */
static int
get_element_number (tree vec_type, tree arg)
{
  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

  if (!tree_fits_uhwi_p (arg)
      || (elt = tree_to_uhwi (arg), elt > max))
    {
      error ("selector must be an integer constant in the range 0..%wi", max);
      return 0;
    }

  return elt;
}
/* Expand vec_set builtin.  */
static rtx
altivec_expand_vec_set_builtin (tree exp)
{
  enum machine_mode tmode, mode1;
  tree arg0, arg1, arg2;
  int elt;
  rtx op0, op1;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  arg2 = CALL_EXPR_ARG (exp, 2);

  tmode = TYPE_MODE (TREE_TYPE (arg0));
  mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  gcc_assert (VECTOR_MODE_P (tmode));

  op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
  op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
  elt = get_element_number (TREE_TYPE (arg0), arg2);

  if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
    op1 = convert_modes (mode1, GET_MODE (op1), op1, true);

  op0 = force_reg (tmode, op0);
  op1 = force_reg (mode1, op1);

  rs6000_expand_vector_set (op0, op1, elt);

  return op0;
}
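/* Illustrative note (not part of the original source): conceptually this is
   the vec_insert-style operation from <altivec.h>, e.g.

       vector int v = {10, 20, 30, 40};
       v = vec_insert (99, v, 2);        // v is now {10, 20, 99, 40}

   whether a given front end funnels vec_insert through this builtin or
   through an equivalent lowering, the element number is validated by
   get_element_number above.  */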
/* Expand vec_ext builtin.  */
static rtx
altivec_expand_vec_ext_builtin (tree exp, rtx target)
{
  enum machine_mode tmode, mode0;
  tree arg0, arg1;
  int elt;
  rtx op0;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);

  op0 = expand_normal (arg0);
  elt = get_element_number (TREE_TYPE (arg0), arg1);

  tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  mode0 = TYPE_MODE (TREE_TYPE (arg0));
  gcc_assert (VECTOR_MODE_P (mode0));

  op0 = force_reg (mode0, op0);

  if (optimize || !target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_extract (target, op0, elt);

  return target;
}
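/* Illustrative note (not part of the original source): the companion
   extraction, e.g.

       vector int v = {10, 20, 30, 40};
       int x = vec_extract (v, 2);       // x == 30

   conceptually matches this expander, with the selector validated the same
   way as for vec_set above.  */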
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  const struct builtin_description *d;
  size_t i;
  enum insn_code icode;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg0;
  rtx op0, pat;
  enum machine_mode tmode, mode0;
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);

  if (rs6000_overloaded_builtin_p (fcode))
    {
      *expandedp = true;
      error ("unresolved overload for Altivec builtin %qF", fndecl);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, false);
    }

  target = altivec_expand_ld_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  target = altivec_expand_st_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  target = altivec_expand_dst_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  *expandedp = true;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_STVX_V2DF:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
    case ALTIVEC_BUILTIN_STVX_V2DI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
    case ALTIVEC_BUILTIN_STVX_V4SF:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
    case ALTIVEC_BUILTIN_STVX:
    case ALTIVEC_BUILTIN_STVX_V4SI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
    case ALTIVEC_BUILTIN_STVX_V8HI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
    case ALTIVEC_BUILTIN_STVX_V16QI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
    case ALTIVEC_BUILTIN_STVEBX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
    case ALTIVEC_BUILTIN_STVEHX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
    case ALTIVEC_BUILTIN_STVEWX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
    case ALTIVEC_BUILTIN_STVXL_V2DF:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
    case ALTIVEC_BUILTIN_STVXL_V2DI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
    case ALTIVEC_BUILTIN_STVXL_V4SF:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
    case ALTIVEC_BUILTIN_STVXL:
    case ALTIVEC_BUILTIN_STVXL_V4SI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
    case ALTIVEC_BUILTIN_STVXL_V8HI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
    case ALTIVEC_BUILTIN_STVXL_V16QI:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);

    case ALTIVEC_BUILTIN_STVLX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
    case ALTIVEC_BUILTIN_STVLXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
    case ALTIVEC_BUILTIN_STVRX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
    case ALTIVEC_BUILTIN_STVRXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);

    case VSX_BUILTIN_STXVD2X_V1TI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
    case VSX_BUILTIN_STXVD2X_V2DF:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
    case VSX_BUILTIN_STXVD2X_V2DI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
    case VSX_BUILTIN_STXVW4X_V4SF:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
    case VSX_BUILTIN_STXVW4X_V4SI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
    case VSX_BUILTIN_STXVW4X_V8HI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
    case VSX_BUILTIN_STXVW4X_V16QI:
      return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);

    case ALTIVEC_BUILTIN_MFVSCR:
      icode = CODE_FOR_altivec_mfvscr;
      tmode = insn_data[icode].operand[0].mode;

      if (target == 0
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);

      pat = GEN_FCN (icode) (target);
      if (! pat)
	return 0;
      emit_insn (pat);
      return target;

    case ALTIVEC_BUILTIN_MTVSCR:
      icode = CODE_FOR_altivec_mtvscr;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      /* If we got invalid arguments bail out before generating bad rtl.  */
      if (arg0 == error_mark_node)
	return const0_rtx;

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      pat = GEN_FCN (icode) (op0);
      if (pat)
	emit_insn (pat);
      return NULL_RTX;

    case ALTIVEC_BUILTIN_DSSALL:
      emit_insn (gen_altivec_dssall ());
      return NULL_RTX;

    case ALTIVEC_BUILTIN_DSS:
      icode = CODE_FOR_altivec_dss;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      /* If we got invalid arguments bail out before generating bad rtl.  */
      if (arg0 == error_mark_node)
	return const0_rtx;

      if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg0) & ~0x3)
	{
	  error ("argument to dss must be a 2-bit unsigned literal");
	  return const0_rtx;
	}

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      emit_insn (gen_altivec_dss (op0));
      return NULL_RTX;

    case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
    case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
    case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
    case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
    case VSX_BUILTIN_VEC_INIT_V2DF:
    case VSX_BUILTIN_VEC_INIT_V2DI:
    case VSX_BUILTIN_VEC_INIT_V1TI:
      return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);

    case ALTIVEC_BUILTIN_VEC_SET_V4SI:
    case ALTIVEC_BUILTIN_VEC_SET_V8HI:
    case ALTIVEC_BUILTIN_VEC_SET_V16QI:
    case ALTIVEC_BUILTIN_VEC_SET_V4SF:
    case VSX_BUILTIN_VEC_SET_V2DF:
    case VSX_BUILTIN_VEC_SET_V2DI:
    case VSX_BUILTIN_VEC_SET_V1TI:
      return altivec_expand_vec_set_builtin (exp);

    case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
    case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
    case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
    case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
    case VSX_BUILTIN_VEC_EXT_V2DF:
    case VSX_BUILTIN_VEC_EXT_V2DI:
    case VSX_BUILTIN_VEC_EXT_V1TI:
      return altivec_expand_vec_ext_builtin (exp, target);

    default:
      break;
      /* Fall through.  */
    }

  /* Expand abs* operations.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    if (d->code == fcode)
      return altivec_expand_abs_builtin (d->icode, exp, target);

  /* Expand the AltiVec predicates.  */
  d = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
    if (d->code == fcode)
      return altivec_expand_predicate_builtin (d->icode, exp, target);

  /* LV* are funky.  We initialized them differently.  */
  switch (fcode)
    {
    case ALTIVEC_BUILTIN_LVSL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl, exp, target, false);
    case ALTIVEC_BUILTIN_LVSR:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr, exp, target, false);
    case ALTIVEC_BUILTIN_LVEBX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx, exp, target, false);
    case ALTIVEC_BUILTIN_LVEHX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx, exp, target, false);
    case ALTIVEC_BUILTIN_LVEWX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx, exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V2DF:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df, exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V2DI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di, exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V4SF:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf, exp, target, false);
    case ALTIVEC_BUILTIN_LVXL:
    case ALTIVEC_BUILTIN_LVXL_V4SI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si, exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V8HI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi, exp, target, false);
    case ALTIVEC_BUILTIN_LVXL_V16QI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi, exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V2DF:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df, exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V2DI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di, exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V4SF:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf, exp, target, false);
    case ALTIVEC_BUILTIN_LVX:
    case ALTIVEC_BUILTIN_LVX_V4SI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si, exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V8HI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi, exp, target, false);
    case ALTIVEC_BUILTIN_LVX_V16QI:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi, exp, target, false);
    case ALTIVEC_BUILTIN_LVLX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx, exp, target, true);
    case ALTIVEC_BUILTIN_LVLXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl, exp, target, true);
    case ALTIVEC_BUILTIN_LVRX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx, exp, target, true);
    case ALTIVEC_BUILTIN_LVRXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl, exp, target, true);
    case VSX_BUILTIN_LXVD2X_V1TI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti, exp, target, false);
    case VSX_BUILTIN_LXVD2X_V2DF:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df, exp, target, false);
    case VSX_BUILTIN_LXVD2X_V2DI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di, exp, target, false);
    case VSX_BUILTIN_LXVW4X_V4SF:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf, exp, target, false);
    case VSX_BUILTIN_LXVW4X_V4SI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si, exp, target, false);
    case VSX_BUILTIN_LXVW4X_V8HI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi, exp, target, false);
    case VSX_BUILTIN_LXVW4X_V16QI:
      return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi, exp, target, false);
    default:
      break;
      /* Fall through.  */
    }

  *expandedp = false;
  return NULL_RTX;
}
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
paired_expand_builtin (tree exp, rtx target, bool * expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  switch (fcode)
    {
    case PAIRED_BUILTIN_STX:
      return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
    case PAIRED_BUILTIN_LX:
      return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
    default:
      break;
      /* Fall through.  */
    }

  /* Expand the paired predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
    if (d->code == fcode)
      return paired_expand_predicate_builtin (d->icode, exp, target);

  *expandedp = false;
  return NULL_RTX;
}
/* Binops that need to be initialized manually, but can be expanded
   automagically by rs6000_expand_binop_builtin.  */
static const struct builtin_description bdesc_2arg_spe[] =
{
  { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
};
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.

   This expands the SPE builtins that are not simple unary and binary
   operations.  */
static rtx
spe_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg0, arg1;
  enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  enum insn_code icode;
  enum machine_mode tmode, mode0;
  rtx pat, op0;
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  /* Syntax check for a 5-bit unsigned immediate.  */
  switch (fcode)
    {
    case SPE_BUILTIN_EVSTDD:
    case SPE_BUILTIN_EVSTDH:
    case SPE_BUILTIN_EVSTDW:
    case SPE_BUILTIN_EVSTWHE:
    case SPE_BUILTIN_EVSTWHO:
    case SPE_BUILTIN_EVSTWWE:
    case SPE_BUILTIN_EVSTWWO:
      arg1 = CALL_EXPR_ARG (exp, 2);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~0x1f)
	{
	  error ("argument 2 must be a 5-bit unsigned literal");
	  return const0_rtx;
	}
      break;
    default:
      break;
    }

  /* The evsplat*i instructions are not quite generic.  */
  switch (fcode)
    {
    case SPE_BUILTIN_EVSPLATFI:
      return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
					 exp, target);
    case SPE_BUILTIN_EVSPLATI:
      return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
					 exp, target);
    default:
      break;
    }

  d = bdesc_2arg_spe;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (d->icode, exp, target);

  d = bdesc_spe_predicates;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
    if (d->code == fcode)
      return spe_expand_predicate_builtin (d->icode, exp, target);

  d = bdesc_spe_evsel;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
    if (d->code == fcode)
      return spe_expand_evsel_builtin (d->icode, exp, target);

  switch (fcode)
    {
    case SPE_BUILTIN_EVSTDDX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
    case SPE_BUILTIN_EVSTDHX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
    case SPE_BUILTIN_EVSTDWX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
    case SPE_BUILTIN_EVSTWHEX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
    case SPE_BUILTIN_EVSTWHOX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
    case SPE_BUILTIN_EVSTWWEX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
    case SPE_BUILTIN_EVSTWWOX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
    case SPE_BUILTIN_EVSTDD:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
    case SPE_BUILTIN_EVSTDH:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
    case SPE_BUILTIN_EVSTDW:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
    case SPE_BUILTIN_EVSTWHE:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
    case SPE_BUILTIN_EVSTWHO:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
    case SPE_BUILTIN_EVSTWWE:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
    case SPE_BUILTIN_EVSTWWO:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
    case SPE_BUILTIN_MFSPEFSCR:
      icode = CODE_FOR_spe_mfspefscr;
      tmode = insn_data[icode].operand[0].mode;

      if (target == 0
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);

      pat = GEN_FCN (icode) (target);
      if (! pat)
	return 0;
      emit_insn (pat);
      return target;
    case SPE_BUILTIN_MTSPEFSCR:
      icode = CODE_FOR_spe_mtspefscr;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      if (arg0 == error_mark_node)
	return const0_rtx;

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      pat = GEN_FCN (icode) (op0);
      if (pat)
	emit_insn (pat);
      return NULL_RTX;
    default:
      break;
    }

  *expandedp = false;
  return NULL_RTX;
}
static rtx
paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_paired_predicate must be a constant");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || !(*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);
  if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCFPmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (!pat)
    return const0_rtx;

  emit_insn (pat);

  switch (form_int)
    {
      /* LT bit.  */
    case 0:
      code = LT;
      break;
      /* GT bit.  */
    case 1:
      code = GT;
      break;
      /* EQ bit.  */
    case 2:
      code = EQ;
      break;
      /* UN bit.  */
    case 3:
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
    default:
      error ("argument 1 of __builtin_paired_predicate is out of range");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);
  return target;
}
static rtx
spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_spe_predicate must be a constant");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  /* There are 4 variants for each predicate: _any_, _all_, _upper_,
     _lower_.  We use one compare, but look in different bits of the
     CR for each variant.

     There are 2 elements in each SPE simd type (upper/lower).  The CR
     bits are set as follows:

     BIT0  | BIT 1  | BIT 2   | BIT 3
     U     |   L    | (U | L) | (U & L)

     So, for an "all" relationship, BIT 3 would be set.
     For an "any" relationship, BIT 2 would be set.  Etc.

     Following traditional nomenclature, these bits map to:

     BIT0  | BIT 1  | BIT 2   | BIT 3
     LT    | GT     | EQ      | OV

     Later, we will generate rtl to look in the LT/EQ/EQ/OV bits.  */
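  /* Worked instance (not part of the original source): with two-element
     vectors a = {1, 5} and b = {3, 3}, a greater-than compare sets U = 0
     (upper: 1 > 3 is false) and L = 1 (lower: 5 > 3 is true), so
     BIT 2 = U | L = 1 (the "any" form holds) while BIT 3 = U & L = 0
     (the "all" form fails).  The switch below merely picks which of those
     CR bits to read back into TARGET.  */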
  switch (form_int)
    {
      /* All variant.  OV bit.  */
    case 0:
      /* We need to get to the OV bit, which is the ORDERED bit.  We
	 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
	 that's ugly and will make validate_condition_mode die.
	 So let's just use another pattern.  */
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
      /* Any variant.  EQ bit.  */
    case 1:
      code = EQ;
      break;
      /* Upper variant.  LT bit.  */
    case 2:
      code = LT;
      break;
      /* Lower variant.  GT bit.  */
    case 3:
      code = GT;
      break;
    default:
      error ("argument 1 of __builtin_spe_predicate is out of range");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);

  return target;
}
/* The evsel builtins look like this:

     e = __builtin_spe_evsel_OP (a, b, c, d);

   and work like this:

     e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
     e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
*/

static rtx
spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  tree arg3 = CALL_EXPR_ARG (exp, 3);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx op3 = expand_normal (arg3);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node
      || arg2 == error_mark_node || arg3 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != mode0
      || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
    target = gen_reg_rtx (mode0);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode0, op1);
  if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
    op2 = copy_to_mode_reg (mode0, op2);
  if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
    op3 = copy_to_mode_reg (mode0, op3);

  /* Generate the compare.  */
  scratch = gen_reg_rtx (CCmode);
  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  if (mode0 == V2SImode)
    emit_insn (gen_spe_evsel (target, op2, op3, scratch));
  else
    emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));

  return target;
}
/* Raise an error message for a builtin function that is called without the
   appropriate target options being set.  */

static void
rs6000_invalid_builtin (enum rs6000_builtins fncode)
{
  size_t uns_fncode = (size_t)fncode;
  const char *name = rs6000_builtin_info[uns_fncode].name;
  HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;

  gcc_assert (name != NULL);
  if ((fnmask & RS6000_BTM_CELL) != 0)
    error ("Builtin function %s is only valid for the cell processor", name);
  else if ((fnmask & RS6000_BTM_VSX) != 0)
    error ("Builtin function %s requires the -mvsx option", name);
  else if ((fnmask & RS6000_BTM_HTM) != 0)
    error ("Builtin function %s requires the -mhtm option", name);
  else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
    error ("Builtin function %s requires the -maltivec option", name);
  else if ((fnmask & RS6000_BTM_PAIRED) != 0)
    error ("Builtin function %s requires the -mpaired option", name);
  else if ((fnmask & RS6000_BTM_SPE) != 0)
    error ("Builtin function %s requires the -mspe option", name);
  else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
	   == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
    error ("Builtin function %s requires the -mhard-dfp and"
	   " -mpower8-vector options", name);
  else if ((fnmask & RS6000_BTM_DFP) != 0)
    error ("Builtin function %s requires the -mhard-dfp option", name);
  else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
    error ("Builtin function %s requires the -mpower8-vector option", name);
  else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
	   == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
    error ("Builtin function %s requires the -mhard-float and"
	   " -mlong-double-128 options", name);
  else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
    error ("Builtin function %s requires the -mhard-float option", name);
  else
    error ("Builtin function %s is not supported with the current options",
	   name);
}
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
                       enum machine_mode mode ATTRIBUTE_UNUSED,
                       int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  enum rs6000_builtins fcode
    = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
  size_t uns_fcode = (size_t)fcode;
  const struct builtin_description *d;
  size_t i;
  rtx ret;
  bool success;
  HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
  bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);

  if (TARGET_DEBUG_BUILTIN)
    {
      enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
      const char *name1 = rs6000_builtin_info[uns_fcode].name;
      const char *name2 = ((icode != CODE_FOR_nothing)
                           ? get_insn_name ((int)icode)
                           : "nothing");
      const char *name3;

      switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
        {
        default:                   name3 = "unknown";   break;
        case RS6000_BTC_SPECIAL:   name3 = "special";   break;
        case RS6000_BTC_UNARY:     name3 = "unary";     break;
        case RS6000_BTC_BINARY:    name3 = "binary";    break;
        case RS6000_BTC_TERNARY:   name3 = "ternary";   break;
        case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
        case RS6000_BTC_ABS:       name3 = "abs";       break;
        case RS6000_BTC_EVSEL:     name3 = "evsel";     break;
        case RS6000_BTC_DST:       name3 = "dst";       break;
        }

      fprintf (stderr,
               "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
               (name1) ? name1 : "---", fcode,
               (name2) ? name2 : "---", (int)icode,
               name3,
               func_valid_p ? "" : ", not valid");
    }

  if (!func_valid_p)
    {
      rs6000_invalid_builtin (fcode);

      /* Given it is invalid, just generate a normal call.  */
      return expand_call (exp, target, ignore);
    }

  switch (fcode)
    {
    case RS6000_BUILTIN_RECIP:
      return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);

    case RS6000_BUILTIN_RECIPF:
      return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);

    case RS6000_BUILTIN_RSQRTF:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);

    case RS6000_BUILTIN_RSQRT:
      return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);

    case POWER7_BUILTIN_BPERMD:
      return rs6000_expand_binop_builtin (((TARGET_64BIT)
                                           ? CODE_FOR_bpermd_di
                                           : CODE_FOR_bpermd_si), exp, target);

    case RS6000_BUILTIN_GET_TB:
      return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
                                           target);

    case RS6000_BUILTIN_MFTB:
      return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
                                            ? CODE_FOR_rs6000_mftb_di
                                            : CODE_FOR_rs6000_mftb_si),
                                           target);

    case RS6000_BUILTIN_MFFS:
      return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);

    case RS6000_BUILTIN_MTFSF:
      return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);

    case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
    case ALTIVEC_BUILTIN_MASK_FOR_STORE:
      {
        int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr_direct
                     : (int) CODE_FOR_altivec_lvsl_direct);
        enum machine_mode tmode = insn_data[icode].operand[0].mode;
        enum machine_mode mode = insn_data[icode].operand[1].mode;
        tree arg;
        rtx op, addr, pat;

        gcc_assert (TARGET_ALTIVEC);

        arg = CALL_EXPR_ARG (exp, 0);
        gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
        op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
        addr = memory_address (mode, op);
        if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
          op = addr;
        else
          {
            /* For the load case need to negate the address.  */
            op = gen_reg_rtx (GET_MODE (addr));
            emit_insn (gen_rtx_SET (VOIDmode, op,
                                    gen_rtx_NEG (GET_MODE (addr), addr)));
          }
        op = gen_rtx_MEM (mode, op);

        if (target == 0
            || GET_MODE (target) != tmode
            || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
          target = gen_reg_rtx (tmode);

        pat = GEN_FCN (icode) (target, op);
        if (!pat)
          return 0;
        emit_insn (pat);

        return target;
      }

    case ALTIVEC_BUILTIN_VCFUX:
    case ALTIVEC_BUILTIN_VCFSX:
    case ALTIVEC_BUILTIN_VCTUXS:
    case ALTIVEC_BUILTIN_VCTSXS:
      /* FIXME: There's got to be a nicer way to handle this case than
         constructing a new CALL_EXPR.  */
      if (call_expr_nargs (exp) == 1)
        {
          exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
                                 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
        }
      break;

    default:
      break;
    }

  if (TARGET_ALTIVEC)
    {
      ret = altivec_expand_builtin (exp, target, &success);

      if (success)
        return ret;
    }
  if (TARGET_SPE)
    {
      ret = spe_expand_builtin (exp, target, &success);

      if (success)
        return ret;
    }
  if (TARGET_PAIRED_FLOAT)
    {
      ret = paired_expand_builtin (exp, target, &success);

      if (success)
        return ret;
    }
  if (TARGET_HTM)
    {
      ret = htm_expand_builtin (exp, target, &success);

      if (success)
        return ret;
    }

  unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
  gcc_assert (attr == RS6000_BTC_UNARY
              || attr == RS6000_BTC_BINARY
              || attr == RS6000_BTC_TERNARY);

  /* Handle simple unary operations.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_unop_builtin (d->icode, exp, target);

  /* Handle simple binary operations.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (d->icode, exp, target);

  /* Handle simple ternary operations.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_ternop_builtin (d->icode, exp, target);

  gcc_unreachable ();
}
static void
rs6000_init_builtins (void)
{
  tree tdecl;
  tree ftype;
  enum machine_mode mode;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
             (TARGET_PAIRED_FLOAT) ? ", paired"  : "",
             (TARGET_SPE)          ? ", spe"     : "",
             (TARGET_ALTIVEC)      ? ", altivec" : "",
             (TARGET_VSX)          ? ", vsx"     : "");
  V2SI_type_node = build_vector_type (intSI_type_node, 2);
  V2SF_type_node = build_vector_type (float_type_node, 2);
  V2DI_type_node = build_vector_type (intDI_type_node, 2);
  V2DF_type_node = build_vector_type (double_type_node, 2);
  V4HI_type_node = build_vector_type (intHI_type_node, 4);
  V4SI_type_node = build_vector_type (intSI_type_node, 4);
  V4SF_type_node = build_vector_type (float_type_node, 4);
  V8HI_type_node = build_vector_type (intHI_type_node, 8);
  V16QI_type_node = build_vector_type (intQI_type_node, 16);

  unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
  unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
  unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
  unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);

  opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
  opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
  opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
  opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);

  /* We use V1TI mode as a special container to hold __int128_t items that
     must live in VSX registers.  */
  if (intTI_type_node)
    {
      V1TI_type_node = build_vector_type (intTI_type_node, 1);
      unsigned_V1TI_type_node = build_vector_type (unsigned_intTI_type_node, 1);
    }

  /* The 'vector bool ...' types must be kept distinct from 'vector
     unsigned ...' types, especially in C++ land.  Similarly, 'vector pixel'
     is distinct from 'vector unsigned short'.  */
  bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
  bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
  bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
  bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
  pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
  long_integer_type_internal_node = long_integer_type_node;
  long_unsigned_type_internal_node = long_unsigned_type_node;
  long_long_integer_type_internal_node = long_long_integer_type_node;
  long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
  intQI_type_internal_node = intQI_type_node;
  uintQI_type_internal_node = unsigned_intQI_type_node;
  intHI_type_internal_node = intHI_type_node;
  uintHI_type_internal_node = unsigned_intHI_type_node;
  intSI_type_internal_node = intSI_type_node;
  uintSI_type_internal_node = unsigned_intSI_type_node;
  intDI_type_internal_node = intDI_type_node;
  uintDI_type_internal_node = unsigned_intDI_type_node;
  intTI_type_internal_node = intTI_type_node;
  uintTI_type_internal_node = unsigned_intTI_type_node;
  float_type_internal_node = float_type_node;
  double_type_internal_node = double_type_node;
  long_double_type_internal_node = long_double_type_node;
  dfloat64_type_internal_node = dfloat64_type_node;
  dfloat128_type_internal_node = dfloat128_type_node;
  void_type_internal_node = void_type_node;
  /* Initialize the modes for builtin_function_type, mapping a machine mode to
     tree type node.  */
  builtin_mode_to_type[QImode][0] = integer_type_node;
  builtin_mode_to_type[HImode][0] = integer_type_node;
  builtin_mode_to_type[SImode][0] = intSI_type_node;
  builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
  builtin_mode_to_type[DImode][0] = intDI_type_node;
  builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
  builtin_mode_to_type[TImode][0] = intTI_type_node;
  builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
  builtin_mode_to_type[SFmode][0] = float_type_node;
  builtin_mode_to_type[DFmode][0] = double_type_node;
  builtin_mode_to_type[TFmode][0] = long_double_type_node;
  builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
  builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
  builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
  builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
  builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
  builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
  builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
  builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
  builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
  builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
  builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
  builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
  builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
  builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
  builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
  builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
  builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
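  /* Illustrative note (added, not from the original source): the table above
     is indexed as builtin_mode_to_type[machine_mode][unsigned_p], so for
     example builtin_mode_to_type[V4SImode][1] yields the "vector unsigned
     int" type node while builtin_mode_to_type[V4SImode][0] yields the signed
     variant.  builtin_function_type, defined later in this file, consults
     this table to turn the machine modes of an insn's operands back into
     tree types.  */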
  tdecl = add_builtin_type ("__bool char", bool_char_type_node);
  TYPE_NAME (bool_char_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool short", bool_short_type_node);
  TYPE_NAME (bool_short_type_node) = tdecl;

  tdecl = add_builtin_type ("__bool int", bool_int_type_node);
  TYPE_NAME (bool_int_type_node) = tdecl;

  tdecl = add_builtin_type ("__pixel", pixel_type_node);
  TYPE_NAME (pixel_type_node) = tdecl;

  bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
  bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
  bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
  bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
  pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);

  tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
  TYPE_NAME (unsigned_V16QI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
  TYPE_NAME (V16QI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
  TYPE_NAME (bool_V16QI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
  TYPE_NAME (unsigned_V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
  TYPE_NAME (V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
  TYPE_NAME (bool_V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
  TYPE_NAME (unsigned_V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
  TYPE_NAME (V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
  TYPE_NAME (bool_V4SI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector float", V4SF_type_node);
  TYPE_NAME (V4SF_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
  TYPE_NAME (pixel_V8HI_type_node) = tdecl;

  tdecl = add_builtin_type ("__vector double", V2DF_type_node);
  TYPE_NAME (V2DF_type_node) = tdecl;

  if (TARGET_POWERPC64)
    {
      tdecl = add_builtin_type ("__vector long", V2DI_type_node);
      TYPE_NAME (V2DI_type_node) = tdecl;

      tdecl = add_builtin_type ("__vector unsigned long",
                                unsigned_V2DI_type_node);
      TYPE_NAME (unsigned_V2DI_type_node) = tdecl;

      tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
      TYPE_NAME (bool_V2DI_type_node) = tdecl;
    }
  else
    {
      tdecl = add_builtin_type ("__vector long long", V2DI_type_node);
      TYPE_NAME (V2DI_type_node) = tdecl;

      tdecl = add_builtin_type ("__vector unsigned long long",
                                unsigned_V2DI_type_node);
      TYPE_NAME (unsigned_V2DI_type_node) = tdecl;

      tdecl = add_builtin_type ("__vector __bool long long",
                                bool_V2DI_type_node);
      TYPE_NAME (bool_V2DI_type_node) = tdecl;
    }

  if (V1TI_type_node)
    {
      tdecl = add_builtin_type ("__vector __int128", V1TI_type_node);
      TYPE_NAME (V1TI_type_node) = tdecl;

      tdecl = add_builtin_type ("__vector unsigned __int128",
                                unsigned_V1TI_type_node);
      TYPE_NAME (unsigned_V1TI_type_node) = tdecl;
    }
  /* Paired and SPE builtins are only available if you build a compiler with
     the appropriate options, so only create those builtins with the
     appropriate compiler option.  Create Altivec and VSX builtins on machines
     with at least the general purpose extensions (970 and newer) to allow the
     use of the target attribute.  */
  if (TARGET_PAIRED_FLOAT)
    paired_init_builtins ();
  if (TARGET_SPE)
    spe_init_builtins ();
  if (TARGET_EXTRA_BUILTINS)
    altivec_init_builtins ();
  if (TARGET_HTM)
    htm_init_builtins ();
  if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
    rs6000_common_init_builtins ();

  ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
                                 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
  def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);

  ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
                                 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
  def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);

  ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
                                 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
  def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);

  ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
                                 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
  def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);

  mode = (TARGET_64BIT) ? DImode : SImode;
  ftype = builtin_function_type (mode, mode, mode, VOIDmode,
                                 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
  def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);

  ftype = build_function_type_list (unsigned_intDI_type_node,
                                    NULL_TREE);
  def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);

  if (TARGET_64BIT)
    ftype = build_function_type_list (unsigned_intDI_type_node,
                                      NULL_TREE);
  else
    ftype = build_function_type_list (unsigned_intSI_type_node,
                                      NULL_TREE);
  def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);

  ftype = build_function_type_list (double_type_node, NULL_TREE);
  def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);

  ftype = build_function_type_list (void_type_node,
                                    intSI_type_node, double_type_node,
                                    NULL_TREE);
  def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);

#if TARGET_XCOFF
  /* AIX libm provides clog as __clog.  */
  if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
    set_user_assembler_name (tdecl, "__clog");
#endif

#ifdef SUBTARGET_INIT_BUILTINS
  SUBTARGET_INIT_BUILTINS;
#endif
}
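/* Usage sketch (added for illustration; not from the original source): with
   the declarations above, user code can call, for example,

     double q = __builtin_recipdiv (x, y);     // double (double, double)
     long   p = __builtin_bpermd (mask, src);  // DImode on -m64, SImode on -m32

   and rs6000_expand_builtin above routes these calls through
   CODE_FOR_recipdf3 and CODE_FOR_bpermd_di / CODE_FOR_bpermd_si.  */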
/* Returns the rs6000 builtin decl for CODE.  */

static tree
rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT fnmask;

  if (code >= RS6000_BUILTIN_COUNT)
    return error_mark_node;

  fnmask = rs6000_builtin_info[code].mask;
  if ((fnmask & rs6000_builtin_mask) != fnmask)
    {
      rs6000_invalid_builtin ((enum rs6000_builtins)code);
      return error_mark_node;
    }

  return rs6000_builtin_decls[code];
}
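/* Note (added for illustration): callers use this hook to look up a target
   builtin decl by code; when the builtin's mask is not a subset of
   rs6000_builtin_mask (for example, an AltiVec builtin compiled without
   -maltivec), the function diagnoses the problem via rs6000_invalid_builtin
   and returns error_mark_node instead of a decl.  */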
14309 spe_init_builtins (void)
14311 tree puint_type_node
= build_pointer_type (unsigned_type_node
);
14312 tree pushort_type_node
= build_pointer_type (short_unsigned_type_node
);
14313 const struct builtin_description
*d
;
14316 tree v2si_ftype_4_v2si
14317 = build_function_type_list (opaque_V2SI_type_node
,
14318 opaque_V2SI_type_node
,
14319 opaque_V2SI_type_node
,
14320 opaque_V2SI_type_node
,
14321 opaque_V2SI_type_node
,
14324 tree v2sf_ftype_4_v2sf
14325 = build_function_type_list (opaque_V2SF_type_node
,
14326 opaque_V2SF_type_node
,
14327 opaque_V2SF_type_node
,
14328 opaque_V2SF_type_node
,
14329 opaque_V2SF_type_node
,
14332 tree int_ftype_int_v2si_v2si
14333 = build_function_type_list (integer_type_node
,
14335 opaque_V2SI_type_node
,
14336 opaque_V2SI_type_node
,
14339 tree int_ftype_int_v2sf_v2sf
14340 = build_function_type_list (integer_type_node
,
14342 opaque_V2SF_type_node
,
14343 opaque_V2SF_type_node
,
14346 tree void_ftype_v2si_puint_int
14347 = build_function_type_list (void_type_node
,
14348 opaque_V2SI_type_node
,
14353 tree void_ftype_v2si_puint_char
14354 = build_function_type_list (void_type_node
,
14355 opaque_V2SI_type_node
,
14360 tree void_ftype_v2si_pv2si_int
14361 = build_function_type_list (void_type_node
,
14362 opaque_V2SI_type_node
,
14363 opaque_p_V2SI_type_node
,
14367 tree void_ftype_v2si_pv2si_char
14368 = build_function_type_list (void_type_node
,
14369 opaque_V2SI_type_node
,
14370 opaque_p_V2SI_type_node
,
14374 tree void_ftype_int
14375 = build_function_type_list (void_type_node
, integer_type_node
, NULL_TREE
);
14377 tree int_ftype_void
14378 = build_function_type_list (integer_type_node
, NULL_TREE
);
14380 tree v2si_ftype_pv2si_int
14381 = build_function_type_list (opaque_V2SI_type_node
,
14382 opaque_p_V2SI_type_node
,
14386 tree v2si_ftype_puint_int
14387 = build_function_type_list (opaque_V2SI_type_node
,
14392 tree v2si_ftype_pushort_int
14393 = build_function_type_list (opaque_V2SI_type_node
,
14398 tree v2si_ftype_signed_char
14399 = build_function_type_list (opaque_V2SI_type_node
,
14400 signed_char_type_node
,
14403 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node
);
14405 /* Initialize irregular SPE builtins. */
14407 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int
, SPE_BUILTIN_MTSPEFSCR
);
14408 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void
, SPE_BUILTIN_MFSPEFSCR
);
14409 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int
, SPE_BUILTIN_EVSTDDX
);
14410 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int
, SPE_BUILTIN_EVSTDHX
);
14411 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int
, SPE_BUILTIN_EVSTDWX
);
14412 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int
, SPE_BUILTIN_EVSTWHEX
);
14413 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int
, SPE_BUILTIN_EVSTWHOX
);
14414 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int
, SPE_BUILTIN_EVSTWWEX
);
14415 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int
, SPE_BUILTIN_EVSTWWOX
);
14416 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char
, SPE_BUILTIN_EVSTDD
);
14417 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char
, SPE_BUILTIN_EVSTDH
);
14418 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char
, SPE_BUILTIN_EVSTDW
);
14419 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char
, SPE_BUILTIN_EVSTWHE
);
14420 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char
, SPE_BUILTIN_EVSTWHO
);
14421 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char
, SPE_BUILTIN_EVSTWWE
);
14422 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char
, SPE_BUILTIN_EVSTWWO
);
14423 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char
, SPE_BUILTIN_EVSPLATFI
);
14424 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char
, SPE_BUILTIN_EVSPLATI
);
14427 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int
, SPE_BUILTIN_EVLDDX
);
14428 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int
, SPE_BUILTIN_EVLDWX
);
14429 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int
, SPE_BUILTIN_EVLDHX
);
14430 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int
, SPE_BUILTIN_EVLWHEX
);
14431 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int
, SPE_BUILTIN_EVLWHOUX
);
14432 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int
, SPE_BUILTIN_EVLWHOSX
);
14433 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int
, SPE_BUILTIN_EVLWWSPLATX
);
14434 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int
, SPE_BUILTIN_EVLWHSPLATX
);
14435 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int
, SPE_BUILTIN_EVLHHESPLATX
);
14436 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int
, SPE_BUILTIN_EVLHHOUSPLATX
);
14437 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int
, SPE_BUILTIN_EVLHHOSSPLATX
);
14438 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int
, SPE_BUILTIN_EVLDD
);
14439 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int
, SPE_BUILTIN_EVLDW
);
14440 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int
, SPE_BUILTIN_EVLDH
);
14441 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int
, SPE_BUILTIN_EVLHHESPLAT
);
14442 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int
, SPE_BUILTIN_EVLHHOSSPLAT
);
14443 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int
, SPE_BUILTIN_EVLHHOUSPLAT
);
14444 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int
, SPE_BUILTIN_EVLWHE
);
14445 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int
, SPE_BUILTIN_EVLWHOS
);
14446 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int
, SPE_BUILTIN_EVLWHOU
);
14447 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int
, SPE_BUILTIN_EVLWHSPLAT
);
14448 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int
, SPE_BUILTIN_EVLWWSPLAT
);
14451 d
= bdesc_spe_predicates
;
14452 for (i
= 0; i
< ARRAY_SIZE (bdesc_spe_predicates
); ++i
, d
++)
14456 switch (insn_data
[d
->icode
].operand
[1].mode
)
14459 type
= int_ftype_int_v2si_v2si
;
14462 type
= int_ftype_int_v2sf_v2sf
;
14465 gcc_unreachable ();
14468 def_builtin (d
->name
, type
, d
->code
);
14471 /* Evsel predicates. */
14472 d
= bdesc_spe_evsel
;
14473 for (i
= 0; i
< ARRAY_SIZE (bdesc_spe_evsel
); ++i
, d
++)
14477 switch (insn_data
[d
->icode
].operand
[1].mode
)
14480 type
= v2si_ftype_4_v2si
;
14483 type
= v2sf_ftype_4_v2sf
;
14486 gcc_unreachable ();
14489 def_builtin (d
->name
, type
, d
->code
);
14494 paired_init_builtins (void)
14496 const struct builtin_description
*d
;
14499 tree int_ftype_int_v2sf_v2sf
14500 = build_function_type_list (integer_type_node
,
14505 tree pcfloat_type_node
=
14506 build_pointer_type (build_qualified_type
14507 (float_type_node
, TYPE_QUAL_CONST
));
14509 tree v2sf_ftype_long_pcfloat
= build_function_type_list (V2SF_type_node
,
14510 long_integer_type_node
,
14513 tree void_ftype_v2sf_long_pcfloat
=
14514 build_function_type_list (void_type_node
,
14516 long_integer_type_node
,
14521 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat
,
14522 PAIRED_BUILTIN_LX
);
14525 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat
,
14526 PAIRED_BUILTIN_STX
);
14529 d
= bdesc_paired_preds
;
14530 for (i
= 0; i
< ARRAY_SIZE (bdesc_paired_preds
); ++i
, d
++)
14534 if (TARGET_DEBUG_BUILTIN
)
14535 fprintf (stderr
, "paired pred #%d, insn = %s [%d], mode = %s\n",
14536 (int)i
, get_insn_name (d
->icode
), (int)d
->icode
,
14537 GET_MODE_NAME (insn_data
[d
->icode
].operand
[1].mode
));
14539 switch (insn_data
[d
->icode
].operand
[1].mode
)
14542 type
= int_ftype_int_v2sf_v2sf
;
14545 gcc_unreachable ();
14548 def_builtin (d
->name
, type
, d
->code
);
14553 altivec_init_builtins (void)
14555 const struct builtin_description
*d
;
14560 tree pvoid_type_node
= build_pointer_type (void_type_node
);
14562 tree pcvoid_type_node
14563 = build_pointer_type (build_qualified_type (void_type_node
,
14566 tree int_ftype_opaque
14567 = build_function_type_list (integer_type_node
,
14568 opaque_V4SI_type_node
, NULL_TREE
);
14569 tree opaque_ftype_opaque
14570 = build_function_type_list (integer_type_node
, NULL_TREE
);
14571 tree opaque_ftype_opaque_int
14572 = build_function_type_list (opaque_V4SI_type_node
,
14573 opaque_V4SI_type_node
, integer_type_node
, NULL_TREE
);
14574 tree opaque_ftype_opaque_opaque_int
14575 = build_function_type_list (opaque_V4SI_type_node
,
14576 opaque_V4SI_type_node
, opaque_V4SI_type_node
,
14577 integer_type_node
, NULL_TREE
);
14578 tree int_ftype_int_opaque_opaque
14579 = build_function_type_list (integer_type_node
,
14580 integer_type_node
, opaque_V4SI_type_node
,
14581 opaque_V4SI_type_node
, NULL_TREE
);
14582 tree int_ftype_int_v4si_v4si
14583 = build_function_type_list (integer_type_node
,
14584 integer_type_node
, V4SI_type_node
,
14585 V4SI_type_node
, NULL_TREE
);
14586 tree int_ftype_int_v2di_v2di
14587 = build_function_type_list (integer_type_node
,
14588 integer_type_node
, V2DI_type_node
,
14589 V2DI_type_node
, NULL_TREE
);
14590 tree void_ftype_v4si
14591 = build_function_type_list (void_type_node
, V4SI_type_node
, NULL_TREE
);
14592 tree v8hi_ftype_void
14593 = build_function_type_list (V8HI_type_node
, NULL_TREE
);
14594 tree void_ftype_void
14595 = build_function_type_list (void_type_node
, NULL_TREE
);
14596 tree void_ftype_int
14597 = build_function_type_list (void_type_node
, integer_type_node
, NULL_TREE
);
14599 tree opaque_ftype_long_pcvoid
14600 = build_function_type_list (opaque_V4SI_type_node
,
14601 long_integer_type_node
, pcvoid_type_node
,
14603 tree v16qi_ftype_long_pcvoid
14604 = build_function_type_list (V16QI_type_node
,
14605 long_integer_type_node
, pcvoid_type_node
,
14607 tree v8hi_ftype_long_pcvoid
14608 = build_function_type_list (V8HI_type_node
,
14609 long_integer_type_node
, pcvoid_type_node
,
14611 tree v4si_ftype_long_pcvoid
14612 = build_function_type_list (V4SI_type_node
,
14613 long_integer_type_node
, pcvoid_type_node
,
14615 tree v4sf_ftype_long_pcvoid
14616 = build_function_type_list (V4SF_type_node
,
14617 long_integer_type_node
, pcvoid_type_node
,
14619 tree v2df_ftype_long_pcvoid
14620 = build_function_type_list (V2DF_type_node
,
14621 long_integer_type_node
, pcvoid_type_node
,
14623 tree v2di_ftype_long_pcvoid
14624 = build_function_type_list (V2DI_type_node
,
14625 long_integer_type_node
, pcvoid_type_node
,
14628 tree void_ftype_opaque_long_pvoid
14629 = build_function_type_list (void_type_node
,
14630 opaque_V4SI_type_node
, long_integer_type_node
,
14631 pvoid_type_node
, NULL_TREE
);
14632 tree void_ftype_v4si_long_pvoid
14633 = build_function_type_list (void_type_node
,
14634 V4SI_type_node
, long_integer_type_node
,
14635 pvoid_type_node
, NULL_TREE
);
14636 tree void_ftype_v16qi_long_pvoid
14637 = build_function_type_list (void_type_node
,
14638 V16QI_type_node
, long_integer_type_node
,
14639 pvoid_type_node
, NULL_TREE
);
14640 tree void_ftype_v8hi_long_pvoid
14641 = build_function_type_list (void_type_node
,
14642 V8HI_type_node
, long_integer_type_node
,
14643 pvoid_type_node
, NULL_TREE
);
14644 tree void_ftype_v4sf_long_pvoid
14645 = build_function_type_list (void_type_node
,
14646 V4SF_type_node
, long_integer_type_node
,
14647 pvoid_type_node
, NULL_TREE
);
14648 tree void_ftype_v2df_long_pvoid
14649 = build_function_type_list (void_type_node
,
14650 V2DF_type_node
, long_integer_type_node
,
14651 pvoid_type_node
, NULL_TREE
);
14652 tree void_ftype_v2di_long_pvoid
14653 = build_function_type_list (void_type_node
,
14654 V2DI_type_node
, long_integer_type_node
,
14655 pvoid_type_node
, NULL_TREE
);
14656 tree int_ftype_int_v8hi_v8hi
14657 = build_function_type_list (integer_type_node
,
14658 integer_type_node
, V8HI_type_node
,
14659 V8HI_type_node
, NULL_TREE
);
14660 tree int_ftype_int_v16qi_v16qi
14661 = build_function_type_list (integer_type_node
,
14662 integer_type_node
, V16QI_type_node
,
14663 V16QI_type_node
, NULL_TREE
);
14664 tree int_ftype_int_v4sf_v4sf
14665 = build_function_type_list (integer_type_node
,
14666 integer_type_node
, V4SF_type_node
,
14667 V4SF_type_node
, NULL_TREE
);
14668 tree int_ftype_int_v2df_v2df
14669 = build_function_type_list (integer_type_node
,
14670 integer_type_node
, V2DF_type_node
,
14671 V2DF_type_node
, NULL_TREE
);
14672 tree v2di_ftype_v2di
14673 = build_function_type_list (V2DI_type_node
, V2DI_type_node
, NULL_TREE
);
14674 tree v4si_ftype_v4si
14675 = build_function_type_list (V4SI_type_node
, V4SI_type_node
, NULL_TREE
);
14676 tree v8hi_ftype_v8hi
14677 = build_function_type_list (V8HI_type_node
, V8HI_type_node
, NULL_TREE
);
14678 tree v16qi_ftype_v16qi
14679 = build_function_type_list (V16QI_type_node
, V16QI_type_node
, NULL_TREE
);
14680 tree v4sf_ftype_v4sf
14681 = build_function_type_list (V4SF_type_node
, V4SF_type_node
, NULL_TREE
);
14682 tree v2df_ftype_v2df
14683 = build_function_type_list (V2DF_type_node
, V2DF_type_node
, NULL_TREE
);
14684 tree void_ftype_pcvoid_int_int
14685 = build_function_type_list (void_type_node
,
14686 pcvoid_type_node
, integer_type_node
,
14687 integer_type_node
, NULL_TREE
);
14689 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si
, ALTIVEC_BUILTIN_MTVSCR
);
14690 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void
, ALTIVEC_BUILTIN_MFVSCR
);
14691 def_builtin ("__builtin_altivec_dssall", void_ftype_void
, ALTIVEC_BUILTIN_DSSALL
);
14692 def_builtin ("__builtin_altivec_dss", void_ftype_int
, ALTIVEC_BUILTIN_DSS
);
14693 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVSL
);
14694 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVSR
);
14695 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVEBX
);
14696 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVEHX
);
14697 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVEWX
);
14698 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVXL
);
14699 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid
,
14700 ALTIVEC_BUILTIN_LVXL_V2DF
);
14701 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid
,
14702 ALTIVEC_BUILTIN_LVXL_V2DI
);
14703 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid
,
14704 ALTIVEC_BUILTIN_LVXL_V4SF
);
14705 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid
,
14706 ALTIVEC_BUILTIN_LVXL_V4SI
);
14707 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid
,
14708 ALTIVEC_BUILTIN_LVXL_V8HI
);
14709 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid
,
14710 ALTIVEC_BUILTIN_LVXL_V16QI
);
14711 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVX
);
14712 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid
,
14713 ALTIVEC_BUILTIN_LVX_V2DF
);
14714 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid
,
14715 ALTIVEC_BUILTIN_LVX_V2DI
);
14716 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid
,
14717 ALTIVEC_BUILTIN_LVX_V4SF
);
14718 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid
,
14719 ALTIVEC_BUILTIN_LVX_V4SI
);
14720 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid
,
14721 ALTIVEC_BUILTIN_LVX_V8HI
);
14722 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid
,
14723 ALTIVEC_BUILTIN_LVX_V16QI
);
14724 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid
, ALTIVEC_BUILTIN_STVX
);
14725 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid
,
14726 ALTIVEC_BUILTIN_STVX_V2DF
);
14727 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid
,
14728 ALTIVEC_BUILTIN_STVX_V2DI
);
14729 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid
,
14730 ALTIVEC_BUILTIN_STVX_V4SF
);
14731 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid
,
14732 ALTIVEC_BUILTIN_STVX_V4SI
);
14733 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid
,
14734 ALTIVEC_BUILTIN_STVX_V8HI
);
14735 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid
,
14736 ALTIVEC_BUILTIN_STVX_V16QI
);
14737 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid
, ALTIVEC_BUILTIN_STVEWX
);
14738 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid
, ALTIVEC_BUILTIN_STVXL
);
14739 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid
,
14740 ALTIVEC_BUILTIN_STVXL_V2DF
);
14741 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid
,
14742 ALTIVEC_BUILTIN_STVXL_V2DI
);
14743 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid
,
14744 ALTIVEC_BUILTIN_STVXL_V4SF
);
14745 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid
,
14746 ALTIVEC_BUILTIN_STVXL_V4SI
);
14747 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid
,
14748 ALTIVEC_BUILTIN_STVXL_V8HI
);
14749 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid
,
14750 ALTIVEC_BUILTIN_STVXL_V16QI
);
14751 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid
, ALTIVEC_BUILTIN_STVEBX
);
14752 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid
, ALTIVEC_BUILTIN_STVEHX
);
14753 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LD
);
14754 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LDE
);
14755 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LDL
);
14756 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVSL
);
14757 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVSR
);
14758 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVEBX
);
14759 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVEHX
);
14760 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVEWX
);
14761 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid
, ALTIVEC_BUILTIN_VEC_ST
);
14762 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid
, ALTIVEC_BUILTIN_VEC_STE
);
14763 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid
, ALTIVEC_BUILTIN_VEC_STL
);
14764 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid
, ALTIVEC_BUILTIN_VEC_STVEWX
);
14765 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid
, ALTIVEC_BUILTIN_VEC_STVEBX
);
14766 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid
, ALTIVEC_BUILTIN_VEC_STVEHX
);
14768 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid
,
14769 VSX_BUILTIN_LXVD2X_V2DF
);
14770 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid
,
14771 VSX_BUILTIN_LXVD2X_V2DI
);
14772 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid
,
14773 VSX_BUILTIN_LXVW4X_V4SF
);
14774 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid
,
14775 VSX_BUILTIN_LXVW4X_V4SI
);
14776 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid
,
14777 VSX_BUILTIN_LXVW4X_V8HI
);
14778 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid
,
14779 VSX_BUILTIN_LXVW4X_V16QI
);
14780 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid
,
14781 VSX_BUILTIN_STXVD2X_V2DF
);
14782 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid
,
14783 VSX_BUILTIN_STXVD2X_V2DI
);
14784 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid
,
14785 VSX_BUILTIN_STXVW4X_V4SF
);
14786 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid
,
14787 VSX_BUILTIN_STXVW4X_V4SI
);
14788 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid
,
14789 VSX_BUILTIN_STXVW4X_V8HI
);
14790 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid
,
14791 VSX_BUILTIN_STXVW4X_V16QI
);
14792 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid
,
14793 VSX_BUILTIN_VEC_LD
);
14794 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid
,
14795 VSX_BUILTIN_VEC_ST
);
14797 def_builtin ("__builtin_vec_step", int_ftype_opaque
, ALTIVEC_BUILTIN_VEC_STEP
);
14798 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque
, ALTIVEC_BUILTIN_VEC_SPLATS
);
14799 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque
, ALTIVEC_BUILTIN_VEC_PROMOTE
);
14801 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int
, ALTIVEC_BUILTIN_VEC_SLD
);
14802 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_SPLAT
);
14803 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_EXTRACT
);
14804 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int
, ALTIVEC_BUILTIN_VEC_INSERT
);
14805 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_VSPLTW
);
14806 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_VSPLTH
);
14807 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_VSPLTB
);
14808 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_CTF
);
14809 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_VCFSX
);
14810 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_VCFUX
);
14811 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_CTS
);
14812 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int
, ALTIVEC_BUILTIN_VEC_CTU
);
14814 /* Cell builtins. */
14815 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVLX
);
14816 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVLXL
);
14817 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVRX
);
14818 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_LVRXL
);
14820 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVLX
);
14821 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVLXL
);
14822 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVRX
);
14823 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid
, ALTIVEC_BUILTIN_VEC_LVRXL
);
14825 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid
, ALTIVEC_BUILTIN_STVLX
);
14826 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid
, ALTIVEC_BUILTIN_STVLXL
);
14827 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid
, ALTIVEC_BUILTIN_STVRX
);
14828 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid
, ALTIVEC_BUILTIN_STVRXL
);
14830 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid
, ALTIVEC_BUILTIN_VEC_STVLX
);
14831 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid
, ALTIVEC_BUILTIN_VEC_STVLXL
);
14832 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid
, ALTIVEC_BUILTIN_VEC_STVRX
);
14833 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid
, ALTIVEC_BUILTIN_VEC_STVRXL
);
14835 /* Add the DST variants. */
14837 for (i
= 0; i
< ARRAY_SIZE (bdesc_dst
); i
++, d
++)
14838 def_builtin (d
->name
, void_ftype_pcvoid_int_int
, d
->code
);
14840 /* Initialize the predicates. */
14841 d
= bdesc_altivec_preds
;
14842 for (i
= 0; i
< ARRAY_SIZE (bdesc_altivec_preds
); i
++, d
++)
14844 enum machine_mode mode1
;
14847 if (rs6000_overloaded_builtin_p (d
->code
))
14850 mode1
= insn_data
[d
->icode
].operand
[1].mode
;
14855 type
= int_ftype_int_opaque_opaque
;
14858 type
= int_ftype_int_v2di_v2di
;
14861 type
= int_ftype_int_v4si_v4si
;
14864 type
= int_ftype_int_v8hi_v8hi
;
14867 type
= int_ftype_int_v16qi_v16qi
;
14870 type
= int_ftype_int_v4sf_v4sf
;
14873 type
= int_ftype_int_v2df_v2df
;
14876 gcc_unreachable ();
14879 def_builtin (d
->name
, type
, d
->code
);
14882 /* Initialize the abs* operators. */
14884 for (i
= 0; i
< ARRAY_SIZE (bdesc_abs
); i
++, d
++)
14886 enum machine_mode mode0
;
14889 mode0
= insn_data
[d
->icode
].operand
[0].mode
;
14894 type
= v2di_ftype_v2di
;
14897 type
= v4si_ftype_v4si
;
14900 type
= v8hi_ftype_v8hi
;
14903 type
= v16qi_ftype_v16qi
;
14906 type
= v4sf_ftype_v4sf
;
14909 type
= v2df_ftype_v2df
;
14912 gcc_unreachable ();
14915 def_builtin (d
->name
, type
, d
->code
);
14918 /* Initialize target builtin that implements
14919 targetm.vectorize.builtin_mask_for_load. */
14921 decl
= add_builtin_function ("__builtin_altivec_mask_for_load",
14922 v16qi_ftype_long_pcvoid
,
14923 ALTIVEC_BUILTIN_MASK_FOR_LOAD
,
14924 BUILT_IN_MD
, NULL
, NULL_TREE
);
14925 TREE_READONLY (decl
) = 1;
14926 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
14927 altivec_builtin_mask_for_load
= decl
;
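  /* Note (added for illustration): rs6000_builtin_mask_for_load returns this
     decl when the vectorizer asks for targetm.vectorize.builtin_mask_for_load;
     the lvsl/lvsr-based mask it produces is what the realignment sequences
     for misaligned vector loads are built around.  */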
14929 /* Access to the vec_init patterns. */
14930 ftype
= build_function_type_list (V4SI_type_node
, integer_type_node
,
14931 integer_type_node
, integer_type_node
,
14932 integer_type_node
, NULL_TREE
);
14933 def_builtin ("__builtin_vec_init_v4si", ftype
, ALTIVEC_BUILTIN_VEC_INIT_V4SI
);
14935 ftype
= build_function_type_list (V8HI_type_node
, short_integer_type_node
,
14936 short_integer_type_node
,
14937 short_integer_type_node
,
14938 short_integer_type_node
,
14939 short_integer_type_node
,
14940 short_integer_type_node
,
14941 short_integer_type_node
,
14942 short_integer_type_node
, NULL_TREE
);
14943 def_builtin ("__builtin_vec_init_v8hi", ftype
, ALTIVEC_BUILTIN_VEC_INIT_V8HI
);
14945 ftype
= build_function_type_list (V16QI_type_node
, char_type_node
,
14946 char_type_node
, char_type_node
,
14947 char_type_node
, char_type_node
,
14948 char_type_node
, char_type_node
,
14949 char_type_node
, char_type_node
,
14950 char_type_node
, char_type_node
,
14951 char_type_node
, char_type_node
,
14952 char_type_node
, char_type_node
,
14953 char_type_node
, NULL_TREE
);
14954 def_builtin ("__builtin_vec_init_v16qi", ftype
,
14955 ALTIVEC_BUILTIN_VEC_INIT_V16QI
);
14957 ftype
= build_function_type_list (V4SF_type_node
, float_type_node
,
14958 float_type_node
, float_type_node
,
14959 float_type_node
, NULL_TREE
);
14960 def_builtin ("__builtin_vec_init_v4sf", ftype
, ALTIVEC_BUILTIN_VEC_INIT_V4SF
);
14962 /* VSX builtins. */
14963 ftype
= build_function_type_list (V2DF_type_node
, double_type_node
,
14964 double_type_node
, NULL_TREE
);
14965 def_builtin ("__builtin_vec_init_v2df", ftype
, VSX_BUILTIN_VEC_INIT_V2DF
);
14967 ftype
= build_function_type_list (V2DI_type_node
, intDI_type_node
,
14968 intDI_type_node
, NULL_TREE
);
14969 def_builtin ("__builtin_vec_init_v2di", ftype
, VSX_BUILTIN_VEC_INIT_V2DI
);
14971 /* Access to the vec_set patterns. */
14972 ftype
= build_function_type_list (V4SI_type_node
, V4SI_type_node
,
14974 integer_type_node
, NULL_TREE
);
14975 def_builtin ("__builtin_vec_set_v4si", ftype
, ALTIVEC_BUILTIN_VEC_SET_V4SI
);
14977 ftype
= build_function_type_list (V8HI_type_node
, V8HI_type_node
,
14979 integer_type_node
, NULL_TREE
);
14980 def_builtin ("__builtin_vec_set_v8hi", ftype
, ALTIVEC_BUILTIN_VEC_SET_V8HI
);
14982 ftype
= build_function_type_list (V16QI_type_node
, V16QI_type_node
,
14984 integer_type_node
, NULL_TREE
);
14985 def_builtin ("__builtin_vec_set_v16qi", ftype
, ALTIVEC_BUILTIN_VEC_SET_V16QI
);
14987 ftype
= build_function_type_list (V4SF_type_node
, V4SF_type_node
,
14989 integer_type_node
, NULL_TREE
);
14990 def_builtin ("__builtin_vec_set_v4sf", ftype
, ALTIVEC_BUILTIN_VEC_SET_V4SF
);
14992 ftype
= build_function_type_list (V2DF_type_node
, V2DF_type_node
,
14994 integer_type_node
, NULL_TREE
);
14995 def_builtin ("__builtin_vec_set_v2df", ftype
, VSX_BUILTIN_VEC_SET_V2DF
);
14997 ftype
= build_function_type_list (V2DI_type_node
, V2DI_type_node
,
14999 integer_type_node
, NULL_TREE
);
15000 def_builtin ("__builtin_vec_set_v2di", ftype
, VSX_BUILTIN_VEC_SET_V2DI
);
15002 /* Access to the vec_extract patterns. */
15003 ftype
= build_function_type_list (intSI_type_node
, V4SI_type_node
,
15004 integer_type_node
, NULL_TREE
);
15005 def_builtin ("__builtin_vec_ext_v4si", ftype
, ALTIVEC_BUILTIN_VEC_EXT_V4SI
);
15007 ftype
= build_function_type_list (intHI_type_node
, V8HI_type_node
,
15008 integer_type_node
, NULL_TREE
);
15009 def_builtin ("__builtin_vec_ext_v8hi", ftype
, ALTIVEC_BUILTIN_VEC_EXT_V8HI
);
15011 ftype
= build_function_type_list (intQI_type_node
, V16QI_type_node
,
15012 integer_type_node
, NULL_TREE
);
15013 def_builtin ("__builtin_vec_ext_v16qi", ftype
, ALTIVEC_BUILTIN_VEC_EXT_V16QI
);
15015 ftype
= build_function_type_list (float_type_node
, V4SF_type_node
,
15016 integer_type_node
, NULL_TREE
);
15017 def_builtin ("__builtin_vec_ext_v4sf", ftype
, ALTIVEC_BUILTIN_VEC_EXT_V4SF
);
15019 ftype
= build_function_type_list (double_type_node
, V2DF_type_node
,
15020 integer_type_node
, NULL_TREE
);
15021 def_builtin ("__builtin_vec_ext_v2df", ftype
, VSX_BUILTIN_VEC_EXT_V2DF
);
15023 ftype
= build_function_type_list (intDI_type_node
, V2DI_type_node
,
15024 integer_type_node
, NULL_TREE
);
15025 def_builtin ("__builtin_vec_ext_v2di", ftype
, VSX_BUILTIN_VEC_EXT_V2DI
);
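  /* Usage sketch (added for illustration; the variable names are examples):
     with the vec_ext function types built above, a call such as

       int lane2 = __builtin_vec_ext_v4si (v4si_val, 2);

     extracts element 2 of a V4SI vector as an int, matching the
     (intSI_type_node, V4SI_type_node, integer_type_node) signature.  */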
15028 if (V1TI_type_node
)
15030 tree v1ti_ftype_long_pcvoid
15031 = build_function_type_list (V1TI_type_node
,
15032 long_integer_type_node
, pcvoid_type_node
,
15034 tree void_ftype_v1ti_long_pvoid
15035 = build_function_type_list (void_type_node
,
15036 V1TI_type_node
, long_integer_type_node
,
15037 pvoid_type_node
, NULL_TREE
);
15038 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid
,
15039 VSX_BUILTIN_LXVD2X_V1TI
);
15040 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid
,
15041 VSX_BUILTIN_STXVD2X_V1TI
);
15042 ftype
= build_function_type_list (V1TI_type_node
, intTI_type_node
,
15043 NULL_TREE
, NULL_TREE
);
15044 def_builtin ("__builtin_vec_init_v1ti", ftype
, VSX_BUILTIN_VEC_INIT_V1TI
);
15045 ftype
= build_function_type_list (V1TI_type_node
, V1TI_type_node
,
15047 integer_type_node
, NULL_TREE
);
15048 def_builtin ("__builtin_vec_set_v1ti", ftype
, VSX_BUILTIN_VEC_SET_V1TI
);
15049 ftype
= build_function_type_list (intTI_type_node
, V1TI_type_node
,
15050 integer_type_node
, NULL_TREE
);
15051 def_builtin ("__builtin_vec_ext_v1ti", ftype
, VSX_BUILTIN_VEC_EXT_V1TI
);
    }
}

static void
htm_init_builtins (void)
{
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
  const struct builtin_description *d;
  size_t i;

  d = bdesc_htm;
  for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
    {
      tree op[MAX_HTM_OPERANDS], type;
      HOST_WIDE_INT mask = d->mask;
      unsigned attr = rs6000_builtin_info[d->code].attr;
      bool void_func = (attr & RS6000_BTC_VOID);
      int attr_args = (attr & RS6000_BTC_TYPE_MASK);
      int nopnds = 0;
      tree argtype = (attr & RS6000_BTC_SPR) ? long_unsigned_type_node
                                             : unsigned_type_node;

      if ((mask & builtin_mask) != mask)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
          continue;
        }

      if (d->name == 0)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
                     (long unsigned) i);
          continue;
        }

      op[nopnds++] = (void_func) ? void_type_node : argtype;

      if (attr_args == RS6000_BTC_UNARY)
        op[nopnds++] = argtype;
      else if (attr_args == RS6000_BTC_BINARY)
        {
          op[nopnds++] = argtype;
          op[nopnds++] = argtype;
        }
      else if (attr_args == RS6000_BTC_TERNARY)
        {
          op[nopnds++] = argtype;
          op[nopnds++] = argtype;
          op[nopnds++] = argtype;
        }

      switch (nopnds)
        {
        case 1:
          type = build_function_type_list (op[0], NULL_TREE);
          break;
        case 2:
          type = build_function_type_list (op[0], op[1], NULL_TREE);
          break;
        case 3:
          type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
          break;
        case 4:
          type = build_function_type_list (op[0], op[1], op[2], op[3],
                                           NULL_TREE);
          break;
        default:
          gcc_unreachable ();
        }

      def_builtin (d->name, type, d->code);
    }
}
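/* Note (added for illustration): because of the argtype choice above, an HTM
   builtin flagged RS6000_BTC_SPR with RS6000_BTC_UNARY ends up with the type
   "unsigned long (unsigned long)", while a non-SPR unary builtin gets
   "unsigned int (unsigned int)"; builtins flagged RS6000_BTC_VOID return
   void instead.  */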
/* Hash function for builtin functions with up to 3 arguments and a return
   type.  */
hashval_t
builtin_hasher::hash (builtin_hash_struct *bh)
{
  unsigned ret = 0;
  int i;

  for (i = 0; i < 4; i++)
    {
      ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
      ret = (ret * 2) + bh->uns_p[i];
    }

  return ret;
}

/* Compare builtin hash entries H1 and H2 for equivalence.  */
bool
builtin_hasher::equal (builtin_hash_struct *p1, builtin_hash_struct *p2)
{
  return ((p1->mode[0] == p2->mode[0])
          && (p1->mode[1] == p2->mode[1])
          && (p1->mode[2] == p2->mode[2])
          && (p1->mode[3] == p2->mode[3])
          && (p1->uns_p[0] == p2->uns_p[0])
          && (p1->uns_p[1] == p2->uns_p[1])
          && (p1->uns_p[2] == p2->uns_p[2])
          && (p1->uns_p[3] == p2->uns_p[3]));
}
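/* Note (added for illustration): the loop in builtin_hasher::hash folds the
   four (mode, uns_p) pairs into one value, effectively computing

     ret = ((mode[0] * 2 + uns_p[0]) * MAX_MACHINE_MODE + mode[1]) * 2
           + uns_p[1], and so on for mode[2]/uns_p[2] and mode[3]/uns_p[3],

   i.e. a mixed-radix encoding of the return type and up to three argument
   types, which builtin_function_type below uses as the key for its hash
   table of already-built function types.  */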
15160 /* Map types for builtin functions with an explicit return type and up to 3
15161 arguments. Functions with fewer than 3 arguments use VOIDmode as the type
15162 of the argument. */
15164 builtin_function_type (enum machine_mode mode_ret
, enum machine_mode mode_arg0
,
15165 enum machine_mode mode_arg1
, enum machine_mode mode_arg2
,
15166 enum rs6000_builtins builtin
, const char *name
)
15168 struct builtin_hash_struct h
;
15169 struct builtin_hash_struct
*h2
;
15172 tree ret_type
= NULL_TREE
;
15173 tree arg_type
[3] = { NULL_TREE
, NULL_TREE
, NULL_TREE
};
15175 /* Create builtin_hash_table. */
15176 if (builtin_hash_table
== NULL
)
15177 builtin_hash_table
= hash_table
<builtin_hasher
>::create_ggc (1500);
15179 h
.type
= NULL_TREE
;
15180 h
.mode
[0] = mode_ret
;
15181 h
.mode
[1] = mode_arg0
;
15182 h
.mode
[2] = mode_arg1
;
15183 h
.mode
[3] = mode_arg2
;
15189 /* If the builtin is a type that produces unsigned results or takes unsigned
15190 arguments, and it is returned as a decl for the vectorizer (such as
15191 widening multiplies, permute), make sure the arguments and return value
15192 are type correct. */
15195 /* unsigned 1 argument functions. */
15196 case CRYPTO_BUILTIN_VSBOX
:
15197 case P8V_BUILTIN_VGBBD
:
15198 case MISC_BUILTIN_CDTBCD
:
15199 case MISC_BUILTIN_CBCDTD
:
15204 /* unsigned 2 argument functions. */
15205 case ALTIVEC_BUILTIN_VMULEUB_UNS
:
15206 case ALTIVEC_BUILTIN_VMULEUH_UNS
:
15207 case ALTIVEC_BUILTIN_VMULOUB_UNS
:
15208 case ALTIVEC_BUILTIN_VMULOUH_UNS
:
15209 case CRYPTO_BUILTIN_VCIPHER
:
15210 case CRYPTO_BUILTIN_VCIPHERLAST
:
15211 case CRYPTO_BUILTIN_VNCIPHER
:
15212 case CRYPTO_BUILTIN_VNCIPHERLAST
:
15213 case CRYPTO_BUILTIN_VPMSUMB
:
15214 case CRYPTO_BUILTIN_VPMSUMH
:
15215 case CRYPTO_BUILTIN_VPMSUMW
:
15216 case CRYPTO_BUILTIN_VPMSUMD
:
15217 case CRYPTO_BUILTIN_VPMSUM
:
15218 case MISC_BUILTIN_ADDG6S
:
15219 case MISC_BUILTIN_DIVWEU
:
15220 case MISC_BUILTIN_DIVWEUO
:
15221 case MISC_BUILTIN_DIVDEU
:
15222 case MISC_BUILTIN_DIVDEUO
:
15228 /* unsigned 3 argument functions. */
15229 case ALTIVEC_BUILTIN_VPERM_16QI_UNS
:
15230 case ALTIVEC_BUILTIN_VPERM_8HI_UNS
:
15231 case ALTIVEC_BUILTIN_VPERM_4SI_UNS
:
15232 case ALTIVEC_BUILTIN_VPERM_2DI_UNS
:
15233 case ALTIVEC_BUILTIN_VSEL_16QI_UNS
:
15234 case ALTIVEC_BUILTIN_VSEL_8HI_UNS
:
15235 case ALTIVEC_BUILTIN_VSEL_4SI_UNS
:
15236 case ALTIVEC_BUILTIN_VSEL_2DI_UNS
:
15237 case VSX_BUILTIN_VPERM_16QI_UNS
:
15238 case VSX_BUILTIN_VPERM_8HI_UNS
:
15239 case VSX_BUILTIN_VPERM_4SI_UNS
:
15240 case VSX_BUILTIN_VPERM_2DI_UNS
:
15241 case VSX_BUILTIN_XXSEL_16QI_UNS
:
15242 case VSX_BUILTIN_XXSEL_8HI_UNS
:
15243 case VSX_BUILTIN_XXSEL_4SI_UNS
:
15244 case VSX_BUILTIN_XXSEL_2DI_UNS
:
15245 case CRYPTO_BUILTIN_VPERMXOR
:
15246 case CRYPTO_BUILTIN_VPERMXOR_V2DI
:
15247 case CRYPTO_BUILTIN_VPERMXOR_V4SI
:
15248 case CRYPTO_BUILTIN_VPERMXOR_V8HI
:
15249 case CRYPTO_BUILTIN_VPERMXOR_V16QI
:
15250 case CRYPTO_BUILTIN_VSHASIGMAW
:
15251 case CRYPTO_BUILTIN_VSHASIGMAD
:
15252 case CRYPTO_BUILTIN_VSHASIGMA
:
15259 /* signed permute functions with unsigned char mask. */
15260 case ALTIVEC_BUILTIN_VPERM_16QI
:
15261 case ALTIVEC_BUILTIN_VPERM_8HI
:
15262 case ALTIVEC_BUILTIN_VPERM_4SI
:
15263 case ALTIVEC_BUILTIN_VPERM_4SF
:
15264 case ALTIVEC_BUILTIN_VPERM_2DI
:
15265 case ALTIVEC_BUILTIN_VPERM_2DF
:
15266 case VSX_BUILTIN_VPERM_16QI
:
15267 case VSX_BUILTIN_VPERM_8HI
:
15268 case VSX_BUILTIN_VPERM_4SI
:
15269 case VSX_BUILTIN_VPERM_4SF
:
15270 case VSX_BUILTIN_VPERM_2DI
:
15271 case VSX_BUILTIN_VPERM_2DF
:
15275 /* unsigned args, signed return. */
15276 case VSX_BUILTIN_XVCVUXDDP_UNS
:
15277 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF
:
15281 /* signed args, unsigned return. */
15282 case VSX_BUILTIN_XVCVDPUXDS_UNS
:
15283 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI
:
15284 case MISC_BUILTIN_UNPACK_TD
:
15285 case MISC_BUILTIN_UNPACK_V1TI
:
15289 /* unsigned arguments for 128-bit pack instructions. */
15290 case MISC_BUILTIN_PACK_TD
:
15291 case MISC_BUILTIN_PACK_V1TI
:
  /* Figure out how many args are present.  */
  while (num_args > 0 && h.mode[num_args] == VOIDmode)
    num_args--;

  if (num_args == 0)
    fatal_error ("internal error: builtin function %s had no type", name);

  ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
  if (!ret_type && h.uns_p[0])
    ret_type = builtin_mode_to_type[h.mode[0]][0];

  if (!ret_type)
    fatal_error ("internal error: builtin function %s had an unexpected "
		 "return type %s", name, GET_MODE_NAME (h.mode[0]));

  for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
    arg_type[i] = NULL_TREE;

  for (i = 0; i < num_args; i++)
    {
      int m = (int) h.mode[i+1];
      int uns_p = h.uns_p[i+1];

      arg_type[i] = builtin_mode_to_type[m][uns_p];
      if (!arg_type[i] && uns_p)
	arg_type[i] = builtin_mode_to_type[m][0];

      if (!arg_type[i])
	fatal_error ("internal error: builtin function %s, argument %d "
		     "had unexpected argument type %s", name, i,
		     GET_MODE_NAME (m));
    }

  builtin_hash_struct **found = builtin_hash_table->find_slot (&h, INSERT);
  if (*found == NULL)
    {
      h2 = ggc_alloc<builtin_hash_struct> ();
      *found = h2;

      h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
					   arg_type[2], NULL_TREE);
    }

  return (*found)->type;
}
static void
rs6000_common_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;

  tree opaque_ftype_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
  tree v2si_ftype_qi = NULL_TREE;
  tree v2si_ftype_v2si_qi = NULL_TREE;
  tree v2si_ftype_int_qi = NULL_TREE;
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;

  if (!TARGET_PAIRED_FLOAT)
    {
      builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
      builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
    }

  /* Paired and SPE builtins are only available if you build a compiler with
     the appropriate options, so only create those builtins with the
     appropriate compiler option.  Create Altivec and VSX builtins on machines
     with at least the general purpose extensions (970 and newer) to allow the
     use of the target attribute.  */
  if (TARGET_EXTRA_BUILTINS)
    builtin_mask |= RS6000_BTM_COMMON;
15376 /* Add the ternary operators. */
15378 for (i
= 0; i
< ARRAY_SIZE (bdesc_3arg
); i
++, d
++)
15381 HOST_WIDE_INT mask
= d
->mask
;
15383 if ((mask
& builtin_mask
) != mask
)
15385 if (TARGET_DEBUG_BUILTIN
)
15386 fprintf (stderr
, "rs6000_builtin, skip ternary %s\n", d
->name
);
15390 if (rs6000_overloaded_builtin_p (d
->code
))
15392 if (! (type
= opaque_ftype_opaque_opaque_opaque
))
15393 type
= opaque_ftype_opaque_opaque_opaque
15394 = build_function_type_list (opaque_V4SI_type_node
,
15395 opaque_V4SI_type_node
,
15396 opaque_V4SI_type_node
,
15397 opaque_V4SI_type_node
,
15402 enum insn_code icode
= d
->icode
;
15405 if (TARGET_DEBUG_BUILTIN
)
15406 fprintf (stderr
, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
15412 if (icode
== CODE_FOR_nothing
)
15414 if (TARGET_DEBUG_BUILTIN
)
15415 fprintf (stderr
, "rs6000_builtin, skip ternary %s (no code)\n",
15421 type
= builtin_function_type (insn_data
[icode
].operand
[0].mode
,
15422 insn_data
[icode
].operand
[1].mode
,
15423 insn_data
[icode
].operand
[2].mode
,
15424 insn_data
[icode
].operand
[3].mode
,
15428 def_builtin (d
->name
, type
, d
->code
);
15431 /* Add the binary operators. */
15433 for (i
= 0; i
< ARRAY_SIZE (bdesc_2arg
); i
++, d
++)
15435 enum machine_mode mode0
, mode1
, mode2
;
15437 HOST_WIDE_INT mask
= d
->mask
;
15439 if ((mask
& builtin_mask
) != mask
)
15441 if (TARGET_DEBUG_BUILTIN
)
15442 fprintf (stderr
, "rs6000_builtin, skip binary %s\n", d
->name
);
15446 if (rs6000_overloaded_builtin_p (d
->code
))
15448 if (! (type
= opaque_ftype_opaque_opaque
))
15449 type
= opaque_ftype_opaque_opaque
15450 = build_function_type_list (opaque_V4SI_type_node
,
15451 opaque_V4SI_type_node
,
15452 opaque_V4SI_type_node
,
15457 enum insn_code icode
= d
->icode
;
15460 if (TARGET_DEBUG_BUILTIN
)
15461 fprintf (stderr
, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
15467 if (icode
== CODE_FOR_nothing
)
15469 if (TARGET_DEBUG_BUILTIN
)
15470 fprintf (stderr
, "rs6000_builtin, skip binary %s (no code)\n",
15476 mode0
= insn_data
[icode
].operand
[0].mode
;
15477 mode1
= insn_data
[icode
].operand
[1].mode
;
15478 mode2
= insn_data
[icode
].operand
[2].mode
;
15480 if (mode0
== V2SImode
&& mode1
== V2SImode
&& mode2
== QImode
)
15482 if (! (type
= v2si_ftype_v2si_qi
))
15483 type
= v2si_ftype_v2si_qi
15484 = build_function_type_list (opaque_V2SI_type_node
,
15485 opaque_V2SI_type_node
,
15490 else if (mode0
== V2SImode
&& GET_MODE_CLASS (mode1
) == MODE_INT
15491 && mode2
== QImode
)
15493 if (! (type
= v2si_ftype_int_qi
))
15494 type
= v2si_ftype_int_qi
15495 = build_function_type_list (opaque_V2SI_type_node
,
15502 type
= builtin_function_type (mode0
, mode1
, mode2
, VOIDmode
,
15506 def_builtin (d
->name
, type
, d
->code
);
15509 /* Add the simple unary operators. */
15511 for (i
= 0; i
< ARRAY_SIZE (bdesc_1arg
); i
++, d
++)
15513 enum machine_mode mode0
, mode1
;
15515 HOST_WIDE_INT mask
= d
->mask
;
15517 if ((mask
& builtin_mask
) != mask
)
15519 if (TARGET_DEBUG_BUILTIN
)
15520 fprintf (stderr
, "rs6000_builtin, skip unary %s\n", d
->name
);
15524 if (rs6000_overloaded_builtin_p (d
->code
))
15526 if (! (type
= opaque_ftype_opaque
))
15527 type
= opaque_ftype_opaque
15528 = build_function_type_list (opaque_V4SI_type_node
,
15529 opaque_V4SI_type_node
,
15534 enum insn_code icode
= d
->icode
;
15537 if (TARGET_DEBUG_BUILTIN
)
15538 fprintf (stderr
, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
15544 if (icode
== CODE_FOR_nothing
)
15546 if (TARGET_DEBUG_BUILTIN
)
15547 fprintf (stderr
, "rs6000_builtin, skip unary %s (no code)\n",
15553 mode0
= insn_data
[icode
].operand
[0].mode
;
15554 mode1
= insn_data
[icode
].operand
[1].mode
;
15556 if (mode0
== V2SImode
&& mode1
== QImode
)
15558 if (! (type
= v2si_ftype_qi
))
15559 type
= v2si_ftype_qi
15560 = build_function_type_list (opaque_V2SI_type_node
,
15566 type
= builtin_function_type (mode0
, mode1
, VOIDmode
, VOIDmode
,
15570 def_builtin (d
->name
, type
, d
->code
);
rs6000_init_libfuncs (void)
  if (!TARGET_IEEEQUAD)
    /* AIX/Darwin/64-bit Linux quad floating point routines.  */
    if (!TARGET_XL_COMPAT)
	set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
	set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
	set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
	set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");

	if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
	    set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
	    set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
	    set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
	    set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
	    set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
	    set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
	    set_optab_libfunc (le_optab, TFmode, "__gcc_qle");

	    set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
	    set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
	    set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
	    set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
	    set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
	    set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
	    set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
	    set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");

	if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
	  set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");

	set_optab_libfunc (add_optab, TFmode, "_xlqadd");
	set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
	set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
	set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");

      /* 32-bit SVR4 quad floating point routines.  */

      set_optab_libfunc (add_optab, TFmode, "_q_add");
      set_optab_libfunc (sub_optab, TFmode, "_q_sub");
      set_optab_libfunc (neg_optab, TFmode, "_q_neg");
      set_optab_libfunc (smul_optab, TFmode, "_q_mul");
      set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
      if (TARGET_PPC_GPOPT)
	set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");

      set_optab_libfunc (eq_optab, TFmode, "_q_feq");
      set_optab_libfunc (ne_optab, TFmode, "_q_fne");
      set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
      set_optab_libfunc (ge_optab, TFmode, "_q_fge");
      set_optab_libfunc (lt_optab, TFmode, "_q_flt");
      set_optab_libfunc (le_optab, TFmode, "_q_fle");

      set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
      set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
      set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
      set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
      set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
      set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
      set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
      set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
/* Expand a block clear operation, and return 1 if successful.  Return 0
   if we should let the compiler generate normal code.

   operands[0] is the destination
   operands[1] is the length
   operands[3] is the alignment  */

int
expand_block_clear (rtx operands[])
{
  rtx orig_dest = operands[0];
  rtx bytes_rtx	= operands[1];
  rtx align_rtx = operands[3];
  bool constp	= (GET_CODE (bytes_rtx) == CONST_INT);
  HOST_WIDE_INT align;
  HOST_WIDE_INT bytes;
  int offset;
  int clear_bytes;
  int clear_step;

  /* If this is not a fixed size move, just call memcpy */
  if (! constp)
    return 0;

  /* This must be a fixed size alignment  */
  gcc_assert (GET_CODE (align_rtx) == CONST_INT);
  align = INTVAL (align_rtx) * BITS_PER_UNIT;

  /* Anything to clear? */
  bytes = INTVAL (bytes_rtx);
  if (bytes <= 0)
    return 1;

  /* Use the builtin memset after a point, to avoid huge code bloat.
     When optimize_size, avoid any significant code bloat; calling
     memset is about 4 instructions, so allow for one instruction to
     load zero and three to do clearing.  */
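  /* Illustrative numbers, not taken from the original comment: if the chosen
     clear step is 16 bytes (a single AltiVec store), the two checks below
     stop inlining above 48 bytes (3 stores) when optimizing for size, and
     above 128 bytes (8 stores) otherwise, falling back to a memset call.  */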
  if (TARGET_ALTIVEC && align >= 128)
    clear_step = 16;
  else if (TARGET_POWERPC64 && (align >= 64 || !STRICT_ALIGNMENT))
    clear_step = 8;
  else if (TARGET_SPE && align >= 64)
    clear_step = 8;
  else
    clear_step = 4;

  if (optimize_size && bytes > 3 * clear_step)
    return 0;
  if (! optimize_size && bytes > 8 * clear_step)
    return 0;

  for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
    {
      enum machine_mode mode = BLKmode;
      rtx dest;

      if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
	{
	  clear_bytes = 16;
	  mode = V4SImode;
	}
      else if (bytes >= 8 && TARGET_SPE && align >= 64)
	{
	  clear_bytes = 8;
	  mode = V2SImode;
	}
      else if (bytes >= 8 && TARGET_POWERPC64
	       && (align >= 64 || !STRICT_ALIGNMENT))
	{
	  clear_bytes = 8;
	  mode = DImode;
	  if (offset == 0 && align < 64)
	    {
	      rtx addr;

	      /* If the address form is reg+offset with offset not a
		 multiple of four, reload into reg indirect form here
		 rather than waiting for reload.  This way we get one
		 reload, not one per store.  */
	      addr = XEXP (orig_dest, 0);
	      if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
		  && GET_CODE (XEXP (addr, 1)) == CONST_INT
		  && (INTVAL (XEXP (addr, 1)) & 3) != 0)
		{
		  addr = copy_addr_to_reg (addr);
		  orig_dest = replace_equiv_address (orig_dest, addr);
		}
	    }
	}
      else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
	{			/* move 4 bytes */
	  clear_bytes = 4;
	  mode = SImode;
	}
      else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
	{			/* move 2 bytes */
	  clear_bytes = 2;
	  mode = HImode;
	}
      else /* move 1 byte at a time */
	{
	  clear_bytes = 1;
	  mode = QImode;
	}

      dest = adjust_address (orig_dest, mode, offset);

      emit_move_insn (dest, CONST0_RTX (mode));
    }

  return 1;
}
15761 /* Expand a block move operation, and return 1 if successful. Return 0
15762 if we should let the compiler generate normal code.
15764 operands[0] is the destination
15765 operands[1] is the source
15766 operands[2] is the length
15767 operands[3] is the alignment */
15769 #define MAX_MOVE_REG 4
15772 expand_block_move (rtx operands
[])
15774 rtx orig_dest
= operands
[0];
15775 rtx orig_src
= operands
[1];
15776 rtx bytes_rtx
= operands
[2];
15777 rtx align_rtx
= operands
[3];
15778 int constp
= (GET_CODE (bytes_rtx
) == CONST_INT
);
15783 rtx stores
[MAX_MOVE_REG
];
15786 /* If this is not a fixed size move, just call memcpy */
15790 /* This must be a fixed size alignment */
15791 gcc_assert (GET_CODE (align_rtx
) == CONST_INT
);
15792 align
= INTVAL (align_rtx
) * BITS_PER_UNIT
;
15794 /* Anything to move? */
15795 bytes
= INTVAL (bytes_rtx
);
15799 if (bytes
> rs6000_block_move_inline_limit
)
15802 for (offset
= 0; bytes
> 0; offset
+= move_bytes
, bytes
-= move_bytes
)
15805 rtx (*movmemsi
) (rtx
, rtx
, rtx
, rtx
);
15806 rtx (*mov
) (rtx
, rtx
);
15808 enum machine_mode mode
= BLKmode
;
15811 /* Altivec first, since it will be faster than a string move
15812 when it applies, and usually not significantly larger. */
15813 if (TARGET_ALTIVEC
&& bytes
>= 16 && align
>= 128)
15817 gen_func
.mov
= gen_movv4si
;
15819 else if (TARGET_SPE
&& bytes
>= 8 && align
>= 64)
15823 gen_func
.mov
= gen_movv2si
;
15825 else if (TARGET_STRING
15826 && bytes
> 24 /* move up to 32 bytes at a time */
15832 && ! fixed_regs
[10]
15833 && ! fixed_regs
[11]
15834 && ! fixed_regs
[12])
15836 move_bytes
= (bytes
> 32) ? 32 : bytes
;
15837 gen_func
.movmemsi
= gen_movmemsi_8reg
;
15839 else if (TARGET_STRING
15840 && bytes
> 16 /* move up to 24 bytes at a time */
15846 && ! fixed_regs
[10])
15848 move_bytes
= (bytes
> 24) ? 24 : bytes
;
15849 gen_func
.movmemsi
= gen_movmemsi_6reg
;
15851 else if (TARGET_STRING
15852 && bytes
> 8 /* move up to 16 bytes at a time */
15856 && ! fixed_regs
[8])
15858 move_bytes
= (bytes
> 16) ? 16 : bytes
;
15859 gen_func
.movmemsi
= gen_movmemsi_4reg
;
15861 else if (bytes
>= 8 && TARGET_POWERPC64
15862 && (align
>= 64 || !STRICT_ALIGNMENT
))
15866 gen_func
.mov
= gen_movdi
;
15867 if (offset
== 0 && align
< 64)
15871 /* If the address form is reg+offset with offset not a
15872 multiple of four, reload into reg indirect form here
15873 rather than waiting for reload. This way we get one
15874 reload, not one per load and/or store. */
15875 addr
= XEXP (orig_dest
, 0);
15876 if ((GET_CODE (addr
) == PLUS
|| GET_CODE (addr
) == LO_SUM
)
15877 && GET_CODE (XEXP (addr
, 1)) == CONST_INT
15878 && (INTVAL (XEXP (addr
, 1)) & 3) != 0)
15880 addr
= copy_addr_to_reg (addr
);
15881 orig_dest
= replace_equiv_address (orig_dest
, addr
);
15883 addr
= XEXP (orig_src
, 0);
15884 if ((GET_CODE (addr
) == PLUS
|| GET_CODE (addr
) == LO_SUM
)
15885 && GET_CODE (XEXP (addr
, 1)) == CONST_INT
15886 && (INTVAL (XEXP (addr
, 1)) & 3) != 0)
15888 addr
= copy_addr_to_reg (addr
);
15889 orig_src
= replace_equiv_address (orig_src
, addr
);
15893 else if (TARGET_STRING
&& bytes
> 4 && !TARGET_POWERPC64
)
15894 { /* move up to 8 bytes at a time */
15895 move_bytes
= (bytes
> 8) ? 8 : bytes
;
15896 gen_func
.movmemsi
= gen_movmemsi_2reg
;
15898 else if (bytes
>= 4 && (align
>= 32 || !STRICT_ALIGNMENT
))
15899 { /* move 4 bytes */
15902 gen_func
.mov
= gen_movsi
;
15904 else if (bytes
>= 2 && (align
>= 16 || !STRICT_ALIGNMENT
))
15905 { /* move 2 bytes */
15908 gen_func
.mov
= gen_movhi
;
15910 else if (TARGET_STRING
&& bytes
> 1)
15911 { /* move up to 4 bytes at a time */
15912 move_bytes
= (bytes
> 4) ? 4 : bytes
;
15913 gen_func
.movmemsi
= gen_movmemsi_1reg
;
15915 else /* move 1 byte at a time */
15919 gen_func
.mov
= gen_movqi
;
15922 src
= adjust_address (orig_src
, mode
, offset
);
15923 dest
= adjust_address (orig_dest
, mode
, offset
);
15925 if (mode
!= BLKmode
)
15927 rtx tmp_reg
= gen_reg_rtx (mode
);
15929 emit_insn ((*gen_func
.mov
) (tmp_reg
, src
));
15930 stores
[num_reg
++] = (*gen_func
.mov
) (dest
, tmp_reg
);
15933 if (mode
== BLKmode
|| num_reg
>= MAX_MOVE_REG
|| bytes
== move_bytes
)
15936 for (i
= 0; i
< num_reg
; i
++)
15937 emit_insn (stores
[i
]);
15941 if (mode
== BLKmode
)
15943 /* Move the address into scratch registers. The movmemsi
15944 patterns require zero offset. */
15945 if (!REG_P (XEXP (src
, 0)))
15947 rtx src_reg
= copy_addr_to_reg (XEXP (src
, 0));
15948 src
= replace_equiv_address (src
, src_reg
);
15950 set_mem_size (src
, move_bytes
);
15952 if (!REG_P (XEXP (dest
, 0)))
15954 rtx dest_reg
= copy_addr_to_reg (XEXP (dest
, 0));
15955 dest
= replace_equiv_address (dest
, dest_reg
);
15957 set_mem_size (dest
, move_bytes
);
15959 emit_insn ((*gen_func
.movmemsi
) (dest
, src
,
15960 GEN_INT (move_bytes
& 31),
/* Return a string to perform a load_multiple operation.
   operands[0] is the vector.
   operands[1] is the source address.
   operands[2] is the first destination register.  */
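/* Illustrative note: when the address register is not also one of the
   destination registers, the whole transfer can be emitted as a single
   lswi (load string word immediate), as in the final return below; the
   loop that follows detects the overlapping case and rewrites the
   sequence using individual lwz loads instead.  */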
rs6000_output_load_multiple (rtx operands[3])
  /* We have to handle the case where the pseudo used to contain the address
     is assigned to one of the output registers.  */
  int words = XVECLEN (operands[0], 0);

  if (XVECLEN (operands[0], 0) == 1)
    return "lwz %2,0(%1)";

  for (i = 0; i < words; i++)
    if (refers_to_regno_p (REGNO (operands[2]) + i,
			   REGNO (operands[2]) + i + 1, operands[1], 0))

	xop[0] = GEN_INT (4 * (words-1));
	xop[1] = operands[1];
	xop[2] = operands[2];
	output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);

	xop[0] = GEN_INT (4 * (words-1));
	xop[1] = operands[1];
	xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
	output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);

	for (j = 0; j < words; j++)

	    xop[0] = GEN_INT (j * 4);
	    xop[1] = operands[1];
	    xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
	    output_asm_insn ("lwz %2,%0(%1)", xop);

	xop[0] = GEN_INT (i * 4);
	xop[1] = operands[1];
	output_asm_insn ("lwz %1,%0(%1)", xop);

  return "lswi %2,%1,%N0";
/* A validation routine: say whether CODE, a condition code, and MODE
   match.  The other alternatives either don't make sense or should
   never be generated.  */

void
validate_condition_mode (enum rtx_code code, enum machine_mode mode)
{
  gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
	       || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	      && GET_MODE_CLASS (mode) == MODE_CC);

  /* These don't make sense.  */
  gcc_assert ((code != GT && code != LT && code != GE && code != LE)
	      || mode != CCUNSmode);

  gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
	      || mode == CCUNSmode);

  gcc_assert (mode == CCFPmode
	      || (code != ORDERED && code != UNORDERED
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT
		  && code != UNGE && code != UNLE));

  /* These should never be generated except for
     flag_finite_math_only.  */
  gcc_assert (mode != CCFPmode
	      || flag_finite_math_only
	      || (code != LE && code != GE
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT));

  /* These are invalid; the information is not there.  */
  gcc_assert (mode != CCEQmode || code == EQ || code == NE);
}
/* Return 1 if ANDOP is a mask that has no bits on that are not in the
   mask required to convert the result of a rotate insn into a shift
   left insn of SHIFTOP bits.  Both are known to be SImode CONST_INT.  */
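/* For example, with SHIFTOP == 8 the usable mask bits are 0xffffff00; an
   ANDOP of 0x0000ff00 satisfies the test below, while 0x000000ff does not,
   since a left shift by 8 can never leave the low eight bits set.  */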
int
includes_lshift_p (rtx shiftop, rtx andop)
{
  unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;

  shift_mask <<= INTVAL (shiftop);

  return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
}

/* Similar, but for right shift.  */

int
includes_rshift_p (rtx shiftop, rtx andop)
{
  unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;

  shift_mask >>= INTVAL (shiftop);

  return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
}
/* Return 1 if ANDOP is a mask suitable for use with an rldic insn
   to perform a left shift.  It must have exactly SHIFTOP least
   significant 0's, then one or more 1's, then zero or more 0's.  */
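/* For instance, with SHIFTOP == 4 the mask 0x00000ff0 has the required
   shape (exactly four trailing 0's, a run of 1's, then 0's), while
   0x0000ff00 does not, because it has eight trailing 0's rather than
   exactly four.  */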
int
includes_rldic_lshift_p (rtx shiftop, rtx andop)
{
  if (GET_CODE (andop) == CONST_INT)
    {
      HOST_WIDE_INT c, lsb, shift_mask;

      c = INTVAL (andop);
      if (c == 0 || c == ~0)
	return 0;

      shift_mask = ~0;
      shift_mask <<= INTVAL (shiftop);

      /* Find the least significant one bit.  */
      lsb = c & -c;

      /* It must coincide with the LSB of the shift mask.  */
      if (-lsb != shift_mask)
	return 0;

      /* Invert to look for the next transition (if any).  */
      c = ~c;

      /* Remove the low group of ones (originally low group of zeros).  */
      c &= -lsb;

      /* Again find the lsb, and check we have all 1's above.  */
      lsb = c & -c;
      return c == -lsb;
    }
  else
    return 0;
}

/* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
   to perform a left shift.  It must have SHIFTOP or more least
   significant 0's, with the remainder of the word 1's.  */
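/* For instance, with SHIFTOP == 4 the mask ~(HOST_WIDE_INT) 0xf (low four
   bits clear, everything above set) qualifies, while 0x00000ff0 does not,
   because the bits above the run of 1's are not all set.  */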
int
includes_rldicr_lshift_p (rtx shiftop, rtx andop)
{
  if (GET_CODE (andop) == CONST_INT)
    {
      HOST_WIDE_INT c, lsb, shift_mask;

      shift_mask = ~0;
      shift_mask <<= INTVAL (shiftop);
      c = INTVAL (andop);

      /* Find the least significant one bit.  */
      lsb = c & -c;

      /* It must be covered by the shift mask.
	 This test also rejects c == 0.  */
      if ((lsb & shift_mask) == 0)
	return 0;

      /* Check we have all 1's above the transition, and reject all 1's.  */
      return c == -lsb && lsb != 1;
    }
  else
    return 0;
}
/* Return 1 if operands will generate valid arguments to the rlwimi
   instruction for insert with right shift in 64-bit mode.  The mask may
   not start on the first bit or stop on the last bit because wrap-around
   effects of instruction do not correspond to semantics of RTL insn.  */
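/* Illustrative case: inserting a 16-bit field at bit position 40 from a
   value shifted right by 8 passes every check below (40 > 32, 16 + 40 < 64,
   16 + 8 < 32, and 64 - 8 >= 16); a field that would touch bit 0 or bit 63
   is rejected because rlwimi's rotate would wrap around.  */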
int
insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
{
  if (INTVAL (startop) > 32
      && INTVAL (startop) < 64
      && INTVAL (sizeop) > 1
      && INTVAL (sizeop) + INTVAL (startop) < 64
      && INTVAL (shiftop) > 0
      && INTVAL (sizeop) + INTVAL (shiftop) < 32
      && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
    return 1;

  return 0;
}

/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
   for lfq and stfq insns iff the registers are hard registers.  */

int
registers_ok_for_quad_peep (rtx reg1, rtx reg2)
{
  /* We might have been passed a SUBREG.  */
  if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
    return 0;

  /* We might have been passed non floating point registers.  */
  if (!FP_REGNO_P (REGNO (reg1))
      || !FP_REGNO_P (REGNO (reg2)))
    return 0;

  return (REGNO (reg1) == REGNO (reg2) - 1);
}
16197 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
16198 addr1 and addr2 must be in consecutive memory locations
16199 (addr2 == addr1 + 8). */
16202 mems_ok_for_quad_peep (rtx mem1
, rtx mem2
)
16205 unsigned int reg1
, reg2
;
16206 int offset1
, offset2
;
16208 /* The mems cannot be volatile. */
16209 if (MEM_VOLATILE_P (mem1
) || MEM_VOLATILE_P (mem2
))
16212 addr1
= XEXP (mem1
, 0);
16213 addr2
= XEXP (mem2
, 0);
16215 /* Extract an offset (if used) from the first addr. */
16216 if (GET_CODE (addr1
) == PLUS
)
16218 /* If not a REG, return zero. */
16219 if (GET_CODE (XEXP (addr1
, 0)) != REG
)
16223 reg1
= REGNO (XEXP (addr1
, 0));
16224 /* The offset must be constant! */
16225 if (GET_CODE (XEXP (addr1
, 1)) != CONST_INT
)
16227 offset1
= INTVAL (XEXP (addr1
, 1));
16230 else if (GET_CODE (addr1
) != REG
)
16234 reg1
= REGNO (addr1
);
16235 /* This was a simple (mem (reg)) expression. Offset is 0. */
16239 /* And now for the second addr. */
16240 if (GET_CODE (addr2
) == PLUS
)
16242 /* If not a REG, return zero. */
16243 if (GET_CODE (XEXP (addr2
, 0)) != REG
)
16247 reg2
= REGNO (XEXP (addr2
, 0));
16248 /* The offset must be constant. */
16249 if (GET_CODE (XEXP (addr2
, 1)) != CONST_INT
)
16251 offset2
= INTVAL (XEXP (addr2
, 1));
16254 else if (GET_CODE (addr2
) != REG
)
16258 reg2
= REGNO (addr2
);
16259 /* This was a simple (mem (reg)) expression. Offset is 0. */
16263 /* Both of these must have the same base register. */
16267 /* The offset for the second addr must be 8 more than the first addr. */
16268 if (offset2
!= offset1
+ 8)
16271 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
16278 rs6000_secondary_memory_needed_rtx (enum machine_mode mode
)
16280 static bool eliminated
= false;
16283 if (mode
!= SDmode
|| TARGET_NO_SDMODE_STACK
)
16284 ret
= assign_stack_local (mode
, GET_MODE_SIZE (mode
), 0);
16287 rtx mem
= cfun
->machine
->sdmode_stack_slot
;
16288 gcc_assert (mem
!= NULL_RTX
);
16292 mem
= eliminate_regs (mem
, VOIDmode
, NULL_RTX
);
16293 cfun
->machine
->sdmode_stack_slot
= mem
;
16299 if (TARGET_DEBUG_ADDR
)
16301 fprintf (stderr
, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
16302 GET_MODE_NAME (mode
));
16304 fprintf (stderr
, "\tNULL_RTX\n");
16312 /* Return the mode to be used for memory when a secondary memory
16313 location is needed. For SDmode values we need to use DDmode, in
16314 all other cases we can use the same mode. */
16316 rs6000_secondary_memory_needed_mode (enum machine_mode mode
)
16318 if (lra_in_progress
&& mode
== SDmode
)
16324 rs6000_check_sdmode (tree
*tp
, int *walk_subtrees
, void *data ATTRIBUTE_UNUSED
)
16326 /* Don't walk into types. */
16327 if (*tp
== NULL_TREE
|| *tp
== error_mark_node
|| TYPE_P (*tp
))
16329 *walk_subtrees
= 0;
16333 switch (TREE_CODE (*tp
))
16342 case VIEW_CONVERT_EXPR
:
16343 if (TYPE_MODE (TREE_TYPE (*tp
)) == SDmode
)
16353 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
16354 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
16355 only work on the traditional altivec registers, note if an altivec register
16358 static enum rs6000_reg_type
16359 register_to_reg_type (rtx reg
, bool *is_altivec
)
16361 HOST_WIDE_INT regno
;
16362 enum reg_class rclass
;
16364 if (GET_CODE (reg
) == SUBREG
)
16365 reg
= SUBREG_REG (reg
);
16368 return NO_REG_TYPE
;
16370 regno
= REGNO (reg
);
16371 if (regno
>= FIRST_PSEUDO_REGISTER
)
16373 if (!lra_in_progress
&& !reload_in_progress
&& !reload_completed
)
16374 return PSEUDO_REG_TYPE
;
16376 regno
= true_regnum (reg
);
16377 if (regno
< 0 || regno
>= FIRST_PSEUDO_REGISTER
)
16378 return PSEUDO_REG_TYPE
;
16381 gcc_assert (regno
>= 0);
16383 if (is_altivec
&& ALTIVEC_REGNO_P (regno
))
16384 *is_altivec
= true;
16386 rclass
= rs6000_regno_regclass
[regno
];
16387 return reg_class_to_reg_type
[(int)rclass
];
16390 /* Helper function for rs6000_secondary_reload to return true if a move to a
16391 different register classe is really a simple move. */
16394 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type
,
16395 enum rs6000_reg_type from_type
,
16396 enum machine_mode mode
)
16400 /* Add support for various direct moves available. In this function, we only
16401 look at cases where we don't need any extra registers, and one or more
16402 simple move insns are issued. At present, 32-bit integers are not allowed
16403 in FPR/VSX registers. Single precision binary floating is not a simple
16404 move because we need to convert to the single precision memory layout.
16405 The 4-byte SDmode can be moved. */
16406 size
= GET_MODE_SIZE (mode
);
16407 if (TARGET_DIRECT_MOVE
16408 && ((mode
== SDmode
) || (TARGET_POWERPC64
&& size
== 8))
16409 && ((to_type
== GPR_REG_TYPE
&& from_type
== VSX_REG_TYPE
)
16410 || (to_type
== VSX_REG_TYPE
&& from_type
== GPR_REG_TYPE
)))
16413 else if (TARGET_MFPGPR
&& TARGET_POWERPC64
&& size
== 8
16414 && ((to_type
== GPR_REG_TYPE
&& from_type
== FPR_REG_TYPE
)
16415 || (to_type
== FPR_REG_TYPE
&& from_type
== GPR_REG_TYPE
)))
16418 else if ((size
== 4 || (TARGET_POWERPC64
&& size
== 8))
16419 && ((to_type
== GPR_REG_TYPE
&& from_type
== SPR_REG_TYPE
)
16420 || (to_type
== SPR_REG_TYPE
&& from_type
== GPR_REG_TYPE
)))
16426 /* Power8 helper function for rs6000_secondary_reload, handle all of the
16427 special direct moves that involve allocating an extra register, return the
16428 insn code of the helper function if there is such a function or
16429 CODE_FOR_nothing if not. */
16432 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type
,
16433 enum rs6000_reg_type from_type
,
16434 enum machine_mode mode
,
16435 secondary_reload_info
*sri
,
16439 enum insn_code icode
= CODE_FOR_nothing
;
16441 int size
= GET_MODE_SIZE (mode
);
16443 if (TARGET_POWERPC64
)
16447 /* Handle moving 128-bit values from GPRs to VSX point registers on
16448 power8 when running in 64-bit mode using XXPERMDI to glue the two
16449 64-bit values back together. */
16450 if (to_type
== VSX_REG_TYPE
&& from_type
== GPR_REG_TYPE
)
16452 cost
= 3; /* 2 mtvsrd's, 1 xxpermdi. */
16453 icode
= reg_addr
[mode
].reload_vsx_gpr
;
16456 /* Handle moving 128-bit values from VSX point registers to GPRs on
16457 power8 when running in 64-bit mode using XXPERMDI to get access to the
16458 bottom 64-bit value. */
16459 else if (to_type
== GPR_REG_TYPE
&& from_type
== VSX_REG_TYPE
)
16461 cost
= 3; /* 2 mfvsrd's, 1 xxpermdi. */
16462 icode
= reg_addr
[mode
].reload_gpr_vsx
;
16466 else if (mode
== SFmode
)
16468 if (to_type
== GPR_REG_TYPE
&& from_type
== VSX_REG_TYPE
)
16470 cost
= 3; /* xscvdpspn, mfvsrd, and. */
16471 icode
= reg_addr
[mode
].reload_gpr_vsx
;
16474 else if (to_type
== VSX_REG_TYPE
&& from_type
== GPR_REG_TYPE
)
16476 cost
= 2; /* mtvsrz, xscvspdpn. */
16477 icode
= reg_addr
[mode
].reload_vsx_gpr
;
16482 if (TARGET_POWERPC64
&& size
== 16)
16484 /* Handle moving 128-bit values from GPRs to VSX point registers on
16485 power8 when running in 64-bit mode using XXPERMDI to glue the two
16486 64-bit values back together. */
16487 if (to_type
== VSX_REG_TYPE
&& from_type
== GPR_REG_TYPE
)
16489 cost
= 3; /* 2 mtvsrd's, 1 xxpermdi. */
16490 icode
= reg_addr
[mode
].reload_vsx_gpr
;
16493 /* Handle moving 128-bit values from VSX point registers to GPRs on
16494 power8 when running in 64-bit mode using XXPERMDI to get access to the
16495 bottom 64-bit value. */
16496 else if (to_type
== GPR_REG_TYPE
&& from_type
== VSX_REG_TYPE
)
16498 cost
= 3; /* 2 mfvsrd's, 1 xxpermdi. */
16499 icode
= reg_addr
[mode
].reload_gpr_vsx
;
16503 else if (!TARGET_POWERPC64
&& size
== 8)
16505 /* Handle moving 64-bit values from GPRs to floating point registers on
16506 power8 when running in 32-bit mode using FMRGOW to glue the two 32-bit
16507 values back together. Altivec register classes must be handled
16508 specially since a different instruction is used, and the secondary
16509 reload support requires a single instruction class in the scratch
16510 register constraint. However, right now TFmode is not allowed in
16511 Altivec registers, so the pattern will never match. */
16512 if (to_type
== VSX_REG_TYPE
&& from_type
== GPR_REG_TYPE
&& !altivec_p
)
16514 cost
= 3; /* 2 mtvsrwz's, 1 fmrgow. */
16515 icode
= reg_addr
[mode
].reload_fpr_gpr
;
16519 if (icode
!= CODE_FOR_nothing
)
16524 sri
->icode
= icode
;
16525 sri
->extra_cost
= cost
;
16532 /* Return whether a move between two register classes can be done either
16533 directly (simple move) or via a pattern that uses a single extra temporary
16534 (using power8's direct move in this case. */
16537 rs6000_secondary_reload_move (enum rs6000_reg_type to_type
,
16538 enum rs6000_reg_type from_type
,
16539 enum machine_mode mode
,
16540 secondary_reload_info
*sri
,
16543 /* Fall back to load/store reloads if either type is not a register. */
16544 if (to_type
== NO_REG_TYPE
|| from_type
== NO_REG_TYPE
)
16547 /* If we haven't allocated registers yet, assume the move can be done for the
16548 standard register types. */
16549 if ((to_type
== PSEUDO_REG_TYPE
&& from_type
== PSEUDO_REG_TYPE
)
16550 || (to_type
== PSEUDO_REG_TYPE
&& IS_STD_REG_TYPE (from_type
))
16551 || (from_type
== PSEUDO_REG_TYPE
&& IS_STD_REG_TYPE (to_type
)))
16554 /* Moves to the same set of registers is a simple move for non-specialized
16556 if (to_type
== from_type
&& IS_STD_REG_TYPE (to_type
))
16559 /* Check whether a simple move can be done directly. */
16560 if (rs6000_secondary_reload_simple_move (to_type
, from_type
, mode
))
16564 sri
->icode
= CODE_FOR_nothing
;
16565 sri
->extra_cost
= 0;
16570 /* Now check if we can do it in a few steps. */
16571 return rs6000_secondary_reload_direct_move (to_type
, from_type
, mode
, sri
,
16575 /* Inform reload about cases where moving X with a mode MODE to a register in
16576 RCLASS requires an extra scratch or immediate register. Return the class
16577 needed for the immediate register.
16579 For VSX and Altivec, we may need a register to convert sp+offset into
16582 For misaligned 64-bit gpr loads and stores we need a register to
16583 convert an offset address to indirect. */
16586 rs6000_secondary_reload (bool in_p
,
16588 reg_class_t rclass_i
,
16589 enum machine_mode mode
,
16590 secondary_reload_info
*sri
)
16592 enum reg_class rclass
= (enum reg_class
) rclass_i
;
16593 reg_class_t ret
= ALL_REGS
;
16594 enum insn_code icode
;
16595 bool default_p
= false;
16597 sri
->icode
= CODE_FOR_nothing
;
16599 ? reg_addr
[mode
].reload_load
16600 : reg_addr
[mode
].reload_store
);
16602 if (REG_P (x
) || register_operand (x
, mode
))
16604 enum rs6000_reg_type to_type
= reg_class_to_reg_type
[(int)rclass
];
16605 bool altivec_p
= (rclass
== ALTIVEC_REGS
);
16606 enum rs6000_reg_type from_type
= register_to_reg_type (x
, &altivec_p
);
16610 enum rs6000_reg_type exchange
= to_type
;
16611 to_type
= from_type
;
16612 from_type
= exchange
;
16615 /* Can we do a direct move of some sort? */
16616 if (rs6000_secondary_reload_move (to_type
, from_type
, mode
, sri
,
16619 icode
= (enum insn_code
)sri
->icode
;
16625 /* Handle vector moves with reload helper functions. */
16626 if (ret
== ALL_REGS
&& icode
!= CODE_FOR_nothing
)
16629 sri
->icode
= CODE_FOR_nothing
;
16630 sri
->extra_cost
= 0;
16632 if (GET_CODE (x
) == MEM
)
16634 rtx addr
= XEXP (x
, 0);
16636 /* Loads to and stores from gprs can do reg+offset, and wouldn't need
16637 an extra register in that case, but it would need an extra
16638 register if the addressing is reg+reg or (reg+reg)&(-16). Special
16639 case load/store quad. */
16640 if (rclass
== GENERAL_REGS
|| rclass
== BASE_REGS
)
16642 if (TARGET_POWERPC64
&& TARGET_QUAD_MEMORY
16643 && GET_MODE_SIZE (mode
) == 16
16644 && quad_memory_operand (x
, mode
))
16646 sri
->icode
= icode
;
16647 sri
->extra_cost
= 2;
16650 else if (!legitimate_indirect_address_p (addr
, false)
16651 && !rs6000_legitimate_offset_address_p (PTImode
, addr
,
16654 sri
->icode
= icode
;
16655 /* account for splitting the loads, and converting the
16656 address from reg+reg to reg. */
16657 sri
->extra_cost
= (((TARGET_64BIT
) ? 3 : 5)
16658 + ((GET_CODE (addr
) == AND
) ? 1 : 0));
16661 /* Allow scalar loads to/from the traditional floating point
16662 registers, even if VSX memory is set. */
16663 else if ((rclass
== FLOAT_REGS
|| rclass
== NO_REGS
)
16664 && (GET_MODE_SIZE (mode
) == 4 || GET_MODE_SIZE (mode
) == 8)
16665 && (legitimate_indirect_address_p (addr
, false)
16666 || legitimate_indirect_address_p (addr
, false)
16667 || rs6000_legitimate_offset_address_p (mode
, addr
,
16671 /* Loads to and stores from vector registers can only do reg+reg
16672 addressing. Altivec registers can also do (reg+reg)&(-16). Allow
16673 scalar modes loading up the traditional floating point registers
16674 to use offset addresses. */
16675 else if (rclass
== VSX_REGS
|| rclass
== ALTIVEC_REGS
16676 || rclass
== FLOAT_REGS
|| rclass
== NO_REGS
)
16678 if (!VECTOR_MEM_ALTIVEC_P (mode
)
16679 && GET_CODE (addr
) == AND
16680 && GET_CODE (XEXP (addr
, 1)) == CONST_INT
16681 && INTVAL (XEXP (addr
, 1)) == -16
16682 && (legitimate_indirect_address_p (XEXP (addr
, 0), false)
16683 || legitimate_indexed_address_p (XEXP (addr
, 0), false)))
16685 sri
->icode
= icode
;
16686 sri
->extra_cost
= ((GET_CODE (XEXP (addr
, 0)) == PLUS
)
16689 else if (!legitimate_indirect_address_p (addr
, false)
16690 && (rclass
== NO_REGS
16691 || !legitimate_indexed_address_p (addr
, false)))
16693 sri
->icode
= icode
;
16694 sri
->extra_cost
= 1;
16697 icode
= CODE_FOR_nothing
;
16699 /* Any other loads, including to pseudo registers which haven't been
16700 assigned to a register yet, default to require a scratch
16704 sri
->icode
= icode
;
16705 sri
->extra_cost
= 2;
16708 else if (REG_P (x
))
16710 int regno
= true_regnum (x
);
16712 icode
= CODE_FOR_nothing
;
16713 if (regno
< 0 || regno
>= FIRST_PSEUDO_REGISTER
)
16717 enum reg_class xclass
= REGNO_REG_CLASS (regno
);
16718 enum rs6000_reg_type rtype1
= reg_class_to_reg_type
[(int)rclass
];
16719 enum rs6000_reg_type rtype2
= reg_class_to_reg_type
[(int)xclass
];
16721 /* If memory is needed, use default_secondary_reload to create the
16723 if (rtype1
!= rtype2
|| !IS_STD_REG_TYPE (rtype1
))
16732 else if (TARGET_POWERPC64
16733 && reg_class_to_reg_type
[(int)rclass
] == GPR_REG_TYPE
16735 && GET_MODE_SIZE (GET_MODE (x
)) >= UNITS_PER_WORD
)
16737 rtx addr
= XEXP (x
, 0);
16738 rtx off
= address_offset (addr
);
16740 if (off
!= NULL_RTX
)
16742 unsigned int extra
= GET_MODE_SIZE (GET_MODE (x
)) - UNITS_PER_WORD
;
16743 unsigned HOST_WIDE_INT offset
= INTVAL (off
);
16745 /* We need a secondary reload when our legitimate_address_p
16746 says the address is good (as otherwise the entire address
16747 will be reloaded), and the offset is not a multiple of
16748 four or we have an address wrap. Address wrap will only
16749 occur for LO_SUMs since legitimate_offset_address_p
16750 rejects addresses for 16-byte mems that will wrap. */
16751 if (GET_CODE (addr
) == LO_SUM
16752 ? (1 /* legitimate_address_p allows any offset for lo_sum */
16753 && ((offset
& 3) != 0
16754 || ((offset
& 0xffff) ^ 0x8000) >= 0x10000 - extra
))
16755 : (offset
+ 0x8000 < 0x10000 - extra
/* legitimate_address_p */
16756 && (offset
& 3) != 0))
16759 sri
->icode
= CODE_FOR_reload_di_load
;
16761 sri
->icode
= CODE_FOR_reload_di_store
;
16762 sri
->extra_cost
= 2;
16771 else if (!TARGET_POWERPC64
16772 && reg_class_to_reg_type
[(int)rclass
] == GPR_REG_TYPE
16774 && GET_MODE_SIZE (GET_MODE (x
)) > UNITS_PER_WORD
)
16776 rtx addr
= XEXP (x
, 0);
16777 rtx off
= address_offset (addr
);
16779 if (off
!= NULL_RTX
)
16781 unsigned int extra
= GET_MODE_SIZE (GET_MODE (x
)) - UNITS_PER_WORD
;
16782 unsigned HOST_WIDE_INT offset
= INTVAL (off
);
16784 /* We need a secondary reload when our legitimate_address_p
16785 says the address is good (as otherwise the entire address
16786 will be reloaded), and we have a wrap.
16788 legitimate_lo_sum_address_p allows LO_SUM addresses to
16789 have any offset so test for wrap in the low 16 bits.
16791 legitimate_offset_address_p checks for the range
16792 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
16793 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
16794 [0x7ff4,0x7fff] respectively, so test for the
16795 intersection of these ranges, [0x7ffc,0x7fff] and
16796 [0x7ff4,0x7ff7] respectively.
16798 Note that the address we see here may have been
16799 manipulated by legitimize_reload_address. */
16800 if (GET_CODE (addr
) == LO_SUM
16801 ? ((offset
& 0xffff) ^ 0x8000) >= 0x10000 - extra
16802 : offset
- (0x8000 - extra
) < UNITS_PER_WORD
)
16805 sri
->icode
= CODE_FOR_reload_si_load
;
16807 sri
->icode
= CODE_FOR_reload_si_store
;
16808 sri
->extra_cost
= 2;
16821 ret
= default_secondary_reload (in_p
, x
, rclass
, mode
, sri
);
16823 gcc_assert (ret
!= ALL_REGS
);
16825 if (TARGET_DEBUG_ADDR
)
16828 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
16830 reg_class_names
[ret
],
16831 in_p
? "true" : "false",
16832 reg_class_names
[rclass
],
16833 GET_MODE_NAME (mode
));
16836 fprintf (stderr
, ", default secondary reload");
16838 if (sri
->icode
!= CODE_FOR_nothing
)
16839 fprintf (stderr
, ", reload func = %s, extra cost = %d\n",
16840 insn_data
[sri
->icode
].name
, sri
->extra_cost
);
16842 fprintf (stderr
, "\n");
16850 /* Better tracing for rs6000_secondary_reload_inner. */
16853 rs6000_secondary_reload_trace (int line
, rtx reg
, rtx mem
, rtx scratch
,
16858 gcc_assert (reg
!= NULL_RTX
&& mem
!= NULL_RTX
&& scratch
!= NULL_RTX
);
16860 fprintf (stderr
, "rs6000_secondary_reload_inner:%d, type = %s\n", line
,
16861 store_p
? "store" : "load");
16864 set
= gen_rtx_SET (VOIDmode
, mem
, reg
);
16866 set
= gen_rtx_SET (VOIDmode
, reg
, mem
);
16868 clobber
= gen_rtx_CLOBBER (VOIDmode
, scratch
);
16869 debug_rtx (gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, set
, clobber
)));
16873 rs6000_secondary_reload_fail (int line
, rtx reg
, rtx mem
, rtx scratch
,
16876 rs6000_secondary_reload_trace (line
, reg
, mem
, scratch
, store_p
);
16877 gcc_unreachable ();
16880 /* Fixup reload addresses for Altivec or VSX loads/stores to change SP+offset
16881 to SP+reg addressing. */
16884 rs6000_secondary_reload_inner (rtx reg
, rtx mem
, rtx scratch
, bool store_p
)
16886 int regno
= true_regnum (reg
);
16887 enum machine_mode mode
= GET_MODE (reg
);
16888 enum reg_class rclass
;
16890 rtx and_op2
= NULL_RTX
;
16893 rtx scratch_or_premodify
= scratch
;
16897 if (TARGET_DEBUG_ADDR
)
16898 rs6000_secondary_reload_trace (__LINE__
, reg
, mem
, scratch
, store_p
);
16900 if (regno
< 0 || regno
>= FIRST_PSEUDO_REGISTER
)
16901 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
16903 if (GET_CODE (mem
) != MEM
)
16904 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
16906 rclass
= REGNO_REG_CLASS (regno
);
16907 addr
= find_replacement (&XEXP (mem
, 0));
16911 /* GPRs can handle reg + small constant, all other addresses need to use
16912 the scratch register. */
16915 if (GET_CODE (addr
) == AND
)
16917 and_op2
= XEXP (addr
, 1);
16918 addr
= find_replacement (&XEXP (addr
, 0));
16921 if (GET_CODE (addr
) == PRE_MODIFY
)
16923 scratch_or_premodify
= find_replacement (&XEXP (addr
, 0));
16924 if (!REG_P (scratch_or_premodify
))
16925 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
16927 addr
= find_replacement (&XEXP (addr
, 1));
16928 if (GET_CODE (addr
) != PLUS
)
16929 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
16932 if (GET_CODE (addr
) == PLUS
16933 && (and_op2
!= NULL_RTX
16934 || !rs6000_legitimate_offset_address_p (PTImode
, addr
,
16937 /* find_replacement already recurses into both operands of
16938 PLUS so we don't need to call it here. */
16939 addr_op1
= XEXP (addr
, 0);
16940 addr_op2
= XEXP (addr
, 1);
16941 if (!legitimate_indirect_address_p (addr_op1
, false))
16942 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
16944 if (!REG_P (addr_op2
)
16945 && (GET_CODE (addr_op2
) != CONST_INT
16946 || !satisfies_constraint_I (addr_op2
)))
16948 if (TARGET_DEBUG_ADDR
)
16951 "\nMove plus addr to register %s, mode = %s: ",
16952 rs6000_reg_names
[REGNO (scratch
)],
16953 GET_MODE_NAME (mode
));
16954 debug_rtx (addr_op2
);
16956 rs6000_emit_move (scratch
, addr_op2
, Pmode
);
16957 addr_op2
= scratch
;
16960 emit_insn (gen_rtx_SET (VOIDmode
,
16961 scratch_or_premodify
,
16962 gen_rtx_PLUS (Pmode
,
16966 addr
= scratch_or_premodify
;
16967 scratch_or_premodify
= scratch
;
16969 else if (!legitimate_indirect_address_p (addr
, false)
16970 && !rs6000_legitimate_offset_address_p (PTImode
, addr
,
16973 if (TARGET_DEBUG_ADDR
)
16975 fprintf (stderr
, "\nMove addr to register %s, mode = %s: ",
16976 rs6000_reg_names
[REGNO (scratch_or_premodify
)],
16977 GET_MODE_NAME (mode
));
16980 rs6000_emit_move (scratch_or_premodify
, addr
, Pmode
);
16981 addr
= scratch_or_premodify
;
16982 scratch_or_premodify
= scratch
;
16986 /* Float registers can do offset+reg addressing for scalar types. */
16988 if (legitimate_indirect_address_p (addr
, false) /* reg */
16989 || legitimate_indexed_address_p (addr
, false) /* reg+reg */
16990 || ((GET_MODE_SIZE (mode
) == 4 || GET_MODE_SIZE (mode
) == 8)
16991 && and_op2
== NULL_RTX
16992 && scratch_or_premodify
== scratch
16993 && rs6000_legitimate_offset_address_p (mode
, addr
, false, false)))
16996 /* If this isn't a legacy floating point load/store, fall through to the
16999 /* VSX/Altivec registers can only handle reg+reg addressing. Move other
17000 addresses into a scratch register. */
17004 /* With float regs, we need to handle the AND ourselves, since we can't
17005 use the Altivec instruction with an implicit AND -16. Allow scalar
17006 loads to float registers to use reg+offset even if VSX. */
17007 if (GET_CODE (addr
) == AND
17008 && (rclass
!= ALTIVEC_REGS
|| GET_MODE_SIZE (mode
) != 16
17009 || GET_CODE (XEXP (addr
, 1)) != CONST_INT
17010 || INTVAL (XEXP (addr
, 1)) != -16
17011 || !VECTOR_MEM_ALTIVEC_P (mode
)))
17013 and_op2
= XEXP (addr
, 1);
17014 addr
= find_replacement (&XEXP (addr
, 0));
17017 /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
17018 as the address later. */
17019 if (GET_CODE (addr
) == PRE_MODIFY
17020 && ((ALTIVEC_OR_VSX_VECTOR_MODE (mode
)
17021 && (rclass
!= FLOAT_REGS
17022 || (GET_MODE_SIZE (mode
) != 4 && GET_MODE_SIZE (mode
) != 8)))
17023 || and_op2
!= NULL_RTX
17024 || !legitimate_indexed_address_p (XEXP (addr
, 1), false)))
17026 scratch_or_premodify
= find_replacement (&XEXP (addr
, 0));
17027 if (!legitimate_indirect_address_p (scratch_or_premodify
, false))
17028 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
17030 addr
= find_replacement (&XEXP (addr
, 1));
17031 if (GET_CODE (addr
) != PLUS
)
17032 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
17035 if (legitimate_indirect_address_p (addr
, false) /* reg */
17036 || legitimate_indexed_address_p (addr
, false) /* reg+reg */
17037 || (GET_CODE (addr
) == AND
/* Altivec memory */
17038 && rclass
== ALTIVEC_REGS
17039 && GET_CODE (XEXP (addr
, 1)) == CONST_INT
17040 && INTVAL (XEXP (addr
, 1)) == -16
17041 && (legitimate_indirect_address_p (XEXP (addr
, 0), false)
17042 || legitimate_indexed_address_p (XEXP (addr
, 0), false))))
17045 else if (GET_CODE (addr
) == PLUS
)
17047 addr_op1
= XEXP (addr
, 0);
17048 addr_op2
= XEXP (addr
, 1);
17049 if (!REG_P (addr_op1
))
17050 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
17052 if (TARGET_DEBUG_ADDR
)
17054 fprintf (stderr
, "\nMove plus addr to register %s, mode = %s: ",
17055 rs6000_reg_names
[REGNO (scratch
)], GET_MODE_NAME (mode
));
17056 debug_rtx (addr_op2
);
17058 rs6000_emit_move (scratch
, addr_op2
, Pmode
);
17059 emit_insn (gen_rtx_SET (VOIDmode
,
17060 scratch_or_premodify
,
17061 gen_rtx_PLUS (Pmode
,
17064 addr
= scratch_or_premodify
;
17065 scratch_or_premodify
= scratch
;
17068 else if (GET_CODE (addr
) == SYMBOL_REF
|| GET_CODE (addr
) == CONST
17069 || GET_CODE (addr
) == CONST_INT
|| GET_CODE (addr
) == LO_SUM
17072 if (TARGET_DEBUG_ADDR
)
17074 fprintf (stderr
, "\nMove addr to register %s, mode = %s: ",
17075 rs6000_reg_names
[REGNO (scratch_or_premodify
)],
17076 GET_MODE_NAME (mode
));
17080 rs6000_emit_move (scratch_or_premodify
, addr
, Pmode
);
17081 addr
= scratch_or_premodify
;
17082 scratch_or_premodify
= scratch
;
17086 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
17091 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
17094 /* If the original address involved a pre-modify that we couldn't use the VSX
17095 memory instruction with update, and we haven't taken care of already,
17096 store the address in the pre-modify register and use that as the
17098 if (scratch_or_premodify
!= scratch
&& scratch_or_premodify
!= addr
)
17100 emit_insn (gen_rtx_SET (VOIDmode
, scratch_or_premodify
, addr
));
17101 addr
= scratch_or_premodify
;
17104 /* If the original address involved an AND -16 and we couldn't use an ALTIVEC
17105 memory instruction, recreate the AND now, including the clobber which is
17106 generated by the general ANDSI3/ANDDI3 patterns for the
17107 andi. instruction. */
17108 if (and_op2
!= NULL_RTX
)
17110 if (! legitimate_indirect_address_p (addr
, false))
17112 emit_insn (gen_rtx_SET (VOIDmode
, scratch
, addr
));
17116 if (TARGET_DEBUG_ADDR
)
17118 fprintf (stderr
, "\nAnd addr to register %s, mode = %s: ",
17119 rs6000_reg_names
[REGNO (scratch
)], GET_MODE_NAME (mode
));
17120 debug_rtx (and_op2
);
17123 and_rtx
= gen_rtx_SET (VOIDmode
,
17125 gen_rtx_AND (Pmode
,
17129 cc_clobber
= gen_rtx_CLOBBER (CCmode
, gen_rtx_SCRATCH (CCmode
));
17130 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
17131 gen_rtvec (2, and_rtx
, cc_clobber
)));
17135 /* Adjust the address if it changed. */
17136 if (addr
!= XEXP (mem
, 0))
17138 mem
= replace_equiv_address_nv (mem
, addr
);
17139 if (TARGET_DEBUG_ADDR
)
17140 fprintf (stderr
, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
17143 /* Now create the move. */
17145 emit_insn (gen_rtx_SET (VOIDmode
, mem
, reg
));
17147 emit_insn (gen_rtx_SET (VOIDmode
, reg
, mem
));
17152 /* Convert reloads involving 64-bit gprs and misaligned offset
17153 addressing, or multiple 32-bit gprs and offsets that are too large,
17154 to use indirect addressing. */
17157 rs6000_secondary_reload_gpr (rtx reg
, rtx mem
, rtx scratch
, bool store_p
)
17159 int regno
= true_regnum (reg
);
17160 enum reg_class rclass
;
17162 rtx scratch_or_premodify
= scratch
;
17164 if (TARGET_DEBUG_ADDR
)
17166 fprintf (stderr
, "\nrs6000_secondary_reload_gpr, type = %s\n",
17167 store_p
? "store" : "load");
17168 fprintf (stderr
, "reg:\n");
17170 fprintf (stderr
, "mem:\n");
17172 fprintf (stderr
, "scratch:\n");
17173 debug_rtx (scratch
);
17176 gcc_assert (regno
>= 0 && regno
< FIRST_PSEUDO_REGISTER
);
17177 gcc_assert (GET_CODE (mem
) == MEM
);
17178 rclass
= REGNO_REG_CLASS (regno
);
17179 gcc_assert (rclass
== GENERAL_REGS
|| rclass
== BASE_REGS
);
17180 addr
= XEXP (mem
, 0);
17182 if (GET_CODE (addr
) == PRE_MODIFY
)
17184 scratch_or_premodify
= XEXP (addr
, 0);
17185 gcc_assert (REG_P (scratch_or_premodify
));
17186 addr
= XEXP (addr
, 1);
17188 gcc_assert (GET_CODE (addr
) == PLUS
|| GET_CODE (addr
) == LO_SUM
);
17190 rs6000_emit_move (scratch_or_premodify
, addr
, Pmode
);
17192 mem
= replace_equiv_address_nv (mem
, scratch_or_premodify
);
17194 /* Now create the move. */
17196 emit_insn (gen_rtx_SET (VOIDmode
, mem
, reg
));
17198 emit_insn (gen_rtx_SET (VOIDmode
, reg
, mem
));
17203 /* Allocate a 64-bit stack slot to be used for copying SDmode values through if
17204 this function has any SDmode references. If we are on a power7 or later, we
17205 don't need the 64-bit stack slot since the LFIWZX and STIFWX instructions
17206 can load/store the value. */
17209 rs6000_alloc_sdmode_stack_slot (void)
17213 gimple_stmt_iterator gsi
;
17215 gcc_assert (cfun
->machine
->sdmode_stack_slot
== NULL_RTX
);
17216 /* We use a different approach for dealing with the secondary
17221 if (TARGET_NO_SDMODE_STACK
)
17224 FOR_EACH_BB_FN (bb
, cfun
)
17225 for (gsi
= gsi_start_bb (bb
); !gsi_end_p (gsi
); gsi_next (&gsi
))
17227 tree ret
= walk_gimple_op (gsi_stmt (gsi
), rs6000_check_sdmode
, NULL
);
17230 rtx stack
= assign_stack_local (DDmode
, GET_MODE_SIZE (DDmode
), 0);
17231 cfun
->machine
->sdmode_stack_slot
= adjust_address_nv (stack
,
17237 /* Check for any SDmode parameters of the function. */
17238 for (t
= DECL_ARGUMENTS (cfun
->decl
); t
; t
= DECL_CHAIN (t
))
17240 if (TREE_TYPE (t
) == error_mark_node
)
17243 if (TYPE_MODE (TREE_TYPE (t
)) == SDmode
17244 || TYPE_MODE (DECL_ARG_TYPE (t
)) == SDmode
)
17246 rtx stack
= assign_stack_local (DDmode
, GET_MODE_SIZE (DDmode
), 0);
17247 cfun
->machine
->sdmode_stack_slot
= adjust_address_nv (stack
,
17255 rs6000_instantiate_decls (void)
17257 if (cfun
->machine
->sdmode_stack_slot
!= NULL_RTX
)
17258 instantiate_decl_rtl (cfun
->machine
->sdmode_stack_slot
);
17261 /* Given an rtx X being reloaded into a reg required to be
17262 in class CLASS, return the class of reg to actually use.
17263 In general this is just CLASS; but on some machines
17264 in some cases it is preferable to use a more restrictive class.
17266 On the RS/6000, we have to return NO_REGS when we want to reload a
17267 floating-point CONST_DOUBLE to force it to be copied to memory.
17269 We also don't want to reload integer values into floating-point
17270 registers if we can at all help it. In fact, this can
17271 cause reload to die, if it tries to generate a reload of CTR
17272 into a FP register and discovers it doesn't have the memory location
17275 ??? Would it be a good idea to have reload do the converse, that is
17276 try to reload floating modes into FP registers if possible?
17279 static enum reg_class
17280 rs6000_preferred_reload_class (rtx x
, enum reg_class rclass
)
17282 enum machine_mode mode
= GET_MODE (x
);
17284 if (TARGET_VSX
&& x
== CONST0_RTX (mode
) && VSX_REG_CLASS_P (rclass
))
17287 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode
)
17288 && (rclass
== ALTIVEC_REGS
|| rclass
== VSX_REGS
)
17289 && easy_vector_constant (x
, mode
))
17290 return ALTIVEC_REGS
;
17292 if ((CONSTANT_P (x
) || GET_CODE (x
) == PLUS
))
17294 if (reg_class_subset_p (GENERAL_REGS
, rclass
))
17295 return GENERAL_REGS
;
17296 if (reg_class_subset_p (BASE_REGS
, rclass
))
17301 if (GET_MODE_CLASS (mode
) == MODE_INT
&& rclass
== NON_SPECIAL_REGS
)
17302 return GENERAL_REGS
;
17304 /* For VSX, prefer the traditional registers for 64-bit values because we can
17305 use the non-VSX loads. Prefer the Altivec registers if Altivec is
17306 handling the vector operations (i.e. V16QI, V8HI, and V4SI), or if we
17307 prefer Altivec loads.. */
17308 if (rclass
== VSX_REGS
)
17310 if (MEM_P (x
) && reg_addr
[mode
].scalar_in_vmx_p
)
17312 rtx addr
= XEXP (x
, 0);
17313 if (rs6000_legitimate_offset_address_p (mode
, addr
, false, true)
17314 || legitimate_lo_sum_address_p (mode
, addr
, false))
17317 else if (GET_MODE_SIZE (mode
) <= 8 && !reg_addr
[mode
].scalar_in_vmx_p
)
17320 if (VECTOR_UNIT_ALTIVEC_P (mode
) || VECTOR_MEM_ALTIVEC_P (mode
)
17321 || mode
== V1TImode
)
17322 return ALTIVEC_REGS
;
17330 /* Debug version of rs6000_preferred_reload_class. */
17331 static enum reg_class
17332 rs6000_debug_preferred_reload_class (rtx x
, enum reg_class rclass
)
17334 enum reg_class ret
= rs6000_preferred_reload_class (x
, rclass
);
17337 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
17339 reg_class_names
[ret
], reg_class_names
[rclass
],
17340 GET_MODE_NAME (GET_MODE (x
)));
17346 /* If we are copying between FP or AltiVec registers and anything else, we need
17347 a memory location. The exception is when we are targeting ppc64 and the
17348 move to/from fpr to gpr instructions are available. Also, under VSX, you
17349 can copy vector registers from the FP register set to the Altivec register
17350 set and vice versa. */
17353 rs6000_secondary_memory_needed (enum reg_class from_class
,
17354 enum reg_class to_class
,
17355 enum machine_mode mode
)
17357 enum rs6000_reg_type from_type
, to_type
;
17358 bool altivec_p
= ((from_class
== ALTIVEC_REGS
)
17359 || (to_class
== ALTIVEC_REGS
));
17361 /* If a simple/direct move is available, we don't need secondary memory */
17362 from_type
= reg_class_to_reg_type
[(int)from_class
];
17363 to_type
= reg_class_to_reg_type
[(int)to_class
];
17365 if (rs6000_secondary_reload_move (to_type
, from_type
, mode
,
17366 (secondary_reload_info
*)0, altivec_p
))
17369 /* If we have a floating point or vector register class, we need to use
17370 memory to transfer the data. */
17371 if (IS_FP_VECT_REG_TYPE (from_type
) || IS_FP_VECT_REG_TYPE (to_type
))
17377 /* Debug version of rs6000_secondary_memory_needed. */
17379 rs6000_debug_secondary_memory_needed (enum reg_class from_class
,
17380 enum reg_class to_class
,
17381 enum machine_mode mode
)
17383 bool ret
= rs6000_secondary_memory_needed (from_class
, to_class
, mode
);
17386 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
17387 "to_class = %s, mode = %s\n",
17388 ret
? "true" : "false",
17389 reg_class_names
[from_class
],
17390 reg_class_names
[to_class
],
17391 GET_MODE_NAME (mode
));
/* Return the register class of a scratch register needed to copy IN into
   or out of a register in RCLASS in MODE.  If it can be done directly,
   NO_REGS is returned.  */

static enum reg_class
rs6000_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
			       rtx in)
{
  int regno;

  if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
#if TARGET_MACHO
		     && MACHOPIC_INDIRECT
#endif
		     ))
    {
      /* We cannot copy a symbolic operand directly into anything
	 other than BASE_REGS for TARGET_ELF.  So indicate that a
	 register from BASE_REGS is needed as an intermediate
	 register.

	 On Darwin, pic addresses require a load from memory, which
	 needs a base register.  */
      if (rclass != BASE_REGS
	  && (GET_CODE (in) == SYMBOL_REF
	      || GET_CODE (in) == HIGH
	      || GET_CODE (in) == LABEL_REF
	      || GET_CODE (in) == CONST))
	return BASE_REGS;
    }

  if (GET_CODE (in) == REG)
    {
      regno = REGNO (in);
      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  regno = true_regnum (in);
	  if (regno >= FIRST_PSEUDO_REGISTER)
	    regno = -1;
	}
    }
  else if (GET_CODE (in) == SUBREG)
    {
      regno = true_regnum (in);
      if (regno >= FIRST_PSEUDO_REGISTER)
	regno = -1;
    }
  else
    regno = -1;

  /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
     into anything.  */
  if (rclass == GENERAL_REGS || rclass == BASE_REGS
      || (regno >= 0 && INT_REGNO_P (regno)))
    return NO_REGS;

  /* Constants, memory, and FP registers can go into FP registers.  */
  if ((regno == -1 || FP_REGNO_P (regno))
      && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
    return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;

  /* Memory, and FP/altivec registers can go into fp/altivec registers under
     VSX.  However, for scalar variables, use the traditional floating point
     registers so that we can use offset+register addressing.  */
  if (TARGET_VSX
      && (regno == -1 || VSX_REGNO_P (regno))
      && VSX_REG_CLASS_P (rclass))
    {
      if (GET_MODE_SIZE (mode) < 16)
	return FLOAT_REGS;

      return NO_REGS;
    }

  /* Memory, and AltiVec registers can go into AltiVec registers.  */
  if ((regno == -1 || ALTIVEC_REGNO_P (regno))
      && rclass == ALTIVEC_REGS)
    return NO_REGS;

  /* We can copy among the CR registers.  */
  if ((rclass == CR_REGS || rclass == CR0_REGS)
      && regno >= 0 && CR_REGNO_P (regno))
    return NO_REGS;

  /* Otherwise, we need GENERAL_REGS.  */
  return GENERAL_REGS;
}
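/* Example (illustrative): loading a SYMBOL_REF into FLOAT_REGS on a
   TARGET_ELF configuration hits the first test above, so

     rs6000_secondary_reload_class (FLOAT_REGS, DFmode, symbol)

   (where "symbol" stands for some SYMBOL_REF rtx) returns BASE_REGS: a GPR
   usable as a base register is needed to form the address before the value
   can reach an FPR.  A plain FPR-to-FPR copy of the same mode returns
   NO_REGS.  */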
/* Debug version of rs6000_secondary_reload_class.  */
static enum reg_class
rs6000_debug_secondary_reload_class (enum reg_class rclass,
				     enum machine_mode mode, rtx in)
{
  enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
  fprintf (stderr,
	   "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
	   "mode = %s, input rtx:\n",
	   reg_class_names[ret], reg_class_names[rclass],
	   GET_MODE_NAME (mode));
  debug_rtx (in);

  return ret;
}
/* Return nonzero if for CLASS a mode change from FROM to TO is invalid.  */

static bool
rs6000_cannot_change_mode_class (enum machine_mode from,
				 enum machine_mode to,
				 enum reg_class rclass)
{
  unsigned from_size = GET_MODE_SIZE (from);
  unsigned to_size = GET_MODE_SIZE (to);

  if (from_size != to_size)
    {
      enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;

      if (reg_classes_intersect_p (xclass, rclass))
	{
	  unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
	  unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];

	  /* Don't allow 64-bit types to overlap with 128-bit types that take a
	     single register under VSX because the scalar part of the register
	     is in the upper 64-bits, and not the lower 64-bits.  Types like
	     TFmode/TDmode that take 2 scalar register can overlap.  128-bit
	     IEEE floating point can't overlap, and neither can small
	     values.  */
	  if (TARGET_IEEEQUAD && (to == TFmode || from == TFmode))
	    return true;

	  /* TDmode in floating-mode registers must always go into a register
	     pair with the most significant word in the even-numbered register
	     to match ISA requirements.  In little-endian mode, this does not
	     match subreg numbering, so we cannot allow subregs.  */
	  if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
	    return true;

	  if (from_size < 8 || to_size < 8)
	    return true;

	  if (from_size == 8 && (8 * to_nregs) != to_size)
	    return true;

	  if (to_size == 8 && (8 * from_nregs) != from_size)
	    return true;

	  return false;
	}
      else
	return false;
    }

  if (TARGET_E500_DOUBLE
      && ((((to) == DFmode) + ((from) == DFmode)) == 1
	  || (((to) == TFmode) + ((from) == TFmode)) == 1
	  || (((to) == DDmode) + ((from) == DDmode)) == 1
	  || (((to) == TDmode) + ((from) == TDmode)) == 1
	  || (((to) == DImode) + ((from) == DImode)) == 1))
    return true;

  /* Since the VSX register set includes traditional floating point registers
     and altivec registers, just check for the size being different instead of
     trying to check whether the modes are vector modes.  Otherwise it won't
     allow say DF and DI to change classes.  For types like TFmode and TDmode
     that take 2 64-bit registers, rather than a single 128-bit register, don't
     allow subregs of those types to other 128 bit types.  */
  if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
    {
      unsigned num_regs = (from_size + 15) / 16;
      if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
	  || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
	return true;

      return (from_size != 8 && from_size != 16);
    }

  if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
      && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
    return true;

  if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
      && reg_classes_intersect_p (GENERAL_REGS, rclass))
    return true;

  return false;
}
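/* Worked example (illustrative, under the assumptions visible above): with
   VSX enabled, a change from DImode to V2DImode in a VSX class has
   from_size = 8 and to_size = 16, and hard_regno_nregs[FIRST_FPR_REGNO]
   [V2DImode] is typically 1, so "from_size == 8 && (8 * to_nregs) != to_size"
   holds and the function returns true: the DImode scalar lives in the upper
   64 bits of the VSX register, so such a subreg would not address the bytes
   it appears to.  */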
/* Debug version of rs6000_cannot_change_mode_class.  */
static bool
rs6000_debug_cannot_change_mode_class (enum machine_mode from,
				       enum machine_mode to,
				       enum reg_class rclass)
{
  bool ret = rs6000_cannot_change_mode_class (from, to, rclass);

  fprintf (stderr,
	   "rs6000_cannot_change_mode_class, return %s, from = %s, "
	   "to = %s, rclass = %s\n",
	   ret ? "true" : "false",
	   GET_MODE_NAME (from), GET_MODE_NAME (to),
	   reg_class_names[rclass]);

  return ret;
}
17604 /* Return a string to do a move operation of 128 bits of data. */
17607 rs6000_output_move_128bit (rtx operands
[])
17609 rtx dest
= operands
[0];
17610 rtx src
= operands
[1];
17611 enum machine_mode mode
= GET_MODE (dest
);
17614 bool dest_gpr_p
, dest_fp_p
, dest_vmx_p
, dest_vsx_p
;
17615 bool src_gpr_p
, src_fp_p
, src_vmx_p
, src_vsx_p
;
17619 dest_regno
= REGNO (dest
);
17620 dest_gpr_p
= INT_REGNO_P (dest_regno
);
17621 dest_fp_p
= FP_REGNO_P (dest_regno
);
17622 dest_vmx_p
= ALTIVEC_REGNO_P (dest_regno
);
17623 dest_vsx_p
= dest_fp_p
| dest_vmx_p
;
17628 dest_gpr_p
= dest_fp_p
= dest_vmx_p
= dest_vsx_p
= false;
17633 src_regno
= REGNO (src
);
17634 src_gpr_p
= INT_REGNO_P (src_regno
);
17635 src_fp_p
= FP_REGNO_P (src_regno
);
17636 src_vmx_p
= ALTIVEC_REGNO_P (src_regno
);
17637 src_vsx_p
= src_fp_p
| src_vmx_p
;
17642 src_gpr_p
= src_fp_p
= src_vmx_p
= src_vsx_p
= false;
17645 /* Register moves. */
17646 if (dest_regno
>= 0 && src_regno
>= 0)
17653 else if (TARGET_VSX
&& TARGET_DIRECT_MOVE
&& src_vsx_p
)
17657 else if (TARGET_VSX
&& dest_vsx_p
)
17660 return "xxlor %x0,%x1,%x1";
17662 else if (TARGET_DIRECT_MOVE
&& src_gpr_p
)
17666 else if (TARGET_ALTIVEC
&& dest_vmx_p
&& src_vmx_p
)
17667 return "vor %0,%1,%1";
17669 else if (dest_fp_p
&& src_fp_p
)
17674 else if (dest_regno
>= 0 && MEM_P (src
))
17678 if (TARGET_QUAD_MEMORY
&& quad_load_store_p (dest
, src
))
17684 else if (TARGET_ALTIVEC
&& dest_vmx_p
17685 && altivec_indexed_or_indirect_operand (src
, mode
))
17686 return "lvx %0,%y1";
17688 else if (TARGET_VSX
&& dest_vsx_p
)
17690 if (mode
== V16QImode
|| mode
== V8HImode
|| mode
== V4SImode
)
17691 return "lxvw4x %x0,%y1";
17693 return "lxvd2x %x0,%y1";
17696 else if (TARGET_ALTIVEC
&& dest_vmx_p
)
17697 return "lvx %0,%y1";
17699 else if (dest_fp_p
)
17704 else if (src_regno
>= 0 && MEM_P (dest
))
17708 if (TARGET_QUAD_MEMORY
&& quad_load_store_p (dest
, src
))
17709 return "stq %1,%0";
17714 else if (TARGET_ALTIVEC
&& src_vmx_p
17715 && altivec_indexed_or_indirect_operand (src
, mode
))
17716 return "stvx %1,%y0";
17718 else if (TARGET_VSX
&& src_vsx_p
)
17720 if (mode
== V16QImode
|| mode
== V8HImode
|| mode
== V4SImode
)
17721 return "stxvw4x %x1,%y0";
17723 return "stxvd2x %x1,%y0";
17726 else if (TARGET_ALTIVEC
&& src_vmx_p
)
17727 return "stvx %1,%y0";
17734 else if (dest_regno
>= 0
17735 && (GET_CODE (src
) == CONST_INT
17736 || GET_CODE (src
) == CONST_WIDE_INT
17737 || GET_CODE (src
) == CONST_DOUBLE
17738 || GET_CODE (src
) == CONST_VECTOR
))
17743 else if (TARGET_VSX
&& dest_vsx_p
&& zero_constant (src
, mode
))
17744 return "xxlxor %x0,%x0,%x0";
17746 else if (TARGET_ALTIVEC
&& dest_vmx_p
)
17747 return output_vec_const_move (operands
);
17750 if (TARGET_DEBUG_ADDR
)
17752 fprintf (stderr
, "\n===== Bad 128 bit move:\n");
17753 debug_rtx (gen_rtx_SET (VOIDmode
, dest
, src
));
17756 gcc_unreachable ();
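/* Example of the strings produced above (illustrative): a V4SImode load
   into a VSX register gives "lxvw4x %x0,%y1", other VSX vector loads give
   "lxvd2x %x0,%y1", a VSX register-to-register copy gives
   "xxlor %x0,%x1,%x1", and a zero constant into a VSX register gives
   "xxlxor %x0,%x0,%x0".  */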
/* Validate a 128-bit move.  */
bool
rs6000_move_128bit_ok_p (rtx operands[])
{
  enum machine_mode mode = GET_MODE (operands[0]);
  return (gpc_reg_operand (operands[0], mode)
	  || gpc_reg_operand (operands[1], mode));
}

/* Return true if a 128-bit move needs to be split.  */
bool
rs6000_split_128bit_ok_p (rtx operands[])
{
  if (!reload_completed)
    return false;

  if (!gpr_or_gpr_p (operands[0], operands[1]))
    return false;

  if (quad_load_store_p (operands[0], operands[1]))
    return false;

  return true;
}
/* Given a comparison operation, return the bit number in CCR to test.  We
   know this is a valid comparison.

   SCC_P is 1 if this is for an scc.  That means that %D will have been
   used instead of %C, so the bits will be in different places.

   Return -1 if OP isn't a valid comparison for some reason.  */

int
ccr_bit (rtx op, int scc_p)
{
  enum rtx_code code = GET_CODE (op);
  enum machine_mode cc_mode;
  int cc_regnum;
  int base_bit;
  rtx reg;

  if (!COMPARISON_P (op))
    return -1;

  reg = XEXP (op, 0);

  gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));

  cc_mode = GET_MODE (reg);
  cc_regnum = REGNO (reg);
  base_bit = 4 * (cc_regnum - CR0_REGNO);

  validate_condition_mode (code, cc_mode);

  /* When generating a sCOND operation, only positive conditions are
     allowed.  */
  gcc_assert (!scc_p
	      || code == EQ || code == GT || code == LT || code == UNORDERED
	      || code == GTU || code == LTU);

  switch (code)
    {
    case NE:
      return scc_p ? base_bit + 3 : base_bit + 2;
    case EQ:
      return base_bit + 2;
    case GT:  case GTU:  case UNLE:
      return base_bit + 1;
    case LT:  case LTU:  case UNGE:
      return base_bit;
    case ORDERED:  case UNORDERED:
      return base_bit + 3;

    case GE:  case GEU:
      /* If scc, we will have done a cror to put the bit in the
	 unordered position.  So test that bit.  For integer, this is ! LT
	 unless this is an scc insn.  */
      return scc_p ? base_bit + 3 : base_bit;

    case LE:  case LEU:
      return scc_p ? base_bit + 3 : base_bit + 1;

    default:
      gcc_unreachable ();
    }
}
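/* Worked example (illustrative): each CR field holds four bits in the order
   LT, GT, EQ, SO/UN.  For a comparison held in CR2, base_bit = 4 * 2 = 8,
   so ccr_bit returns 9 for GT and 10 for EQ when scc_p is 0; when scc_p is
   nonzero the preceding cror has moved the interesting bit into the SO/UN
   slot, which is why the scc cases return base_bit + 3.  */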
/* Return the GOT register.  */

rtx
rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
{
  /* The second flow pass currently (June 1999) can't update
     regs_ever_live without disturbing other parts of the compiler, so
     update it here to make the prolog/epilogue code happy.  */
  if (!can_create_pseudo_p ()
      && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
    df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);

  crtl->uses_pic_offset_table = 1;

  return pic_offset_table_rtx;
}

static rs6000_stack_t stack_info;

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
rs6000_init_machine_status (void)
{
  stack_info.reload_completed = 0;
  return ggc_cleared_alloc<machine_function> ();
}
#define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)

int
extract_MB (rtx op)
{
  int i;
  unsigned long val = INTVAL (op);

  /* If the high bit is zero, the value is the first 1 bit we find
     from the left.  */
  if ((val & 0x80000000) == 0)
    {
      gcc_assert (val & 0xffffffff);

      i = 1;
      while (((val <<= 1) & 0x80000000) == 0)
	++i;
      return i;
    }

  /* If the high bit is set and the low bit is not, or the mask is all
     1's, the value is zero.  */
  if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
    return 0;

  /* Otherwise we have a wrap-around mask.  Look for the first 0 bit
     from the right.  */
  i = 31;
  while (((val >>= 1) & 1) != 0)
    --i;

  return i;
}

int
extract_ME (rtx op)
{
  int i;
  unsigned long val = INTVAL (op);

  /* If the low bit is zero, the value is the first 1 bit we find from
     the right.  */
  if ((val & 1) == 0)
    {
      gcc_assert (val & 0xffffffff);

      i = 30;
      while (((val >>= 1) & 1) == 0)
	--i;

      return i;
    }

  /* If the low bit is set and the high bit is not, or the mask is all
     1's, the value is 31.  */
  if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
    return 31;

  /* Otherwise we have a wrap-around mask.  Look for the first 0 bit
     from the left.  */
  i = 0;
  while (((val <<= 1) & 0x80000000) != 0)
    ++i;

  return i;
}
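/* Worked examples (illustrative), using the rlwinm bit numbering where bit 0
   is the most significant bit: for the mask 0x0000fff0, extract_MB returns
   16 and extract_ME returns 27; for the wrap-around mask 0xf000000f,
   extract_MB returns 28 and extract_ME returns 3 (the mask runs from bit 28
   through bit 31 and wraps around to bit 3).  */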
17945 /* Write out a function code label. */
17948 rs6000_output_function_entry (FILE *file
, const char *fname
)
17950 if (fname
[0] != '.')
17952 switch (DEFAULT_ABI
)
17955 gcc_unreachable ();
17961 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "L.");
17971 RS6000_OUTPUT_BASENAME (file
, fname
);
/* Print an operand.  Recognize special options, documented below.  */

#if TARGET_ELF
#define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
#define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
#else
#define SMALL_DATA_RELOC "sda21"
#define SMALL_DATA_REG 0
#endif
17985 print_operand (FILE *file
, rtx x
, int code
)
17988 unsigned HOST_WIDE_INT uval
;
17992 /* %a is output_address. */
17995 /* If constant, low-order 16 bits of constant, unsigned.
17996 Otherwise, write normally. */
17998 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 0xffff);
18000 print_operand (file
, x
, 0);
18004 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
18005 for 64-bit mask direction. */
18006 putc (((INTVAL (x
) & 1) == 0 ? 'r' : 'l'), file
);
18009 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
18013 /* Like 'J' but get to the GT bit only. */
18014 gcc_assert (REG_P (x
));
18016 /* Bit 1 is GT bit. */
18017 i
= 4 * (REGNO (x
) - CR0_REGNO
) + 1;
18019 /* Add one for shift count in rlinm for scc. */
18020 fprintf (file
, "%d", i
+ 1);
18024 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
18027 output_operand_lossage ("invalid %%e value");
18032 if ((uval
& 0xffff) == 0 && uval
!= 0)
18037 /* X is a CR register. Print the number of the EQ bit of the CR */
18038 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
18039 output_operand_lossage ("invalid %%E value");
18041 fprintf (file
, "%d", 4 * (REGNO (x
) - CR0_REGNO
) + 2);
18045 /* X is a CR register. Print the shift count needed to move it
18046 to the high-order four bits. */
18047 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
18048 output_operand_lossage ("invalid %%f value");
18050 fprintf (file
, "%d", 4 * (REGNO (x
) - CR0_REGNO
));
18054 /* Similar, but print the count for the rotate in the opposite
18056 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
18057 output_operand_lossage ("invalid %%F value");
18059 fprintf (file
, "%d", 32 - 4 * (REGNO (x
) - CR0_REGNO
));
18063 /* X is a constant integer. If it is negative, print "m",
18064 otherwise print "z". This is to make an aze or ame insn. */
18065 if (GET_CODE (x
) != CONST_INT
)
18066 output_operand_lossage ("invalid %%G value");
18067 else if (INTVAL (x
) >= 0)
18074 /* If constant, output low-order five bits. Otherwise, write
18077 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 31);
18079 print_operand (file
, x
, 0);
18083 /* If constant, output low-order six bits. Otherwise, write
18086 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 63);
18088 print_operand (file
, x
, 0);
18092 /* Print `i' if this is a constant, else nothing. */
18098 /* Write the bit number in CCR for jump. */
18099 i
= ccr_bit (x
, 0);
18101 output_operand_lossage ("invalid %%j code");
18103 fprintf (file
, "%d", i
);
18107 /* Similar, but add one for shift count in rlinm for scc and pass
18108 scc flag to `ccr_bit'. */
18109 i
= ccr_bit (x
, 1);
18111 output_operand_lossage ("invalid %%J code");
18113 /* If we want bit 31, write a shift count of zero, not 32. */
18114 fprintf (file
, "%d", i
== 31 ? 0 : i
+ 1);
18118 /* X must be a constant. Write the 1's complement of the
18121 output_operand_lossage ("invalid %%k value");
18123 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ~ INTVAL (x
));
18127 /* X must be a symbolic constant on ELF. Write an
18128 expression suitable for an 'addi' that adds in the low 16
18129 bits of the MEM. */
18130 if (GET_CODE (x
) == CONST
)
18132 if (GET_CODE (XEXP (x
, 0)) != PLUS
18133 || (GET_CODE (XEXP (XEXP (x
, 0), 0)) != SYMBOL_REF
18134 && GET_CODE (XEXP (XEXP (x
, 0), 0)) != LABEL_REF
)
18135 || GET_CODE (XEXP (XEXP (x
, 0), 1)) != CONST_INT
)
18136 output_operand_lossage ("invalid %%K value");
18138 print_operand_address (file
, x
);
18139 fputs ("@l", file
);
18142 /* %l is output_asm_label. */
18145 /* Write second word of DImode or DFmode reference. Works on register
18146 or non-indexed memory only. */
18148 fputs (reg_names
[REGNO (x
) + 1], file
);
18149 else if (MEM_P (x
))
18151 /* Handle possible auto-increment. Since it is pre-increment and
18152 we have already done it, we can just use an offset of word. */
18153 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
18154 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
18155 output_address (plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0),
18157 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
18158 output_address (plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0),
18161 output_address (XEXP (adjust_address_nv (x
, SImode
,
18165 if (small_data_operand (x
, GET_MODE (x
)))
18166 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
18167 reg_names
[SMALL_DATA_REG
]);
18172 /* MB value for a mask operand. */
18173 if (! mask_operand (x
, SImode
))
18174 output_operand_lossage ("invalid %%m value");
18176 fprintf (file
, "%d", extract_MB (x
));
18180 /* ME value for a mask operand. */
18181 if (! mask_operand (x
, SImode
))
18182 output_operand_lossage ("invalid %%M value");
18184 fprintf (file
, "%d", extract_ME (x
));
18187 /* %n outputs the negative of its operand. */
18190 /* Write the number of elements in the vector times 4. */
18191 if (GET_CODE (x
) != PARALLEL
)
18192 output_operand_lossage ("invalid %%N value");
18194 fprintf (file
, "%d", XVECLEN (x
, 0) * 4);
18198 /* Similar, but subtract 1 first. */
18199 if (GET_CODE (x
) != PARALLEL
)
18200 output_operand_lossage ("invalid %%O value");
18202 fprintf (file
, "%d", (XVECLEN (x
, 0) - 1) * 4);
18206 /* X is a CONST_INT that is a power of two. Output the logarithm. */
18209 || (i
= exact_log2 (INTVAL (x
))) < 0)
18210 output_operand_lossage ("invalid %%p value");
18212 fprintf (file
, "%d", i
);
18216 /* The operand must be an indirect memory reference. The result
18217 is the register name. */
18218 if (GET_CODE (x
) != MEM
|| GET_CODE (XEXP (x
, 0)) != REG
18219 || REGNO (XEXP (x
, 0)) >= 32)
18220 output_operand_lossage ("invalid %%P value");
18222 fputs (reg_names
[REGNO (XEXP (x
, 0))], file
);
18226 /* This outputs the logical code corresponding to a boolean
18227 expression. The expression may have one or both operands
18228 negated (if one, only the first one). For condition register
18229 logical operations, it will also treat the negated
18230 CR codes as NOTs, but not handle NOTs of them. */
18232 const char *const *t
= 0;
18234 enum rtx_code code
= GET_CODE (x
);
18235 static const char * const tbl
[3][3] = {
18236 { "and", "andc", "nor" },
18237 { "or", "orc", "nand" },
18238 { "xor", "eqv", "xor" } };
18242 else if (code
== IOR
)
18244 else if (code
== XOR
)
18247 output_operand_lossage ("invalid %%q value");
18249 if (GET_CODE (XEXP (x
, 0)) != NOT
)
18253 if (GET_CODE (XEXP (x
, 1)) == NOT
)
18264 if (! TARGET_MFCRF
)
18270 /* X is a CR register. Print the mask for `mtcrf'. */
18271 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
18272 output_operand_lossage ("invalid %%R value");
18274 fprintf (file
, "%d", 128 >> (REGNO (x
) - CR0_REGNO
));
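      /* Illustrative: the mtcrf FXM mask selects CR0 with its most
	 significant bit, so CR0 prints 128, CR1 prints 64, and CR7
	 prints 1.  */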
18278 /* Low 5 bits of 32 - value */
18280 output_operand_lossage ("invalid %%s value");
18282 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, (32 - INTVAL (x
)) & 31);
18286 /* PowerPC64 mask position. All 0's is excluded.
18287 CONST_INT 32-bit mask is considered sign-extended so any
18288 transition must occur within the CONST_INT, not on the boundary. */
18289 if (! mask64_operand (x
, DImode
))
18290 output_operand_lossage ("invalid %%S value");
18294 if (uval
& 1) /* Clear Left */
18296 #if HOST_BITS_PER_WIDE_INT > 64
18297 uval
&= ((unsigned HOST_WIDE_INT
) 1 << 64) - 1;
18301 else /* Clear Right */
18304 #if HOST_BITS_PER_WIDE_INT > 64
18305 uval
&= ((unsigned HOST_WIDE_INT
) 1 << 64) - 1;
18311 gcc_assert (i
>= 0);
18312 fprintf (file
, "%d", i
);
18316 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
18317 gcc_assert (REG_P (x
) && GET_MODE (x
) == CCmode
);
18319 /* Bit 3 is OV bit. */
18320 i
= 4 * (REGNO (x
) - CR0_REGNO
) + 3;
18322 /* If we want bit 31, write a shift count of zero, not 32. */
18323 fprintf (file
, "%d", i
== 31 ? 0 : i
+ 1);
18327 /* Print the symbolic name of a branch target register. */
18328 if (GET_CODE (x
) != REG
|| (REGNO (x
) != LR_REGNO
18329 && REGNO (x
) != CTR_REGNO
))
18330 output_operand_lossage ("invalid %%T value");
18331 else if (REGNO (x
) == LR_REGNO
)
18332 fputs ("lr", file
);
18334 fputs ("ctr", file
);
18338 /* High-order or low-order 16 bits of constant, whichever is non-zero,
18339 for use in unsigned operand. */
18342 output_operand_lossage ("invalid %%u value");
18347 if ((uval
& 0xffff) == 0)
18350 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
, uval
& 0xffff);
18354 /* High-order 16 bits of constant for use in signed operand. */
18356 output_operand_lossage ("invalid %%v value");
18358 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
18359 (INTVAL (x
) >> 16) & 0xffff);
18363 /* Print `u' if this has an auto-increment or auto-decrement. */
18365 && (GET_CODE (XEXP (x
, 0)) == PRE_INC
18366 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
18367 || GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
))
18372 /* Print the trap code for this operand. */
18373 switch (GET_CODE (x
))
18376 fputs ("eq", file
); /* 4 */
18379 fputs ("ne", file
); /* 24 */
18382 fputs ("lt", file
); /* 16 */
18385 fputs ("le", file
); /* 20 */
18388 fputs ("gt", file
); /* 8 */
18391 fputs ("ge", file
); /* 12 */
18394 fputs ("llt", file
); /* 2 */
18397 fputs ("lle", file
); /* 6 */
18400 fputs ("lgt", file
); /* 1 */
18403 fputs ("lge", file
); /* 5 */
18406 gcc_unreachable ();
18411 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
18414 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
18415 ((INTVAL (x
) & 0xffff) ^ 0x8000) - 0x8000);
18417 print_operand (file
, x
, 0);
18421 /* MB value for a PowerPC64 rldic operand. */
18422 i
= clz_hwi (INTVAL (x
));
18424 fprintf (file
, "%d", i
);
18428 /* X is a FPR or Altivec register used in a VSX context. */
18429 if (GET_CODE (x
) != REG
|| !VSX_REGNO_P (REGNO (x
)))
18430 output_operand_lossage ("invalid %%x value");
18433 int reg
= REGNO (x
);
18434 int vsx_reg
= (FP_REGNO_P (reg
)
18436 : reg
- FIRST_ALTIVEC_REGNO
+ 32);
18438 #ifdef TARGET_REGNAMES
18439 if (TARGET_REGNAMES
)
18440 fprintf (file
, "%%vs%d", vsx_reg
);
18443 fprintf (file
, "%d", vsx_reg
);
18449 && (legitimate_indexed_address_p (XEXP (x
, 0), 0)
18450 || (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
18451 && legitimate_indexed_address_p (XEXP (XEXP (x
, 0), 1), 0))))
18456 /* Like 'L', for third word of TImode/PTImode */
18458 fputs (reg_names
[REGNO (x
) + 2], file
);
18459 else if (MEM_P (x
))
18461 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
18462 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
18463 output_address (plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0), 8));
18464 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
18465 output_address (plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0), 8));
18467 output_address (XEXP (adjust_address_nv (x
, SImode
, 8), 0));
18468 if (small_data_operand (x
, GET_MODE (x
)))
18469 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
18470 reg_names
[SMALL_DATA_REG
]);
18475 /* X is a SYMBOL_REF. Write out the name preceded by a
18476 period and without any trailing data in brackets. Used for function
18477 names. If we are configured for System V (or the embedded ABI) on
18478 the PowerPC, do not emit the period, since those systems do not use
18479 TOCs and the like. */
18480 gcc_assert (GET_CODE (x
) == SYMBOL_REF
);
18482 /* For macho, check to see if we need a stub. */
18485 const char *name
= XSTR (x
, 0);
18487 if (darwin_emit_branch_islands
18488 && MACHOPIC_INDIRECT
18489 && machopic_classify_symbol (x
) == MACHOPIC_UNDEFINED_FUNCTION
)
18490 name
= machopic_indirection_name (x
, /*stub_p=*/true);
18492 assemble_name (file
, name
);
18494 else if (!DOT_SYMBOLS
)
18495 assemble_name (file
, XSTR (x
, 0));
18497 rs6000_output_function_entry (file
, XSTR (x
, 0));
18501 /* Like 'L', for last word of TImode/PTImode. */
18503 fputs (reg_names
[REGNO (x
) + 3], file
);
18504 else if (MEM_P (x
))
18506 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
18507 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
18508 output_address (plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0), 12));
18509 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
18510 output_address (plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0), 12));
18512 output_address (XEXP (adjust_address_nv (x
, SImode
, 12), 0));
18513 if (small_data_operand (x
, GET_MODE (x
)))
18514 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
18515 reg_names
[SMALL_DATA_REG
]);
18519 /* Print AltiVec or SPE memory operand. */
18524 gcc_assert (MEM_P (x
));
18528 /* Ugly hack because %y is overloaded. */
18529 if ((TARGET_SPE
|| TARGET_E500_DOUBLE
)
18530 && (GET_MODE_SIZE (GET_MODE (x
)) == 8
18531 || GET_MODE (x
) == TFmode
18532 || GET_MODE (x
) == TImode
18533 || GET_MODE (x
) == PTImode
))
18535 /* Handle [reg]. */
18538 fprintf (file
, "0(%s)", reg_names
[REGNO (tmp
)]);
18541 /* Handle [reg+UIMM]. */
18542 else if (GET_CODE (tmp
) == PLUS
&&
18543 GET_CODE (XEXP (tmp
, 1)) == CONST_INT
)
18547 gcc_assert (REG_P (XEXP (tmp
, 0)));
18549 x
= INTVAL (XEXP (tmp
, 1));
18550 fprintf (file
, "%d(%s)", x
, reg_names
[REGNO (XEXP (tmp
, 0))]);
18554 /* Fall through. Must be [reg+reg]. */
18556 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x
))
18557 && GET_CODE (tmp
) == AND
18558 && GET_CODE (XEXP (tmp
, 1)) == CONST_INT
18559 && INTVAL (XEXP (tmp
, 1)) == -16)
18560 tmp
= XEXP (tmp
, 0);
18561 else if (VECTOR_MEM_VSX_P (GET_MODE (x
))
18562 && GET_CODE (tmp
) == PRE_MODIFY
)
18563 tmp
= XEXP (tmp
, 1);
18565 fprintf (file
, "0,%s", reg_names
[REGNO (tmp
)]);
18568 if (GET_CODE (tmp
) != PLUS
18569 || !REG_P (XEXP (tmp
, 0))
18570 || !REG_P (XEXP (tmp
, 1)))
18572 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
18576 if (REGNO (XEXP (tmp
, 0)) == 0)
18577 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (tmp
, 1)) ],
18578 reg_names
[ REGNO (XEXP (tmp
, 0)) ]);
18580 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (tmp
, 0)) ],
18581 reg_names
[ REGNO (XEXP (tmp
, 1)) ]);
18588 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
18589 else if (MEM_P (x
))
18591 /* We need to handle PRE_INC and PRE_DEC here, since we need to
18592 know the width from the mode. */
18593 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
)
18594 fprintf (file
, "%d(%s)", GET_MODE_SIZE (GET_MODE (x
)),
18595 reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))]);
18596 else if (GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
18597 fprintf (file
, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x
)),
18598 reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))]);
18599 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
18600 output_address (XEXP (XEXP (x
, 0), 1));
18602 output_address (XEXP (x
, 0));
18606 if (toc_relative_expr_p (x
, false))
18607 /* This hack along with a corresponding hack in
18608 rs6000_output_addr_const_extra arranges to output addends
18609 where the assembler expects to find them. eg.
18610 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
18611 without this hack would be output as "x@toc+4". We
18613 output_addr_const (file
, CONST_CAST_RTX (tocrel_base
));
18615 output_addr_const (file
, x
);
18620 if (const char *name
= get_some_local_dynamic_name ())
18621 assemble_name (file
, name
);
18623 output_operand_lossage ("'%%&' used without any "
18624 "local dynamic TLS references");
18628 output_operand_lossage ("invalid %%xn code");
18632 /* Print the address of an operand. */
18635 print_operand_address (FILE *file
, rtx x
)
18638 fprintf (file
, "0(%s)", reg_names
[ REGNO (x
) ]);
18639 else if (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == CONST
18640 || GET_CODE (x
) == LABEL_REF
)
18642 output_addr_const (file
, x
);
18643 if (small_data_operand (x
, GET_MODE (x
)))
18644 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
18645 reg_names
[SMALL_DATA_REG
]);
18647 gcc_assert (!TARGET_TOC
);
18649 else if (GET_CODE (x
) == PLUS
&& REG_P (XEXP (x
, 0))
18650 && REG_P (XEXP (x
, 1)))
18652 if (REGNO (XEXP (x
, 0)) == 0)
18653 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (x
, 1)) ],
18654 reg_names
[ REGNO (XEXP (x
, 0)) ]);
18656 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (x
, 0)) ],
18657 reg_names
[ REGNO (XEXP (x
, 1)) ]);
18659 else if (GET_CODE (x
) == PLUS
&& REG_P (XEXP (x
, 0))
18660 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
18661 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
"(%s)",
18662 INTVAL (XEXP (x
, 1)), reg_names
[ REGNO (XEXP (x
, 0)) ]);
18664 else if (GET_CODE (x
) == LO_SUM
&& REG_P (XEXP (x
, 0))
18665 && CONSTANT_P (XEXP (x
, 1)))
18667 fprintf (file
, "lo16(");
18668 output_addr_const (file
, XEXP (x
, 1));
18669 fprintf (file
, ")(%s)", reg_names
[ REGNO (XEXP (x
, 0)) ]);
18673 else if (GET_CODE (x
) == LO_SUM
&& REG_P (XEXP (x
, 0))
18674 && CONSTANT_P (XEXP (x
, 1)))
18676 output_addr_const (file
, XEXP (x
, 1));
18677 fprintf (file
, "@l(%s)", reg_names
[ REGNO (XEXP (x
, 0)) ]);
18680 else if (toc_relative_expr_p (x
, false))
18682 /* This hack along with a corresponding hack in
18683 rs6000_output_addr_const_extra arranges to output addends
18684 where the assembler expects to find them. eg.
18686 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
18687 without this hack would be output as "x@toc+8@l(9)". We
18688 want "x+8@toc@l(9)". */
18689 output_addr_const (file
, CONST_CAST_RTX (tocrel_base
));
18690 if (GET_CODE (x
) == LO_SUM
)
18691 fprintf (file
, "@l(%s)", reg_names
[REGNO (XEXP (x
, 0))]);
18693 fprintf (file
, "(%s)", reg_names
[REGNO (XVECEXP (tocrel_base
, 0, 1))]);
18696 gcc_unreachable ();
18699 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
18702 rs6000_output_addr_const_extra (FILE *file
, rtx x
)
18704 if (GET_CODE (x
) == UNSPEC
)
18705 switch (XINT (x
, 1))
18707 case UNSPEC_TOCREL
:
18708 gcc_checking_assert (GET_CODE (XVECEXP (x
, 0, 0)) == SYMBOL_REF
18709 && REG_P (XVECEXP (x
, 0, 1))
18710 && REGNO (XVECEXP (x
, 0, 1)) == TOC_REGISTER
);
18711 output_addr_const (file
, XVECEXP (x
, 0, 0));
18712 if (x
== tocrel_base
&& tocrel_offset
!= const0_rtx
)
18714 if (INTVAL (tocrel_offset
) >= 0)
18715 fprintf (file
, "+");
18716 output_addr_const (file
, CONST_CAST_RTX (tocrel_offset
));
18718 if (!TARGET_AIX
|| (TARGET_ELF
&& TARGET_MINIMAL_TOC
))
18721 assemble_name (file
, toc_label_name
);
18723 else if (TARGET_ELF
)
18724 fputs ("@toc", file
);
18728 case UNSPEC_MACHOPIC_OFFSET
:
18729 output_addr_const (file
, XVECEXP (x
, 0, 0));
18731 machopic_output_function_base_name (file
);
18738 /* Target hook for assembling integer objects. The PowerPC version has
18739 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
18740 is defined. It also needs to handle DI-mode objects on 64-bit
18744 rs6000_assemble_integer (rtx x
, unsigned int size
, int aligned_p
)
18746 #ifdef RELOCATABLE_NEEDS_FIXUP
18747 /* Special handling for SI values. */
18748 if (RELOCATABLE_NEEDS_FIXUP
&& size
== 4 && aligned_p
)
18750 static int recurse
= 0;
18752 /* For -mrelocatable, we mark all addresses that need to be fixed up in
18753 the .fixup section. Since the TOC section is already relocated, we
18754 don't need to mark it here. We used to skip the text section, but it
18755 should never be valid for relocated addresses to be placed in the text
18757 if (TARGET_RELOCATABLE
18758 && in_section
!= toc_section
18760 && !CONST_SCALAR_INT_P (x
)
18766 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCP", fixuplabelno
);
18768 ASM_OUTPUT_LABEL (asm_out_file
, buf
);
18769 fprintf (asm_out_file
, "\t.long\t(");
18770 output_addr_const (asm_out_file
, x
);
18771 fprintf (asm_out_file
, ")@fixup\n");
18772 fprintf (asm_out_file
, "\t.section\t\".fixup\",\"aw\"\n");
18773 ASM_OUTPUT_ALIGN (asm_out_file
, 2);
18774 fprintf (asm_out_file
, "\t.long\t");
18775 assemble_name (asm_out_file
, buf
);
18776 fprintf (asm_out_file
, "\n\t.previous\n");
18780 /* Remove initial .'s to turn a -mcall-aixdesc function
18781 address into the address of the descriptor, not the function
18783 else if (GET_CODE (x
) == SYMBOL_REF
18784 && XSTR (x
, 0)[0] == '.'
18785 && DEFAULT_ABI
== ABI_AIX
)
18787 const char *name
= XSTR (x
, 0);
18788 while (*name
== '.')
18791 fprintf (asm_out_file
, "\t.long\t%s\n", name
);
18795 #endif /* RELOCATABLE_NEEDS_FIXUP */
18796 return default_assemble_integer (x
, size
, aligned_p
);
18799 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
18800 /* Emit an assembler directive to set symbol visibility for DECL to
18801 VISIBILITY_TYPE. */
18804 rs6000_assemble_visibility (tree decl
, int vis
)
18809 /* Functions need to have their entry point symbol visibility set as
18810 well as their descriptor symbol visibility. */
18811 if (DEFAULT_ABI
== ABI_AIX
18813 && TREE_CODE (decl
) == FUNCTION_DECL
)
18815 static const char * const visibility_types
[] = {
18816 NULL
, "internal", "hidden", "protected"
18819 const char *name
, *type
;
18821 name
= ((* targetm
.strip_name_encoding
)
18822 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
))));
18823 type
= visibility_types
[vis
];
18825 fprintf (asm_out_file
, "\t.%s\t%s\n", type
, name
);
18826 fprintf (asm_out_file
, "\t.%s\t.%s\n", type
, name
);
18829 default_assemble_visibility (decl
, vis
);
enum rtx_code
rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
{
  /* Reversal of FP compares takes care -- an ordered compare
     becomes an unordered compare and vice versa.  */
  if (mode == CCFPmode
      && (!flag_finite_math_only
	  || code == UNLT || code == UNLE || code == UNGT || code == UNGE
	  || code == UNEQ || code == LTGT))
    return reverse_condition_maybe_unordered (code);
  else
    return reverse_condition (code);
}
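/* Example (illustrative): reversing GT on a CCFPmode comparison yields UNLE
   rather than LE, so a branch that the compiler flips still takes the same
   path when one operand is a NaN.  Only under flag_finite_math_only, and
   only for the ordered codes, is the plain reverse_condition result safe.  */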
18847 /* Generate a compare for CODE. Return a brand-new rtx that
18848 represents the result of the compare. */
18851 rs6000_generate_compare (rtx cmp
, enum machine_mode mode
)
18853 enum machine_mode comp_mode
;
18854 rtx compare_result
;
18855 enum rtx_code code
= GET_CODE (cmp
);
18856 rtx op0
= XEXP (cmp
, 0);
18857 rtx op1
= XEXP (cmp
, 1);
18859 if (FLOAT_MODE_P (mode
))
18860 comp_mode
= CCFPmode
;
18861 else if (code
== GTU
|| code
== LTU
18862 || code
== GEU
|| code
== LEU
)
18863 comp_mode
= CCUNSmode
;
18864 else if ((code
== EQ
|| code
== NE
)
18865 && unsigned_reg_p (op0
)
18866 && (unsigned_reg_p (op1
)
18867 || (CONST_INT_P (op1
) && INTVAL (op1
) != 0)))
18868 /* These are unsigned values, perhaps there will be a later
18869 ordering compare that can be shared with this one. */
18870 comp_mode
= CCUNSmode
;
18872 comp_mode
= CCmode
;
18874 /* If we have an unsigned compare, make sure we don't have a signed value as
18876 if (comp_mode
== CCUNSmode
&& GET_CODE (op1
) == CONST_INT
18877 && INTVAL (op1
) < 0)
18879 op0
= copy_rtx_if_shared (op0
);
18880 op1
= force_reg (GET_MODE (op0
), op1
);
18881 cmp
= gen_rtx_fmt_ee (code
, GET_MODE (cmp
), op0
, op1
);
18884 /* First, the compare. */
18885 compare_result
= gen_reg_rtx (comp_mode
);
18887 /* E500 FP compare instructions on the GPRs. Yuck! */
18888 if ((!TARGET_FPRS
&& TARGET_HARD_FLOAT
)
18889 && FLOAT_MODE_P (mode
))
18891 rtx cmp
, or_result
, compare_result2
;
18892 enum machine_mode op_mode
= GET_MODE (op0
);
18895 if (op_mode
== VOIDmode
)
18896 op_mode
= GET_MODE (op1
);
18898 /* First reverse the condition codes that aren't directly supported. */
18906 code
= reverse_condition_maybe_unordered (code
);
18919 gcc_unreachable ();
18922 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
18923 This explains the following mess. */
18931 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18932 ? gen_tstsfeq_gpr (compare_result
, op0
, op1
)
18933 : gen_cmpsfeq_gpr (compare_result
, op0
, op1
);
18937 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18938 ? gen_tstdfeq_gpr (compare_result
, op0
, op1
)
18939 : gen_cmpdfeq_gpr (compare_result
, op0
, op1
);
18943 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18944 ? gen_tsttfeq_gpr (compare_result
, op0
, op1
)
18945 : gen_cmptfeq_gpr (compare_result
, op0
, op1
);
18949 gcc_unreachable ();
18958 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18959 ? gen_tstsfgt_gpr (compare_result
, op0
, op1
)
18960 : gen_cmpsfgt_gpr (compare_result
, op0
, op1
);
18964 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18965 ? gen_tstdfgt_gpr (compare_result
, op0
, op1
)
18966 : gen_cmpdfgt_gpr (compare_result
, op0
, op1
);
18970 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18971 ? gen_tsttfgt_gpr (compare_result
, op0
, op1
)
18972 : gen_cmptfgt_gpr (compare_result
, op0
, op1
);
18976 gcc_unreachable ();
18985 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18986 ? gen_tstsflt_gpr (compare_result
, op0
, op1
)
18987 : gen_cmpsflt_gpr (compare_result
, op0
, op1
);
18991 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18992 ? gen_tstdflt_gpr (compare_result
, op0
, op1
)
18993 : gen_cmpdflt_gpr (compare_result
, op0
, op1
);
18997 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18998 ? gen_tsttflt_gpr (compare_result
, op0
, op1
)
18999 : gen_cmptflt_gpr (compare_result
, op0
, op1
);
19003 gcc_unreachable ();
19008 gcc_unreachable ();
19011 /* Synthesize LE and GE from LT/GT || EQ. */
19012 if (code
== LE
|| code
== GE
)
19016 compare_result2
= gen_reg_rtx (CCFPmode
);
19022 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
19023 ? gen_tstsfeq_gpr (compare_result2
, op0
, op1
)
19024 : gen_cmpsfeq_gpr (compare_result2
, op0
, op1
);
19028 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
19029 ? gen_tstdfeq_gpr (compare_result2
, op0
, op1
)
19030 : gen_cmpdfeq_gpr (compare_result2
, op0
, op1
);
19034 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
19035 ? gen_tsttfeq_gpr (compare_result2
, op0
, op1
)
19036 : gen_cmptfeq_gpr (compare_result2
, op0
, op1
);
19040 gcc_unreachable ();
19045 /* OR them together. */
19046 or_result
= gen_reg_rtx (CCFPmode
);
19047 cmp
= gen_e500_cr_ior_compare (or_result
, compare_result
,
19049 compare_result
= or_result
;
19052 code
= reverse_p
? NE
: EQ
;
19058 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
19059 CLOBBERs to match cmptf_internal2 pattern. */
19060 if (comp_mode
== CCFPmode
&& TARGET_XL_COMPAT
19061 && GET_MODE (op0
) == TFmode
19062 && !TARGET_IEEEQUAD
19063 && TARGET_HARD_FLOAT
&& TARGET_FPRS
&& TARGET_LONG_DOUBLE_128
)
19064 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
19066 gen_rtx_SET (VOIDmode
,
19068 gen_rtx_COMPARE (comp_mode
, op0
, op1
)),
19069 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
19070 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
19071 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
19072 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
19073 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
19074 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
19075 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
19076 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
19077 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (Pmode
)))));
19078 else if (GET_CODE (op1
) == UNSPEC
19079 && XINT (op1
, 1) == UNSPEC_SP_TEST
)
19081 rtx op1b
= XVECEXP (op1
, 0, 0);
19082 comp_mode
= CCEQmode
;
19083 compare_result
= gen_reg_rtx (CCEQmode
);
19085 emit_insn (gen_stack_protect_testdi (compare_result
, op0
, op1b
));
19087 emit_insn (gen_stack_protect_testsi (compare_result
, op0
, op1b
));
19090 emit_insn (gen_rtx_SET (VOIDmode
, compare_result
,
19091 gen_rtx_COMPARE (comp_mode
, op0
, op1
)));
19094 /* Some kinds of FP comparisons need an OR operation;
19095 under flag_finite_math_only we don't bother. */
19096 if (FLOAT_MODE_P (mode
)
19097 && !flag_finite_math_only
19098 && !(TARGET_HARD_FLOAT
&& !TARGET_FPRS
)
19099 && (code
== LE
|| code
== GE
19100 || code
== UNEQ
|| code
== LTGT
19101 || code
== UNGT
|| code
== UNLT
))
19103 enum rtx_code or1
, or2
;
19104 rtx or1_rtx
, or2_rtx
, compare2_rtx
;
19105 rtx or_result
= gen_reg_rtx (CCEQmode
);
19109 case LE
: or1
= LT
; or2
= EQ
; break;
19110 case GE
: or1
= GT
; or2
= EQ
; break;
19111 case UNEQ
: or1
= UNORDERED
; or2
= EQ
; break;
19112 case LTGT
: or1
= LT
; or2
= GT
; break;
19113 case UNGT
: or1
= UNORDERED
; or2
= GT
; break;
19114 case UNLT
: or1
= UNORDERED
; or2
= LT
; break;
19115 default: gcc_unreachable ();
19117 validate_condition_mode (or1
, comp_mode
);
19118 validate_condition_mode (or2
, comp_mode
);
19119 or1_rtx
= gen_rtx_fmt_ee (or1
, SImode
, compare_result
, const0_rtx
);
19120 or2_rtx
= gen_rtx_fmt_ee (or2
, SImode
, compare_result
, const0_rtx
);
19121 compare2_rtx
= gen_rtx_COMPARE (CCEQmode
,
19122 gen_rtx_IOR (SImode
, or1_rtx
, or2_rtx
),
19124 emit_insn (gen_rtx_SET (VOIDmode
, or_result
, compare2_rtx
));
19126 compare_result
= or_result
;
19130 validate_condition_mode (code
, GET_MODE (compare_result
));
19132 return gen_rtx_fmt_ee (code
, VOIDmode
, compare_result
, const0_rtx
);
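/* Example (illustrative): for a DFmode "a <= b" compiled without
   -ffinite-math-only, the table above picks or1 = LT and or2 = EQ, so the
   final result is a CCEQmode register holding the IOR of the LT and EQ
   tests of the original CCFPmode compare, and the rest of the compiler then
   tests that single CCEQ condition.  */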
/* Emit the RTL for an sISEL pattern.  */

void
rs6000_emit_sISEL (enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
{
  rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
}
19145 rs6000_emit_sCOND (enum machine_mode mode
, rtx operands
[])
19148 enum machine_mode op_mode
;
19149 enum rtx_code cond_code
;
19150 rtx result
= operands
[0];
19152 if (TARGET_ISEL
&& (mode
== SImode
|| mode
== DImode
))
19154 rs6000_emit_sISEL (mode
, operands
);
19158 condition_rtx
= rs6000_generate_compare (operands
[1], mode
);
19159 cond_code
= GET_CODE (condition_rtx
);
19161 if (FLOAT_MODE_P (mode
)
19162 && !TARGET_FPRS
&& TARGET_HARD_FLOAT
)
19166 PUT_MODE (condition_rtx
, SImode
);
19167 t
= XEXP (condition_rtx
, 0);
19169 gcc_assert (cond_code
== NE
|| cond_code
== EQ
);
19171 if (cond_code
== NE
)
19172 emit_insn (gen_e500_flip_gt_bit (t
, t
));
19174 emit_insn (gen_move_from_CR_gt_bit (result
, t
));
19178 if (cond_code
== NE
19179 || cond_code
== GE
|| cond_code
== LE
19180 || cond_code
== GEU
|| cond_code
== LEU
19181 || cond_code
== ORDERED
|| cond_code
== UNGE
|| cond_code
== UNLE
)
19183 rtx not_result
= gen_reg_rtx (CCEQmode
);
19184 rtx not_op
, rev_cond_rtx
;
19185 enum machine_mode cc_mode
;
19187 cc_mode
= GET_MODE (XEXP (condition_rtx
, 0));
19189 rev_cond_rtx
= gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode
, cond_code
),
19190 SImode
, XEXP (condition_rtx
, 0), const0_rtx
);
19191 not_op
= gen_rtx_COMPARE (CCEQmode
, rev_cond_rtx
, const0_rtx
);
19192 emit_insn (gen_rtx_SET (VOIDmode
, not_result
, not_op
));
19193 condition_rtx
= gen_rtx_EQ (VOIDmode
, not_result
, const0_rtx
);
19196 op_mode
= GET_MODE (XEXP (operands
[1], 0));
19197 if (op_mode
== VOIDmode
)
19198 op_mode
= GET_MODE (XEXP (operands
[1], 1));
19200 if (TARGET_POWERPC64
&& (op_mode
== DImode
|| FLOAT_MODE_P (mode
)))
19202 PUT_MODE (condition_rtx
, DImode
);
19203 convert_move (result
, condition_rtx
, 0);
19207 PUT_MODE (condition_rtx
, SImode
);
19208 emit_insn (gen_rtx_SET (VOIDmode
, result
, condition_rtx
));
/* Emit a branch of kind CODE to location LOC.  */

void
rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
{
  rtx condition_rtx, loc_ref;

  condition_rtx = rs6000_generate_compare (operands[0], mode);
  loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
						     loc_ref, pc_rtx)));
}
19226 /* Return the string to output a conditional branch to LABEL, which is
19227 the operand template of the label, or NULL if the branch is really a
19228 conditional return.
19230 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
19231 condition code register and its mode specifies what kind of
19232 comparison we made.
19234 REVERSED is nonzero if we should reverse the sense of the comparison.
19236 INSN is the insn. */
19239 output_cbranch (rtx op
, const char *label
, int reversed
, rtx_insn
*insn
)
19241 static char string
[64];
19242 enum rtx_code code
= GET_CODE (op
);
19243 rtx cc_reg
= XEXP (op
, 0);
19244 enum machine_mode mode
= GET_MODE (cc_reg
);
19245 int cc_regno
= REGNO (cc_reg
) - CR0_REGNO
;
19246 int need_longbranch
= label
!= NULL
&& get_attr_length (insn
) == 8;
19247 int really_reversed
= reversed
^ need_longbranch
;
19253 validate_condition_mode (code
, mode
);
19255 /* Work out which way this really branches. We could use
19256 reverse_condition_maybe_unordered here always but this
19257 makes the resulting assembler clearer. */
19258 if (really_reversed
)
19260 /* Reversal of FP compares takes care -- an ordered compare
19261 becomes an unordered compare and vice versa. */
19262 if (mode
== CCFPmode
)
19263 code
= reverse_condition_maybe_unordered (code
);
19265 code
= reverse_condition (code
);
19268 if ((!TARGET_FPRS
&& TARGET_HARD_FLOAT
) && mode
== CCFPmode
)
19270 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
19275 /* Opposite of GT. */
19284 gcc_unreachable ();
19290 /* Not all of these are actually distinct opcodes, but
19291 we distinguish them for clarity of the resulting assembler. */
19292 case NE
: case LTGT
:
19293 ccode
= "ne"; break;
19294 case EQ
: case UNEQ
:
19295 ccode
= "eq"; break;
19297 ccode
= "ge"; break;
19298 case GT
: case GTU
: case UNGT
:
19299 ccode
= "gt"; break;
19301 ccode
= "le"; break;
19302 case LT
: case LTU
: case UNLT
:
19303 ccode
= "lt"; break;
19304 case UNORDERED
: ccode
= "un"; break;
19305 case ORDERED
: ccode
= "nu"; break;
19306 case UNGE
: ccode
= "nl"; break;
19307 case UNLE
: ccode
= "ng"; break;
19309 gcc_unreachable ();
19312 /* Maybe we have a guess as to how likely the branch is. */
19314 note
= find_reg_note (insn
, REG_BR_PROB
, NULL_RTX
);
19315 if (note
!= NULL_RTX
)
19317 /* PROB is the difference from 50%. */
19318 int prob
= XINT (note
, 0) - REG_BR_PROB_BASE
/ 2;
19320 /* Only hint for highly probable/improbable branches on newer
19321 cpus as static prediction overrides processor dynamic
19322 prediction. For older cpus we may as well always hint, but
19323 assume not taken for branches that are very close to 50% as a
19324 mispredicted taken branch is more expensive than a
19325 mispredicted not-taken branch. */
19326 if (rs6000_always_hint
19327 || (abs (prob
) > REG_BR_PROB_BASE
/ 100 * 48
19328 && br_prob_note_reliable_p (note
)))
19330 if (abs (prob
) > REG_BR_PROB_BASE
/ 20
19331 && ((prob
> 0) ^ need_longbranch
))
19339 s
+= sprintf (s
, "b%slr%s ", ccode
, pred
);
19341 s
+= sprintf (s
, "b%s%s ", ccode
, pred
);
19343 /* We need to escape any '%' characters in the reg_names string.
19344 Assume they'd only be the first character.... */
19345 if (reg_names
[cc_regno
+ CR0_REGNO
][0] == '%')
19347 s
+= sprintf (s
, "%s", reg_names
[cc_regno
+ CR0_REGNO
]);
19351 /* If the branch distance was too far, we may have to use an
19352 unconditional branch to go the distance. */
19353 if (need_longbranch
)
19354 s
+= sprintf (s
, ",$+8\n\tb %s", label
);
19356 s
+= sprintf (s
, ",%s", label
);
/* Return the string to flip the GT bit on a CR.  */
const char *
output_e500_flip_gt_bit (rtx dst, rtx src)
{
  static char string[64];
  int a, b;

  gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
	      && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));

  /* GT bit.  */
  a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
  b = 4 * (REGNO (src) - CR0_REGNO) + 1;

  sprintf (string, "crnot %d,%d", a, b);
  return string;
}
19380 /* Return insn for VSX or Altivec comparisons. */
19383 rs6000_emit_vector_compare_inner (enum rtx_code code
, rtx op0
, rtx op1
)
19386 enum machine_mode mode
= GET_MODE (op0
);
19394 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
19404 mask
= gen_reg_rtx (mode
);
19405 emit_insn (gen_rtx_SET (VOIDmode
,
19407 gen_rtx_fmt_ee (code
, mode
, op0
, op1
)));
19414 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
19415 DMODE is expected destination mode. This is a recursive function. */
19418 rs6000_emit_vector_compare (enum rtx_code rcode
,
19420 enum machine_mode dmode
)
19423 bool swap_operands
= false;
19424 bool try_again
= false;
19426 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode
));
19427 gcc_assert (GET_MODE (op0
) == GET_MODE (op1
));
19429 /* See if the comparison works as is. */
19430 mask
= rs6000_emit_vector_compare_inner (rcode
, op0
, op1
);
19438 swap_operands
= true;
19443 swap_operands
= true;
19451 /* Invert condition and try again.
19452 e.g., A != B becomes ~(A==B). */
19454 enum rtx_code rev_code
;
19455 enum insn_code nor_code
;
19458 rev_code
= reverse_condition_maybe_unordered (rcode
);
19459 if (rev_code
== UNKNOWN
)
19462 nor_code
= optab_handler (one_cmpl_optab
, dmode
);
19463 if (nor_code
== CODE_FOR_nothing
)
19466 mask2
= rs6000_emit_vector_compare (rev_code
, op0
, op1
, dmode
);
19470 mask
= gen_reg_rtx (dmode
);
19471 emit_insn (GEN_FCN (nor_code
) (mask
, mask2
));
19479 /* Try GT/GTU/LT/LTU OR EQ */
19482 enum insn_code ior_code
;
19483 enum rtx_code new_code
;
19504 gcc_unreachable ();
19507 ior_code
= optab_handler (ior_optab
, dmode
);
19508 if (ior_code
== CODE_FOR_nothing
)
19511 c_rtx
= rs6000_emit_vector_compare (new_code
, op0
, op1
, dmode
);
19515 eq_rtx
= rs6000_emit_vector_compare (EQ
, op0
, op1
, dmode
);
19519 mask
= gen_reg_rtx (dmode
);
19520 emit_insn (GEN_FCN (ior_code
) (mask
, c_rtx
, eq_rtx
));
19538 mask
= rs6000_emit_vector_compare_inner (rcode
, op0
, op1
);
19543 /* You only get two chances. */
19547 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
19548 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
19549 operands for the relation operation COND. */
19552 rs6000_emit_vector_cond_expr (rtx dest
, rtx op_true
, rtx op_false
,
19553 rtx cond
, rtx cc_op0
, rtx cc_op1
)
19555 enum machine_mode dest_mode
= GET_MODE (dest
);
19556 enum machine_mode mask_mode
= GET_MODE (cc_op0
);
19557 enum rtx_code rcode
= GET_CODE (cond
);
19558 enum machine_mode cc_mode
= CCmode
;
19562 bool invert_move
= false;
19564 if (VECTOR_UNIT_NONE_P (dest_mode
))
19567 gcc_assert (GET_MODE_SIZE (dest_mode
) == GET_MODE_SIZE (mask_mode
)
19568 && GET_MODE_NUNITS (dest_mode
) == GET_MODE_NUNITS (mask_mode
));
19572 /* Swap operands if we can, and fall back to doing the operation as
19573 specified, and doing a NOR to invert the test. */
19579 /* Invert condition and try again.
19580 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
19581 invert_move
= true;
19582 rcode
= reverse_condition_maybe_unordered (rcode
);
19583 if (rcode
== UNKNOWN
)
19587 /* Mark unsigned tests with CCUNSmode. */
19592 cc_mode
= CCUNSmode
;
19599 /* Get the vector mask for the given relational operations. */
19600 mask
= rs6000_emit_vector_compare (rcode
, cc_op0
, cc_op1
, mask_mode
);
19608 op_true
= op_false
;
19612 cond2
= gen_rtx_fmt_ee (NE
, cc_mode
, gen_lowpart (dest_mode
, mask
),
19613 CONST0_RTX (dest_mode
));
19614 emit_insn (gen_rtx_SET (VOIDmode
,
19616 gen_rtx_IF_THEN_ELSE (dest_mode
,
19623 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
19624 operands of the last comparison is nonzero/true, FALSE_COND if it
19625 is zero/false. Return 0 if the hardware has no such operation. */
19628 rs6000_emit_cmove (rtx dest
, rtx op
, rtx true_cond
, rtx false_cond
)
19630 enum rtx_code code
= GET_CODE (op
);
19631 rtx op0
= XEXP (op
, 0);
19632 rtx op1
= XEXP (op
, 1);
19633 REAL_VALUE_TYPE c1
;
19634 enum machine_mode compare_mode
= GET_MODE (op0
);
19635 enum machine_mode result_mode
= GET_MODE (dest
);
19637 bool is_against_zero
;
19639 /* These modes should always match. */
19640 if (GET_MODE (op1
) != compare_mode
19641 /* In the isel case however, we can use a compare immediate, so
19642 op1 may be a small constant. */
19643 && (!TARGET_ISEL
|| !short_cint_operand (op1
, VOIDmode
)))
19645 if (GET_MODE (true_cond
) != result_mode
)
19647 if (GET_MODE (false_cond
) != result_mode
)
19650 /* Don't allow using floating point comparisons for integer results for
19652 if (FLOAT_MODE_P (compare_mode
) && !FLOAT_MODE_P (result_mode
))
19655 /* First, work out if the hardware can do this at all, or
19656 if it's too slow.... */
19657 if (!FLOAT_MODE_P (compare_mode
))
19660 return rs6000_emit_int_cmove (dest
, op
, true_cond
, false_cond
);
19663 else if (TARGET_HARD_FLOAT
&& !TARGET_FPRS
19664 && SCALAR_FLOAT_MODE_P (compare_mode
))
19667 is_against_zero
= op1
== CONST0_RTX (compare_mode
);
19669 /* A floating-point subtract might overflow, underflow, or produce
19670 an inexact result, thus changing the floating-point flags, so it
19671 can't be generated if we care about that. It's safe if one side
19672 of the construct is zero, since then no subtract will be
19674 if (SCALAR_FLOAT_MODE_P (compare_mode
)
19675 && flag_trapping_math
&& ! is_against_zero
)
19678 /* Eliminate half of the comparisons by switching operands, this
19679 makes the remaining code simpler. */
19680 if (code
== UNLT
|| code
== UNGT
|| code
== UNORDERED
|| code
== NE
19681 || code
== LTGT
|| code
== LT
|| code
== UNLE
)
19683 code
= reverse_condition_maybe_unordered (code
);
19685 true_cond
= false_cond
;
19689 /* UNEQ and LTGT take four instructions for a comparison with zero,
19690 it'll probably be faster to use a branch here too. */
19691 if (code
== UNEQ
&& HONOR_NANS (compare_mode
))
19694 if (GET_CODE (op1
) == CONST_DOUBLE
)
19695 REAL_VALUE_FROM_CONST_DOUBLE (c1
, op1
);
19697 /* We're going to try to implement comparisons by performing
19698 a subtract, then comparing against zero. Unfortunately,
19699 Inf - Inf is NaN which is not zero, and so if we don't
19700 know that the operand is finite and the comparison
19701 would treat EQ different to UNORDERED, we can't do it. */
19702 if (HONOR_INFINITIES (compare_mode
)
19703 && code
!= GT
&& code
!= UNGE
19704 && (GET_CODE (op1
) != CONST_DOUBLE
|| real_isinf (&c1
))
19705 /* Constructs of the form (a OP b ? a : b) are safe. */
19706 && ((! rtx_equal_p (op0
, false_cond
) && ! rtx_equal_p (op1
, false_cond
))
19707 || (! rtx_equal_p (op0
, true_cond
)
19708 && ! rtx_equal_p (op1
, true_cond
))))
19711 /* At this point we know we can use fsel. */
19713 /* Reduce the comparison to a comparison against zero. */
19714 if (! is_against_zero
)
19716 temp
= gen_reg_rtx (compare_mode
);
19717 emit_insn (gen_rtx_SET (VOIDmode
, temp
,
19718 gen_rtx_MINUS (compare_mode
, op0
, op1
)));
19720 op1
= CONST0_RTX (compare_mode
);
19723 /* If we don't care about NaNs we can reduce some of the comparisons
19724 down to faster ones. */
19725 if (! HONOR_NANS (compare_mode
))
19731 true_cond
= false_cond
;
19744 /* Now, reduce everything down to a GE. */
19751 temp
= gen_reg_rtx (compare_mode
);
19752 emit_insn (gen_rtx_SET (VOIDmode
, temp
, gen_rtx_NEG (compare_mode
, op0
)));
19757 temp
= gen_reg_rtx (compare_mode
);
19758 emit_insn (gen_rtx_SET (VOIDmode
, temp
, gen_rtx_ABS (compare_mode
, op0
)));
19763 temp
= gen_reg_rtx (compare_mode
);
19764 emit_insn (gen_rtx_SET (VOIDmode
, temp
,
19765 gen_rtx_NEG (compare_mode
,
19766 gen_rtx_ABS (compare_mode
, op0
))));
19771 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
19772 temp
= gen_reg_rtx (result_mode
);
19773 emit_insn (gen_rtx_SET (VOIDmode
, temp
,
19774 gen_rtx_IF_THEN_ELSE (result_mode
,
19775 gen_rtx_GE (VOIDmode
,
19777 true_cond
, false_cond
)));
19778 false_cond
= true_cond
;
19781 temp
= gen_reg_rtx (compare_mode
);
19782 emit_insn (gen_rtx_SET (VOIDmode
, temp
, gen_rtx_NEG (compare_mode
, op0
)));
19787 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
19788 temp
= gen_reg_rtx (result_mode
);
19789 emit_insn (gen_rtx_SET (VOIDmode
, temp
,
19790 gen_rtx_IF_THEN_ELSE (result_mode
,
19791 gen_rtx_GE (VOIDmode
,
19793 true_cond
, false_cond
)));
19794 true_cond
= false_cond
;
19797 temp
= gen_reg_rtx (compare_mode
);
19798 emit_insn (gen_rtx_SET (VOIDmode
, temp
, gen_rtx_NEG (compare_mode
, op0
)));
19803 gcc_unreachable ();
19806 emit_insn (gen_rtx_SET (VOIDmode
, dest
,
19807 gen_rtx_IF_THEN_ELSE (result_mode
,
19808 gen_rtx_GE (VOIDmode
,
19810 true_cond
, false_cond
)));
/* Same as above, but for ints (isel).  */

static int
rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  rtx condition_rtx, cr;
  enum machine_mode mode = GET_MODE (dest);
  enum rtx_code cond_code;
  rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
  bool signedp;

  if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
    return 0;

  /* We still have to do the compare, because isel doesn't do a
     compare, it just looks at the CRx bits set by a previous compare
     instruction.  */
  condition_rtx = rs6000_generate_compare (op, mode);
  cond_code = GET_CODE (condition_rtx);
  cr = XEXP (condition_rtx, 0);
  signedp = GET_MODE (cr) == CCmode;

  isel_func = (mode == SImode
               ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
               : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));

  switch (cond_code)
    {
    case LT: case GT: case LTU: case GTU: case EQ:
      /* isel handles these directly.  */
      break;

    default:
      /* We need to swap the sense of the comparison.  */
      {
        rtx t = true_cond;
        true_cond = false_cond;
        false_cond = t;
        PUT_CODE (condition_rtx, reverse_condition (cond_code));
      }
      break;
    }

  false_cond = force_reg (mode, false_cond);
  if (true_cond != const0_rtx)
    true_cond = force_reg (mode, true_cond);

  emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));

  return 1;
}
const char *
output_isel (rtx *operands)
{
  enum rtx_code code;

  code = GET_CODE (operands[1]);

  if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
    {
      gcc_assert (GET_CODE (operands[2]) == REG
                  && GET_CODE (operands[3]) == REG);
      PUT_CODE (operands[1], reverse_condition (code));
      return "isel %0,%3,%2,%j1";
    }

  return "isel %0,%2,%3,%j1";
}
void
rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  enum rtx_code c;
  rtx target;

  /* VSX/altivec have direct min/max insns.  */
  if ((code == SMAX || code == SMIN)
      && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
          || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
    {
      emit_insn (gen_rtx_SET (VOIDmode, dest,
                              gen_rtx_fmt_ee (code, mode, op0, op1)));
      return;
    }

  if (code == SMAX || code == SMIN)
    c = GE;
  else
    c = GEU;

  if (code == SMAX || code == UMAX)
    target = emit_conditional_move (dest, c, op0, op1, mode,
                                    op0, op1, mode, 0);
  else
    target = emit_conditional_move (dest, c, op0, op1, mode,
                                    op1, op0, mode, 0);
  gcc_assert (target);
  if (target != dest)
    emit_move_insn (dest, target);
}
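
/* Illustrative sketch (not part of GCC): when there is no direct vector
   min/max pattern, the code above builds the result from one conditional
   move keyed on "op0 >= op1" (GE for signed, GEU for unsigned); max keeps
   the operands in order and min swaps the two data operands.  Hypothetical
   stand-alone equivalents: */
#if 0
#include <stdint.h>

static int32_t  smax32 (int32_t a, int32_t b)   { return a >= b ? a : b; }
static int32_t  smin32 (int32_t a, int32_t b)   { return a >= b ? b : a; }
static uint32_t umax32 (uint32_t a, uint32_t b) { return a >= b ? a : b; }
static uint32_t umin32 (uint32_t a, uint32_t b) { return a >= b ? b : a; }
#endif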
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
  rtx x;

  x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
  add_int_reg_note (x, REG_BR_PROB, very_unlikely);
}
/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  For QI/HImode, possibly use a pattern that includes
   the zero_extend operation.  */

static void
emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
{
  rtx (*fn) (rtx, rtx) = NULL;

  switch (mode)
    {
    case QImode:
      fn = gen_load_lockedqi;
      break;
    case HImode:
      fn = gen_load_lockedhi;
      break;
    case SImode:
      if (GET_MODE (mem) == QImode)
        fn = gen_load_lockedqi_si;
      else if (GET_MODE (mem) == HImode)
        fn = gen_load_lockedhi_si;
      else
        fn = gen_load_lockedsi;
      break;
    case DImode:
      fn = gen_load_lockeddi;
      break;
    case TImode:
      fn = gen_load_lockedti;
      break;
    default:
      gcc_unreachable ();
    }
  emit_insn (fn (reg, mem));
}
/* A subroutine of the atomic operation splitters.  Emit a store-conditional
   instruction in MODE.  */

static void
emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
{
  rtx (*fn) (rtx, rtx, rtx) = NULL;

  switch (mode)
    {
    case QImode:
      fn = gen_store_conditionalqi;
      break;
    case HImode:
      fn = gen_store_conditionalhi;
      break;
    case SImode:
      fn = gen_store_conditionalsi;
      break;
    case DImode:
      fn = gen_store_conditionaldi;
      break;
    case TImode:
      fn = gen_store_conditionalti;
      break;
    default:
      gcc_unreachable ();
    }

  /* Emit sync before stwcx. to address PPC405 Erratum.  */
  if (PPC405_ERRATUM77)
    emit_insn (gen_hwsync ());

  emit_insn (fn (res, mem, val));
}
/* Expand barriers before and after a load_locked/store_cond sequence.  */

static rtx
rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
{
  rtx addr = XEXP (mem, 0);
  int strict_p = (reload_in_progress || reload_completed);

  if (!legitimate_indirect_address_p (addr, strict_p)
      && !legitimate_indexed_address_p (addr, strict_p))
    {
      addr = force_reg (Pmode, addr);
      mem = replace_equiv_address_nv (mem, addr);
    }

  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_ACQUIRE:
      break;
    case MEMMODEL_RELEASE:
    case MEMMODEL_ACQ_REL:
      emit_insn (gen_lwsync ());
      break;
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_hwsync ());
      break;
    default:
      gcc_unreachable ();
    }
  return mem;
}

static void
rs6000_post_atomic_barrier (enum memmodel model)
{
  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_RELEASE:
      break;
    case MEMMODEL_ACQUIRE:
    case MEMMODEL_ACQ_REL:
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_isync ());
      break;
    default:
      gcc_unreachable ();
    }
}
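
/* Illustrative sketch (not part of GCC): the two helpers above are what
   give the __atomic builtins their ordering on this target.  The calls
   below are ordinary user code; their memory-model arguments map onto the
   MEMMODEL_* cases handled in the two switches (relaxed gets no barrier,
   acquire gets an isync after the LL/SC sequence, release gets an lwsync
   before it, and seq_cst gets a heavyweight sync before plus an isync
   after).  */
#if 0
#include <stdint.h>

static uint32_t
example_barriers (uint32_t *p)
{
  uint32_t old;

  old = __atomic_fetch_add (p, 1, __ATOMIC_RELAXED);  /* no barriers */
  old = __atomic_fetch_add (p, 1, __ATOMIC_ACQUIRE);  /* isync after */
  old = __atomic_fetch_add (p, 1, __ATOMIC_RELEASE);  /* lwsync before */
  old = __atomic_fetch_add (p, 1, __ATOMIC_SEQ_CST);  /* hwsync before, isync after */
  return old;
}
#endif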
/* A subroutine of the various atomic expanders.  For sub-word operations,
   we must adjust things to operate on SImode.  Given the original MEM,
   return a new aligned memory.  Also build and return the quantities by
   which to shift and mask.  */

static rtx
rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
{
  rtx addr, align, shift, mask, mem;
  HOST_WIDE_INT shift_mask;
  enum machine_mode mode = GET_MODE (orig_mem);

  /* For smaller modes, we have to implement this via SImode.  */
  shift_mask = (mode == QImode ? 0x18 : 0x10);

  addr = XEXP (orig_mem, 0);
  addr = force_reg (GET_MODE (addr), addr);

  /* Aligned memory containing subword.  Generate a new memory.  We
     do not want any of the existing MEM_ATTR data, as we're now
     accessing memory outside the original object.  */
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);
  mem = gen_rtx_MEM (SImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  /* Shift amount for subword relative to aligned word.  */
  shift = gen_reg_rtx (SImode);
  addr = gen_lowpart (SImode, addr);
  emit_insn (gen_rlwinm (shift, addr, GEN_INT (3), GEN_INT (shift_mask)));
  if (BYTES_BIG_ENDIAN)
    shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
                                 shift, 1, OPTAB_LIB_WIDEN);
  *pshift = shift;

  /* Mask for insertion.  */
  mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
                              shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
  *pmask = mask;

  return mem;
}
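
/* Illustrative sketch (not part of GCC): a plain-C model of the arithmetic
   performed above.  The byte offset within the aligned word, times 8 and
   clamped by shift_mask (0x18 for bytes, 0x10 for halfwords), gives the
   little-endian shift; big-endian targets XOR with shift_mask to count
   from the other end.  The insertion mask is the mode mask moved into
   position.  Names here are hypothetical.  */
#if 0
#include <stdint.h>
#include <stdbool.h>

struct subword_pos { uint32_t shift; uint32_t mask; };

static struct subword_pos
subword_position (uintptr_t addr, unsigned size_in_bytes, bool big_endian)
{
  uint32_t shift_mask = (size_in_bytes == 1 ? 0x18 : 0x10);
  uint32_t mode_mask = (size_in_bytes == 1 ? 0xffu : 0xffffu);
  struct subword_pos p;

  p.shift = ((uint32_t) addr << 3) & shift_mask;   /* (addr & 3) * 8 */
  if (big_endian)
    p.shift ^= shift_mask;
  p.mask = mode_mask << p.shift;
  return p;
}
#endif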
/* A subroutine of the various atomic expanders.  For sub-word operands,
   combine OLDVAL and NEWVAL via MASK.  Returns a new pseudo.  */

static rtx
rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
{
  rtx x;

  x = gen_reg_rtx (SImode);
  emit_insn (gen_rtx_SET (VOIDmode, x,
                          gen_rtx_AND (SImode,
                                       gen_rtx_NOT (SImode, mask),
                                       oldval)));

  x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);

  return x;
}
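
/* Illustrative sketch (not part of GCC): the merge above is the usual
   read-modify-write insertion, (oldword & ~mask) | newval, with NEWVAL
   assumed to be already shifted into position within the word.  A
   hypothetical plain-C equivalent: */
#if 0
#include <stdint.h>

static uint32_t
merge_subword (uint32_t oldword, uint32_t newval_shifted, uint32_t mask)
{
  return (oldword & ~mask) | newval_shifted;
}
#endif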
/* A subroutine of the various atomic expanders.  For sub-word operands,
   extract WIDE to NARROW via SHIFT.  */

static void
rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
{
  wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
                              wide, 1, OPTAB_LIB_WIDEN);
  emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
}
/* Expand an atomic compare and swap operation.  */

void
rs6000_expand_atomic_compare_and_swap (rtx operands[])
{
  rtx boolval, retval, mem, oldval, newval, cond;
  rtx label1, label2, x, mask, shift;
  enum machine_mode mode, orig_mode;
  enum memmodel mod_s, mod_f;
  bool is_weak;

  boolval = operands[0];
  retval = operands[1];
  mem = operands[2];
  oldval = operands[3];
  newval = operands[4];
  is_weak = (INTVAL (operands[5]) != 0);
  mod_s = (enum memmodel) INTVAL (operands[6]);
  mod_f = (enum memmodel) INTVAL (operands[7]);
  orig_mode = mode = GET_MODE (mem);

  mask = shift = NULL_RTX;
  if (mode == QImode || mode == HImode)
    {
      /* Before power8, we didn't have access to lbarx/lharx, so generate a
         lwarx and shift/mask operations.  With power8, we need to do the
         comparison in SImode, but the store is still done in QI/HImode.  */
      oldval = convert_modes (SImode, mode, oldval, 1);

      if (!TARGET_SYNC_HI_QI)
        {
          mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

          /* Shift and mask OLDVAL into position with the word.  */
          oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
                                        NULL_RTX, 1, OPTAB_LIB_WIDEN);

          /* Shift and mask NEWVAL into position within the word.  */
          newval = convert_modes (SImode, mode, newval, 1);
          newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
                                        NULL_RTX, 1, OPTAB_LIB_WIDEN);
        }

      /* Prepare to adjust the return value.  */
      retval = gen_reg_rtx (SImode);
      mode = SImode;
    }
  else if (reg_overlap_mentioned_p (retval, oldval))
    oldval = copy_to_reg (oldval);

  mem = rs6000_pre_atomic_barrier (mem, mod_s);

  label1 = NULL_RTX;
  if (!is_weak)
    {
      label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
      emit_label (XEXP (label1, 0));
    }
  label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());

  emit_load_locked (mode, retval, mem);

  x = retval;
  if (mask)
    x = expand_simple_binop (SImode, AND, retval, mask,
                             NULL_RTX, 1, OPTAB_LIB_WIDEN);

  cond = gen_reg_rtx (CCmode);
  /* If we have TImode, synthesize a comparison.  */
  if (mode != TImode)
    x = gen_rtx_COMPARE (CCmode, x, oldval);
  else
    {
      rtx xor1_result = gen_reg_rtx (DImode);
      rtx xor2_result = gen_reg_rtx (DImode);
      rtx or_result = gen_reg_rtx (DImode);
      rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
      rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
      rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
      rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);

      emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
      emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
      emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
      x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
    }

  emit_insn (gen_rtx_SET (VOIDmode, cond, x));

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label2);

  x = newval;
  if (mask)
    x = rs6000_mask_atomic_subword (retval, newval, mask);

  emit_store_conditional (orig_mode, cond, mem, x);

  if (!is_weak)
    {
      x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
      emit_unlikely_jump (x, label1);
    }

  if (mod_f != MEMMODEL_RELAXED)
    emit_label (XEXP (label2, 0));

  rs6000_post_atomic_barrier (mod_s);

  if (mod_f == MEMMODEL_RELAXED)
    emit_label (XEXP (label2, 0));

  if (shift)
    rs6000_finish_atomic_subword (operands[1], retval, shift);
  else if (mode != GET_MODE (operands[1]))
    convert_move (operands[1], retval, 1);

  /* In all cases, CR0 contains EQ on success, and NE on failure.  */
  x = gen_rtx_EQ (SImode, cond, const0_rtx);
  emit_insn (gen_rtx_SET (VOIDmode, boolval, x));
}
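
/* Illustrative sketch (not part of GCC): this expander is what ends up
   implementing the __atomic_compare_exchange_n builtin on this target;
   operands 5-7 correspond to the weak flag and the success/failure memory
   models visible in user code such as the following, and the returned
   bool comes from the CR0:EQ result described above.  */
#if 0
#include <stdint.h>
#include <stdbool.h>

static bool
try_bump (uint32_t *p, uint32_t expected, uint32_t desired)
{
  /* weak = false: spurious store-conditional failures are retried
     inside the generated loop.  */
  return __atomic_compare_exchange_n (p, &expected, desired,
                                      false,
                                      __ATOMIC_SEQ_CST,   /* success model */
                                      __ATOMIC_RELAXED);  /* failure model */
}
#endif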
/* Expand an atomic exchange operation.  */

void
rs6000_expand_atomic_exchange (rtx operands[])
{
  rtx retval, mem, val, cond;
  enum machine_mode mode;
  enum memmodel model;
  rtx label, x, mask, shift;

  retval = operands[0];
  mem = operands[1];
  val = operands[2];
  model = (enum memmodel) INTVAL (operands[3]);
  mode = GET_MODE (mem);

  mask = shift = NULL_RTX;
  if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
    {
      mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

      /* Shift and mask VAL into position with the word.  */
      val = convert_modes (SImode, mode, val, 1);
      val = expand_simple_binop (SImode, ASHIFT, val, shift,
                                 NULL_RTX, 1, OPTAB_LIB_WIDEN);

      /* Prepare to adjust the return value.  */
      retval = gen_reg_rtx (SImode);
      mode = SImode;
    }

  mem = rs6000_pre_atomic_barrier (mem, model);

  label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (mode, retval, mem);

  x = val;
  if (mask)
    x = rs6000_mask_atomic_subword (retval, val, mask);

  cond = gen_reg_rtx (CCmode);
  emit_store_conditional (mode, cond, mem, x);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  rs6000_post_atomic_barrier (model);

  if (shift)
    rs6000_finish_atomic_subword (operands[0], retval, shift);
}
/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
   to perform.  MEM is the memory on which to operate.  VAL is the second
   operand of the binary operator.  BEFORE and AFTER are optional locations to
   return the value of MEM either before or after the operation.  MODEL_RTX
   is a CONST_INT containing the memory model to use.  */

void
rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
                         rtx orig_before, rtx orig_after, rtx model_rtx)
{
  enum memmodel model = (enum memmodel) INTVAL (model_rtx);
  enum machine_mode mode = GET_MODE (mem);
  enum machine_mode store_mode = mode;
  rtx label, x, cond, mask, shift;
  rtx before = orig_before, after = orig_after;

  mask = shift = NULL_RTX;
  /* On power8, we want to use SImode for the operation.  On previous systems,
     use the operation in a subword and shift/mask to get the proper byte or
     halfword.  */
  if (mode == QImode || mode == HImode)
    {
      if (TARGET_SYNC_HI_QI)
        {
          val = convert_modes (SImode, mode, val, 1);

          /* Prepare to adjust the return value.  */
          before = gen_reg_rtx (SImode);
          if (after)
            after = gen_reg_rtx (SImode);
          mode = SImode;
        }
      else
        {
          mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);

          /* Shift and mask VAL into position with the word.  */
          val = convert_modes (SImode, mode, val, 1);
          val = expand_simple_binop (SImode, ASHIFT, val, shift,
                                     NULL_RTX, 1, OPTAB_LIB_WIDEN);

          switch (code)
            {
            case IOR:
            case XOR:
              /* We've already zero-extended VAL.  That is sufficient to
                 make certain that it does not affect other bits.  */
              break;

            case AND:
              /* If we make certain that all of the other bits in VAL are
                 set, that will be sufficient to not affect other bits.  */
              x = gen_rtx_NOT (SImode, mask);
              x = gen_rtx_IOR (SImode, x, val);
              emit_insn (gen_rtx_SET (VOIDmode, val, x));
              break;

            case NOT:
            case PLUS:
            case MINUS:
              /* These will all affect bits outside the field and need
                 adjustment via MASK within the loop.  */
              break;

            default:
              gcc_unreachable ();
            }

          /* Prepare to adjust the return value.  */
          before = gen_reg_rtx (SImode);
          if (after)
            after = gen_reg_rtx (SImode);
          store_mode = mode = SImode;
        }
    }

  mem = rs6000_pre_atomic_barrier (mem, model);

  label = gen_label_rtx ();
  emit_label (label);
  label = gen_rtx_LABEL_REF (VOIDmode, label);

  if (before == NULL_RTX)
    before = gen_reg_rtx (mode);

  emit_load_locked (mode, before, mem);

  if (code == NOT)
    {
      x = expand_simple_binop (mode, AND, before, val,
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);
      after = expand_simple_unop (mode, NOT, x, after, 1);
    }
  else
    after = expand_simple_binop (mode, code, before, val,
                                 after, 1, OPTAB_LIB_WIDEN);

  x = after;
  if (mask)
    {
      x = expand_simple_binop (SImode, AND, after, mask,
                               NULL_RTX, 1, OPTAB_LIB_WIDEN);
      x = rs6000_mask_atomic_subword (before, x, mask);
    }
  else if (store_mode != mode)
    x = convert_modes (store_mode, mode, x, 1);

  cond = gen_reg_rtx (CCmode);
  emit_store_conditional (store_mode, cond, mem, x);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  rs6000_post_atomic_barrier (model);

  if (shift)
    {
      /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
         then do the calculations in a SImode register.  */
      if (orig_before)
        rs6000_finish_atomic_subword (orig_before, before, shift);
      if (orig_after)
        rs6000_finish_atomic_subword (orig_after, after, shift);
    }
  else if (store_mode != mode)
    {
      /* QImode/HImode on machines with lbarx/lharx where we do the native
         operation and then do the calculations in a SImode register.  */
      if (orig_before)
        convert_move (orig_before, before, 1);
      if (orig_after)
        convert_move (orig_after, after, 1);
    }
  else if (orig_after && after != orig_after)
    emit_move_insn (orig_after, after);
}
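
/* Illustrative sketch (not part of GCC): ORIG_BEFORE and ORIG_AFTER above
   correspond to the two flavours of user-level fetch-op builtins, which
   differ only in which value of the LL/SC loop is handed back.
   Hypothetical example: */
#if 0
#include <stdint.h>

static void
fetch_op_flavours (uint32_t *p)
{
  uint32_t before = __atomic_fetch_or (p, 0x80, __ATOMIC_ACQ_REL); /* old value */
  uint32_t after  = __atomic_or_fetch (p, 0x01, __ATOMIC_ACQ_REL); /* new value */
  (void) before;
  (void) after;
}
#endif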
/* Emit instructions to move SRC to DST.  Called by splitters for
   multi-register moves.  It will emit at most one instruction for
   each register that is accessed; that is, it won't emit li/lis pairs
   (or equivalent for 64-bit code).  One of SRC or DST must be a hard
   register.  */

void
rs6000_split_multireg_move (rtx dst, rtx src)
{
  /* The register number of the first register being moved.  */
  int reg;
  /* The mode that is to be moved.  */
  enum machine_mode mode;
  /* The mode that the move is being done in, and its size.  */
  enum machine_mode reg_mode;
  int reg_mode_size;
  /* The number of registers that will be moved.  */
  int nregs;
  int i, j;

  reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
  mode = GET_MODE (dst);
  nregs = hard_regno_nregs[reg][mode];
  if (FP_REGNO_P (reg))
    reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
        ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
  else if (ALTIVEC_REGNO_P (reg))
    reg_mode = V16QImode;
  else if (TARGET_E500_DOUBLE && mode == TFmode)
    reg_mode = DFmode;
  else
    reg_mode = word_mode;
  reg_mode_size = GET_MODE_SIZE (reg_mode);

  gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));

  /* TDmode residing in FP registers is special, since the ISA requires that
     the lower-numbered word of a register pair is always the most significant
     word, even in little-endian mode.  This does not match the usual subreg
     semantics, so we cannot use simplify_gen_subreg in those cases.  Access
     the appropriate constituent registers "by hand" in little-endian mode.

     Note we do not need to check for destructive overlap here since TDmode
     can only reside in even/odd register pairs.  */
  if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
    {
      rtx p_src, p_dst;

      for (i = 0; i < nregs; i++)
        {
          if (REG_P (src) && FP_REGNO_P (REGNO (src)))
            p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
          else
            p_src = simplify_gen_subreg (reg_mode, src, mode,
                                         i * reg_mode_size);

          if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
            p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
          else
            p_dst = simplify_gen_subreg (reg_mode, dst, mode,
                                         i * reg_mode_size);

          emit_insn (gen_rtx_SET (VOIDmode, p_dst, p_src));
        }

      return;
    }

  if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
    {
      /* Move register range backwards, if we might have destructive
         overlap.  */
      for (i = nregs - 1; i >= 0; i--)
        emit_insn (gen_rtx_SET (VOIDmode,
                                simplify_gen_subreg (reg_mode, dst, mode,
                                                     i * reg_mode_size),
                                simplify_gen_subreg (reg_mode, src, mode,
                                                     i * reg_mode_size)));
    }
  else
    {
      bool used_update = false;
      rtx restore_basereg = NULL_RTX;
      j = -1;

      if (MEM_P (src) && INT_REGNO_P (reg))
        {
          rtx breg;

          if (GET_CODE (XEXP (src, 0)) == PRE_INC
              || GET_CODE (XEXP (src, 0)) == PRE_DEC)
            {
              rtx delta_rtx;
              breg = XEXP (XEXP (src, 0), 0);
              delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
                           ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
                           : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
              emit_insn (gen_add3_insn (breg, breg, delta_rtx));
              src = replace_equiv_address (src, breg);
            }
          else if (! rs6000_offsettable_memref_p (src, reg_mode))
            {
              if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
                {
                  rtx basereg = XEXP (XEXP (src, 0), 0);
                  if (TARGET_UPDATE)
                    {
                      rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
                      emit_insn (gen_rtx_SET (VOIDmode, ndst,
                                              gen_rtx_MEM (reg_mode,
                                                           XEXP (src, 0))));
                      used_update = true;
                    }
                  else
                    emit_insn (gen_rtx_SET (VOIDmode, basereg,
                                            XEXP (XEXP (src, 0), 1)));
                  src = replace_equiv_address (src, basereg);
                }
              else
                {
                  rtx basereg = gen_rtx_REG (Pmode, reg);
                  emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
                  src = replace_equiv_address (src, basereg);
                }
            }

          breg = XEXP (src, 0);
          if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
            breg = XEXP (breg, 0);

          /* If the base register we are using to address memory is
             also a destination reg, then change that register last.  */
          if (REG_P (breg)
              && REGNO (breg) >= REGNO (dst)
              && REGNO (breg) < REGNO (dst) + nregs)
            j = REGNO (breg) - REGNO (dst);
        }
      else if (MEM_P (dst) && INT_REGNO_P (reg))
        {
          rtx breg;

          if (GET_CODE (XEXP (dst, 0)) == PRE_INC
              || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
            {
              rtx delta_rtx;
              breg = XEXP (XEXP (dst, 0), 0);
              delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
                           ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
                           : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));

              /* We have to update the breg before doing the store.
                 Use store with update, if available.  */
              if (TARGET_UPDATE)
                {
                  rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
                  emit_insn (TARGET_32BIT
                             ? (TARGET_POWERPC64
                                ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
                                : gen_movsi_update (breg, breg, delta_rtx, nsrc))
                             : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
                  used_update = true;
                }
              else
                emit_insn (gen_add3_insn (breg, breg, delta_rtx));
              dst = replace_equiv_address (dst, breg);
            }
          else if (!rs6000_offsettable_memref_p (dst, reg_mode)
                   && GET_CODE (XEXP (dst, 0)) != LO_SUM)
            {
              if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
                {
                  rtx basereg = XEXP (XEXP (dst, 0), 0);
                  if (TARGET_UPDATE)
                    {
                      rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
                      emit_insn (gen_rtx_SET (VOIDmode,
                                              gen_rtx_MEM (reg_mode,
                                                           XEXP (dst, 0)),
                                              nsrc));
                      used_update = true;
                    }
                  else
                    emit_insn (gen_rtx_SET (VOIDmode, basereg,
                                            XEXP (XEXP (dst, 0), 1)));
                  dst = replace_equiv_address (dst, basereg);
                }
              else
                {
                  rtx basereg = XEXP (XEXP (dst, 0), 0);
                  rtx offsetreg = XEXP (XEXP (dst, 0), 1);
                  gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
                              && REG_P (basereg)
                              && REG_P (offsetreg)
                              && REGNO (basereg) != REGNO (offsetreg));
                  if (REGNO (basereg) == 0)
                    {
                      rtx tmp = offsetreg;
                      offsetreg = basereg;
                      basereg = tmp;
                    }
                  emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
                  restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
                  dst = replace_equiv_address (dst, basereg);
                }
            }
          else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
            gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
        }

      for (i = 0; i < nregs; i++)
        {
          /* Calculate index to next subword.  */
          ++j;
          if (j == nregs)
            j = 0;

          /* If compiler already emitted move of first word by
             store with update, no need to do anything.  */
          if (j == 0 && used_update)
            continue;

          emit_insn (gen_rtx_SET (VOIDmode,
                                  simplify_gen_subreg (reg_mode, dst, mode,
                                                       j * reg_mode_size),
                                  simplify_gen_subreg (reg_mode, src, mode,
                                                       j * reg_mode_size)));
        }
      if (restore_basereg != NULL_RTX)
        emit_insn (restore_basereg);
    }
}
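
/* Illustrative sketch (not part of GCC): ignoring the addressing-mode and
   overlap special cases, the splitter above degenerates into one
   register-sized copy per hard register covered by the value, i.e. nregs
   pieces of reg_mode_size bytes each.  A hypothetical plain-C picture of
   that decomposition: */
#if 0
#include <string.h>
#include <stddef.h>

static void
split_multiword_copy (unsigned char *dst, const unsigned char *src,
                      size_t total_size, size_t piece_size)
{
  /* total_size must be a multiple of piece_size, mirroring the
     gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode)) above.  */
  for (size_t off = 0; off < total_size; off += piece_size)
    memcpy (dst + off, src + off, piece_size);
}
#endif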
/* This page contains routines that are used to determine what the
   function prologue and epilogue code will do and write them out.  */

static bool
save_reg_p (int r)
{
  return !call_used_regs[r] && df_regs_ever_live_p (r);
}

/* Return the first fixed-point register that is required to be
   saved. 32 if none.  */

int
first_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 13; first_reg <= 31; first_reg++)
    if (save_reg_p (first_reg))
      break;

  if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
      && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
          || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
          || (TARGET_TOC && TARGET_MINIMAL_TOC))
      && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
    first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;

  if (crtl->uses_pic_offset_table
      && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
    return RS6000_PIC_OFFSET_TABLE_REGNUM;

  return first_reg;
}
/* Similar, for FP regs.  */

int
first_fp_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
    if (save_reg_p (first_reg))
      break;

  return first_reg;
}

/* Similar, for AltiVec regs.  */

static int
first_altivec_reg_to_save (void)
{
  int i;

  /* Stack frame remains as is unless we are in AltiVec ABI.  */
  if (! TARGET_ALTIVEC_ABI)
    return LAST_ALTIVEC_REGNO + 1;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    return FIRST_ALTIVEC_REGNO + 20;

  /* Find lowest numbered live register.  */
  for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
    if (save_reg_p (i))
      break;

  return i;
}
/* Return a 32-bit mask of the AltiVec registers we need to set in
   VRSAVE.  Bit n of the return value is 1 if Vn is live.  The MSB in
   the 32-bit word is 0.  */

static unsigned int
compute_vrsave_mask (void)
{
  unsigned int i, mask = 0;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     call-saved altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    mask |= 0xFFF;

  /* First, find out if we use _any_ altivec registers.  */
  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (df_regs_ever_live_p (i))
      mask |= ALTIVEC_REG_BIT (i);

  if (mask == 0)
    return mask;

  /* Next, remove the argument registers from the set.  These must
     be in the VRSAVE mask set by the caller, so we don't need to add
     them in again.  More importantly, the mask we compute here is
     used to generate CLOBBERs in the set_vrsave insn, and we do not
     wish the argument registers to die.  */
  for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
    mask &= ~ALTIVEC_REG_BIT (i);

  /* Similarly, remove the return value from the set.  */
  {
    bool yes = false;
    diddle_return_value (is_altivec_return_reg, &yes);
    if (yes)
      mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
  }

  return mask;
}
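
/* Illustrative sketch (not part of GCC): in the VRSAVE mask computed above,
   bit 0 of the 32-bit word is the MSB and describes V0, so Vn maps to bit
   (31 - n) counting from the least significant end.  A hypothetical model
   of that bit assignment for a register number relative to V0: */
#if 0
#include <stdint.h>

static uint32_t
vrsave_bit (unsigned vn)        /* vn = 0 .. 31 */
{
  return (uint32_t) 1 << (31 - vn);
}

/* e.g. marking V20..V31 live sets the low 12 bits, i.e. 0x00000FFF.  */
#endif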
/* For a very restricted set of circumstances, we can cut down the
   size of prologues/epilogues by calling our own save/restore-the-world
   routines.  */

static void
compute_save_world_info (rs6000_stack_t *info_ptr)
{
  info_ptr->world_save_p = 1;
  info_ptr->world_save_p
    = (WORLD_SAVE_P (info_ptr)
       && DEFAULT_ABI == ABI_DARWIN
       && !cfun->has_nonlocal_label
       && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
       && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
       && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
       && info_ptr->cr_save_p);

  /* This will not work in conjunction with sibcalls.  Make sure there
     are none.  (This check is expensive, but seldom executed.)  */
  if (WORLD_SAVE_P (info_ptr))
    {
      rtx insn;
      for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
        if (CALL_P (insn) && SIBLING_CALL_P (insn))
          {
            info_ptr->world_save_p = 0;
            break;
          }
    }

  if (WORLD_SAVE_P (info_ptr))
    {
      /* Even if we're not touching VRsave, make sure there's room on the
         stack for it, if it looks like we're calling SAVE_WORLD, which
         will attempt to save it.  */
      info_ptr->vrsave_size  = 4;

      /* If we are going to save the world, we need to save the link
         register too.  */
      info_ptr->lr_save_p = 1;

      /* "Save" the VRsave register too if we're saving the world.  */
      if (info_ptr->vrsave_mask == 0)
        info_ptr->vrsave_mask = compute_vrsave_mask ();

      /* Because the Darwin register save/restore routines only handle
         F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
         check.  */
      gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
                  && (info_ptr->first_altivec_reg_save
                      >= FIRST_SAVED_ALTIVEC_REGNO));
    }
}
static void
is_altivec_return_reg (rtx reg, void *xyes)
{
  bool *yes = (bool *) xyes;
  if (REGNO (reg) == ALTIVEC_ARG_RETURN)
    *yes = true;
}
/* Look for user-defined global regs in the range FIRST to LAST-1.
   We should not restore these, and so cannot use lmw or out-of-line
   restore functions if there are any.  We also can't save them
   (well, emit frame notes for them), because frame unwinding during
   exception handling will restore saved registers.  */

static bool
global_regs_p (unsigned first, unsigned last)
{
  while (first < last)
    if (global_regs[first++])
      return true;
  return false;
}
/* Determine the strategy for saving/restoring registers.  */

enum {
  SAVRES_MULTIPLE = 0x1,
  SAVE_INLINE_FPRS = 0x2,
  SAVE_INLINE_GPRS = 0x4,
  REST_INLINE_FPRS = 0x8,
  REST_INLINE_GPRS = 0x10,
  SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
  SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
  REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
  SAVE_INLINE_VRS = 0x100,
  REST_INLINE_VRS = 0x200
};

static int
rs6000_savres_strategy (rs6000_stack_t *info,
                        bool using_static_chain_p)
{
  int strategy = 0;
  bool lr_save_p;

  if (TARGET_MULTIPLE
      && !TARGET_POWERPC64
      && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
      && info->first_gp_reg_save < 31
      && !global_regs_p (info->first_gp_reg_save, 32))
    strategy |= SAVRES_MULTIPLE;

  if (crtl->calls_eh_return
      || cfun->machine->ra_need_lr)
    strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
                 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
                 | SAVE_INLINE_VRS | REST_INLINE_VRS);

  if (info->first_fp_reg_save == 64
      /* The out-of-line FP routines use double-precision stores;
         we can't use those routines if we don't have such stores.  */
      || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
      || global_regs_p (info->first_fp_reg_save, 64))
    strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;

  if (info->first_gp_reg_save == 32
      || (!(strategy & SAVRES_MULTIPLE)
          && global_regs_p (info->first_gp_reg_save, 32)))
    strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;

  if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
      || global_regs_p (info->first_altivec_reg_save, LAST_ALTIVEC_REGNO + 1))
    strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;

  /* Define cutoff for using out-of-line functions to save registers.  */
  if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
    {
      if (!optimize_size)
        {
          strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
          strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
          strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
        }
      else
        {
          /* Prefer out-of-line restore if it will exit.  */
          if (info->first_fp_reg_save > 61)
            strategy |= SAVE_INLINE_FPRS;
          if (info->first_gp_reg_save > 29)
            {
              if (info->first_fp_reg_save == 64)
                strategy |= SAVE_INLINE_GPRS;
              else
                strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
            }
          if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
            strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
        }
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      if (info->first_fp_reg_save > 60)
        strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
      if (info->first_gp_reg_save > 29)
        strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
    }
  else
    {
      gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
      if (info->first_fp_reg_save > 61)
        strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
      strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
      strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
    }

  /* Don't bother to try to save things out-of-line if r11 is occupied
     by the static chain.  It would require too much fiddling and the
     static chain is rarely used anyway.  FPRs are saved w.r.t the stack
     pointer on Darwin, and AIX uses r1 or r12.  */
  if (using_static_chain_p
      && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
    strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
                 | SAVE_INLINE_VRS | REST_INLINE_VRS);

  /* We can only use the out-of-line routines to restore if we've
     saved all the registers from first_fp_reg_save in the prologue.
     Otherwise, we risk loading garbage.  */
  if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
    {
      int i;

      for (i = info->first_fp_reg_save; i < 64; i++)
        if (!save_reg_p (i))
          {
            strategy |= REST_INLINE_FPRS;
            break;
          }
    }

  /* If we are going to use store multiple, then don't even bother
     with the out-of-line routines, since the store-multiple
     instruction will always be smaller.  */
  if ((strategy & SAVRES_MULTIPLE))
    strategy |= SAVE_INLINE_GPRS;

  /* info->lr_save_p isn't yet set if the only reason lr needs to be
     saved is an out-of-line save or restore.  Set up the value for
     the next test (excluding out-of-line gpr restore).  */
  lr_save_p = (info->lr_save_p
               || !(strategy & SAVE_INLINE_GPRS)
               || !(strategy & SAVE_INLINE_FPRS)
               || !(strategy & SAVE_INLINE_VRS)
               || !(strategy & REST_INLINE_FPRS)
               || !(strategy & REST_INLINE_VRS));

  /* The situation is more complicated with load multiple.  We'd
     prefer to use the out-of-line routines for restores, since the
     "exit" out-of-line routines can handle the restore of LR and the
     frame teardown.  However it doesn't make sense to use the
     out-of-line routine if that is the only reason we'd need to save
     LR, and we can't use the "exit" out-of-line gpr restore if we
     have saved some fprs; in those cases it is advantageous to use
     load multiple when available.  */
  if ((strategy & SAVRES_MULTIPLE)
      && (!lr_save_p
          || info->first_fp_reg_save != 64))
    strategy |= REST_INLINE_GPRS;

  /* Saving CR interferes with the exit routines used on the SPE, so
     just punt here.  */
  if (TARGET_SPE_ABI
      && info->spe_64bit_regs_used
      && info->cr_save_p)
    strategy |= REST_INLINE_GPRS;

  /* We can only use load multiple or the out-of-line routines to
     restore if we've used store multiple or out-of-line routines
     in the prologue, i.e. if we've saved all the registers from
     first_gp_reg_save.  Otherwise, we risk loading garbage.  */
  if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
      == SAVE_INLINE_GPRS)
    {
      int i;

      for (i = info->first_gp_reg_save; i < 32; i++)
        if (!save_reg_p (i))
          {
            strategy |= REST_INLINE_GPRS;
            break;
          }
    }

  if (TARGET_ELF && TARGET_64BIT)
    {
      if (!(strategy & SAVE_INLINE_FPRS))
        strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
      else if (!(strategy & SAVE_INLINE_GPRS)
               && info->first_fp_reg_save == 64)
        strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
    }
  else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
    strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;

  if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
    strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;

  return strategy;
}
/* Calculate the stack information for the current function.  This is
   complicated by having two separate calling sequences, the AIX calling
   sequence and the V.4 calling sequence.

   AIX (and Darwin/Mac OS X) stack frames look like:
                                                          32-bit  64-bit
        SP----> +---------------------------------------+
                | back chain to caller                  | 0        0
                +---------------------------------------+
                | saved CR                              | 4        8 (8-11)
                +---------------------------------------+
                +---------------------------------------+
                | reserved for compilers                | 12       24
                +---------------------------------------+
                | reserved for binders                  | 16       32
                +---------------------------------------+
                | saved TOC pointer                     | 20       40
                +---------------------------------------+
                | Parameter save area (P)               | 24       48
                +---------------------------------------+
                | Alloca space (A)                      | 24+P     etc.
                +---------------------------------------+
                | Local variable space (L)              | 24+P+A
                +---------------------------------------+
                | Float/int conversion temporary (X)    | 24+P+A+L
                +---------------------------------------+
                | Save area for AltiVec registers (W)   | 24+P+A+L+X
                +---------------------------------------+
                | AltiVec alignment padding (Y)         | 24+P+A+L+X+W
                +---------------------------------------+
                | Save area for VRSAVE register (Z)     | 24+P+A+L+X+W+Y
                +---------------------------------------+
                | Save area for GP registers (G)        | 24+P+A+X+L+X+W+Y+Z
                +---------------------------------------+
                | Save area for FP registers (F)        | 24+P+A+X+L+X+W+Y+Z+G
                +---------------------------------------+
        old SP->| back chain to caller's caller         |
                +---------------------------------------+

   The required alignment for AIX configurations is two words (i.e., 8
   or 16 bytes).

   The ELFv2 ABI is a variant of the AIX ABI.  Stack frames look like:

        SP----> +---------------------------------------+
                | Back chain to caller                  |  0
                +---------------------------------------+
                | Save area for CR                      |  8
                +---------------------------------------+
                +---------------------------------------+
                | Saved TOC pointer                     |  24
                +---------------------------------------+
                | Parameter save area (P)               |  32
                +---------------------------------------+
                | Alloca space (A)                      |  32+P
                +---------------------------------------+
                | Local variable space (L)              |  32+P+A
                +---------------------------------------+
                | Save area for AltiVec registers (W)   |  32+P+A+L
                +---------------------------------------+
                | AltiVec alignment padding (Y)         |  32+P+A+L+W
                +---------------------------------------+
                | Save area for GP registers (G)        |  32+P+A+L+W+Y
                +---------------------------------------+
                | Save area for FP registers (F)        |  32+P+A+L+W+Y+G
                +---------------------------------------+
        old SP->| back chain to caller's caller         |  32+P+A+L+W+Y+G+F
                +---------------------------------------+


   V.4 stack frames look like:

        SP----> +---------------------------------------+
                | back chain to caller                  | 0
                +---------------------------------------+
                | caller's saved LR                     | 4
                +---------------------------------------+
                | Parameter save area (P)               | 8
                +---------------------------------------+
                | Alloca space (A)                      | 8+P
                +---------------------------------------+
                | Varargs save area (V)                 | 8+P+A
                +---------------------------------------+
                | Local variable space (L)              | 8+P+A+V
                +---------------------------------------+
                | Float/int conversion temporary (X)    | 8+P+A+V+L
                +---------------------------------------+
                | Save area for AltiVec registers (W)   | 8+P+A+V+L+X
                +---------------------------------------+
                | AltiVec alignment padding (Y)         | 8+P+A+V+L+X+W
                +---------------------------------------+
                | Save area for VRSAVE register (Z)     | 8+P+A+V+L+X+W+Y
                +---------------------------------------+
                | SPE: area for 64-bit GP registers     |
                +---------------------------------------+
                | SPE alignment padding                 |
                +---------------------------------------+
                | saved CR (C)                          | 8+P+A+V+L+X+W+Y+Z
                +---------------------------------------+
                | Save area for GP registers (G)        | 8+P+A+V+L+X+W+Y+Z+C
                +---------------------------------------+
                | Save area for FP registers (F)        | 8+P+A+V+L+X+W+Y+Z+C+G
                +---------------------------------------+
        old SP->| back chain to caller's caller         |
                +---------------------------------------+

   The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
   given.  (But note below and in sysv4.h that we require only 8 and
   may round up the size of our stack frame anyways.  The historical
   reason is early versions of powerpc-linux which didn't properly
   align the stack at program startup.  A happy side-effect is that
   -mno-eabi libraries can be used with -meabi programs.)

   The EABI configuration defaults to the V.4 layout.  However,
   the stack alignment requirements may differ.  If -mno-eabi is not
   given, the required stack alignment is 8 bytes; if -mno-eabi is
   given, the required alignment is 16 bytes.  (But see V.4 comment
   above.)  */

#ifndef ABI_STACK_BOUNDARY
#define ABI_STACK_BOUNDARY STACK_BOUNDARY
#endif
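
/* Illustrative sketch (not part of GCC): the frame-size computations below
   repeatedly round sizes up to an ABI-dependent boundary (8 or 16 bytes).
   Assuming RS6000_ALIGN is the usual round-up-to-a-power-of-two idiom, the
   arithmetic looks like this hypothetical helper: */
#if 0
#include <stdint.h>

static uint64_t
round_up (uint64_t n, uint64_t align)   /* align must be a power of two */
{
  return (n + align - 1) & ~(align - 1);
}

/* e.g. round_up (220, 16) == 224, so a 220-byte save area keeps the
   16-byte stack alignment required by the AltiVec/Darwin ABIs.  */
#endif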
static rs6000_stack_t *
rs6000_stack_info (void)
{
  rs6000_stack_t *info_ptr = &stack_info;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int ehrd_size;
  int ehcr_size;
  int save_align;
  int first_gp;
  HOST_WIDE_INT non_fixed_size;
  bool using_static_chain_p;

  if (reload_completed && info_ptr->reload_completed)
    return info_ptr;

  memset (info_ptr, 0, sizeof (*info_ptr));
  info_ptr->reload_completed = reload_completed;

  if (TARGET_SPE)
    {
      /* Cache value so we don't rescan instruction chain over and over.  */
      if (cfun->machine->insn_chain_scanned_p == 0)
        cfun->machine->insn_chain_scanned_p
          = spe_func_has_64bit_regs_p () + 1;
      info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
    }

  /* Select which calling sequence.  */
  info_ptr->abi = DEFAULT_ABI;

  /* Calculate which registers need to be saved & save area size.  */
  info_ptr->first_gp_reg_save = first_reg_to_save ();
  /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
     even if it currently looks like we won't.  Reload may need it to
     get at a constant; if so, it will have already created a constant
     pool entry for it.  */
  if (((TARGET_TOC && TARGET_MINIMAL_TOC)
       || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
       || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
      && crtl->uses_const_pool
      && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
    first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
  else
    first_gp = info_ptr->first_gp_reg_save;

  info_ptr->gp_size = reg_size * (32 - first_gp);

  /* For the SPE, we have an additional upper 32-bits on each GPR.
     Ideally we should save the entire 64-bits only when the upper
     half is used in SIMD instructions.  Since we only record
     registers live (not the size they are used in), this proves
     difficult because we'd have to traverse the instruction chain at
     the right time, taking reload into account.  This is a real pain,
     so we opt to save the GPRs in 64-bits always if but one register
     gets used in 64-bits.  Otherwise, all the registers in the frame
     get saved in 32-bits.

     So... since when we save all GPRs (except the SP) in 64-bits, the
     traditional GP save area will be empty.  */
  if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
    info_ptr->gp_size = 0;

  info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
  info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);

  info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
  info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
                                 - info_ptr->first_altivec_reg_save);

  /* Does this function call anything?  */
  info_ptr->calls_p = (! crtl->is_leaf
                       || cfun->machine->ra_needs_full_frame);

  /* Determine if we need to save the condition code registers.  */
  if (df_regs_ever_live_p (CR2_REGNO)
      || df_regs_ever_live_p (CR3_REGNO)
      || df_regs_ever_live_p (CR4_REGNO))
    {
      info_ptr->cr_save_p = 1;
      if (DEFAULT_ABI == ABI_V4)
        info_ptr->cr_size = reg_size;
    }

  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
        continue;

      /* SPE saves EH registers in 64-bits.  */
      ehrd_size = i * (TARGET_SPE_ABI
                       && info_ptr->spe_64bit_regs_used != 0
                       ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
    }
  else
    ehrd_size = 0;

  /* In the ELFv2 ABI, we also need to allocate space for separate
     CR field save areas if the function calls __builtin_eh_return.  */
  if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
    {
      /* This hard-codes that we have three call-saved CR fields.  */
      ehcr_size = 3 * reg_size;
      /* We do *not* use the regular CR save mechanism.  */
      info_ptr->cr_save_p = 0;
    }
  else
    ehcr_size = 0;

  /* Determine various sizes.  */
  info_ptr->reg_size = reg_size;
  info_ptr->fixed_size = RS6000_SAVE_AREA;
  info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
  info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
                                      TARGET_ALTIVEC ? 16 : 8);
  if (FRAME_GROWS_DOWNWARD)
    info_ptr->vars_size
      += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
                       + info_ptr->parm_size,
                       ABI_STACK_BOUNDARY / BITS_PER_UNIT)
         - (info_ptr->fixed_size + info_ptr->vars_size
            + info_ptr->parm_size);

  if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
    info_ptr->spe_gp_size = 8 * (32 - first_gp);
  else
    info_ptr->spe_gp_size = 0;

  if (TARGET_ALTIVEC_ABI)
    info_ptr->vrsave_mask = compute_vrsave_mask ();
  else
    info_ptr->vrsave_mask = 0;

  if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
    info_ptr->vrsave_size = 4;
  else
    info_ptr->vrsave_size = 0;

  compute_save_world_info (info_ptr);

  /* Calculate the offsets.  */
  switch (DEFAULT_ABI)
    {
    case ABI_NONE:
    default:
      gcc_unreachable ();

    case ABI_AIX:
    case ABI_ELFv2:
    case ABI_DARWIN:
      info_ptr->fp_save_offset = - info_ptr->fp_size;
      info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;

      if (TARGET_ALTIVEC_ABI)
        {
          info_ptr->vrsave_save_offset
            = info_ptr->gp_save_offset - info_ptr->vrsave_size;

          /* Align stack so vector save area is on a quadword boundary.
             The padding goes above the vectors.  */
          if (info_ptr->altivec_size != 0)
            info_ptr->altivec_padding_size
              = info_ptr->vrsave_save_offset & 0xF;
          else
            info_ptr->altivec_padding_size = 0;

          info_ptr->altivec_save_offset
            = info_ptr->vrsave_save_offset
              - info_ptr->altivec_padding_size
              - info_ptr->altivec_size;
          gcc_assert (info_ptr->altivec_size == 0
                      || info_ptr->altivec_save_offset % 16 == 0);

          /* Adjust for AltiVec case.  */
          info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
        }
      else
        info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;

      info_ptr->ehcr_offset = info_ptr->ehrd_offset - ehcr_size;
      info_ptr->cr_save_offset = reg_size; /* first word when 64-bit.  */
      info_ptr->lr_save_offset = 2*reg_size;
      break;

    case ABI_V4:
      info_ptr->fp_save_offset = - info_ptr->fp_size;
      info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
      info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;

      if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
        {
          /* Align stack so SPE GPR save area is aligned on a
             double-word boundary.  */
          if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
            info_ptr->spe_padding_size
              = 8 - (-info_ptr->cr_save_offset % 8);
          else
            info_ptr->spe_padding_size = 0;

          info_ptr->spe_gp_save_offset
            = info_ptr->cr_save_offset
              - info_ptr->spe_padding_size
              - info_ptr->spe_gp_size;

          /* Adjust for SPE case.  */
          info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
        }
      else if (TARGET_ALTIVEC_ABI)
        {
          info_ptr->vrsave_save_offset
            = info_ptr->cr_save_offset - info_ptr->vrsave_size;

          /* Align stack so vector save area is on a quadword boundary.  */
          if (info_ptr->altivec_size != 0)
            info_ptr->altivec_padding_size
              = 16 - (-info_ptr->vrsave_save_offset % 16);
          else
            info_ptr->altivec_padding_size = 0;

          info_ptr->altivec_save_offset
            = info_ptr->vrsave_save_offset
              - info_ptr->altivec_padding_size
              - info_ptr->altivec_size;

          /* Adjust for AltiVec case.  */
          info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
        }
      else
        info_ptr->ehrd_offset = info_ptr->cr_save_offset;
      info_ptr->ehrd_offset -= ehrd_size;
      info_ptr->lr_save_offset = reg_size;
      break;
    }

  save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
  info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
                                      + info_ptr->gp_size
                                      + info_ptr->altivec_size
                                      + info_ptr->altivec_padding_size
                                      + info_ptr->spe_gp_size
                                      + info_ptr->spe_padding_size
                                      + ehrd_size
                                      + ehcr_size
                                      + info_ptr->cr_size
                                      + info_ptr->vrsave_size,
                                      save_align);

  non_fixed_size = (info_ptr->vars_size
                    + info_ptr->parm_size
                    + info_ptr->save_size);

  info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
                                       ABI_STACK_BOUNDARY / BITS_PER_UNIT);

  /* Determine if we need to save the link register.  */
  if (info_ptr->calls_p
      || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
          && crtl->profile
          && !TARGET_PROFILE_KERNEL)
      || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
#ifdef TARGET_RELOCATABLE
      || (TARGET_RELOCATABLE && (get_pool_size () != 0))
#endif
      || rs6000_ra_ever_killed ())
    info_ptr->lr_save_p = 1;

  using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
                          && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
                          && call_used_regs[STATIC_CHAIN_REGNUM]);
  info_ptr->savres_strategy = rs6000_savres_strategy (info_ptr,
                                                      using_static_chain_p);

  if (!(info_ptr->savres_strategy & SAVE_INLINE_GPRS)
      || !(info_ptr->savres_strategy & SAVE_INLINE_FPRS)
      || !(info_ptr->savres_strategy & SAVE_INLINE_VRS)
      || !(info_ptr->savres_strategy & REST_INLINE_GPRS)
      || !(info_ptr->savres_strategy & REST_INLINE_FPRS)
      || !(info_ptr->savres_strategy & REST_INLINE_VRS))
    info_ptr->lr_save_p = 1;

  if (info_ptr->lr_save_p)
    df_set_regs_ever_live (LR_REGNO, true);

  /* Determine if we need to allocate any stack frame:

     For AIX we need to push the stack if a frame pointer is needed
     (because the stack might be dynamically adjusted), if we are
     debugging, if we make calls, or if the sum of fp_save, gp_save,
     and local variables are more than the space needed to save all
     non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
     + 18*8 = 288 (GPR13 reserved).

     For V.4 we don't have the stack cushion that AIX uses, but assume
     that the debugger can handle stackless frames.  */

  if (info_ptr->calls_p)
    info_ptr->push_p = 1;

  else if (DEFAULT_ABI == ABI_V4)
    info_ptr->push_p = non_fixed_size != 0;

  else if (frame_pointer_needed)
    info_ptr->push_p = 1;

  else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
    info_ptr->push_p = 1;

  else
    info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);

  /* Zero offsets if we're not saving those registers.  */
  if (info_ptr->fp_size == 0)
    info_ptr->fp_save_offset = 0;

  if (info_ptr->gp_size == 0)
    info_ptr->gp_save_offset = 0;

  if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
    info_ptr->altivec_save_offset = 0;

  /* Zero VRSAVE offset if not saved and restored.  */
  if (! TARGET_ALTIVEC_VRSAVE || info_ptr->vrsave_mask == 0)
    info_ptr->vrsave_save_offset = 0;

  if (! TARGET_SPE_ABI
      || info_ptr->spe_64bit_regs_used == 0
      || info_ptr->spe_gp_size == 0)
    info_ptr->spe_gp_save_offset = 0;

  if (! info_ptr->lr_save_p)
    info_ptr->lr_save_offset = 0;

  if (! info_ptr->cr_save_p)
    info_ptr->cr_save_offset = 0;

  return info_ptr;
}
/* Return true if the current function uses any GPRs in 64-bit SIMD
   mode.  */

static bool
spe_func_has_64bit_regs_p (void)
{
  rtx_insn *insns, *insn;

  /* Functions that save and restore all the call-saved registers will
     need to save/restore the registers in 64-bits.  */
  if (crtl->calls_eh_return
      || cfun->calls_setjmp
      || crtl->has_nonlocal_goto)
    return true;

  insns = get_insns ();

  for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          rtx i;

          /* FIXME: This should be implemented with attributes...

                 (set_attr "spe64" "true")....then,
                 if (get_spe64(insn)) return true;

             It's the only reliable way to do the stuff below.  */

          i = PATTERN (insn);
          if (GET_CODE (i) == SET)
            {
              enum machine_mode mode = GET_MODE (SET_SRC (i));

              if (SPE_VECTOR_MODE (mode))
                return true;
              if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
                return true;
            }
        }
    }

  return false;
}
void
debug_stack_info (rs6000_stack_t *info)
{
  const char *abi_string;

  if (! info)
    info = rs6000_stack_info ();

  fprintf (stderr, "\nStack information for function %s:\n",
           ((current_function_decl && DECL_NAME (current_function_decl))
            ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
            : "<unknown>"));

  switch (info->abi)
    {
    default:             abi_string = "Unknown";  break;
    case ABI_NONE:       abi_string = "NONE";     break;
    case ABI_AIX:        abi_string = "AIX";      break;
    case ABI_ELFv2:      abi_string = "ELFv2";    break;
    case ABI_DARWIN:     abi_string = "Darwin";   break;
    case ABI_V4:         abi_string = "V.4";      break;
    }

  fprintf (stderr, "\tABI = %5s\n", abi_string);

  if (TARGET_ALTIVEC_ABI)
    fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");

  if (TARGET_SPE_ABI)
    fprintf (stderr, "\tSPE ABI extensions enabled.\n");

  if (info->first_gp_reg_save != 32)
    fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);

  if (info->first_fp_reg_save != 64)
    fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);

  if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
    fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
             info->first_altivec_reg_save);

  if (info->lr_save_p)
    fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);

  if (info->cr_save_p)
    fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);

  if (info->vrsave_mask)
    fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);

  if (info->push_p)
    fprintf (stderr, "\tpush_p = %5d\n", info->push_p);

  if (info->calls_p)
    fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);

  if (info->gp_save_offset)
    fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);

  if (info->fp_save_offset)
    fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);

  if (info->altivec_save_offset)
    fprintf (stderr, "\taltivec_save_offset = %5d\n",
             info->altivec_save_offset);

  if (info->spe_gp_save_offset)
    fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
             info->spe_gp_save_offset);

  if (info->vrsave_save_offset)
    fprintf (stderr, "\tvrsave_save_offset = %5d\n",
             info->vrsave_save_offset);

  if (info->lr_save_offset)
    fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);

  if (info->cr_save_offset)
    fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);

  if (info->varargs_save_offset)
    fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);

  if (info->total_size)
    fprintf (stderr, "\ttotal_size = " HOST_WIDE_INT_PRINT_DEC "\n",
             info->total_size);

  if (info->vars_size)
    fprintf (stderr, "\tvars_size = " HOST_WIDE_INT_PRINT_DEC "\n",
             info->vars_size);

  if (info->parm_size)
    fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);

  if (info->fixed_size)
    fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);

  if (info->gp_size)
    fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);

  if (info->spe_gp_size)
    fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);

  if (info->fp_size)
    fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);

  if (info->altivec_size)
    fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);

  if (info->vrsave_size)
    fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);

  if (info->altivec_padding_size)
    fprintf (stderr, "\taltivec_padding_size= %5d\n",
             info->altivec_padding_size);

  if (info->spe_padding_size)
    fprintf (stderr, "\tspe_padding_size = %5d\n",
             info->spe_padding_size);

  if (info->cr_size)
    fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);

  if (info->save_size)
    fprintf (stderr, "\tsave_size = %5d\n", info->save_size);

  if (info->reg_size != 4)
    fprintf (stderr, "\treg_size = %5d\n", info->reg_size);

  fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);

  fprintf (stderr, "\n");
}
rtx
rs6000_return_addr (int count, rtx frame)
{
  /* Currently we don't optimize very well between prolog and body
     code and for PIC code the code can be actually quite bad, so
     don't try to be too clever here.  */
  if (count != 0
      || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
    {
      cfun->machine->ra_needs_full_frame = 1;

      return
        gen_rtx_MEM
          (Pmode,
           memory_address
             (Pmode,
              plus_constant (Pmode,
                             copy_to_reg
                               (gen_rtx_MEM (Pmode,
                                             memory_address (Pmode, frame))),
                             RETURN_ADDRESS_OFFSET)));
    }

  cfun->machine->ra_need_lr = 1;
  return get_hard_reg_initial_val (Pmode, LR_REGNO);
}
/* Say whether a function is a candidate for sibcall handling or not.  */

static bool
rs6000_function_ok_for_sibcall (tree decl, tree exp)
{
  tree fntype;

  if (decl)
    fntype = TREE_TYPE (decl);
  else
    fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));

  /* We can't do it if the called function has more vector parameters
     than the current function; there's nowhere to put the VRsave code.  */
  if (TARGET_ALTIVEC_ABI
      && TARGET_ALTIVEC_VRSAVE
      && !(decl && decl == current_function_decl))
    {
      function_args_iterator args_iter;
      tree type;
      int nvreg = 0;

      /* Functions with vector parameters are required to have a
         prototype, so the argument type info must be available
         here.  */
      FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
        if (TREE_CODE (type) == VECTOR_TYPE
            && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
          nvreg++;

      FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
        if (TREE_CODE (type) == VECTOR_TYPE
            && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
          nvreg--;

      if (nvreg > 0)
        return false;
    }

  /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
     functions, because the callee may have a different TOC pointer to
     the caller and there's no way to ensure we restore the TOC when
     we return.  With the secure-plt SYSV ABI we can't make non-local
     calls when -fpic/PIC because the plt call stubs use r30.  */
  if (DEFAULT_ABI == ABI_DARWIN
      || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
          && decl
          && !DECL_EXTERNAL (decl)
          && (*targetm.binds_local_p) (decl))
      || (DEFAULT_ABI == ABI_V4
          && (!TARGET_SECURE_PLT
              || !flag_pic
              || (decl
                  && (*targetm.binds_local_p) (decl)))))
    {
      tree attr_list = TYPE_ATTRIBUTES (fntype);

      if (!lookup_attribute ("longcall", attr_list)
          || lookup_attribute ("shortcall", attr_list))
        return true;
    }

  return false;
}
/* Return nonzero if the link register is clobbered somewhere other than
   by the prologue/epilogue, i.e. if it needs to be saved.  */

static int
rs6000_ra_ever_killed (void)
{
  rtx top;
  rtx reg;
  rtx insn;

  if (cfun->is_thunk)
    return 0;

  if (cfun->machine->lr_save_state)
    return cfun->machine->lr_save_state - 1;

  /* regs_ever_live has LR marked as used if any sibcalls are present,
     but this should not force saving and restoring in the
     pro/epilogue.  Likewise, reg_set_between_p thinks a sibcall
     clobbers LR, so that is inappropriate.  */

  /* Also, the prologue can generate a store into LR that
     doesn't really count, like this:

	move LR->R0
	bcl to set PIC register
	move LR->R31
	move R0->LR

     When we're called from the epilogue, we need to avoid counting
     this as a store.  */

  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();
  reg = gen_rtx_REG (Pmode, LR_REGNO);

  for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
	{
	  if (CALL_P (insn))
	    {
	      if (!SIBLING_CALL_P (insn))
		return 1;
	    }
	  else if (find_regno_note (insn, REG_INC, LR_REGNO))
	    return 1;
	  else if (set_of (reg, insn) != NULL_RTX
		   && !prologue_epilogue_contains (insn))
	    return 1;
	}
    }
  return 0;
}
/* Emit instructions needed to load the TOC register.
   This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
   a constant pool; or for SVR4 -fpic.  */

void
rs6000_emit_load_toc_table (int fromprolog)
{
  rtx dest;
  dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);

  if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
    {
      char buf[30];
      rtx lab, tmp1, tmp2, got;

      lab = gen_label_rtx ();
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
      lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
      if (flag_pic == 2)
	got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
      else
	got = rs6000_got_sym ();
      tmp1 = tmp2 = dest;
      if (!fromprolog)
	{
	  tmp1 = gen_reg_rtx (Pmode);
	  tmp2 = gen_reg_rtx (Pmode);
	}
      emit_insn (gen_load_toc_v4_PIC_1 (lab));
      emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
      emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
      emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
    }
  else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
    {
      emit_insn (gen_load_toc_v4_pic_si ());
      emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
    }
  else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
    {
      char buf[30];
      rtx temp0 = (fromprolog
		   ? gen_rtx_REG (Pmode, 0)
		   : gen_reg_rtx (Pmode));

      if (fromprolog)
	{
	  rtx symF, symL;

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
	  symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
	  symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

	  emit_insn (gen_load_toc_v4_PIC_1 (symF));
	  emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
	  emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
	}
      else
	{
	  rtx tocsym, lab;

	  tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
	  lab = gen_label_rtx ();
	  emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
	  emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
	  if (TARGET_LINK_STACK)
	    emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
	  emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
	}
      emit_insn (gen_addsi3 (dest, temp0, dest));
    }
  else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
    {
      /* This is for AIX code running in non-PIC ELF32.  */
      char buf[30];
      rtx realsym;

      ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
      realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

      emit_insn (gen_elf_high (dest, realsym));
      emit_insn (gen_elf_low (dest, dest, realsym));
    }
  else
    {
      gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);

      if (TARGET_32BIT)
	emit_insn (gen_load_toc_aix_si (dest));
      else
	emit_insn (gen_load_toc_aix_di (dest));
    }
}
/* Emit instructions to restore the link register after determining where
   its value has been stored.  */

void
rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  rtx operands[2];

  operands[0] = source;
  operands[1] = scratch;

  if (info->lr_save_p)
    {
      rtx frame_rtx = stack_pointer_rtx;
      HOST_WIDE_INT sp_offset = 0;
      rtx tmp;

      if (frame_pointer_needed
	  || cfun->calls_alloca
	  || info->total_size > 32767)
	{
	  tmp = gen_frame_mem (Pmode, frame_rtx);
	  emit_move_insn (operands[1], tmp);
	  frame_rtx = operands[1];
	}
      else if (info->push_p)
	sp_offset = info->total_size;

      tmp = plus_constant (Pmode, frame_rtx,
			   info->lr_save_offset + sp_offset);
      tmp = gen_frame_mem (Pmode, tmp);
      emit_move_insn (tmp, operands[0]);
    }
  else
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);

  /* Freeze lr_save_p.  We've just emitted rtl that depends on the
     state of lr_save_p so any change from here on would be a bug.  In
     particular, stop rs6000_ra_ever_killed from considering the SET
     of lr we may have added just above.  */
  cfun->machine->lr_save_state = info->lr_save_p + 1;
}
static GTY(()) alias_set_type set = -1;

alias_set_type
get_TOC_alias_set (void)
{
  if (set == -1)
    set = new_alias_set ();
  return set;
}

/* This returns nonzero if the current function uses the TOC.  This is
   determined by the presence of (use (unspec ... UNSPEC_TOC)), which
   is generated by the ABI_V4 load_toc_* patterns.  */

static int
uses_TOC (void)
{
  rtx insn;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
	rtx pat = PATTERN (insn);
	int i;

	if (GET_CODE (pat) == PARALLEL)
	  for (i = 0; i < XVECLEN (pat, 0); i++)
	    {
	      rtx sub = XVECEXP (pat, 0, i);
	      if (GET_CODE (sub) == USE)
		{
		  sub = XEXP (sub, 0);
		  if (GET_CODE (sub) == UNSPEC
		      && XINT (sub, 1) == UNSPEC_TOC)
		    return 1;
		}
	    }
      }
  return 0;
}
/* Create a TOC reference for symbol SYMBOL.  LARGETOC_REG, if non-null,
   may be used as a temporary for the high part of the address.  */

rtx
create_TOC_reference (rtx symbol, rtx largetoc_reg)
{
  rtx tocrel, tocreg, hi;

  if (TARGET_DEBUG_ADDR)
    {
      if (GET_CODE (symbol) == SYMBOL_REF)
	fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
		 XSTR (symbol, 0));
      else
	{
	  fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
		   GET_RTX_NAME (GET_CODE (symbol)));
	  debug_rtx (symbol);
	}
    }

  if (!can_create_pseudo_p ())
    df_set_regs_ever_live (TOC_REGISTER, true);

  tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
  tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
  if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
    return tocrel;

  hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
  if (largetoc_reg != NULL)
    {
      emit_move_insn (largetoc_reg, hi);
      hi = largetoc_reg;
    }
  return gen_rtx_LO_SUM (Pmode, hi, tocrel);
}
/* Issue assembly directives that create a reference to the given DWARF
   FRAME_TABLE_LABEL from the current function section.  */

void
rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
{
  fprintf (asm_out_file, "\t.ref %s\n",
	   (* targetm.strip_name_encoding) (frame_table_label));
}
/* This ties together stack memory (MEM with an alias set of frame_alias_set)
   and the change to the stack pointer.  */

static void
rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
{
  rtvec p;
  int i;
  rtx regs[3];

  i = 0;
  regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  if (hard_frame_needed)
    regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
  if (!(REGNO (fp) == STACK_POINTER_REGNUM
	|| (hard_frame_needed
	    && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
    regs[i++] = fp;

  p = rtvec_alloc (i);
  while (--i >= 0)
    {
      rtx mem = gen_frame_mem (BLKmode, regs[i]);
      RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, const0_rtx);
    }

  emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
}
/* Emit the correct code for allocating stack space, as insns.
   If COPY_REG, make sure a copy of the old frame is left there.
   The generated code may use hard register 0 as a temporary.  */

static void
rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
{
  rtx insn;
  rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx tmp_reg = gen_rtx_REG (Pmode, 0);
  rtx todec = gen_int_mode (-size, Pmode);
  rtx par, set, mem;

  if (INTVAL (todec) != -size)
    {
      warning (0, "stack frame too large");
      emit_insn (gen_trap ());
      return;
    }

  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx)
	  && REGNO (stack_limit_rtx) > 1
	  && REGNO (stack_limit_rtx) <= 31)
	{
	  emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
	  emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
				    const0_rtx));
	}
      else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
	       && TARGET_32BIT
	       && DEFAULT_ABI == ABI_V4)
	{
	  rtx toload = gen_rtx_CONST (VOIDmode,
				      gen_rtx_PLUS (Pmode,
						    stack_limit_rtx,
						    GEN_INT (size)));

	  emit_insn (gen_elf_high (tmp_reg, toload));
	  emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
	  emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
				    const0_rtx));
	}
      else
	warning (0, "stack limit expression is not supported");
    }

  if (copy_reg)
    {
      if (copy_off != 0)
	emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
      else
	emit_move_insn (copy_reg, stack_reg);
    }

  if (size > 32767)
    {
      /* Need a note here so that try_split doesn't get confused.  */
      if (get_last_insn () == NULL_RTX)
	emit_note (NOTE_INSN_DELETED);
      insn = emit_move_insn (tmp_reg, todec);
      try_split (PATTERN (insn), insn, 0);
      todec = tmp_reg;
    }

  insn = emit_insn (TARGET_32BIT
		    ? gen_movsi_update_stack (stack_reg, stack_reg,
					      todec, stack_reg)
		    : gen_movdi_di_update_stack (stack_reg, stack_reg,
						 todec, stack_reg));
  /* Since we didn't use gen_frame_mem to generate the MEM, grab
     it now and set the alias set/attributes.  The above gen_*_update
     calls will generate a PARALLEL with the MEM set being the first
     operation.  */
  par = PATTERN (insn);
  gcc_assert (GET_CODE (par) == PARALLEL);
  set = XVECEXP (par, 0, 0);
  gcc_assert (GET_CODE (set) == SET);
  mem = SET_DEST (set);
  gcc_assert (MEM_P (mem));
  MEM_NOTRAP_P (mem) = 1;
  set_mem_alias_set (mem, get_frame_alias_set ());

  RTX_FRAME_RELATED_P (insn) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
		gen_rtx_SET (VOIDmode, stack_reg,
			     gen_rtx_PLUS (Pmode, stack_reg,
					   GEN_INT (-size))));
}
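/* Editor's illustration (a sketch, not part of the original source): for
   a frame whose size fits in a signed 16-bit offset, the update emitted
   above is the single store-with-update instruction

	stwu 1,-SIZE(1)		(32-bit)
	stdu 1,-SIZE(1)		(64-bit)

   which both allocates the frame and stores the back-chain word at the
   new stack pointer; larger frames first move -SIZE into r0 and use the
   indexed form of the update.  */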
#define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)

#if PROBE_INTERVAL > 32768
#error Cannot use indexed addressing mode for stack probing
#endif

/* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
   inclusive.  These are offsets from the current stack pointer.  */

static void
rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
{
  /* See if we have a constant small number of probes to generate.  If so,
     that's the easy case.  */
  if (first + size <= 32768)
    {
      HOST_WIDE_INT i;

      /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
	 it exceeds SIZE.  If only one probe is needed, this will not
	 generate any code.  Then probe at FIRST + SIZE.  */
      for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
	emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
					 -(first + i)));

      emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
				       -(first + size)));
    }

  /* Otherwise, do the same as above, but in a loop.  Note that we must be
     extra careful with variables wrapping around because we might be at
     the very top (or the very bottom) of the address space and we have
     to be able to handle this case properly; in particular, we use an
     equality test for the loop condition.  */
  else
    {
      HOST_WIDE_INT rounded_size;
      rtx r12 = gen_rtx_REG (Pmode, 12);
      rtx r0 = gen_rtx_REG (Pmode, 0);

      /* Sanity check for the addressing mode we're going to use.  */
      gcc_assert (first <= 32768);

      /* Step 1: round SIZE to the previous multiple of the interval.  */

      rounded_size = size & -PROBE_INTERVAL;

      /* Step 2: compute initial and final value of the loop counter.  */

      /* TEST_ADDR = SP + FIRST.  */
      emit_insn (gen_rtx_SET (VOIDmode, r12,
			      plus_constant (Pmode, stack_pointer_rtx,
					     -first)));

      /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE.  */
      if (rounded_size > 32768)
	{
	  emit_move_insn (r0, GEN_INT (-rounded_size));
	  emit_insn (gen_rtx_SET (VOIDmode, r0,
				  gen_rtx_PLUS (Pmode, r12, r0)));
	}
      else
	emit_insn (gen_rtx_SET (VOIDmode, r0,
				plus_constant (Pmode, r12, -rounded_size)));

      /* Step 3: the loop

	 while (TEST_ADDR != LAST_ADDR)
	   {
	     TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
	     probe at TEST_ADDR
	   }

	 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
	 until it is equal to ROUNDED_SIZE.  */

      if (TARGET_64BIT)
	emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
      else
	emit_insn (gen_probe_stack_rangesi (r12, r12, r0));

      /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
	 that SIZE is equal to ROUNDED_SIZE.  */

      if (size != rounded_size)
	emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
    }
}
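/* Editor's sketch (not part of the original source): Step 1 rounds SIZE
   down to a multiple of PROBE_INTERVAL.  With the default 4096-byte
   interval, size = 10000 gives rounded_size = 10000 & -4096 = 8192, so
   the loop probes FIRST+4096 and FIRST+8192 and the Step 4 probe covers
   FIRST+10000.  The hypothetical helper below just restates that
   arithmetic in plain C and is kept disabled.  */
#if 0
static HOST_WIDE_INT
example_rounded_size (HOST_WIDE_INT size, HOST_WIDE_INT probe_interval)
{
  /* Same rounding as Step 1 above.  */
  return size & -probe_interval;
}
#endif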
/* Probe a range of stack addresses from REG1 to REG2 inclusive.  These are
   absolute addresses.  */

const char *
output_probe_stack_range (rtx reg1, rtx reg2)
{
  static int labelno = 0;
  char loop_lab[32], end_lab[32];
  rtx xops[2];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
  ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* Jump to END_LAB if TEST_ADDR == LAST_ADDR.  */
  xops[0] = reg1;
  xops[1] = reg2;
  if (TARGET_64BIT)
    output_asm_insn ("cmpd 0,%0,%1", xops);
  else
    output_asm_insn ("cmpw 0,%0,%1", xops);

  fputs ("\tbeq 0,", asm_out_file);
  assemble_name_raw (asm_out_file, end_lab);
  fputc ('\n', asm_out_file);

  /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
  xops[1] = GEN_INT (-PROBE_INTERVAL);
  output_asm_insn ("addi %0,%0,%1", xops);

  /* Probe at TEST_ADDR and branch.  */
  xops[1] = gen_rtx_REG (Pmode, 0);
  output_asm_insn ("stw %1,0(%0)", xops);
  fprintf (asm_out_file, "\tb ");
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);

  return "";
}
/* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
   with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
   is not NULL.  It would be nice if dwarf2out_frame_debug_expr could
   deduce these equivalences by itself so it wasn't necessary to hold
   its hand so much.  Don't be tempted to always supply d2_f_d_e with
   the actual cfa register, ie. r31 when we are using a hard frame
   pointer.  That fails when saving regs off r1, and sched moves the
   r31 setup past the reg saves.  */

static rtx
rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
		      rtx reg2, rtx rreg, rtx split_reg)
{
  rtx real, temp;

  if (REGNO (reg) == STACK_POINTER_REGNUM && reg2 == NULL_RTX)
    {
      /* No need for any replacement.  Just set RTX_FRAME_RELATED_P.  */
      int i;

      gcc_checking_assert (val == 0);
      real = PATTERN (insn);
      if (GET_CODE (real) == PARALLEL)
	for (i = 0; i < XVECLEN (real, 0); i++)
	  if (GET_CODE (XVECEXP (real, 0, i)) == SET)
	    {
	      rtx set = XVECEXP (real, 0, i);

	      RTX_FRAME_RELATED_P (set) = 1;
	    }
      RTX_FRAME_RELATED_P (insn) = 1;
      return insn;
    }

  /* copy_rtx will not make unique copies of registers, so we need to
     ensure we don't have unwanted sharing here.  */
  if (reg == reg2)
    reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));

  if (reg == rreg)
    reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));

  real = copy_rtx (PATTERN (insn));

  if (reg2 != NULL_RTX)
    real = replace_rtx (real, reg2, rreg);

  if (REGNO (reg) == STACK_POINTER_REGNUM)
    gcc_checking_assert (val == 0);
  else
    real = replace_rtx (real, reg,
			gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
					  STACK_POINTER_REGNUM),
				      GEN_INT (val)));

  /* We expect that 'real' is either a SET or a PARALLEL containing
     SETs (and possibly other stuff).  In a PARALLEL, all the SETs
     are important so they all have to be marked RTX_FRAME_RELATED_P.  */

  if (GET_CODE (real) == SET)
    {
      rtx set = real;

      temp = simplify_rtx (SET_SRC (set));
      if (temp)
	SET_SRC (set) = temp;
      temp = simplify_rtx (SET_DEST (set));
      if (temp)
	SET_DEST (set) = temp;
      if (GET_CODE (SET_DEST (set)) == MEM)
	{
	  temp = simplify_rtx (XEXP (SET_DEST (set), 0));
	  if (temp)
	    XEXP (SET_DEST (set), 0) = temp;
	}
    }
  else
    {
      int i;

      gcc_assert (GET_CODE (real) == PARALLEL);
      for (i = 0; i < XVECLEN (real, 0); i++)
	if (GET_CODE (XVECEXP (real, 0, i)) == SET)
	  {
	    rtx set = XVECEXP (real, 0, i);

	    temp = simplify_rtx (SET_SRC (set));
	    if (temp)
	      SET_SRC (set) = temp;
	    temp = simplify_rtx (SET_DEST (set));
	    if (temp)
	      SET_DEST (set) = temp;
	    if (GET_CODE (SET_DEST (set)) == MEM)
	      {
		temp = simplify_rtx (XEXP (SET_DEST (set), 0));
		if (temp)
		  XEXP (SET_DEST (set), 0) = temp;
	      }
	    RTX_FRAME_RELATED_P (set) = 1;
	  }
    }

  /* If a store insn has been split into multiple insns, the
     true source register is given by split_reg.  */
  if (split_reg != NULL_RTX)
    real = gen_rtx_SET (VOIDmode, SET_DEST (real), split_reg);

  RTX_FRAME_RELATED_P (insn) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);

  return insn;
}
/* Returns an insn that has a vrsave set operation with the
   appropriate CLOBBERs.  */

static rtx
generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
{
  int nclobs, i;
  rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
  rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);

  clobs[0]
    = gen_rtx_SET (VOIDmode,
		   vrsave,
		   gen_rtx_UNSPEC_VOLATILE (SImode,
					    gen_rtvec (2, reg, vrsave),
					    UNSPECV_SET_VRSAVE));

  nclobs = 1;

  /* We need to clobber the registers in the mask so the scheduler
     does not move sets to VRSAVE before sets of AltiVec registers.

     However, if the function receives nonlocal gotos, reload will set
     all call saved registers live.  We will end up with:

	(set (reg 999) (mem))
	(parallel [ (set (reg vrsave) (unspec blah))
		    (clobber (reg 999))])

     The clobber will cause the store into reg 999 to be dead, and
     flow will attempt to delete an epilogue insn.  In this case, we
     need an unspec use/set of the register.  */

  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
      {
	if (!epiloguep || call_used_regs[i])
	  clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
					     gen_rtx_REG (V4SImode, i));
	else
	  {
	    rtx reg = gen_rtx_REG (V4SImode, i);

	    clobs[nclobs++]
	      = gen_rtx_SET (VOIDmode,
			     reg,
			     gen_rtx_UNSPEC (V4SImode,
					     gen_rtvec (1, reg), 27));
	  }
      }

  insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));

  for (i = 0; i < nclobs; ++i)
    XVECEXP (insn, 0, i) = clobs[i];

  return insn;
}
/* Build a SET that stores REG into, or loads it from, the frame slot at
   FRAME_REG + OFFSET, depending on STORE.  */

static rtx
gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
{
  rtx addr, mem;

  addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
  mem = gen_frame_mem (GET_MODE (reg), addr);
  return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
}

static rtx
gen_frame_load (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, false);
}

static rtx
gen_frame_store (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, true);
}
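/* Editor's sketch (not original text): gen_frame_store (reg, frame_reg,
   off) builds

	(set (mem/c:M (plus:P frame_reg (const_int off))) reg)

   and gen_frame_load builds the mirror-image load; both go through
   gen_frame_mem, so the MEM carries the frame alias set and the usual
   frame attributes.  */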
/* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
   Save REGNO into [FRAME_REG + OFFSET] in mode MODE.  */

static rtx
emit_frame_save (rtx frame_reg, enum machine_mode mode,
		 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
{
  rtx reg, insn;

  /* Some cases that need register indexed addressing.  */
  gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
			 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
			 || (TARGET_E500_DOUBLE && mode == DFmode)
			 || (TARGET_SPE_ABI
			     && SPE_VECTOR_MODE (mode)
			     && !SPE_CONST_OFFSET_OK (offset))));

  reg = gen_rtx_REG (mode, regno);
  insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
  return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
			       NULL_RTX, NULL_RTX, NULL_RTX);
}
/* Emit an offset memory reference suitable for a frame store, while
   converting to a valid addressing mode.  */

static rtx
gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
{
  rtx int_rtx, offset_rtx;

  int_rtx = GEN_INT (offset);

  if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
      || (TARGET_E500_DOUBLE && mode == DFmode))
    {
      offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
      emit_move_insn (offset_rtx, int_rtx);
    }
  else
    offset_rtx = int_rtx;

  return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
}
#ifndef TARGET_FIX_AND_CONTINUE
#define TARGET_FIX_AND_CONTINUE 0
#endif

/* It's really GPR 13 or 14, FPR 14 and VR 20.  We need the smallest.  */
#define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
#define LAST_SAVRES_REGISTER 31
#define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)

static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];

/* Temporary holding space for an out-of-line register save/restore
   routine name.  */
static char savres_routine_name[30];

/* Return the name for an out-of-line register save/restore routine.
   We are saving/restoring GPRs if GPR is true.  */

static char *
rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
{
  const char *prefix = "";
  const char *suffix = "";

  /* Different targets are supposed to define
     {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
     routine name could be defined with:

     sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)

     This is a nice idea in practice, but in reality, things are
     complicated in several ways:

     - ELF targets have save/restore routines for GPRs.

     - SPE targets use different prefixes for 32/64-bit registers, and
       neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.

     - PPC64 ELF targets have routines for save/restore of GPRs that
       differ in what they do with the link register, so having a set
       prefix doesn't work.  (We only use one of the save routines at
       the moment, though.)

     - PPC32 elf targets have "exit" versions of the restore routines
       that restore the link register and can save some extra space.
       These require an extra suffix.  (There are also "tail" versions
       of the restore routines and "GOT" versions of the save routines,
       but we don't generate those at present.  Same problems apply,
       though.)

     We deal with all this by synthesizing our own prefix/suffix and
     using that for the simple sprintf call shown above.  */
  if (TARGET_SPE)
    {
      /* No floating point saves on the SPE.  */
      gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);

      if ((sel & SAVRES_SAVE))
	prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
      else
	prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";

      if ((sel & SAVRES_LR))
	suffix = "_x";
    }
  else if (DEFAULT_ABI == ABI_V4)
    {
      if (TARGET_64BIT)
	goto aix_names;

      if ((sel & SAVRES_REG) == SAVRES_GPR)
	prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
	prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
      else if ((sel & SAVRES_REG) == SAVRES_VR)
	prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";

      if ((sel & SAVRES_LR))
	suffix = "_x";
    }
  else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    {
#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
      /* No out-of-line save/restore routines for GPRs on AIX.  */
      gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
#endif

    aix_names:
      if ((sel & SAVRES_REG) == SAVRES_GPR)
	prefix = ((sel & SAVRES_SAVE)
		  ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
		  : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
	{
#if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
	  if ((sel & SAVRES_LR))
	    prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
	  else
#endif
	    {
	      prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
	      suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
	    }
	}
      else if ((sel & SAVRES_REG) == SAVRES_VR)
	prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
    }

  if (DEFAULT_ABI == ABI_DARWIN)
    {
      /* The Darwin approach is (slightly) different, in order to be
	 compatible with code generated by the system toolchain.  There is a
	 single symbol for the start of save sequence, and the code here
	 embeds an offset into that code on the basis of the first register
	 to be saved.  */
      prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
      if ((sel & SAVRES_REG) == SAVRES_GPR)
	sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
		 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
		 (regno - 13) * 4, prefix, regno);
      else if ((sel & SAVRES_REG) == SAVRES_FPR)
	sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
		 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
      else if ((sel & SAVRES_REG) == SAVRES_VR)
	sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
		 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
    }
  else
    sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);

  return savres_routine_name;
}
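/* Editor's illustration (not part of the original source): the synthesized
   names match libgcc's out-of-line routines, e.g. "_savegpr0_29" for a
   64-bit ELF GPR save that also stores the link register, "_restgpr_29_x"
   for a 32-bit SVR4 "exit" restore, and "_savefpr_14" for an FPR save.
   The hypothetical helper below just restates the simple sprintf pattern
   the comment above refers to and is kept disabled.  */
#if 0
static void
example_savres_name (char *buf, const char *prefix, int regno,
		     const char *suffix)
{
  sprintf (buf, "%s%d%s", prefix, regno, suffix);  /* e.g. "_restgpr_29_x" */
}
#endif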
/* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
   We are saving/restoring GPRs if GPR is true.  */

static rtx
rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
{
  int regno = ((sel & SAVRES_REG) == SAVRES_GPR
	       ? info->first_gp_reg_save
	       : (sel & SAVRES_REG) == SAVRES_FPR
	       ? info->first_fp_reg_save - 32
	       : (sel & SAVRES_REG) == SAVRES_VR
	       ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
	       : -1);
  rtx sym;
  int select = sel;

  /* On the SPE, we never have any FPRs, but we do have 32/64-bit
     versions of the gpr routines.  */
  if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
      && info->spe_64bit_regs_used)
    select ^= SAVRES_FPR ^ SAVRES_GPR;

  /* Don't generate bogus routine names.  */
  gcc_assert (FIRST_SAVRES_REGISTER <= regno
	      && regno <= LAST_SAVRES_REGISTER
	      && select >= 0 && select <= 12);

  sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];

  if (sym == NULL)
    {
      char *name;

      name = rs6000_savres_routine_name (info, regno, sel);

      sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
	= gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
      SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
    }

  return sym;
}
/* Emit a sequence of insns, including a stack tie if needed, for
   resetting the stack pointer.  If UPDT_REGNO is not 1, then don't
   reset the stack pointer, but move the base of the frame into
   reg UPDT_REGNO for use by out-of-line register restore routines.  */

static rtx
rs6000_emit_stack_reset (rs6000_stack_t *info,
			 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
			 unsigned updt_regno)
{
  rtx updt_reg_rtx;

  /* This blockage is needed so that sched doesn't decide to move
     the sp change before the register restores.  */
  if (DEFAULT_ABI == ABI_V4
      || (TARGET_SPE_ABI
	  && info->spe_64bit_regs_used != 0
	  && info->first_gp_reg_save != 32))
    rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);

  /* If we are restoring registers out-of-line, we will be using the
     "exit" variants of the restore routines, which will reset the
     stack for us.  But we do need to point updt_reg into the
     right place for those routines.  */
  updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);

  if (frame_off != 0)
    return emit_insn (gen_add3_insn (updt_reg_rtx,
				     frame_reg_rtx, GEN_INT (frame_off)));
  else if (REGNO (frame_reg_rtx) != updt_regno)
    return emit_move_insn (updt_reg_rtx, frame_reg_rtx);

  return NULL_RTX;
}
/* Return the register number used as a pointer by out-of-line
   save/restore functions.  */

static inline unsigned
ptr_regno_for_savres (int sel)
{
  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
  return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
}
/* Construct a parallel rtx describing the effect of a call to an
   out-of-line register save/restore routine, and emit the insn
   or jump_insn as appropriate.  */

static rtx
rs6000_emit_savres_rtx (rs6000_stack_t *info,
			rtx frame_reg_rtx, int save_area_offset, int lr_offset,
			enum machine_mode reg_mode, int sel)
{
  int i;
  int offset, start_reg, end_reg, n_regs, use_reg;
  int reg_size = GET_MODE_SIZE (reg_mode);
  rtx sym;
  rtvec p;
  rtx par, insn;

  offset = 0;
  start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
	       ? info->first_gp_reg_save
	       : (sel & SAVRES_REG) == SAVRES_FPR
	       ? info->first_fp_reg_save
	       : (sel & SAVRES_REG) == SAVRES_VR
	       ? info->first_altivec_reg_save
	       : -1);
  end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
	     ? 32
	     : (sel & SAVRES_REG) == SAVRES_FPR
	     ? 64
	     : (sel & SAVRES_REG) == SAVRES_VR
	     ? LAST_ALTIVEC_REGNO + 1
	     : -1);
  n_regs = end_reg - start_reg;
  p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
		   + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
		   + n_regs);

  if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    RTVEC_ELT (p, offset++) = ret_rtx;

  RTVEC_ELT (p, offset++)
    = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));

  sym = rs6000_savres_routine_sym (info, sel);
  RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);

  use_reg = ptr_regno_for_savres (sel);
  if ((sel & SAVRES_REG) == SAVRES_VR)
    {
      /* Vector regs are saved/restored using [reg+reg] addressing.  */
      RTVEC_ELT (p, offset++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
      RTVEC_ELT (p, offset++)
	= gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
    }
  else
    RTVEC_ELT (p, offset++)
      = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));

  for (i = 0; i < end_reg - start_reg; i++)
    RTVEC_ELT (p, i + offset)
      = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
		       frame_reg_rtx, save_area_offset + reg_size * i,
		       (sel & SAVRES_SAVE) != 0);

  if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    RTVEC_ELT (p, i + offset)
      = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);

  par = gen_rtx_PARALLEL (VOIDmode, p);

  if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
    {
      insn = emit_jump_insn (par);
      JUMP_LABEL (insn) = ret_rtx;
    }
  else
    insn = emit_insn (par);
  return insn;
}
/* Emit code to store CR fields that need to be saved into REG.  */

static void
rs6000_emit_move_from_cr (rtx reg)
{
  /* Only the ELFv2 ABI allows storing only selected fields.  */
  if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
    {
      int i, cr_reg[8], count = 0;

      /* Collect CR fields that must be saved.  */
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  cr_reg[count++] = i;

      /* If it's just a single one, use mfcrf.  */
      if (count == 1)
	{
	  rtvec p = rtvec_alloc (1);
	  rtvec r = rtvec_alloc (2);
	  RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
	  RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
	  RTVEC_ELT (p, 0)
	    = gen_rtx_SET (VOIDmode, reg,
			   gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));

	  emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
	  return;
	}

      /* ??? It might be better to handle count == 2 / 3 cases here
	 as well, using logical operations to combine the values.  */
    }

  emit_insn (gen_movesi_from_cr (reg));
}
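/* Editor's illustration (not original text): when only CR2 must be saved
   under the ELFv2 ABI, the single-field path above uses mask
   1 << (7 - 2) = 0x20 and emits the single-field form of the move from
   CR (mfocrf on POWER4 and later); otherwise the gen_movesi_from_cr
   fallback produces a plain mfcr that copies all eight fields.  */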
/* Determine whether the gp REG is really used.  */

static bool
rs6000_reg_live_or_pic_offset_p (int reg)
{
  /* If the function calls eh_return, claim used all the registers that would
     be checked for liveness otherwise.  This is required for the PIC offset
     register with -mminimal-toc on AIX, as it is advertised as "fixed" for
     register allocation purposes in this case.  */

  return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
	   && (!call_used_regs[reg]
	       || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
		   && !TARGET_SINGLE_PIC_BASE
		   && TARGET_TOC && TARGET_MINIMAL_TOC)))
	  || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
	      && !TARGET_SINGLE_PIC_BASE
	      && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
		  || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
}
/* Emit function prologue as insns.  */

void
rs6000_emit_prologue (void)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  enum machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx frame_reg_rtx = sp_reg_rtx;
  unsigned int cr_save_regno;
  rtx cr_save_rtx = NULL_RTX;
  rtx insn;
  int strategy;
  int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
			      && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
			      && call_used_regs[STATIC_CHAIN_REGNUM]);
  /* Offset to top of frame for frame_reg and sp respectively.  */
  HOST_WIDE_INT frame_off = 0;
  HOST_WIDE_INT sp_off = 0;

#ifdef ENABLE_CHECKING
  /* Track and check usage of r0, r11, r12.  */
  int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
#define START_USE(R) do \
  {						\
    gcc_assert ((reg_inuse & (1 << (R))) == 0);	\
    reg_inuse |= 1 << (R);			\
  } while (0)
#define END_USE(R) do \
  {						\
    gcc_assert ((reg_inuse & (1 << (R))) != 0);	\
    reg_inuse &= ~(1 << (R));			\
  } while (0)
#define NOT_INUSE(R) do \
  {						\
    gcc_assert ((reg_inuse & (1 << (R))) == 0);	\
  } while (0)
#else
#define START_USE(R) do {} while (0)
#define END_USE(R) do {} while (0)
#define NOT_INUSE(R) do {} while (0)
#endif
23004 if (DEFAULT_ABI
== ABI_ELFv2
)
23006 cfun
->machine
->r2_setup_needed
= df_regs_ever_live_p (TOC_REGNUM
);
23008 /* With -mminimal-toc we may generate an extra use of r2 below. */
23009 if (!TARGET_SINGLE_PIC_BASE
23010 && TARGET_TOC
&& TARGET_MINIMAL_TOC
&& get_pool_size () != 0)
23011 cfun
->machine
->r2_setup_needed
= true;
23015 if (flag_stack_usage_info
)
23016 current_function_static_stack_size
= info
->total_size
;
23018 if (flag_stack_check
== STATIC_BUILTIN_STACK_CHECK
)
23020 HOST_WIDE_INT size
= info
->total_size
;
23022 if (crtl
->is_leaf
&& !cfun
->calls_alloca
)
23024 if (size
> PROBE_INTERVAL
&& size
> STACK_CHECK_PROTECT
)
23025 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT
,
23026 size
- STACK_CHECK_PROTECT
);
23029 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT
, size
);
23032 if (TARGET_FIX_AND_CONTINUE
)
23034 /* gdb on darwin arranges to forward a function from the old
23035 address by modifying the first 5 instructions of the function
23036 to branch to the overriding function. This is necessary to
23037 permit function pointers that point to the old function to
23038 actually forward to the new function. */
23039 emit_insn (gen_nop ());
23040 emit_insn (gen_nop ());
23041 emit_insn (gen_nop ());
23042 emit_insn (gen_nop ());
23043 emit_insn (gen_nop ());
23046 if (TARGET_SPE_ABI
&& info
->spe_64bit_regs_used
!= 0)
23048 reg_mode
= V2SImode
;
23052 /* Handle world saves specially here. */
23053 if (WORLD_SAVE_P (info
))
23060 /* save_world expects lr in r0. */
23061 reg0
= gen_rtx_REG (Pmode
, 0);
23062 if (info
->lr_save_p
)
23064 insn
= emit_move_insn (reg0
,
23065 gen_rtx_REG (Pmode
, LR_REGNO
));
23066 RTX_FRAME_RELATED_P (insn
) = 1;
23069 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
23070 assumptions about the offsets of various bits of the stack
23072 gcc_assert (info
->gp_save_offset
== -220
23073 && info
->fp_save_offset
== -144
23074 && info
->lr_save_offset
== 8
23075 && info
->cr_save_offset
== 4
23078 && (!crtl
->calls_eh_return
23079 || info
->ehrd_offset
== -432)
23080 && info
->vrsave_save_offset
== -224
23081 && info
->altivec_save_offset
== -416);
23083 treg
= gen_rtx_REG (SImode
, 11);
23084 emit_move_insn (treg
, GEN_INT (-info
->total_size
));
23086 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
23087 in R11. It also clobbers R12, so beware! */
23089 /* Preserve CR2 for save_world prologues */
23091 sz
+= 32 - info
->first_gp_reg_save
;
23092 sz
+= 64 - info
->first_fp_reg_save
;
23093 sz
+= LAST_ALTIVEC_REGNO
- info
->first_altivec_reg_save
+ 1;
23094 p
= rtvec_alloc (sz
);
23096 RTVEC_ELT (p
, j
++) = gen_rtx_CLOBBER (VOIDmode
,
23097 gen_rtx_REG (SImode
,
23099 RTVEC_ELT (p
, j
++) = gen_rtx_USE (VOIDmode
,
23100 gen_rtx_SYMBOL_REF (Pmode
,
23102 /* We do floats first so that the instruction pattern matches
23104 for (i
= 0; i
< 64 - info
->first_fp_reg_save
; i
++)
23106 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
23108 info
->first_fp_reg_save
+ i
),
23110 info
->fp_save_offset
+ frame_off
+ 8 * i
);
23111 for (i
= 0; info
->first_altivec_reg_save
+ i
<= LAST_ALTIVEC_REGNO
; i
++)
23113 = gen_frame_store (gen_rtx_REG (V4SImode
,
23114 info
->first_altivec_reg_save
+ i
),
23116 info
->altivec_save_offset
+ frame_off
+ 16 * i
);
23117 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
23119 = gen_frame_store (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
23121 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
23123 /* CR register traditionally saved as CR2. */
23125 = gen_frame_store (gen_rtx_REG (SImode
, CR2_REGNO
),
23126 frame_reg_rtx
, info
->cr_save_offset
+ frame_off
);
23127 /* Explain about use of R0. */
23128 if (info
->lr_save_p
)
23130 = gen_frame_store (reg0
,
23131 frame_reg_rtx
, info
->lr_save_offset
+ frame_off
);
23132 /* Explain what happens to the stack pointer. */
23134 rtx newval
= gen_rtx_PLUS (Pmode
, sp_reg_rtx
, treg
);
23135 RTVEC_ELT (p
, j
++) = gen_rtx_SET (VOIDmode
, sp_reg_rtx
, newval
);
23138 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
23139 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
23140 treg
, GEN_INT (-info
->total_size
), NULL_RTX
);
23141 sp_off
= frame_off
= info
->total_size
;
23144 strategy
= info
->savres_strategy
;
23146 /* For V.4, update stack before we do any saving and set back pointer. */
23147 if (! WORLD_SAVE_P (info
)
23149 && (DEFAULT_ABI
== ABI_V4
23150 || crtl
->calls_eh_return
))
23152 bool need_r11
= (TARGET_SPE
23153 ? (!(strategy
& SAVE_INLINE_GPRS
)
23154 && info
->spe_64bit_regs_used
== 0)
23155 : (!(strategy
& SAVE_INLINE_FPRS
)
23156 || !(strategy
& SAVE_INLINE_GPRS
)
23157 || !(strategy
& SAVE_INLINE_VRS
)));
23158 int ptr_regno
= -1;
23159 rtx ptr_reg
= NULL_RTX
;
23162 if (info
->total_size
< 32767)
23163 frame_off
= info
->total_size
;
23166 else if (info
->cr_save_p
23168 || info
->first_fp_reg_save
< 64
23169 || info
->first_gp_reg_save
< 32
23170 || info
->altivec_size
!= 0
23171 || info
->vrsave_mask
!= 0
23172 || crtl
->calls_eh_return
)
23176 /* The prologue won't be saving any regs so there is no need
23177 to set up a frame register to access any frame save area.
23178 We also won't be using frame_off anywhere below, but set
23179 the correct value anyway to protect against future
23180 changes to this function. */
23181 frame_off
= info
->total_size
;
23183 if (ptr_regno
!= -1)
23185 /* Set up the frame offset to that needed by the first
23186 out-of-line save function. */
23187 START_USE (ptr_regno
);
23188 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
23189 frame_reg_rtx
= ptr_reg
;
23190 if (!(strategy
& SAVE_INLINE_FPRS
) && info
->fp_size
!= 0)
23191 gcc_checking_assert (info
->fp_save_offset
+ info
->fp_size
== 0);
23192 else if (!(strategy
& SAVE_INLINE_GPRS
) && info
->first_gp_reg_save
< 32)
23193 ptr_off
= info
->gp_save_offset
+ info
->gp_size
;
23194 else if (!(strategy
& SAVE_INLINE_VRS
) && info
->altivec_size
!= 0)
23195 ptr_off
= info
->altivec_save_offset
+ info
->altivec_size
;
23196 frame_off
= -ptr_off
;
23198 rs6000_emit_allocate_stack (info
->total_size
, ptr_reg
, ptr_off
);
23199 sp_off
= info
->total_size
;
23200 if (frame_reg_rtx
!= sp_reg_rtx
)
23201 rs6000_emit_stack_tie (frame_reg_rtx
, false);
23204 /* If we use the link register, get it into r0. */
23205 if (!WORLD_SAVE_P (info
) && info
->lr_save_p
)
23207 rtx addr
, reg
, mem
;
23209 reg
= gen_rtx_REG (Pmode
, 0);
23211 insn
= emit_move_insn (reg
, gen_rtx_REG (Pmode
, LR_REGNO
));
23212 RTX_FRAME_RELATED_P (insn
) = 1;
23214 if (!(strategy
& (SAVE_NOINLINE_GPRS_SAVES_LR
23215 | SAVE_NOINLINE_FPRS_SAVES_LR
)))
23217 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
23218 GEN_INT (info
->lr_save_offset
+ frame_off
));
23219 mem
= gen_rtx_MEM (Pmode
, addr
);
23220 /* This should not be of rs6000_sr_alias_set, because of
23221 __builtin_return_address. */
23223 insn
= emit_move_insn (mem
, reg
);
23224 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
23225 NULL_RTX
, NULL_RTX
, NULL_RTX
);
23230 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
23231 r12 will be needed by out-of-line gpr restore. */
23232 cr_save_regno
= ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
23233 && !(strategy
& (SAVE_INLINE_GPRS
23234 | SAVE_NOINLINE_GPRS_SAVES_LR
))
23236 if (!WORLD_SAVE_P (info
)
23238 && REGNO (frame_reg_rtx
) != cr_save_regno
23239 && !(using_static_chain_p
&& cr_save_regno
== 11))
23241 cr_save_rtx
= gen_rtx_REG (SImode
, cr_save_regno
);
23242 START_USE (cr_save_regno
);
23243 rs6000_emit_move_from_cr (cr_save_rtx
);
23246 /* Do any required saving of fpr's. If only one or two to save, do
23247 it ourselves. Otherwise, call function. */
23248 if (!WORLD_SAVE_P (info
) && (strategy
& SAVE_INLINE_FPRS
))
23251 for (i
= 0; i
< 64 - info
->first_fp_reg_save
; i
++)
23252 if (save_reg_p (info
->first_fp_reg_save
+ i
))
23253 emit_frame_save (frame_reg_rtx
,
23254 (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
23255 ? DFmode
: SFmode
),
23256 info
->first_fp_reg_save
+ i
,
23257 info
->fp_save_offset
+ frame_off
+ 8 * i
,
23258 sp_off
- frame_off
);
23260 else if (!WORLD_SAVE_P (info
) && info
->first_fp_reg_save
!= 64)
23262 bool lr
= (strategy
& SAVE_NOINLINE_FPRS_SAVES_LR
) != 0;
23263 int sel
= SAVRES_SAVE
| SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
23264 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
23265 rtx ptr_reg
= frame_reg_rtx
;
23267 if (REGNO (frame_reg_rtx
) == ptr_regno
)
23268 gcc_checking_assert (frame_off
== 0);
23271 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
23272 NOT_INUSE (ptr_regno
);
23273 emit_insn (gen_add3_insn (ptr_reg
,
23274 frame_reg_rtx
, GEN_INT (frame_off
)));
23276 insn
= rs6000_emit_savres_rtx (info
, ptr_reg
,
23277 info
->fp_save_offset
,
23278 info
->lr_save_offset
,
23280 rs6000_frame_related (insn
, ptr_reg
, sp_off
,
23281 NULL_RTX
, NULL_RTX
, NULL_RTX
);
23286 /* Save GPRs. This is done as a PARALLEL if we are using
23287 the store-multiple instructions. */
23288 if (!WORLD_SAVE_P (info
)
23290 && info
->spe_64bit_regs_used
!= 0
23291 && info
->first_gp_reg_save
!= 32)
23294 rtx spe_save_area_ptr
;
23295 HOST_WIDE_INT save_off
;
23296 int ool_adjust
= 0;
23298 /* Determine whether we can address all of the registers that need
23299 to be saved with an offset from frame_reg_rtx that fits in
23300 the small const field for SPE memory instructions. */
23301 int spe_regs_addressable
23302 = (SPE_CONST_OFFSET_OK (info
->spe_gp_save_offset
+ frame_off
23303 + reg_size
* (32 - info
->first_gp_reg_save
- 1))
23304 && (strategy
& SAVE_INLINE_GPRS
));
23306 if (spe_regs_addressable
)
23308 spe_save_area_ptr
= frame_reg_rtx
;
23309 save_off
= frame_off
;
23313 /* Make r11 point to the start of the SPE save area. We need
23314 to be careful here if r11 is holding the static chain. If
23315 it is, then temporarily save it in r0. */
23316 HOST_WIDE_INT offset
;
23318 if (!(strategy
& SAVE_INLINE_GPRS
))
23319 ool_adjust
= 8 * (info
->first_gp_reg_save
- FIRST_SAVED_GP_REGNO
);
23320 offset
= info
->spe_gp_save_offset
+ frame_off
- ool_adjust
;
23321 spe_save_area_ptr
= gen_rtx_REG (Pmode
, 11);
23322 save_off
= frame_off
- offset
;
23324 if (using_static_chain_p
)
23326 rtx r0
= gen_rtx_REG (Pmode
, 0);
23329 gcc_assert (info
->first_gp_reg_save
> 11);
23331 emit_move_insn (r0
, spe_save_area_ptr
);
23333 else if (REGNO (frame_reg_rtx
) != 11)
23336 emit_insn (gen_addsi3 (spe_save_area_ptr
,
23337 frame_reg_rtx
, GEN_INT (offset
)));
23338 if (!using_static_chain_p
&& REGNO (frame_reg_rtx
) == 11)
23339 frame_off
= -info
->spe_gp_save_offset
+ ool_adjust
;
23342 if ((strategy
& SAVE_INLINE_GPRS
))
23344 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
23345 if (rs6000_reg_live_or_pic_offset_p (info
->first_gp_reg_save
+ i
))
23346 emit_frame_save (spe_save_area_ptr
, reg_mode
,
23347 info
->first_gp_reg_save
+ i
,
23348 (info
->spe_gp_save_offset
+ save_off
23350 sp_off
- save_off
);
23354 insn
= rs6000_emit_savres_rtx (info
, spe_save_area_ptr
,
23355 info
->spe_gp_save_offset
+ save_off
,
23357 SAVRES_SAVE
| SAVRES_GPR
);
23359 rs6000_frame_related (insn
, spe_save_area_ptr
, sp_off
- save_off
,
23360 NULL_RTX
, NULL_RTX
, NULL_RTX
);
23363 /* Move the static chain pointer back. */
23364 if (!spe_regs_addressable
)
23366 if (using_static_chain_p
)
23368 emit_move_insn (spe_save_area_ptr
, gen_rtx_REG (Pmode
, 0));
23371 else if (REGNO (frame_reg_rtx
) != 11)
23375 else if (!WORLD_SAVE_P (info
) && !(strategy
& SAVE_INLINE_GPRS
))
23377 bool lr
= (strategy
& SAVE_NOINLINE_GPRS_SAVES_LR
) != 0;
23378 int sel
= SAVRES_SAVE
| SAVRES_GPR
| (lr
? SAVRES_LR
: 0);
23379 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
23380 rtx ptr_reg
= frame_reg_rtx
;
23381 bool ptr_set_up
= REGNO (ptr_reg
) == ptr_regno
;
23382 int end_save
= info
->gp_save_offset
+ info
->gp_size
;
23386 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
23388 /* Need to adjust r11 (r12) if we saved any FPRs. */
23389 if (end_save
+ frame_off
!= 0)
23391 rtx offset
= GEN_INT (end_save
+ frame_off
);
23394 frame_off
= -end_save
;
23396 NOT_INUSE (ptr_regno
);
23397 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
23399 else if (!ptr_set_up
)
23401 NOT_INUSE (ptr_regno
);
23402 emit_move_insn (ptr_reg
, frame_reg_rtx
);
23404 ptr_off
= -end_save
;
23405 insn
= rs6000_emit_savres_rtx (info
, ptr_reg
,
23406 info
->gp_save_offset
+ ptr_off
,
23407 info
->lr_save_offset
+ ptr_off
,
23409 rs6000_frame_related (insn
, ptr_reg
, sp_off
- ptr_off
,
23410 NULL_RTX
, NULL_RTX
, NULL_RTX
);
23414 else if (!WORLD_SAVE_P (info
) && (strategy
& SAVRES_MULTIPLE
))
23418 p
= rtvec_alloc (32 - info
->first_gp_reg_save
);
23419 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
23421 = gen_frame_store (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
23423 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
23424 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
23425 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
23426 NULL_RTX
, NULL_RTX
, NULL_RTX
);
23428 else if (!WORLD_SAVE_P (info
))
23431 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
23432 if (rs6000_reg_live_or_pic_offset_p (info
->first_gp_reg_save
+ i
))
23433 emit_frame_save (frame_reg_rtx
, reg_mode
,
23434 info
->first_gp_reg_save
+ i
,
23435 info
->gp_save_offset
+ frame_off
+ reg_size
* i
,
23436 sp_off
- frame_off
);
23439 if (crtl
->calls_eh_return
)
23446 unsigned int regno
= EH_RETURN_DATA_REGNO (i
);
23447 if (regno
== INVALID_REGNUM
)
23451 p
= rtvec_alloc (i
);
23455 unsigned int regno
= EH_RETURN_DATA_REGNO (i
);
23456 if (regno
== INVALID_REGNUM
)
23460 = gen_frame_store (gen_rtx_REG (reg_mode
, regno
),
23462 info
->ehrd_offset
+ sp_off
+ reg_size
* (int) i
);
23463 RTVEC_ELT (p
, i
) = insn
;
23464 RTX_FRAME_RELATED_P (insn
) = 1;
23467 insn
= emit_insn (gen_blockage ());
23468 RTX_FRAME_RELATED_P (insn
) = 1;
23469 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, gen_rtx_PARALLEL (VOIDmode
, p
));
23472 /* In AIX ABI we need to make sure r2 is really saved. */
23473 if (TARGET_AIX
&& crtl
->calls_eh_return
)
23475 rtx tmp_reg
, tmp_reg_si
, hi
, lo
, compare_result
, toc_save_done
, jump
;
23476 rtx save_insn
, join_insn
, note
;
23477 long toc_restore_insn
;
23479 tmp_reg
= gen_rtx_REG (Pmode
, 11);
23480 tmp_reg_si
= gen_rtx_REG (SImode
, 11);
23481 if (using_static_chain_p
)
23484 emit_move_insn (gen_rtx_REG (Pmode
, 0), tmp_reg
);
23488 emit_move_insn (tmp_reg
, gen_rtx_REG (Pmode
, LR_REGNO
));
23489 /* Peek at instruction to which this function returns. If it's
23490 restoring r2, then we know we've already saved r2. We can't
23491 unconditionally save r2 because the value we have will already
23492 be updated if we arrived at this function via a plt call or
23493 toc adjusting stub. */
23494 emit_move_insn (tmp_reg_si
, gen_rtx_MEM (SImode
, tmp_reg
));
23495 toc_restore_insn
= ((TARGET_32BIT
? 0x80410000 : 0xE8410000)
23496 + RS6000_TOC_SAVE_SLOT
);
23497 hi
= gen_int_mode (toc_restore_insn
& ~0xffff, SImode
);
23498 emit_insn (gen_xorsi3 (tmp_reg_si
, tmp_reg_si
, hi
));
23499 compare_result
= gen_rtx_REG (CCUNSmode
, CR0_REGNO
);
23500 validate_condition_mode (EQ
, CCUNSmode
);
23501 lo
= gen_int_mode (toc_restore_insn
& 0xffff, SImode
);
23502 emit_insn (gen_rtx_SET (VOIDmode
, compare_result
,
23503 gen_rtx_COMPARE (CCUNSmode
, tmp_reg_si
, lo
)));
23504 toc_save_done
= gen_label_rtx ();
23505 jump
= gen_rtx_IF_THEN_ELSE (VOIDmode
,
23506 gen_rtx_EQ (VOIDmode
, compare_result
,
23508 gen_rtx_LABEL_REF (VOIDmode
, toc_save_done
),
23510 jump
= emit_jump_insn (gen_rtx_SET (VOIDmode
, pc_rtx
, jump
));
23511 JUMP_LABEL (jump
) = toc_save_done
;
23512 LABEL_NUSES (toc_save_done
) += 1;
23514 save_insn
= emit_frame_save (frame_reg_rtx
, reg_mode
,
23515 TOC_REGNUM
, frame_off
+ RS6000_TOC_SAVE_SLOT
,
23516 sp_off
- frame_off
);
23518 emit_label (toc_save_done
);
23520 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
23521 have a CFG that has different saves along different paths.
23522 Move the note to a dummy blockage insn, which describes that
23523 R2 is unconditionally saved after the label. */
23524 /* ??? An alternate representation might be a special insn pattern
23525 containing both the branch and the store. That might let the
23526 code that minimizes the number of DW_CFA_advance opcodes better
23527 freedom in placing the annotations. */
23528 note
= find_reg_note (save_insn
, REG_FRAME_RELATED_EXPR
, NULL
);
23530 remove_note (save_insn
, note
);
23532 note
= alloc_reg_note (REG_FRAME_RELATED_EXPR
,
23533 copy_rtx (PATTERN (save_insn
)), NULL_RTX
);
23534 RTX_FRAME_RELATED_P (save_insn
) = 0;
23536 join_insn
= emit_insn (gen_blockage ());
23537 REG_NOTES (join_insn
) = note
;
23538 RTX_FRAME_RELATED_P (join_insn
) = 1;
23540 if (using_static_chain_p
)
23542 emit_move_insn (tmp_reg
, gen_rtx_REG (Pmode
, 0));
23549 /* Save CR if we use any that must be preserved. */
23550 if (!WORLD_SAVE_P (info
) && info
->cr_save_p
)
23552 rtx addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
23553 GEN_INT (info
->cr_save_offset
+ frame_off
));
23554 rtx mem
= gen_frame_mem (SImode
, addr
);
23556 /* If we didn't copy cr before, do so now using r0. */
23557 if (cr_save_rtx
== NULL_RTX
)
23560 cr_save_rtx
= gen_rtx_REG (SImode
, 0);
23561 rs6000_emit_move_from_cr (cr_save_rtx
);
23564 /* Saving CR requires a two-instruction sequence: one instruction
23565 to move the CR to a general-purpose register, and a second
23566 instruction that stores the GPR to memory.
23568 We do not emit any DWARF CFI records for the first of these,
23569 because we cannot properly represent the fact that CR is saved in
23570 a register. One reason is that we cannot express that multiple
23571 CR fields are saved; another reason is that on 64-bit, the size
23572 of the CR register in DWARF (4 bytes) differs from the size of
23573 a general-purpose register.
23575 This means if any intervening instruction were to clobber one of
23576 the call-saved CR fields, we'd have incorrect CFI. To prevent
23577 this from happening, we mark the store to memory as a use of
23578 those CR fields, which prevents any such instruction from being
23579 scheduled in between the two instructions. */
23584 crsave_v
[n_crsave
++] = gen_rtx_SET (VOIDmode
, mem
, cr_save_rtx
);
23585 for (i
= 0; i
< 8; i
++)
23586 if (save_reg_p (CR0_REGNO
+ i
))
23587 crsave_v
[n_crsave
++]
23588 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (CCmode
, CR0_REGNO
+ i
));
23590 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
,
23591 gen_rtvec_v (n_crsave
, crsave_v
)));
23592 END_USE (REGNO (cr_save_rtx
));
23594 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
23595 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
23596 so we need to construct a frame expression manually. */
23597 RTX_FRAME_RELATED_P (insn
) = 1;
23599 /* Update address to be stack-pointer relative, like
23600 rs6000_frame_related would do. */
23601 addr
= gen_rtx_PLUS (Pmode
, gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
),
23602 GEN_INT (info
->cr_save_offset
+ sp_off
));
23603 mem
= gen_frame_mem (SImode
, addr
);
23605 if (DEFAULT_ABI
== ABI_ELFv2
)
23607 /* In the ELFv2 ABI we generate separate CFI records for each
23608 CR field that was actually saved. They all point to the
23609 same 32-bit stack slot. */
23613 for (i
= 0; i
< 8; i
++)
23614 if (save_reg_p (CR0_REGNO
+ i
))
23617 = gen_rtx_SET (VOIDmode
, mem
,
23618 gen_rtx_REG (SImode
, CR0_REGNO
+ i
));
23620 RTX_FRAME_RELATED_P (crframe
[n_crframe
]) = 1;
23624 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
23625 gen_rtx_PARALLEL (VOIDmode
,
23626 gen_rtvec_v (n_crframe
, crframe
)));
23630 /* In other ABIs, by convention, we use a single CR regnum to
23631 represent the fact that all call-saved CR fields are saved.
23632 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
23633 rtx set
= gen_rtx_SET (VOIDmode
, mem
,
23634 gen_rtx_REG (SImode
, CR2_REGNO
));
23635 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, set
);
23639 /* In the ELFv2 ABI we need to save all call-saved CR fields into
23640 *separate* slots if the routine calls __builtin_eh_return, so
23641 that they can be independently restored by the unwinder. */
23642 if (DEFAULT_ABI
== ABI_ELFv2
&& crtl
->calls_eh_return
)
23644 int i
, cr_off
= info
->ehcr_offset
;
23647 /* ??? We might get better performance by using multiple mfocrf
23649 crsave
= gen_rtx_REG (SImode
, 0);
23650 emit_insn (gen_movesi_from_cr (crsave
));
23652 for (i
= 0; i
< 8; i
++)
23653 if (!call_used_regs
[CR0_REGNO
+ i
])
23655 rtvec p
= rtvec_alloc (2);
23657 = gen_frame_store (crsave
, frame_reg_rtx
, cr_off
+ frame_off
);
23659 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (CCmode
, CR0_REGNO
+ i
));
23661 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
23663 RTX_FRAME_RELATED_P (insn
) = 1;
23664 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
23665 gen_frame_store (gen_rtx_REG (SImode
, CR0_REGNO
+ i
),
23666 sp_reg_rtx
, cr_off
+ sp_off
));
23668 cr_off
+= reg_size
;
  /* Update stack and set back pointer unless this is V.4,
     for which it was done previously.  */
  if (!WORLD_SAVE_P (info) && info->push_p
      && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
    {
      rtx ptr_reg = NULL;
      int ptr_off = 0;

      /* If saving altivec regs we need to be able to address all save
	 locations using a 16-bit offset.  */
      if ((strategy & SAVE_INLINE_VRS) == 0
	  || (info->altivec_size != 0
	      && (info->altivec_save_offset + info->altivec_size - 16
		  + info->total_size - frame_off) > 32767)
	  || (info->vrsave_size != 0
	      && (info->vrsave_save_offset
		  + info->total_size - frame_off) > 32767))
	{
	  int sel = SAVRES_SAVE | SAVRES_VR;
	  unsigned ptr_regno = ptr_regno_for_savres (sel);

	  if (using_static_chain_p
	      && ptr_regno == STATIC_CHAIN_REGNUM)
	    ptr_regno = 12;
	  if (REGNO (frame_reg_rtx) != ptr_regno)
	    START_USE (ptr_regno);
	  ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
	  frame_reg_rtx = ptr_reg;
	  ptr_off = info->altivec_save_offset + info->altivec_size;
	  frame_off = -ptr_off;
	}
      else if (REGNO (frame_reg_rtx) == 1)
	frame_off = info->total_size;
      rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
      sp_off = info->total_size;
      if (frame_reg_rtx != sp_reg_rtx)
	rs6000_emit_stack_tie (frame_reg_rtx, false);
    }
  /* Set frame pointer, if needed.  */
  if (frame_pointer_needed)
    {
      insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
			     sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  /* Save AltiVec registers if needed.  Save here because the red zone does
     not always include AltiVec registers.  */
  if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
      && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
    {
      int end_save = info->altivec_save_offset + info->altivec_size;
      int ptr_off;
      /* Oddly, the vector save/restore functions point r0 at the end
	 of the save area, then use r11 or r12 to load offsets for
	 [reg+reg] addressing.  */
      rtx ptr_reg = gen_rtx_REG (Pmode, 0);
      int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
      rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);

      gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);

      if (end_save + frame_off != 0)
	{
	  rtx offset = GEN_INT (end_save + frame_off);

	  emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
	}
      else
	emit_move_insn (ptr_reg, frame_reg_rtx);

      ptr_off = -end_save;
      insn = rs6000_emit_savres_rtx (info, scratch_reg,
				     info->altivec_save_offset + ptr_off,
				     0, V4SImode, SAVRES_SAVE | SAVRES_VR);
      rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
	{
	  /* The oddity mentioned above clobbered our frame reg.  */
	  emit_move_insn (frame_reg_rtx, ptr_reg);
	  frame_off = ptr_off;
	}
    }
  else if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
	   && info->altivec_size != 0)
    {
      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	  {
	    rtx areg, savereg, mem, split_reg;
	    int offset;

	    offset = (info->altivec_save_offset + frame_off
		      + 16 * (i - info->first_altivec_reg_save));

	    savereg = gen_rtx_REG (V4SImode, i);

	    areg = gen_rtx_REG (Pmode, 0);
	    emit_move_insn (areg, GEN_INT (offset));

	    /* AltiVec addressing mode is [reg+reg].  */
	    mem = gen_frame_mem (V4SImode,
				 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));

	    insn = emit_move_insn (mem, savereg);

	    /* When we split a VSX store into two insns, we need to make
	       sure the DWARF info knows which register we are storing.
	       Pass it in to be used on the appropriate note.  */
	    if (!BYTES_BIG_ENDIAN
		&& GET_CODE (PATTERN (insn)) == SET
		&& GET_CODE (SET_SRC (PATTERN (insn))) == VEC_SELECT)
	      split_reg = savereg;
	    else
	      split_reg = NULL_RTX;

	    rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
				  areg, GEN_INT (offset), split_reg);
	  }
    }
  /* VRSAVE is a bit vector representing which AltiVec registers
     are used.  The OS uses this to determine which vector
     registers to save on a context switch.  We need to save
     VRSAVE on the stack frame, add whatever AltiVec registers we
     used in this function, and do the corresponding magic in the
     epilogue.  */
  if (!WORLD_SAVE_P (info)
      && TARGET_ALTIVEC
      && TARGET_ALTIVEC_VRSAVE
      && info->vrsave_mask != 0)
    {
      rtx reg, vrsave;
      int offset;
      int save_regno;

      /* Get VRSAVE onto a GPR.  Note that ABI_V4 and ABI_DARWIN might
	 be using r12 as frame_reg_rtx and r11 as the static chain
	 pointer for nested functions.  */
      save_regno = 12;
      if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
	  && !using_static_chain_p)
	save_regno = 0;
      else if (REGNO (frame_reg_rtx) == 12)
	{
	  save_regno = 11;
	  if (using_static_chain_p)
	    save_regno = 0;
	}

      NOT_INUSE (save_regno);
      reg = gen_rtx_REG (SImode, save_regno);
      vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
      if (TARGET_MACHO)
	emit_insn (gen_get_vrsave_internal (reg));
      else
	emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));

      /* Save VRSAVE.  */
      offset = info->vrsave_save_offset + frame_off;
      insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));

      /* Include the registers in the mask.  */
      emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));

      insn = emit_insn (generate_set_vrsave (reg, info, 0));
    }
  /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up.  */
  if (!TARGET_SINGLE_PIC_BASE
      && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
	  || (DEFAULT_ABI == ABI_V4
	      && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
	      && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
    {
      /* If emit_load_toc_table will use the link register, we need to save
	 it.  We use R12 for this purpose because emit_load_toc_table
	 can use register 0.  This allows us to use a plain 'blr' to return
	 from the procedure more often.  */
      int save_LR_around_toc_setup = (TARGET_ELF
				      && DEFAULT_ABI == ABI_V4
				      && flag_pic
				      && ! info->lr_save_p
				      && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
      if (save_LR_around_toc_setup)
	{
	  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
	  rtx tmp = gen_rtx_REG (Pmode, 12);

	  insn = emit_move_insn (tmp, lr);
	  RTX_FRAME_RELATED_P (insn) = 1;

	  rs6000_emit_load_toc_table (TRUE);

	  insn = emit_move_insn (lr, tmp);
	  add_reg_note (insn, REG_CFA_RESTORE, lr);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      else
	rs6000_emit_load_toc_table (TRUE);
    }
  if (!TARGET_SINGLE_PIC_BASE
      && DEFAULT_ABI == ABI_DARWIN
      && flag_pic && crtl->uses_pic_offset_table)
    {
      rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
      rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);

      /* Save and restore LR locally around this call (in R0).  */
      if (!info->lr_save_p)
	emit_move_insn (gen_rtx_REG (Pmode, 0), lr);

      emit_insn (gen_load_macho_picbase (src));

      emit_move_insn (gen_rtx_REG (Pmode,
				   RS6000_PIC_OFFSET_TABLE_REGNUM),
		      lr);

      if (!info->lr_save_p)
	emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
    }
  /* If we need to, save the TOC register after doing the stack setup.
     Do not emit eh frame info for this save.  The unwinder wants info,
     conceptually attached to instructions in this function, about
     register values in the caller of this function.  This R2 may have
     already been changed from the value in the caller.
     We don't attempt to write accurate DWARF EH frame info for R2
     because code emitted by gcc for a (non-pointer) function call
     doesn't save and restore R2.  Instead, R2 is managed out-of-line
     by a linker generated plt call stub when the function resides in
     a shared library.  This behaviour is costly to describe in DWARF,
     both in terms of the size of DWARF info and the time taken in the
     unwinder to interpret it.  R2 changes, apart from the
     calls_eh_return case earlier in this function, are handled by
     linux-unwind.h frob_update_context.  */
  if (rs6000_save_toc_in_prologue_p ())
    {
      rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
      emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
    }
/* Write function prologue.  */

static void
rs6000_output_function_prologue (FILE *file,
				 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  if (TARGET_DEBUG_STACK)
    debug_stack_info (info);
  /* Write .extern for any function we will call to save and restore
     fp values.  */
  if (info->first_fp_reg_save < 64)
    {
      char *name;
      int regno = info->first_fp_reg_save - 32;

      if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
	{
	  bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
	  int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
	  name = rs6000_savres_routine_name (info, regno, sel);
	  fprintf (file, "\t.extern %s\n", name);
	}
      if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
	{
	  bool lr = (info->savres_strategy
		     & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
	  int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
	  name = rs6000_savres_routine_name (info, regno, sel);
	  fprintf (file, "\t.extern %s\n", name);
	}
    }
  /* ELFv2 ABI r2 setup code and local entry point.  This must follow
     immediately after the global entry point label.  */
  if (DEFAULT_ABI == ABI_ELFv2 && cfun->machine->r2_setup_needed)
    {
      const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);

      fprintf (file, "0:\taddis 2,12,.TOC.-0b@ha\n");
      fprintf (file, "\taddi 2,2,.TOC.-0b@l\n");

      fputs ("\t.localentry\t", file);
      assemble_name (file, name);
      fputs (",.-", file);
      assemble_name (file, name);
      fputs ("\n", file);
    }
  /* Output -mprofile-kernel code.  This needs to be done here instead of
     in output_function_profile since it must go after the ELFv2 ABI
     local entry point.  */
  if (TARGET_PROFILE_KERNEL && crtl->profile)
    {
      gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
      gcc_assert (!TARGET_32BIT);

      asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
      asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);

      /* In the ELFv2 ABI we have no compiler stack word.  It must be
	 the responsibility of _mcount to preserve the static chain
	 register if required.  */
      if (DEFAULT_ABI != ABI_ELFv2
	  && cfun->static_chain_decl != NULL)
	{
	  asm_fprintf (file, "\tstd %s,24(%s)\n",
		       reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
	  fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
	  asm_fprintf (file, "\tld %s,24(%s)\n",
		       reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
	}
      else
	fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
    }

  rs6000_pic_labelno++;
}
/* Non-zero if vmx regs are restored before the frame pop, zero if
   we restore after the pop when possible.  */
#define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0

/* Restoring cr is a two step process: loading a reg from the frame
   save, then moving the reg to cr.  For ABI_V4 we must let the
   unwinder know that the stack location is no longer valid at or
   before the stack deallocation, but we can't emit a cfa_restore for
   cr at the stack deallocation like we do for other registers.
   The trouble is that it is possible for the move to cr to be
   scheduled after the stack deallocation.  So say exactly where cr
   is located on each of the two insns.  */
static rtx
load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
{
  rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (SImode, regno);
  rtx_insn *insn = emit_move_insn (reg, mem);

  if (!exit_func && DEFAULT_ABI == ABI_V4)
    {
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
      rtx set = gen_rtx_SET (VOIDmode, reg, cr);

      add_reg_note (insn, REG_CFA_REGISTER, set);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  return reg;
}
/* Reload CR from REG.  */

static void
restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
{
  int count = 0;
  int i;

  if (using_mfcr_multiple)
    {
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  count++;
      gcc_assert (count);
    }

  if (using_mfcr_multiple && count > 1)
    {
      rtx_insn *insn;
      rtvec p;
      int ndx;

      p = rtvec_alloc (count);

      ndx = 0;
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  {
	    rtvec r = rtvec_alloc (2);
	    RTVEC_ELT (r, 0) = reg;
	    RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
	    RTVEC_ELT (p, ndx) =
	      gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i),
			   gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
	    ndx++;
	  }
      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      gcc_assert (ndx == count);

      /* For the ELFv2 ABI we generate a CFA_RESTORE for each
	 CR field separately.  */
      if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
	{
	  for (i = 0; i < 8; i++)
	    if (save_reg_p (CR0_REGNO + i))
	      add_reg_note (insn, REG_CFA_RESTORE,
			    gen_rtx_REG (SImode, CR0_REGNO + i));

	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }
  else
    for (i = 0; i < 8; i++)
      if (save_reg_p (CR0_REGNO + i))
	{
	  rtx insn = emit_insn (gen_movsi_to_cr_one
				 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));

	  /* For the ELFv2 ABI we generate a CFA_RESTORE for each
	     CR field separately, attached to the insn that in fact
	     restores this particular CR field.  */
	  if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
	    {
	      add_reg_note (insn, REG_CFA_RESTORE,
			    gen_rtx_REG (SImode, CR0_REGNO + i));

	      RTX_FRAME_RELATED_P (insn) = 1;
	    }
	}

  /* For other ABIs, we just generate a single CFA_RESTORE for CR2.  */
  if (!exit_func && DEFAULT_ABI != ABI_ELFv2
      && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
    {
      rtx_insn *insn = get_last_insn ();
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);

      add_reg_note (insn, REG_CFA_RESTORE, cr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
/* Like cr, the move to lr instruction can be scheduled after the
   stack deallocation, but unlike cr, its stack frame save is still
   valid.  So we only need to emit the cfa_restore on the correct
   instruction.  */

static void
load_lr_save (int regno, rtx frame_reg_rtx, int offset)
{
  rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (Pmode, regno);

  emit_move_insn (reg, mem);
}

static void
restore_saved_lr (int regno, bool exit_func)
{
  rtx reg = gen_rtx_REG (Pmode, regno);
  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
  rtx_insn *insn = emit_move_insn (lr, reg);

  if (!exit_func && flag_shrink_wrap)
    {
      add_reg_note (insn, REG_CFA_RESTORE, lr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
static rtx
add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
{
  if (DEFAULT_ABI == ABI_ELFv2)
    {
      int i;
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  {
	    rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
					   cfa_restores);
	  }
    }
  else if (info->cr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
				   gen_rtx_REG (SImode, CR2_REGNO),
				   cfa_restores);

  if (info->lr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
				   gen_rtx_REG (Pmode, LR_REGNO),
				   cfa_restores);
  return cfa_restores;
}
/* Return true if OFFSET from stack pointer can be clobbered by signals.
   V.4 doesn't have any stack cushion, AIX ABIs have 220 or 288 bytes
   below stack pointer not clobbered by signals.  */

static bool
offset_below_red_zone_p (HOST_WIDE_INT offset)
{
  return offset < (DEFAULT_ABI == ABI_V4
		   ? 0
		   : TARGET_32BIT ? -220 : -288);
}
/* Append CFA_RESTORES to any existing REG_NOTES on the last insn.  */

static void
emit_cfa_restores (rtx cfa_restores)
{
  rtx_insn *insn = get_last_insn ();
  rtx *loc = &REG_NOTES (insn);

  while (*loc)
    loc = &XEXP (*loc, 1);
  *loc = cfa_restores;
  RTX_FRAME_RELATED_P (insn) = 1;
}
/* Emit function epilogue as insns.  */

void
rs6000_emit_epilogue (int sibcall)
{
  rs6000_stack_t *info;
  int restoring_GPRs_inline;
  int restoring_FPRs_inline;
  int using_load_multiple;
  int using_mtcr_multiple;
  int use_backchain_to_restore_sp;
  int restore_lr;
  int strategy;
  HOST_WIDE_INT frame_off = 0;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
  rtx frame_reg_rtx = sp_reg_rtx;
  rtx cfa_restores = NULL_RTX;
  rtx_insn *insn;
  rtx cr_save_reg = NULL_RTX;
  enum machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int i;
  bool exit_func;
  unsigned ptr_regno;

  info = rs6000_stack_info ();

  if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
    {
      reg_mode = V2SImode;
      reg_size = 8;
    }
  strategy = info->savres_strategy;
  using_load_multiple = strategy & SAVRES_MULTIPLE;
  restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
  restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
  using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
			 || rs6000_cpu == PROCESSOR_PPC603
			 || rs6000_cpu == PROCESSOR_PPC750
			 || rs6000_cpu == PROCESSOR_PPC7400);
  /* Restore via the backchain when we have a large frame, since this
     is more efficient than an addis, addi pair.  The second condition
     here will not trigger at the moment; we don't actually need a
     frame pointer for alloca, but the generic parts of the compiler
     give us one anyway.  */
  use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
				 || (cfun->calls_alloca
				     && !frame_pointer_needed));
  restore_lr = (info->lr_save_p
		&& (restoring_FPRs_inline
		    || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
		&& (restoring_GPRs_inline
		    || info->first_fp_reg_save < 64));
24255 if (WORLD_SAVE_P (info
))
24259 const char *alloc_rname
;
24262 /* eh_rest_world_r10 will return to the location saved in the LR
24263 stack slot (which is not likely to be our caller.)
24264 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
24265 rest_world is similar, except any R10 parameter is ignored.
24266 The exception-handling stuff that was here in 2.95 is no
24267 longer necessary. */
24271 + 32 - info
->first_gp_reg_save
24272 + LAST_ALTIVEC_REGNO
+ 1 - info
->first_altivec_reg_save
24273 + 63 + 1 - info
->first_fp_reg_save
);
24275 strcpy (rname
, ((crtl
->calls_eh_return
) ?
24276 "*eh_rest_world_r10" : "*rest_world"));
24277 alloc_rname
= ggc_strdup (rname
);
24280 RTVEC_ELT (p
, j
++) = ret_rtx
;
24281 RTVEC_ELT (p
, j
++) = gen_rtx_USE (VOIDmode
,
24282 gen_rtx_REG (Pmode
,
24285 = gen_rtx_USE (VOIDmode
, gen_rtx_SYMBOL_REF (Pmode
, alloc_rname
));
24286 /* The instruction pattern requires a clobber here;
24287 it is shared with the restVEC helper. */
24289 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, 11));
24292 /* CR register traditionally saved as CR2. */
24293 rtx reg
= gen_rtx_REG (SImode
, CR2_REGNO
);
24295 = gen_frame_load (reg
, frame_reg_rtx
, info
->cr_save_offset
);
24296 if (flag_shrink_wrap
)
24298 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
,
24299 gen_rtx_REG (Pmode
, LR_REGNO
),
24301 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
24305 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
24307 rtx reg
= gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
);
24309 = gen_frame_load (reg
,
24310 frame_reg_rtx
, info
->gp_save_offset
+ reg_size
* i
);
24311 if (flag_shrink_wrap
)
24312 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
24314 for (i
= 0; info
->first_altivec_reg_save
+ i
<= LAST_ALTIVEC_REGNO
; i
++)
24316 rtx reg
= gen_rtx_REG (V4SImode
, info
->first_altivec_reg_save
+ i
);
24318 = gen_frame_load (reg
,
24319 frame_reg_rtx
, info
->altivec_save_offset
+ 16 * i
);
24320 if (flag_shrink_wrap
)
24321 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
24323 for (i
= 0; info
->first_fp_reg_save
+ i
<= 63; i
++)
24325 rtx reg
= gen_rtx_REG ((TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
24326 ? DFmode
: SFmode
),
24327 info
->first_fp_reg_save
+ i
);
24329 = gen_frame_load (reg
, frame_reg_rtx
, info
->fp_save_offset
+ 8 * i
);
24330 if (flag_shrink_wrap
)
24331 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
24334 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, 0));
24336 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 12));
24338 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 7));
24340 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 8));
24342 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (SImode
, 10));
24343 insn
= emit_jump_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
24345 if (flag_shrink_wrap
)
24347 REG_NOTES (insn
) = cfa_restores
;
24348 add_reg_note (insn
, REG_CFA_DEF_CFA
, sp_reg_rtx
);
24349 RTX_FRAME_RELATED_P (insn
) = 1;
24354 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
24356 frame_off
= info
->total_size
;
24358 /* Restore AltiVec registers if we must do so before adjusting the
24360 if (TARGET_ALTIVEC_ABI
24361 && info
->altivec_size
!= 0
24362 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24363 || (DEFAULT_ABI
!= ABI_V4
24364 && offset_below_red_zone_p (info
->altivec_save_offset
))))
24367 int scratch_regno
= ptr_regno_for_savres (SAVRES_VR
);
24369 gcc_checking_assert (scratch_regno
== 11 || scratch_regno
== 12);
24370 if (use_backchain_to_restore_sp
)
24372 int frame_regno
= 11;
24374 if ((strategy
& REST_INLINE_VRS
) == 0)
24376 /* Of r11 and r12, select the one not clobbered by an
24377 out-of-line restore function for the frame register. */
24378 frame_regno
= 11 + 12 - scratch_regno
;
24380 frame_reg_rtx
= gen_rtx_REG (Pmode
, frame_regno
);
24381 emit_move_insn (frame_reg_rtx
,
24382 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
24385 else if (frame_pointer_needed
)
24386 frame_reg_rtx
= hard_frame_pointer_rtx
;
24388 if ((strategy
& REST_INLINE_VRS
) == 0)
24390 int end_save
= info
->altivec_save_offset
+ info
->altivec_size
;
24392 rtx ptr_reg
= gen_rtx_REG (Pmode
, 0);
24393 rtx scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
24395 if (end_save
+ frame_off
!= 0)
24397 rtx offset
= GEN_INT (end_save
+ frame_off
);
24399 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
24402 emit_move_insn (ptr_reg
, frame_reg_rtx
);
24404 ptr_off
= -end_save
;
24405 insn
= rs6000_emit_savres_rtx (info
, scratch_reg
,
24406 info
->altivec_save_offset
+ ptr_off
,
24407 0, V4SImode
, SAVRES_VR
);
24411 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
24412 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
24414 rtx addr
, areg
, mem
, reg
;
24416 areg
= gen_rtx_REG (Pmode
, 0);
24418 (areg
, GEN_INT (info
->altivec_save_offset
24420 + 16 * (i
- info
->first_altivec_reg_save
)));
24422 /* AltiVec addressing mode is [reg+reg]. */
24423 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
, areg
);
24424 mem
= gen_frame_mem (V4SImode
, addr
);
24426 reg
= gen_rtx_REG (V4SImode
, i
);
24427 emit_move_insn (reg
, mem
);
24431 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
24432 if (((strategy
& REST_INLINE_VRS
) == 0
24433 || (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
)) != 0)
24434 && (flag_shrink_wrap
24435 || (offset_below_red_zone_p
24436 (info
->altivec_save_offset
24437 + 16 * (i
- info
->first_altivec_reg_save
)))))
24439 rtx reg
= gen_rtx_REG (V4SImode
, i
);
24440 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
24444 /* Restore VRSAVE if we must do so before adjusting the stack. */
24446 && TARGET_ALTIVEC_VRSAVE
24447 && info
->vrsave_mask
!= 0
24448 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24449 || (DEFAULT_ABI
!= ABI_V4
24450 && offset_below_red_zone_p (info
->vrsave_save_offset
))))
24454 if (frame_reg_rtx
== sp_reg_rtx
)
24456 if (use_backchain_to_restore_sp
)
24458 frame_reg_rtx
= gen_rtx_REG (Pmode
, 11);
24459 emit_move_insn (frame_reg_rtx
,
24460 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
24463 else if (frame_pointer_needed
)
24464 frame_reg_rtx
= hard_frame_pointer_rtx
;
24467 reg
= gen_rtx_REG (SImode
, 12);
24468 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
24469 info
->vrsave_save_offset
+ frame_off
));
24471 emit_insn (generate_set_vrsave (reg
, info
, 1));
24475 /* If we have a large stack frame, restore the old stack pointer
24476 using the backchain. */
24477 if (use_backchain_to_restore_sp
)
24479 if (frame_reg_rtx
== sp_reg_rtx
)
24481 /* Under V.4, don't reset the stack pointer until after we're done
24482 loading the saved registers. */
24483 if (DEFAULT_ABI
== ABI_V4
)
24484 frame_reg_rtx
= gen_rtx_REG (Pmode
, 11);
24486 insn
= emit_move_insn (frame_reg_rtx
,
24487 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
24490 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24491 && DEFAULT_ABI
== ABI_V4
)
24492 /* frame_reg_rtx has been set up by the altivec restore. */
24496 insn
= emit_move_insn (sp_reg_rtx
, frame_reg_rtx
);
24497 frame_reg_rtx
= sp_reg_rtx
;
24500 /* If we have a frame pointer, we can restore the old stack pointer
24502 else if (frame_pointer_needed
)
24504 frame_reg_rtx
= sp_reg_rtx
;
24505 if (DEFAULT_ABI
== ABI_V4
)
24506 frame_reg_rtx
= gen_rtx_REG (Pmode
, 11);
24507 /* Prevent reordering memory accesses against stack pointer restore. */
24508 else if (cfun
->calls_alloca
24509 || offset_below_red_zone_p (-info
->total_size
))
24510 rs6000_emit_stack_tie (frame_reg_rtx
, true);
24512 insn
= emit_insn (gen_add3_insn (frame_reg_rtx
, hard_frame_pointer_rtx
,
24513 GEN_INT (info
->total_size
)));
24516 else if (info
->push_p
24517 && DEFAULT_ABI
!= ABI_V4
24518 && !crtl
->calls_eh_return
)
24520 /* Prevent reordering memory accesses against stack pointer restore. */
24521 if (cfun
->calls_alloca
24522 || offset_below_red_zone_p (-info
->total_size
))
24523 rs6000_emit_stack_tie (frame_reg_rtx
, false);
24524 insn
= emit_insn (gen_add3_insn (sp_reg_rtx
, sp_reg_rtx
,
24525 GEN_INT (info
->total_size
)));
24528 if (insn
&& frame_reg_rtx
== sp_reg_rtx
)
24532 REG_NOTES (insn
) = cfa_restores
;
24533 cfa_restores
= NULL_RTX
;
24535 add_reg_note (insn
, REG_CFA_DEF_CFA
, sp_reg_rtx
);
24536 RTX_FRAME_RELATED_P (insn
) = 1;
24539 /* Restore AltiVec registers if we have not done so already. */
24540 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24541 && TARGET_ALTIVEC_ABI
24542 && info
->altivec_size
!= 0
24543 && (DEFAULT_ABI
== ABI_V4
24544 || !offset_below_red_zone_p (info
->altivec_save_offset
)))
24548 if ((strategy
& REST_INLINE_VRS
) == 0)
24550 int end_save
= info
->altivec_save_offset
+ info
->altivec_size
;
24552 rtx ptr_reg
= gen_rtx_REG (Pmode
, 0);
24553 int scratch_regno
= ptr_regno_for_savres (SAVRES_VR
);
24554 rtx scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
24556 if (end_save
+ frame_off
!= 0)
24558 rtx offset
= GEN_INT (end_save
+ frame_off
);
24560 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
24563 emit_move_insn (ptr_reg
, frame_reg_rtx
);
24565 ptr_off
= -end_save
;
24566 insn
= rs6000_emit_savres_rtx (info
, scratch_reg
,
24567 info
->altivec_save_offset
+ ptr_off
,
24568 0, V4SImode
, SAVRES_VR
);
24569 if (REGNO (frame_reg_rtx
) == REGNO (scratch_reg
))
24571 /* Frame reg was clobbered by out-of-line save. Restore it
24572 from ptr_reg, and if we are calling out-of-line gpr or
24573 fpr restore set up the correct pointer and offset. */
24574 unsigned newptr_regno
= 1;
24575 if (!restoring_GPRs_inline
)
24577 bool lr
= info
->gp_save_offset
+ info
->gp_size
== 0;
24578 int sel
= SAVRES_GPR
| (lr
? SAVRES_LR
: 0);
24579 newptr_regno
= ptr_regno_for_savres (sel
);
24580 end_save
= info
->gp_save_offset
+ info
->gp_size
;
24582 else if (!restoring_FPRs_inline
)
24584 bool lr
= !(strategy
& REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
);
24585 int sel
= SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
24586 newptr_regno
= ptr_regno_for_savres (sel
);
24587 end_save
= info
->gp_save_offset
+ info
->gp_size
;
24590 if (newptr_regno
!= 1 && REGNO (frame_reg_rtx
) != newptr_regno
)
24591 frame_reg_rtx
= gen_rtx_REG (Pmode
, newptr_regno
);
24593 if (end_save
+ ptr_off
!= 0)
24595 rtx offset
= GEN_INT (end_save
+ ptr_off
);
24597 frame_off
= -end_save
;
24598 emit_insn (gen_add3_insn (frame_reg_rtx
, ptr_reg
, offset
));
24602 frame_off
= ptr_off
;
24603 emit_move_insn (frame_reg_rtx
, ptr_reg
);
24609 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
24610 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
24612 rtx addr
, areg
, mem
, reg
;
24614 areg
= gen_rtx_REG (Pmode
, 0);
24616 (areg
, GEN_INT (info
->altivec_save_offset
24618 + 16 * (i
- info
->first_altivec_reg_save
)));
24620 /* AltiVec addressing mode is [reg+reg]. */
24621 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
, areg
);
24622 mem
= gen_frame_mem (V4SImode
, addr
);
24624 reg
= gen_rtx_REG (V4SImode
, i
);
24625 emit_move_insn (reg
, mem
);
24629 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
24630 if (((strategy
& REST_INLINE_VRS
) == 0
24631 || (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
)) != 0)
24632 && (DEFAULT_ABI
== ABI_V4
|| flag_shrink_wrap
))
24634 rtx reg
= gen_rtx_REG (V4SImode
, i
);
24635 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
24639 /* Restore VRSAVE if we have not done so already. */
24640 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24642 && TARGET_ALTIVEC_VRSAVE
24643 && info
->vrsave_mask
!= 0
24644 && (DEFAULT_ABI
== ABI_V4
24645 || !offset_below_red_zone_p (info
->vrsave_save_offset
)))
24649 reg
= gen_rtx_REG (SImode
, 12);
24650 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
24651 info
->vrsave_save_offset
+ frame_off
));
24653 emit_insn (generate_set_vrsave (reg
, info
, 1));
24656 /* If we exit by an out-of-line restore function on ABI_V4 then that
24657 function will deallocate the stack, so we don't need to worry
24658 about the unwinder restoring cr from an invalid stack frame
24660 exit_func
= (!restoring_FPRs_inline
24661 || (!restoring_GPRs_inline
24662 && info
->first_fp_reg_save
== 64));
24664 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
24665 *separate* slots if the routine calls __builtin_eh_return, so
24666 that they can be independently restored by the unwinder. */
24667 if (DEFAULT_ABI
== ABI_ELFv2
&& crtl
->calls_eh_return
)
24669 int i
, cr_off
= info
->ehcr_offset
;
24671 for (i
= 0; i
< 8; i
++)
24672 if (!call_used_regs
[CR0_REGNO
+ i
])
24674 rtx reg
= gen_rtx_REG (SImode
, 0);
24675 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
24676 cr_off
+ frame_off
));
24678 insn
= emit_insn (gen_movsi_to_cr_one
24679 (gen_rtx_REG (CCmode
, CR0_REGNO
+ i
), reg
));
24681 if (!exit_func
&& flag_shrink_wrap
)
24683 add_reg_note (insn
, REG_CFA_RESTORE
,
24684 gen_rtx_REG (SImode
, CR0_REGNO
+ i
));
24686 RTX_FRAME_RELATED_P (insn
) = 1;
24689 cr_off
+= reg_size
;
24693 /* Get the old lr if we saved it. If we are restoring registers
24694 out-of-line, then the out-of-line routines can do this for us. */
24695 if (restore_lr
&& restoring_GPRs_inline
)
24696 load_lr_save (0, frame_reg_rtx
, info
->lr_save_offset
+ frame_off
);
24698 /* Get the old cr if we saved it. */
24699 if (info
->cr_save_p
)
24701 unsigned cr_save_regno
= 12;
24703 if (!restoring_GPRs_inline
)
24705 /* Ensure we don't use the register used by the out-of-line
24706 gpr register restore below. */
24707 bool lr
= info
->gp_save_offset
+ info
->gp_size
== 0;
24708 int sel
= SAVRES_GPR
| (lr
? SAVRES_LR
: 0);
24709 int gpr_ptr_regno
= ptr_regno_for_savres (sel
);
24711 if (gpr_ptr_regno
== 12)
24712 cr_save_regno
= 11;
24713 gcc_checking_assert (REGNO (frame_reg_rtx
) != cr_save_regno
);
24715 else if (REGNO (frame_reg_rtx
) == 12)
24716 cr_save_regno
= 11;
24718 cr_save_reg
= load_cr_save (cr_save_regno
, frame_reg_rtx
,
24719 info
->cr_save_offset
+ frame_off
,
24723 /* Set LR here to try to overlap restores below. */
24724 if (restore_lr
&& restoring_GPRs_inline
)
24725 restore_saved_lr (0, exit_func
);
24727 /* Load exception handler data registers, if needed. */
24728 if (crtl
->calls_eh_return
)
24730 unsigned int i
, regno
;
24734 rtx reg
= gen_rtx_REG (reg_mode
, 2);
24735 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
24736 frame_off
+ RS6000_TOC_SAVE_SLOT
));
24743 regno
= EH_RETURN_DATA_REGNO (i
);
24744 if (regno
== INVALID_REGNUM
)
24747 /* Note: possible use of r0 here to address SPE regs. */
24748 mem
= gen_frame_mem_offset (reg_mode
, frame_reg_rtx
,
24749 info
->ehrd_offset
+ frame_off
24750 + reg_size
* (int) i
);
24752 emit_move_insn (gen_rtx_REG (reg_mode
, regno
), mem
);
24756 /* Restore GPRs. This is done as a PARALLEL if we are using
24757 the load-multiple instructions. */
24759 && info
->spe_64bit_regs_used
24760 && info
->first_gp_reg_save
!= 32)
24762 /* Determine whether we can address all of the registers that need
24763 to be saved with an offset from frame_reg_rtx that fits in
24764 the small const field for SPE memory instructions. */
24765 int spe_regs_addressable
24766 = (SPE_CONST_OFFSET_OK (info
->spe_gp_save_offset
+ frame_off
24767 + reg_size
* (32 - info
->first_gp_reg_save
- 1))
24768 && restoring_GPRs_inline
);
24770 if (!spe_regs_addressable
)
24772 int ool_adjust
= 0;
24773 rtx old_frame_reg_rtx
= frame_reg_rtx
;
24774 /* Make r11 point to the start of the SPE save area. We worried about
24775 not clobbering it when we were saving registers in the prologue.
24776 There's no need to worry here because the static chain is passed
24777 anew to every function. */
24779 if (!restoring_GPRs_inline
)
24780 ool_adjust
= 8 * (info
->first_gp_reg_save
- FIRST_SAVED_GP_REGNO
);
24781 frame_reg_rtx
= gen_rtx_REG (Pmode
, 11);
24782 emit_insn (gen_addsi3 (frame_reg_rtx
, old_frame_reg_rtx
,
24783 GEN_INT (info
->spe_gp_save_offset
24786 /* Keep the invariant that frame_reg_rtx + frame_off points
24787 at the top of the stack frame. */
24788 frame_off
= -info
->spe_gp_save_offset
+ ool_adjust
;
24791 if (restoring_GPRs_inline
)
24793 HOST_WIDE_INT spe_offset
= info
->spe_gp_save_offset
+ frame_off
;
24795 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
24796 if (rs6000_reg_live_or_pic_offset_p (info
->first_gp_reg_save
+ i
))
24798 rtx offset
, addr
, mem
, reg
;
24800 /* We're doing all this to ensure that the immediate offset
24801 fits into the immediate field of 'evldd'. */
24802 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset
+ reg_size
* i
));
24804 offset
= GEN_INT (spe_offset
+ reg_size
* i
);
24805 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
, offset
);
24806 mem
= gen_rtx_MEM (V2SImode
, addr
);
24807 reg
= gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
);
24809 emit_move_insn (reg
, mem
);
24813 rs6000_emit_savres_rtx (info
, frame_reg_rtx
,
24814 info
->spe_gp_save_offset
+ frame_off
,
24815 info
->lr_save_offset
+ frame_off
,
24817 SAVRES_GPR
| SAVRES_LR
);
24819 else if (!restoring_GPRs_inline
)
24821 /* We are jumping to an out-of-line function. */
24823 int end_save
= info
->gp_save_offset
+ info
->gp_size
;
24824 bool can_use_exit
= end_save
== 0;
24825 int sel
= SAVRES_GPR
| (can_use_exit
? SAVRES_LR
: 0);
24828 /* Emit stack reset code if we need it. */
24829 ptr_regno
= ptr_regno_for_savres (sel
);
24830 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
24832 rs6000_emit_stack_reset (info
, frame_reg_rtx
, frame_off
, ptr_regno
);
24833 else if (end_save
+ frame_off
!= 0)
24834 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
,
24835 GEN_INT (end_save
+ frame_off
)));
24836 else if (REGNO (frame_reg_rtx
) != ptr_regno
)
24837 emit_move_insn (ptr_reg
, frame_reg_rtx
);
24838 if (REGNO (frame_reg_rtx
) == ptr_regno
)
24839 frame_off
= -end_save
;
24841 if (can_use_exit
&& info
->cr_save_p
)
24842 restore_saved_cr (cr_save_reg
, using_mtcr_multiple
, true);
24844 ptr_off
= -end_save
;
24845 rs6000_emit_savres_rtx (info
, ptr_reg
,
24846 info
->gp_save_offset
+ ptr_off
,
24847 info
->lr_save_offset
+ ptr_off
,
24850 else if (using_load_multiple
)
24853 p
= rtvec_alloc (32 - info
->first_gp_reg_save
);
24854 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
24856 = gen_frame_load (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
24858 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
24859 emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
24863 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
24864 if (rs6000_reg_live_or_pic_offset_p (info
->first_gp_reg_save
+ i
))
24865 emit_insn (gen_frame_load
24866 (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
24868 info
->gp_save_offset
+ frame_off
+ reg_size
* i
));
24871 if (DEFAULT_ABI
== ABI_V4
|| flag_shrink_wrap
)
	  /* If the frame pointer was used then we can't delay emitting
	     a REG_CFA_DEF_CFA note.  This must happen on the insn that
	     restores the frame pointer, r31.  We may have already emitted
	     a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
	     discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
	     be harmless if emitted.  */
24879 if (frame_pointer_needed
)
24881 insn
= get_last_insn ();
24882 add_reg_note (insn
, REG_CFA_DEF_CFA
,
24883 plus_constant (Pmode
, frame_reg_rtx
, frame_off
));
24884 RTX_FRAME_RELATED_P (insn
) = 1;
      /* Set up cfa_restores.  We always need these when
	 shrink-wrapping.  If not shrink-wrapping then we only need
	 the cfa_restore when the stack location is no longer valid.
	 The cfa_restores must be emitted on or before the insn that
	 invalidates the stack, and of course must not be emitted
	 before the insn that actually does the restore.  The latter
	 is why it is a bad idea to emit the cfa_restores as a group
	 on the last instruction here that actually does a restore:
	 That insn may be reordered with respect to others doing
	 restores.  */
24897 if (flag_shrink_wrap
24898 && !restoring_GPRs_inline
24899 && info
->first_fp_reg_save
== 64)
24900 cfa_restores
= add_crlr_cfa_restore (info
, cfa_restores
);
24902 for (i
= info
->first_gp_reg_save
; i
< 32; i
++)
24903 if (!restoring_GPRs_inline
24904 || using_load_multiple
24905 || rs6000_reg_live_or_pic_offset_p (i
))
24907 rtx reg
= gen_rtx_REG (reg_mode
, i
);
24909 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
24913 if (!restoring_GPRs_inline
24914 && info
->first_fp_reg_save
== 64)
24916 /* We are jumping to an out-of-line function. */
24918 emit_cfa_restores (cfa_restores
);
24922 if (restore_lr
&& !restoring_GPRs_inline
)
24924 load_lr_save (0, frame_reg_rtx
, info
->lr_save_offset
+ frame_off
);
24925 restore_saved_lr (0, exit_func
);
24928 /* Restore fpr's if we need to do it without calling a function. */
24929 if (restoring_FPRs_inline
)
24930 for (i
= 0; i
< 64 - info
->first_fp_reg_save
; i
++)
24931 if (save_reg_p (info
->first_fp_reg_save
+ i
))
24933 rtx reg
= gen_rtx_REG ((TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
24934 ? DFmode
: SFmode
),
24935 info
->first_fp_reg_save
+ i
);
24936 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
24937 info
->fp_save_offset
+ frame_off
+ 8 * i
));
24938 if (DEFAULT_ABI
== ABI_V4
|| flag_shrink_wrap
)
24939 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
24942 /* If we saved cr, restore it here. Just those that were used. */
24943 if (info
->cr_save_p
)
24944 restore_saved_cr (cr_save_reg
, using_mtcr_multiple
, exit_func
);
24946 /* If this is V.4, unwind the stack pointer after all of the loads
24947 have been done, or set up r11 if we are restoring fp out of line. */
24949 if (!restoring_FPRs_inline
)
24951 bool lr
= (strategy
& REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
) == 0;
24952 int sel
= SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
24953 ptr_regno
= ptr_regno_for_savres (sel
);
24956 insn
= rs6000_emit_stack_reset (info
, frame_reg_rtx
, frame_off
, ptr_regno
);
24957 if (REGNO (frame_reg_rtx
) == ptr_regno
)
24960 if (insn
&& restoring_FPRs_inline
)
24964 REG_NOTES (insn
) = cfa_restores
;
24965 cfa_restores
= NULL_RTX
;
24967 add_reg_note (insn
, REG_CFA_DEF_CFA
, sp_reg_rtx
);
24968 RTX_FRAME_RELATED_P (insn
) = 1;
24971 if (crtl
->calls_eh_return
)
24973 rtx sa
= EH_RETURN_STACKADJ_RTX
;
24974 emit_insn (gen_add3_insn (sp_reg_rtx
, sp_reg_rtx
, sa
));
24980 bool lr
= (strategy
& REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
) == 0;
24981 if (! restoring_FPRs_inline
)
24983 p
= rtvec_alloc (4 + 64 - info
->first_fp_reg_save
);
24984 RTVEC_ELT (p
, 0) = ret_rtx
;
	      /* We can't hang the cfa_restores off a simple return,
		 since the shrink-wrap code sometimes uses an existing
		 return.  This means there might be a path from
		 pre-prologue code to this return, and dwarf2cfi code
		 wants the eh_frame unwinder state to be the same on
		 all paths to any point.  So we need to emit the
		 cfa_restores before the return.  For -m64 we really
		 don't need epilogue cfa_restores at all, except for
		 this irritating dwarf2cfi with shrink-wrap
		 requirement; the stack red-zone means eh_frame info
		 from the prologue telling the unwinder to restore
		 from the stack is perfectly good right to the end of
		 the function.  */
	      emit_insn (gen_blockage ());
25004 emit_cfa_restores (cfa_restores
);
25005 cfa_restores
= NULL_RTX
;
25007 p
= rtvec_alloc (2);
25008 RTVEC_ELT (p
, 0) = simple_return_rtx
;
25011 RTVEC_ELT (p
, 1) = ((restoring_FPRs_inline
|| !lr
)
25012 ? gen_rtx_USE (VOIDmode
,
25013 gen_rtx_REG (Pmode
, LR_REGNO
))
25014 : gen_rtx_CLOBBER (VOIDmode
,
25015 gen_rtx_REG (Pmode
, LR_REGNO
)));
25017 /* If we have to restore more than two FP registers, branch to the
25018 restore function. It will return to our caller. */
25019 if (! restoring_FPRs_inline
)
25025 if (flag_shrink_wrap
)
25026 cfa_restores
= add_crlr_cfa_restore (info
, cfa_restores
);
25028 sym
= rs6000_savres_routine_sym (info
,
25029 SAVRES_FPR
| (lr
? SAVRES_LR
: 0));
25030 RTVEC_ELT (p
, 2) = gen_rtx_USE (VOIDmode
, sym
);
25031 reg
= (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)? 1 : 11;
25032 RTVEC_ELT (p
, 3) = gen_rtx_USE (VOIDmode
, gen_rtx_REG (Pmode
, reg
));
25034 for (i
= 0; i
< 64 - info
->first_fp_reg_save
; i
++)
25036 rtx reg
= gen_rtx_REG (DFmode
, info
->first_fp_reg_save
+ i
);
25038 RTVEC_ELT (p
, i
+ 4)
25039 = gen_frame_load (reg
, sp_reg_rtx
, info
->fp_save_offset
+ 8 * i
);
25040 if (flag_shrink_wrap
)
25041 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
,
25046 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
25052 /* Ensure the cfa_restores are hung off an insn that won't
25053 be reordered above other restores. */
25054 emit_insn (gen_blockage ());
25056 emit_cfa_restores (cfa_restores
);
25060 /* Write function epilogue. */
25063 rs6000_output_function_epilogue (FILE *file
,
25064 HOST_WIDE_INT size ATTRIBUTE_UNUSED
)
25067 macho_branch_islands ();
25068 /* Mach-O doesn't support labels at the end of objects, so if
25069 it looks like we might want one, insert a NOP. */
25071 rtx_insn
*insn
= get_last_insn ();
25072 rtx_insn
*deleted_debug_label
= NULL
;
25075 && NOTE_KIND (insn
) != NOTE_INSN_DELETED_LABEL
)
25077 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
25078 notes only, instead set their CODE_LABEL_NUMBER to -1,
25079 otherwise there would be code generation differences
25080 in between -g and -g0. */
25081 if (NOTE_P (insn
) && NOTE_KIND (insn
) == NOTE_INSN_DELETED_DEBUG_LABEL
)
25082 deleted_debug_label
= insn
;
25083 insn
= PREV_INSN (insn
);
25088 && NOTE_KIND (insn
) == NOTE_INSN_DELETED_LABEL
)))
25089 fputs ("\tnop\n", file
);
25090 else if (deleted_debug_label
)
25091 for (insn
= deleted_debug_label
; insn
; insn
= NEXT_INSN (insn
))
25092 if (NOTE_KIND (insn
) == NOTE_INSN_DELETED_DEBUG_LABEL
)
25093 CODE_LABEL_NUMBER (insn
) = -1;
  /* Output a traceback table here.  See /usr/include/sys/debug.h for info
     on its format.

     We don't output a traceback table if -finhibit-size-directive was
     used.  The documentation for -finhibit-size-directive reads
     ``don't output a @code{.size} assembler directive, or anything
     else that would cause trouble if the function is split in the
     middle, and the two halves are placed at locations far apart in
     memory.''  The traceback table has this property, since it
     includes the offset from the start of the function to the
     traceback table itself.

     System V.4 PowerPC's (and the embedded ABI derived from it) use a
     different traceback table.  */
25111 if ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
25112 && ! flag_inhibit_size_directive
25113 && rs6000_traceback
!= traceback_none
&& !cfun
->is_thunk
)
25115 const char *fname
= NULL
;
25116 const char *language_string
= lang_hooks
.name
;
25117 int fixed_parms
= 0, float_parms
= 0, parm_info
= 0;
25119 int optional_tbtab
;
25120 rs6000_stack_t
*info
= rs6000_stack_info ();
25122 if (rs6000_traceback
== traceback_full
)
25123 optional_tbtab
= 1;
25124 else if (rs6000_traceback
== traceback_part
)
25125 optional_tbtab
= 0;
25127 optional_tbtab
= !optimize_size
&& !TARGET_ELF
;
25129 if (optional_tbtab
)
25131 fname
= XSTR (XEXP (DECL_RTL (current_function_decl
), 0), 0);
25132 while (*fname
== '.') /* V.4 encodes . in the name */
25135 /* Need label immediately before tbtab, so we can compute
25136 its offset from the function start. */
25137 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "LT");
25138 ASM_OUTPUT_LABEL (file
, fname
);
      /* The .tbtab pseudo-op can only be used for the first eight
	 expressions, since it can't handle the possibly variable
	 length fields that follow.  However, if you omit the optional
	 fields, the assembler outputs zeros for all optional fields
	 anyway, giving each variable length field its minimum length
	 (as defined in sys/debug.h).  Thus we cannot use the .tbtab
	 pseudo-op at all.  */
25149 /* An all-zero word flags the start of the tbtab, for debuggers
25150 that have to find it by searching forward from the entry
25151 point or from the current pc. */
25152 fputs ("\t.long 0\n", file
);
25154 /* Tbtab format type. Use format type 0. */
25155 fputs ("\t.byte 0,", file
);
      /* Language type.  Unfortunately, there does not seem to be any
	 official way to discover the language being compiled, so we
	 use language_string.
	 C is 0.  Fortran is 1.  Pascal is 2.  Ada is 3.  C++ is 9.
	 Java is 13.  Objective-C is 14.  Objective-C++ isn't assigned
	 a number, so for now use 9.  LTO and Go aren't assigned numbers
	 either, so for now use 0.  */
25164 if (! strcmp (language_string
, "GNU C")
25165 || ! strcmp (language_string
, "GNU GIMPLE")
25166 || ! strcmp (language_string
, "GNU Go"))
25168 else if (! strcmp (language_string
, "GNU F77")
25169 || ! strcmp (language_string
, "GNU Fortran"))
25171 else if (! strcmp (language_string
, "GNU Pascal"))
25173 else if (! strcmp (language_string
, "GNU Ada"))
25175 else if (! strcmp (language_string
, "GNU C++")
25176 || ! strcmp (language_string
, "GNU Objective-C++"))
25178 else if (! strcmp (language_string
, "GNU Java"))
25180 else if (! strcmp (language_string
, "GNU Objective-C"))
25183 gcc_unreachable ();
25184 fprintf (file
, "%d,", i
);
25186 /* 8 single bit fields: global linkage (not set for C extern linkage,
25187 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
25188 from start of procedure stored in tbtab, internal function, function
25189 has controlled storage, function has no toc, function uses fp,
25190 function logs/aborts fp operations. */
25191 /* Assume that fp operations are used if any fp reg must be saved. */
25192 fprintf (file
, "%d,",
25193 (optional_tbtab
<< 5) | ((info
->first_fp_reg_save
!= 64) << 1));
25195 /* 6 bitfields: function is interrupt handler, name present in
25196 proc table, function calls alloca, on condition directives
25197 (controls stack walks, 3 bits), saves condition reg, saves
25199 /* The `function calls alloca' bit seems to be set whenever reg 31 is
25200 set up as a frame pointer, even when there is no alloca call. */
25201 fprintf (file
, "%d,",
25202 ((optional_tbtab
<< 6)
25203 | ((optional_tbtab
& frame_pointer_needed
) << 5)
25204 | (info
->cr_save_p
<< 1)
25205 | (info
->lr_save_p
)));
25207 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
25209 fprintf (file
, "%d,",
25210 (info
->push_p
<< 7) | (64 - info
->first_fp_reg_save
));
25212 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
25213 fprintf (file
, "%d,", (32 - first_reg_to_save ()));
25215 if (optional_tbtab
)
25217 /* Compute the parameter info from the function decl argument
25220 int next_parm_info_bit
= 31;
25222 for (decl
= DECL_ARGUMENTS (current_function_decl
);
25223 decl
; decl
= DECL_CHAIN (decl
))
25225 rtx parameter
= DECL_INCOMING_RTL (decl
);
25226 enum machine_mode mode
= GET_MODE (parameter
);
25228 if (GET_CODE (parameter
) == REG
)
25230 if (SCALAR_FLOAT_MODE_P (mode
))
25251 gcc_unreachable ();
25254 /* If only one bit will fit, don't or in this entry. */
25255 if (next_parm_info_bit
> 0)
25256 parm_info
|= (bits
<< (next_parm_info_bit
- 1));
25257 next_parm_info_bit
-= 2;
25261 fixed_parms
+= ((GET_MODE_SIZE (mode
)
25262 + (UNITS_PER_WORD
- 1))
25264 next_parm_info_bit
-= 1;
25270 /* Number of fixed point parameters. */
25271 /* This is actually the number of words of fixed point parameters; thus
25272 an 8 byte struct counts as 2; and thus the maximum value is 8. */
25273 fprintf (file
, "%d,", fixed_parms
);
25275 /* 2 bitfields: number of floating point parameters (7 bits), parameters
25277 /* This is actually the number of fp registers that hold parameters;
25278 and thus the maximum value is 13. */
25279 /* Set parameters on stack bit if parameters are not in their original
25280 registers, regardless of whether they are on the stack? Xlc
25281 seems to set the bit when not optimizing. */
25282 fprintf (file
, "%d\n", ((float_parms
<< 1) | (! optimize
)));
25284 if (! optional_tbtab
)
25287 /* Optional fields follow. Some are variable length. */
25289 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
25290 11 double float. */
25291 /* There is an entry for each parameter in a register, in the order that
25292 they occur in the parameter list. Any intervening arguments on the
25293 stack are ignored. If the list overflows a long (max possible length
25294 34 bits) then completely leave off all elements that don't fit. */
25295 /* Only emit this long if there was at least one parameter. */
25296 if (fixed_parms
|| float_parms
)
25297 fprintf (file
, "\t.long %d\n", parm_info
);
25299 /* Offset from start of code to tb table. */
25300 fputs ("\t.long ", file
);
25301 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "LT");
25302 RS6000_OUTPUT_BASENAME (file
, fname
);
25304 rs6000_output_function_entry (file
, fname
);
25307 /* Interrupt handler mask. */
25308 /* Omit this long, since we never set the interrupt handler bit
25311 /* Number of CTL (controlled storage) anchors. */
25312 /* Omit this long, since the has_ctl bit is never set above. */
25314 /* Displacement into stack of each CTL anchor. */
25315 /* Omit this list of longs, because there are no CTL anchors. */
25317 /* Length of function name. */
25320 fprintf (file
, "\t.short %d\n", (int) strlen (fname
));
25322 /* Function name. */
25323 assemble_string (fname
, strlen (fname
));
25325 /* Register for alloca automatic storage; this is always reg 31.
25326 Only emit this if the alloca bit was set above. */
25327 if (frame_pointer_needed
)
25328 fputs ("\t.byte 31\n", file
);
25330 fputs ("\t.align 2\n", file
);
/* A C compound statement that outputs the assembler code for a thunk
   function, used to implement C++ virtual function calls with
   multiple inheritance.  The thunk acts as a wrapper around a virtual
   function, adjusting the implicit object parameter before handing
   control off to the real function.

   First, emit code to add the integer DELTA to the location that
   contains the incoming first argument.  Assume that this argument
   contains a pointer, and is the one used to pass the `this' pointer
   in C++.  This is the incoming argument *before* the function
   prologue, e.g. `%o0' on a sparc.  The addition must preserve the
   values of all other incoming arguments.

   After the addition, emit code to jump to FUNCTION, which is a
   `FUNCTION_DECL'.  This is a direct pure jump, not a call, and does
   not touch the return address.  Hence returning from FUNCTION will
   return to whoever called the current `thunk'.

   The effect must be as if FUNCTION had been called directly with the
   adjusted first argument.  This macro is responsible for emitting
   all of the code for a thunk function; output_function_prologue()
   and output_function_epilogue() are not invoked.

   The THUNK_FNDECL is redundant.  (DELTA and FUNCTION have already
   been extracted from it.)  It might possibly be useful on some
   targets, but probably not.

   If you do not define this macro, the target-independent code in the
   C++ frontend will generate a less efficient heavyweight thunk that
   calls FUNCTION instead of jumping to it.  The generic approach does
   not support varargs.  */
25367 rs6000_output_mi_thunk (FILE *file
, tree thunk_fndecl ATTRIBUTE_UNUSED
,
25368 HOST_WIDE_INT delta
, HOST_WIDE_INT vcall_offset
,
25371 rtx this_rtx
, funexp
;
25374 reload_completed
= 1;
25375 epilogue_completed
= 1;
25377 /* Mark the end of the (empty) prologue. */
25378 emit_note (NOTE_INSN_PROLOGUE_END
);
25380 /* Find the "this" pointer. If the function returns a structure,
25381 the structure return pointer is in r3. */
25382 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function
)), function
))
25383 this_rtx
= gen_rtx_REG (Pmode
, 4);
25385 this_rtx
= gen_rtx_REG (Pmode
, 3);
25387 /* Apply the constant offset, if required. */
25389 emit_insn (gen_add3_insn (this_rtx
, this_rtx
, GEN_INT (delta
)));
25391 /* Apply the offset from the vtable, if required. */
25394 rtx vcall_offset_rtx
= GEN_INT (vcall_offset
);
25395 rtx tmp
= gen_rtx_REG (Pmode
, 12);
25397 emit_move_insn (tmp
, gen_rtx_MEM (Pmode
, this_rtx
));
25398 if (((unsigned HOST_WIDE_INT
) vcall_offset
) + 0x8000 >= 0x10000)
25400 emit_insn (gen_add3_insn (tmp
, tmp
, vcall_offset_rtx
));
25401 emit_move_insn (tmp
, gen_rtx_MEM (Pmode
, tmp
));
25405 rtx loc
= gen_rtx_PLUS (Pmode
, tmp
, vcall_offset_rtx
);
25407 emit_move_insn (tmp
, gen_rtx_MEM (Pmode
, loc
));
25409 emit_insn (gen_add3_insn (this_rtx
, this_rtx
, tmp
));
25412 /* Generate a tail call to the target function. */
25413 if (!TREE_USED (function
))
25415 assemble_external (function
);
25416 TREE_USED (function
) = 1;
25418 funexp
= XEXP (DECL_RTL (function
), 0);
25419 funexp
= gen_rtx_MEM (FUNCTION_MODE
, funexp
);
25422 if (MACHOPIC_INDIRECT
)
25423 funexp
= machopic_indirect_call_target (funexp
);
25426 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
25427 generate sibcall RTL explicitly. */
25428 insn
= emit_call_insn (
25429 gen_rtx_PARALLEL (VOIDmode
,
25431 gen_rtx_CALL (VOIDmode
,
25432 funexp
, const0_rtx
),
25433 gen_rtx_USE (VOIDmode
, const0_rtx
),
25434 gen_rtx_USE (VOIDmode
,
25435 gen_rtx_REG (SImode
,
25437 simple_return_rtx
)));
25438 SIBLING_CALL_P (insn
) = 1;
25441 /* Ensure we have a global entry point for the thunk. ??? We could
25442 avoid that if the target routine doesn't need a global entry point,
25443 but we do not know whether this is the case at this point. */
25444 if (DEFAULT_ABI
== ABI_ELFv2
)
25445 cfun
->machine
->r2_setup_needed
= true;
25447 /* Run just enough of rest_of_compilation to get the insns emitted.
25448 There's not really enough bulk here to make other passes such as
25449 instruction scheduling worth while. Note that use_thunk calls
25450 assemble_start_function and assemble_end_function. */
25451 insn
= get_insns ();
25452 shorten_branches (insn
);
25453 final_start_function (insn
, file
, 1);
25454 final (insn
, file
, 1);
25455 final_end_function ();
25457 reload_completed
= 0;
25458 epilogue_completed
= 0;
/* A quick summary of the various types of 'constant-pool tables'
   under PowerPC:

   Target	Flags		Name		One table per
   AIX		(none)		AIX TOC		object file
   AIX		-mfull-toc	AIX TOC		object file
   AIX		-mminimal-toc	AIX minimal TOC	translation unit
   SVR4/EABI	(none)		SVR4 SDATA	object file
   SVR4/EABI	-fpic		SVR4 pic	object file
   SVR4/EABI	-fPIC		SVR4 PIC	translation unit
   SVR4/EABI	-mrelocatable	EABI TOC	function
   SVR4/EABI	-maix		AIX TOC		object file
   SVR4/EABI	-maix -mminimal-toc
				AIX minimal TOC	translation unit

   Name			Reg.	Set by	entries	contains:
				made by	addrs?	fp?	sum?

   AIX TOC		2	crt0	as	Y	option	option
   AIX minimal TOC	30	prolog	gcc	Y	Y	option
   SVR4 SDATA		13	crt0	gcc	N	Y	N
   SVR4 pic		30	prolog	ld	Y	not yet	N
   SVR4 PIC		30	prolog	gcc	Y	option	option
   EABI TOC		30	prolog	gcc	Y	option	option  */
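/* Readers' note drawn from the table above: with -mminimal-toc, for example,
   each translation unit gets its own small TOC, addressed through r30, which
   the prologue sets up (the "prolog / gcc" entry for "AIX minimal TOC").  */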
/* Hash functions for the hash table.  */

static unsigned
rs6000_hash_constant (rtx k)
{
  enum rtx_code code = GET_CODE (k);
  enum machine_mode mode = GET_MODE (k);
  unsigned result = (code << 3) ^ mode;
  const char *format;
  int flen, fidx;

  format = GET_RTX_FORMAT (code);
  flen = strlen (format);
  fidx = 0;

  switch (code)
    {
    case LABEL_REF:
      return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));

    case CONST_WIDE_INT:
      {
        int i;
        flen = CONST_WIDE_INT_NUNITS (k);
        for (i = 0; i < flen; i++)
          result = result * 613 + CONST_WIDE_INT_ELT (k, i);
        return result;
      }

    case CONST_DOUBLE:
      if (mode != VOIDmode)
        return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
      break;

    default:
      break;
    }

  for (; fidx < flen; fidx++)
    switch (format[fidx])
      {
      case 's':
        {
          unsigned i, len;
          const char *str = XSTR (k, fidx);
          len = strlen (str);
          result = result * 613 + len;
          for (i = 0; i < len; i++)
            result = result * 613 + (unsigned) str[i];
          break;
        }
      case 'u':
      case 'e':
        result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
        break;
      case 'i':
      case 'n':
        result = result * 613 + (unsigned) XINT (k, fidx);
        break;
      case 'w':
        if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
          result = result * 613 + (unsigned) XWINT (k, fidx);
        else
          {
            size_t i;
            for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
              result = result * 613 + (unsigned) (XWINT (k, fidx)
                                                  >> CHAR_BIT * i);
          }
        break;
      default:
        gcc_unreachable ();
      }

  return result;
}

hashval_t
toc_hasher::hash (toc_hash_struct *thc)
{
  return rs6000_hash_constant (thc->key) ^ thc->key_mode;
}
/* Compare H1 and H2 for equivalence.  */

bool
toc_hasher::equal (toc_hash_struct *h1, toc_hash_struct *h2)
{
  rtx r1 = h1->key;
  rtx r2 = h2->key;

  if (h1->key_mode != h2->key_mode)
    return 0;

  return rtx_equal_p (r1, r2);
}
/* These are the names given by the C++ front-end to vtables, and
   vtable-like objects.  Ideally, this logic should not be here;
   instead, there should be some programmatic way of inquiring as
   to whether or not an object is a vtable.  */

#define VTABLE_NAME_P(NAME)                             \
  (strncmp ("_vt.", name, strlen ("_vt.")) == 0         \
   || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0      \
   || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0      \
   || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0      \
   || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)
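/* For example (illustration only): the Itanium-ABI mangled vtable symbol
   "_ZTV3Foo" and the VTT symbol "_ZTT3Foo" both match the macro above,
   while an ordinary function symbol such as "main" does not.  */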
#ifdef NO_DOLLAR_IN_LABEL
/* Return a GGC-allocated character string translating dollar signs in
   input NAME to underscores.  Used by XCOFF ASM_OUTPUT_LABELREF.  */

const char *
rs6000_xcoff_strip_dollar (const char *name)
{
  char *strip, *p;
  const char *q;
  size_t len;

  q = (const char *) strchr (name, '$');

  if (q == 0 || q == name)
    return name;

  len = strlen (name);
  strip = XALLOCAVEC (char, len + 1);
  strcpy (strip, name);
  p = strip + (q - name);
  while (p)
    {
      *p = '_';
      p = strchr (p + 1, '$');
    }

  return ggc_alloc_string (strip, len);
}
#endif
void
rs6000_output_symbol_ref (FILE *file, rtx x)
{
  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  const char *name = XSTR (x, 0);

  if (VTABLE_NAME_P (name))
    RS6000_OUTPUT_BASENAME (file, name);
  else
    assemble_name (file, name);
}
/* Output a TOC entry.  We derive the entry name from what is being
   written.  */

void
output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
{
  char buf[256];
  const char *name = buf;
  rtx base = x;
  HOST_WIDE_INT offset = 0;

  gcc_assert (!TARGET_NO_TOC);

  /* When the linker won't eliminate them, don't output duplicate
     TOC entries (this happens on AIX if there is any kind of TOC,
     and on SVR4 under -fPIC or -mrelocatable).  Don't do this for
     labels.  */
  if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
    {
      struct toc_hash_struct *h;

      /* Create toc_hash_table.  This can't be done at TARGET_OPTION_OVERRIDE
         time because GGC is not initialized at that point.  */
      if (toc_hash_table == NULL)
        toc_hash_table = hash_table<toc_hasher>::create_ggc (1021);

      h = ggc_alloc<toc_hash_struct> ();
      h->key = x;
      h->key_mode = mode;
      h->labelno = labelno;

      toc_hash_struct **found = toc_hash_table->find_slot (h, INSERT);
      if (*found == NULL)
        *found = h;
      else  /* This is indeed a duplicate.
               Set this label equal to that label.  */
        {
          fputs ("\t.set ", file);
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
          fprintf (file, "%d,", labelno);
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
          fprintf (file, "%d\n", ((*found)->labelno));

          if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
              && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
                  || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
            {
              fputs ("\t.set ", file);
              ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
              fprintf (file, "%d,", labelno);
              ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
              fprintf (file, "%d\n", ((*found)->labelno));
            }

          return;
        }
    }

  /* If we're going to put a double constant in the TOC, make sure it's
     aligned properly when strict alignment is on.  */
  if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
      && STRICT_ALIGNMENT
      && GET_MODE_BITSIZE (mode) >= 64
      && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
    ASM_OUTPUT_ALIGN (file, 3);

  (*targetm.asm_out.internal_label) (file, "LC", labelno);

  /* Handle FP constants specially.  Note that if we have a minimal
     TOC, things we put here aren't actually in the TOC, so we can allow
     FP constants.  */
  if (GET_CODE (x) == CONST_DOUBLE
      && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
    {
      REAL_VALUE_TYPE rv;
      long k[4];

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
        REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
      else
        REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);

      if (TARGET_64BIT)
        {
          if (TARGET_ELF || TARGET_MINIMAL_TOC)
            fputs (DOUBLE_INT_ASM_OP, file);
          else
            fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
                     k[0] & 0xffffffff, k[1] & 0xffffffff,
                     k[2] & 0xffffffff, k[3] & 0xffffffff);
          fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
                   k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
                   k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
                   k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
                   k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
          return;
        }
      else
        {
          if (TARGET_ELF || TARGET_MINIMAL_TOC)
            fputs ("\t.long ", file);
          else
            fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
                     k[0] & 0xffffffff, k[1] & 0xffffffff,
                     k[2] & 0xffffffff, k[3] & 0xffffffff);
          fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
                   k[0] & 0xffffffff, k[1] & 0xffffffff,
                   k[2] & 0xffffffff, k[3] & 0xffffffff);
          return;
        }
    }
  else if (GET_CODE (x) == CONST_DOUBLE
           && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
    {
      REAL_VALUE_TYPE rv;
      long k[2];

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);

      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
        REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
      else
        REAL_VALUE_TO_TARGET_DOUBLE (rv, k);

      if (TARGET_64BIT)
        {
          if (TARGET_ELF || TARGET_MINIMAL_TOC)
            fputs (DOUBLE_INT_ASM_OP, file);
          else
            fprintf (file, "\t.tc FD_%lx_%lx[TC],",
                     k[0] & 0xffffffff, k[1] & 0xffffffff);
          fprintf (file, "0x%lx%08lx\n",
                   k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
                   k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
          return;
        }
      else
        {
          if (TARGET_ELF || TARGET_MINIMAL_TOC)
            fputs ("\t.long ", file);
          else
            fprintf (file, "\t.tc FD_%lx_%lx[TC],",
                     k[0] & 0xffffffff, k[1] & 0xffffffff);
          fprintf (file, "0x%lx,0x%lx\n",
                   k[0] & 0xffffffff, k[1] & 0xffffffff);
          return;
        }
    }
  else if (GET_CODE (x) == CONST_DOUBLE
           && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
    {
      REAL_VALUE_TYPE rv;
      long l;

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
        REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
      else
        REAL_VALUE_TO_TARGET_SINGLE (rv, l);

      if (TARGET_64BIT)
        {
          if (TARGET_ELF || TARGET_MINIMAL_TOC)
            fputs (DOUBLE_INT_ASM_OP, file);
          else
            fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
          if (WORDS_BIG_ENDIAN)
            fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
          else
            fprintf (file, "0x%lx\n", l & 0xffffffff);
          return;
        }
      else
        {
          if (TARGET_ELF || TARGET_MINIMAL_TOC)
            fputs ("\t.long ", file);
          else
            fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
          fprintf (file, "0x%lx\n", l & 0xffffffff);
          return;
        }
    }
  else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
    {
      unsigned HOST_WIDE_INT low;
      HOST_WIDE_INT high;

      low = INTVAL (x) & 0xffffffff;
      high = (HOST_WIDE_INT) INTVAL (x) >> 32;

      /* TOC entries are always Pmode-sized, so when big-endian
         smaller integer constants in the TOC need to be padded.
         (This is still a win over putting the constants in
         a separate constant pool, because then we'd have
         to have both a TOC entry _and_ the actual constant.)

         For a 32-bit target, CONST_INT values are loaded and shifted
         entirely within `low' and can be stored in one TOC entry.  */
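      /* Illustrative example (not part of the original comment): on a
         64-bit big-endian target an SImode constant 0x12345678 is shifted
         into the high half below, so the entry comes out roughly as
         ".tc ID_12345678_0[TC],0x1234567800000000".  */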
      /* It would be easy to make this work, but it doesn't now.  */
      gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));

      if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
        {
          low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
          high = (HOST_WIDE_INT) low >> 32;
        }

      if (TARGET_64BIT)
        {
          if (TARGET_ELF || TARGET_MINIMAL_TOC)
            fputs (DOUBLE_INT_ASM_OP, file);
          else
            fprintf (file, "\t.tc ID_%lx_%lx[TC],",
                     (long) high & 0xffffffff, (long) low & 0xffffffff);
          fprintf (file, "0x%lx%08lx\n",
                   (long) high & 0xffffffff, (long) low & 0xffffffff);
          return;
        }
      else
        {
          if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
            {
              if (TARGET_ELF || TARGET_MINIMAL_TOC)
                fputs ("\t.long ", file);
              else
                fprintf (file, "\t.tc ID_%lx_%lx[TC],",
                         (long) high & 0xffffffff, (long) low & 0xffffffff);
              fprintf (file, "0x%lx,0x%lx\n",
                       (long) high & 0xffffffff, (long) low & 0xffffffff);
              return;
            }
          else
            {
              if (TARGET_ELF || TARGET_MINIMAL_TOC)
                fputs ("\t.long ", file);
              else
                fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
              fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
              return;
            }
        }
    }

  if (GET_CODE (x) == CONST)
    {
      gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
                  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);

      base = XEXP (XEXP (x, 0), 0);
      offset = INTVAL (XEXP (XEXP (x, 0), 1));
    }

  switch (GET_CODE (base))
    {
    case SYMBOL_REF:
      name = XSTR (base, 0);
      break;

    case LABEL_REF:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L",
                                   CODE_LABEL_NUMBER (XEXP (base, 0)));
      break;

    case CODE_LABEL:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
      break;

    default:
      gcc_unreachable ();
    }

  if (TARGET_ELF || TARGET_MINIMAL_TOC)
    fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
  else
    {
      fputs ("\t.tc ", file);
      RS6000_OUTPUT_BASENAME (file, name);

      if (offset < 0)
        fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
      else if (offset)
        fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);

      /* Mark large TOC symbols on AIX with [TE] so they are mapped
         after other TOC symbols, reducing overflow of small TOC access
         to [TC] symbols.  */
      fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
             ? "[TE]," : "[TC],", file);
    }

  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a TOC reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  if (VTABLE_NAME_P (name))
    {
      RS6000_OUTPUT_BASENAME (file, name);
      if (offset < 0)
        fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
      else if (offset > 0)
        fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
    }
  else
    output_addr_const (file, x);

  if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (base) != 0)
    {
      if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_EXEC)
        fputs ("@le", file);
      else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_INITIAL_EXEC)
        fputs ("@ie", file);
      /* Use global-dynamic for local-dynamic.  */
      else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_GLOBAL_DYNAMIC
               || SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_DYNAMIC)
        {
          putc ('\n', file);
          (*targetm.asm_out.internal_label) (file, "LCM", labelno);
          fputs ("\t.tc .", file);
          RS6000_OUTPUT_BASENAME (file, name);
          fputs ("[TC],", file);
          output_addr_const (file, x);
          fputs ("@m", file);
        }
    }

  putc ('\n', file);
}
/* Output an assembler pseudo-op to write an ASCII string of N characters
   starting at P to FILE.

   On the RS/6000, we have to do this using the .byte operation and
   write out special characters outside the quoted string.
   Also, the assembler is broken; very long strings are truncated,
   so we must artificially break them up early.  */
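/* Illustration only (not from the original file): for the 3-byte input
   "A\nB" the loop below emits roughly

       .byte "A"
       .byte 10
       .byte "B"

   printable characters are collected inside quoted .byte strings while
   everything else is written out as a decimal byte value.  */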
void
output_ascii (FILE *file, const char *p, int n)
{
  char c;
  int i, count_string;
  const char *for_string = "\t.byte \"";
  const char *for_decimal = "\t.byte ";
  const char *to_close = NULL;

  count_string = 0;
  for (i = 0; i < n; i++)
    {
      c = *p++;
      if (c >= ' ' && c < 0177)
        {
          if (for_string)
            fputs (for_string, file);
          putc (c, file);

          /* Write two quotes to get one.  */
          if (c == '"')
            {
              putc (c, file);
              ++count_string;
            }

          for_string = NULL;
          for_decimal = "\"\n\t.byte ";
          to_close = "\"\n";
          ++count_string;

          if (count_string >= 512)
            {
              fputs (to_close, file);

              for_string = "\t.byte \"";
              for_decimal = "\t.byte ";
              to_close = NULL;
              count_string = 0;
            }
        }
      else
        {
          if (for_decimal)
            fputs (for_decimal, file);
          fprintf (file, "%d", c);

          for_string = "\n\t.byte \"";
          for_decimal = ", ";
          to_close = "\n";
          count_string = 0;
        }
    }

  /* Now close the string if we have written one.  Then end the line.  */
  if (to_close)
    fputs (to_close, file);
}
/* Generate a unique section name for FILENAME for a section type
   represented by SECTION_DESC.  Output goes into BUF.

   SECTION_DESC can be any string, as long as it is different for each
   possible section type.

   We name the section in the same manner as xlc.  The name begins with an
   underscore followed by the filename (after stripping any leading directory
   names) with the last period replaced by the string SECTION_DESC.  If
   FILENAME does not contain a period, SECTION_DESC is appended to the end of
   the name.  */
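/* Worked example (illustration only, names are hypothetical): for FILENAME
   "foo" (no period) and SECTION_DESC "_bss_", the code below produces
   "_foo_bss_" -- a leading underscore, the stripped basename, and the
   descriptor appended at the end.  */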
void
rs6000_gen_section_name (char **buf, const char *filename,
                         const char *section_desc)
{
  const char *q, *after_last_slash, *last_period = 0;
  char *p;
  int len;

  after_last_slash = filename;
  for (q = filename; *q; q++)
    {
      if (*q == '/')
        after_last_slash = q + 1;
      else if (*q == '.')
        last_period = q;
    }

  len = strlen (after_last_slash) + strlen (section_desc) + 2;
  *buf = (char *) xmalloc (len);

  p = *buf;
  *p++ = '_';

  for (q = after_last_slash; *q; q++)
    {
      if (q == last_period)
        {
          strcpy (p, section_desc);
          p += strlen (section_desc);
          break;
        }

      else if (ISALNUM (*q))
        *p++ = *q;
    }

  if (last_period == 0)
    strcpy (p, section_desc);
  else
    *p = '\0';
}
/* Emit profile function.  */

void
output_profile_hook (int labelno ATTRIBUTE_UNUSED)
{
  /* Non-standard profiling for kernels, which just saves LR then calls
     _mcount without worrying about arg saves.  The idea is to change
     the function prologue as little as possible as it isn't easy to
     account for arg save/restore code added just for _mcount.  */
  if (TARGET_PROFILE_KERNEL)
    return;

  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    {
#ifndef NO_PROFILE_COUNTERS
# define NO_PROFILE_COUNTERS 0
#endif
      if (NO_PROFILE_COUNTERS)
        emit_library_call (init_one_libfunc (RS6000_MCOUNT),
                           LCT_NORMAL, VOIDmode, 0);
      else
        {
          const char *label_name;
          rtx fun;

          ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
          label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
          fun = gen_rtx_SYMBOL_REF (Pmode, label_name);

          emit_library_call (init_one_libfunc (RS6000_MCOUNT),
                             LCT_NORMAL, VOIDmode, 1, fun, Pmode);
        }
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      const char *mcount_name = RS6000_MCOUNT;
      int caller_addr_regno = LR_REGNO;

      /* Be conservative and always set this, at least for now.  */
      crtl->uses_pic_offset_table = 1;

      /* For PIC code, set up a stub and collect the caller's address
         from r0, which is where the prologue puts it.  */
      if (MACHOPIC_INDIRECT
          && crtl->uses_pic_offset_table)
        caller_addr_regno = 0;

      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
                         LCT_NORMAL, VOIDmode, 1,
                         gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
    }
}
/* Write function profiler code.  */

void
output_function_profiler (FILE *file, int labelno)
{
  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_V4:
      if (!TARGET_32BIT)
        {
          warning (0, "no profiling of 64-bit code for this ABI");
          return;
        }
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
      fprintf (file, "\tmflr %s\n", reg_names[0]);
      if (NO_PROFILE_COUNTERS)
        {
          asm_fprintf (file, "\tstw %s,4(%s)\n",
                       reg_names[0], reg_names[1]);
        }
      else if (TARGET_SECURE_PLT && flag_pic)
        {
          if (TARGET_LINK_STACK)
            {
              get_ppc476_thunk_name (name);
              asm_fprintf (file, "\tbl %s\n", name);
            }
          else
            asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
          asm_fprintf (file, "\tstw %s,4(%s)\n",
                       reg_names[0], reg_names[1]);
          asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
          asm_fprintf (file, "\taddis %s,%s,",
                       reg_names[12], reg_names[12]);
          assemble_name (file, buf);
          asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
          assemble_name (file, buf);
          asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
        }
      else if (flag_pic == 1)
        {
          fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
          asm_fprintf (file, "\tstw %s,4(%s)\n",
                       reg_names[0], reg_names[1]);
          asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
          asm_fprintf (file, "\tlwz %s,", reg_names[0]);
          assemble_name (file, buf);
          asm_fprintf (file, "@got(%s)\n", reg_names[12]);
        }
      else if (flag_pic > 1)
        {
          asm_fprintf (file, "\tstw %s,4(%s)\n",
                       reg_names[0], reg_names[1]);
          /* Now, we need to get the address of the label.  */
          if (TARGET_LINK_STACK)
            {
              get_ppc476_thunk_name (name);
              asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
              assemble_name (file, buf);
              fputs ("-.\n1:", file);
              asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
              asm_fprintf (file, "\taddi %s,%s,4\n",
                           reg_names[11], reg_names[11]);
            }
          else
            {
              fputs ("\tbcl 20,31,1f\n\t.long ", file);
              assemble_name (file, buf);
              fputs ("-.\n1:", file);
              asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
            }
          asm_fprintf (file, "\tlwz %s,0(%s)\n",
                       reg_names[0], reg_names[11]);
          asm_fprintf (file, "\tadd %s,%s,%s\n",
                       reg_names[0], reg_names[0], reg_names[11]);
        }
      else
        {
          asm_fprintf (file, "\tlis %s,", reg_names[12]);
          assemble_name (file, buf);
          fputs ("@ha\n", file);
          asm_fprintf (file, "\tstw %s,4(%s)\n",
                       reg_names[0], reg_names[1]);
          asm_fprintf (file, "\tla %s,", reg_names[0]);
          assemble_name (file, buf);
          asm_fprintf (file, "@l(%s)\n", reg_names[12]);
        }

      /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH.  */
      fprintf (file, "\tbl %s%s\n",
               RS6000_MCOUNT, flag_pic ? "@plt" : "");
      break;

    case ABI_AIX:
    case ABI_ELFv2:
    case ABI_DARWIN:
      /* Don't do anything, done in output_profile_hook ().  */
      break;
    }
}
/* The following variable value is the last issued insn.  */

static rtx last_scheduled_insn;

/* The following variable helps to balance issuing of load and
   store instructions.  */

static int load_store_pendulum;

/* Power4 load update and store update instructions are cracked into a
   load or store and an integer insn which are executed in the same cycle.
   Branches have their own dispatch slot which does not count against the
   GCC issue rate, but it changes the program flow so there are no other
   instructions to issue in this cycle.  */

static int
rs6000_variable_issue_1 (rtx_insn *insn, int more)
{
  last_scheduled_insn = insn;
  if (GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    {
      cached_can_issue_more = more;
      return cached_can_issue_more;
    }

  if (insn_terminates_group_p (insn, current_group))
    {
      cached_can_issue_more = 0;
      return cached_can_issue_more;
    }

  /* If no reservation, but reach here */
  if (recog_memoized (insn) < 0)
    return more;

  if (rs6000_sched_groups)
    {
      if (is_microcoded_insn (insn))
        cached_can_issue_more = 0;
      else if (is_cracked_insn (insn))
        cached_can_issue_more = more > 2 ? more - 2 : 0;
      else
        cached_can_issue_more = more - 1;

      return cached_can_issue_more;
    }

  if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
    return 0;

  cached_can_issue_more = more - 1;
  return cached_can_issue_more;
}

static int
rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
{
  int r = rs6000_variable_issue_1 (insn, more);
  if (verbose)
    fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
  return r;
}
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
rs6000_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
{
  enum attr_type attr_type;

  if (! recog_memoized (insn))
    return 0;

  switch (REG_NOTE_KIND (link))
    {
    case REG_DEP_TRUE:
      /* Data dependency; DEP_INSN writes a register that INSN reads
         some cycles later.  */

      /* Separate a load from a narrower, dependent store.  */
      if (rs6000_sched_groups
          && GET_CODE (PATTERN (insn)) == SET
          && GET_CODE (PATTERN (dep_insn)) == SET
          && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
          && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
          && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
              > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))

      attr_type = get_attr_type (insn);

      /* Tell the first scheduling pass about the latency between
         a mtctr and bctr (and mtlr and br/blr).  The first
         scheduling pass will not know about this latency since
         the mtctr instruction, which has the latency associated
         to it, will be generated by reload.  */

      /* Leave some extra cycles between a compare and its
         dependent branch, to inhibit expensive mispredicts.  */
      if ((rs6000_cpu_attr == CPU_PPC603
           || rs6000_cpu_attr == CPU_PPC604
           || rs6000_cpu_attr == CPU_PPC604E
           || rs6000_cpu_attr == CPU_PPC620
           || rs6000_cpu_attr == CPU_PPC630
           || rs6000_cpu_attr == CPU_PPC750
           || rs6000_cpu_attr == CPU_PPC7400
           || rs6000_cpu_attr == CPU_PPC7450
           || rs6000_cpu_attr == CPU_PPCE5500
           || rs6000_cpu_attr == CPU_PPCE6500
           || rs6000_cpu_attr == CPU_POWER4
           || rs6000_cpu_attr == CPU_POWER5
           || rs6000_cpu_attr == CPU_POWER7
           || rs6000_cpu_attr == CPU_POWER8
           || rs6000_cpu_attr == CPU_CELL)
          && recog_memoized (dep_insn)
          && (INSN_CODE (dep_insn) >= 0))

        switch (get_attr_type (dep_insn))
          {
          case TYPE_FPCOMPARE:
          case TYPE_CR_LOGICAL:
          case TYPE_DELAYED_CR:

            if (get_attr_dot (dep_insn) == DOT_YES)

            if (get_attr_dot (dep_insn) == DOT_YES
                && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
          }

      if ((rs6000_cpu == PROCESSOR_POWER6)
          && recog_memoized (dep_insn)
          && (INSN_CODE (dep_insn) >= 0))
        {
          if (GET_CODE (PATTERN (insn)) != SET)
            /* If this happens, we have to extend this to schedule
               optimally.  Return default for now.  */
            return cost;

          /* Adjust the cost for the case where the value written
             by a fixed point operation is used as the address
             gen value on a store.  */
          switch (get_attr_type (dep_insn))
            {
              if (! store_data_bypass_p (dep_insn, insn))
                return get_attr_sign_extend (dep_insn)
                       == SIGN_EXTEND_YES ? 6 : 4;

              if (! store_data_bypass_p (dep_insn, insn))
                return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?

              if (! store_data_bypass_p (dep_insn, insn))

              if (get_attr_update (dep_insn) == UPDATE_YES
                  && ! store_data_bypass_p (dep_insn, insn))

              if (! store_data_bypass_p (dep_insn, insn))

              if (! store_data_bypass_p (dep_insn, insn))
                return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
            }
        }

      if ((rs6000_cpu == PROCESSOR_POWER6)
          && recog_memoized (dep_insn)
          && (INSN_CODE (dep_insn) >= 0))
        {
          /* Adjust the cost for the case where the value written
             by a fixed point instruction is used within the address
             gen portion of a subsequent load(u)(x).  */
          switch (get_attr_type (dep_insn))
            {
              if (set_to_load_agen (dep_insn, insn))
                return get_attr_sign_extend (dep_insn)
                       == SIGN_EXTEND_YES ? 6 : 4;

              if (set_to_load_agen (dep_insn, insn))
                return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?

              if (set_to_load_agen (dep_insn, insn))

              if (get_attr_update (dep_insn) == UPDATE_YES
                  && set_to_load_agen (dep_insn, insn))

              if (set_to_load_agen (dep_insn, insn))

              if (set_to_load_agen (dep_insn, insn))
                return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
            }
        }

      if ((rs6000_cpu == PROCESSOR_POWER6)
          && get_attr_update (insn) == UPDATE_NO
          && recog_memoized (dep_insn)
          && (INSN_CODE (dep_insn) >= 0)
          && (get_attr_type (dep_insn) == TYPE_MFFGPR))

      /* Fall out to return default cost.  */
      break;

    case REG_DEP_OUTPUT:
      /* Output dependency; DEP_INSN writes a register that INSN writes some
         cycles later.  */
      if ((rs6000_cpu == PROCESSOR_POWER6)
          && recog_memoized (dep_insn)
          && (INSN_CODE (dep_insn) >= 0))
        {
          attr_type = get_attr_type (insn);

          if (get_attr_type (dep_insn) == TYPE_FP)

          if (get_attr_update (insn) == UPDATE_NO
              && get_attr_type (dep_insn) == TYPE_MFFGPR)
        }
      break;

    case REG_DEP_ANTI:
      /* Anti dependency; DEP_INSN reads a register that INSN writes some
         cycles later.  */
      break;

    default:
      gcc_unreachable ();
    }

  return cost;
}
/* Debug version of rs6000_adjust_cost.  */

static int
rs6000_debug_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn,
                          int cost)
{
  int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);

  if (ret != cost)
    {
      const char *dep;

      switch (REG_NOTE_KIND (link))
        {
        default:             dep = "unknown dependency"; break;
        case REG_DEP_TRUE:   dep = "data dependency";    break;
        case REG_DEP_OUTPUT: dep = "output dependency";  break;
        case REG_DEP_ANTI:   dep = "anti dependency";    break;
        }

      fprintf (stderr,
               "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
               "%s, insn:\n", ret, cost, dep);
    }

  return ret;
}
/* The function returns true if INSN is microcoded.
   Return false otherwise.  */

static bool
is_microcoded_insn (rtx_insn *insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_cpu_attr == CPU_CELL)
    return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;

  if (rs6000_sched_groups
      && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
    {
      enum attr_type type = get_attr_type (insn);
      if ((type == TYPE_LOAD
           && get_attr_update (insn) == UPDATE_YES
           && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
          || ((type == TYPE_LOAD || type == TYPE_STORE)
              && get_attr_update (insn) == UPDATE_YES
              && get_attr_indexed (insn) == INDEXED_YES)
          || type == TYPE_MFCR)
        return true;
    }

  return false;
}

/* The function returns true if INSN is cracked into 2 instructions
   by the processor (and therefore occupies 2 issue slots).  */

static bool
is_cracked_insn (rtx_insn *insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups
      && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
    {
      enum attr_type type = get_attr_type (insn);
      if ((type == TYPE_LOAD
           && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
           && get_attr_update (insn) == UPDATE_NO)
          || (type == TYPE_LOAD
              && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
              && get_attr_update (insn) == UPDATE_YES
              && get_attr_indexed (insn) == INDEXED_NO)
          || (type == TYPE_STORE
              && get_attr_update (insn) == UPDATE_YES
              && get_attr_indexed (insn) == INDEXED_NO)
          || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
              && get_attr_update (insn) == UPDATE_YES)
          || type == TYPE_DELAYED_CR
          || type == TYPE_COMPARE
          || (type == TYPE_EXTS
              && get_attr_dot (insn) == DOT_YES)
          || (type == TYPE_SHIFT
              && get_attr_dot (insn) == DOT_YES
              && get_attr_var_shift (insn) == VAR_SHIFT_NO)
          || (type == TYPE_MUL
              && get_attr_dot (insn) == DOT_YES)
          || type == TYPE_DIV
          || (type == TYPE_INSERT
              && get_attr_size (insn) == SIZE_32))
        return true;
    }

  return false;
}
/* The function returns true if INSN can be issued only from
   the branch slot.  */

static bool
is_branch_slot_insn (rtx_insn *insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_BRANCH || type == TYPE_JMPREG)
        return true;
      return false;
    }

  return false;
}

/* The function returns true if OUT_INSN sets a value that is
   used in the address generation computation of IN_INSN.  */

static bool
set_to_load_agen (rtx_insn *out_insn, rtx_insn *in_insn)
{
  rtx out_set, in_set;

  /* For performance reasons, only handle the simple case where
     both loads are a single_set.  */
  out_set = single_set (out_insn);
  if (out_set)
    {
      in_set = single_set (in_insn);
      if (in_set)
        return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
    }

  return false;
}
/* Try to determine base/offset/size parts of the given MEM.
   Return true if successful, false if all the values couldn't
   be determined.

   This function only looks for REG or REG+CONST address forms.
   REG+REG address form will return false.  */

static bool
get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
                  HOST_WIDE_INT *size)
{
  rtx addr_rtx;

  if MEM_SIZE_KNOWN_P (mem)
    *size = MEM_SIZE (mem);
  else
    return false;

  if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
    addr_rtx = XEXP (XEXP (mem, 0), 1);
  else
    addr_rtx = (XEXP (mem, 0));

  if (GET_CODE (addr_rtx) == REG)
    {
      *base = addr_rtx;
      *offset = 0;
    }
  else if (GET_CODE (addr_rtx) == PLUS
           && CONST_INT_P (XEXP (addr_rtx, 1)))
    {
      *base = XEXP (addr_rtx, 0);
      *offset = INTVAL (XEXP (addr_rtx, 1));
    }
  else
    return false;

  return true;
}
/* The function returns true if the target storage location of
   mem1 is adjacent to the target storage location of mem2.  */

/* Return 1 if memory locations are adjacent.  */

static bool
adjacent_mem_locations (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
            && ((off1 + size1 == off2)
                || (off2 + size2 == off1)));

  return false;
}
/* This function returns true if it can be determined that the two MEM
   locations overlap by at least 1 byte based on base reg/offset/size.  */

static bool
mem_locations_overlap (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
            && (((off1 <= off2) && (off1 + size1 > off2))
                || ((off2 <= off1) && (off2 + size2 > off1))));

  return false;
}
/* A C statement (sans semicolon) to update the integer scheduling
   priority INSN_PRIORITY (INSN).  Increase the priority to execute the
   INSN earlier, reduce the priority to execute INSN later.  Do not
   define this macro if you do not need to adjust the scheduling
   priorities of insns.  */

static int
rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
{
  rtx load_mem, str_mem;
  /* On machines (like the 750) which have asymmetric integer units,
     where one integer unit can do multiply and divides and the other
     can't, reduce the priority of multiply/divide so it is scheduled
     before other integer operations.  */

#if 0
  if (! INSN_P (insn))
    return priority;

  if (GET_CODE (PATTERN (insn)) == USE)
    return priority;

  switch (rs6000_cpu_attr) {
  case CPU_PPC750:
    switch (get_attr_type (insn))
      {
        fprintf (stderr, "priority was %#x (%d) before adjustment\n",
                 priority, priority);
        if (priority >= 0 && priority < 0x01000000)
          priority >>= 3;
      }
  }
#endif

  if (insn_must_be_first_in_group (insn)
      && reload_completed
      && current_sched_info->sched_max_insns_priority
      && rs6000_sched_restricted_insns_priority)
    {
      /* Prioritize insns that can be dispatched only in the first
         dispatch slot.  */
      if (rs6000_sched_restricted_insns_priority == 1)
        /* Attach highest priority to insn.  This means that in
           haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
           precede 'priority' (critical path) considerations.  */
        return current_sched_info->sched_max_insns_priority;
      else if (rs6000_sched_restricted_insns_priority == 2)
        /* Increase priority of insn by a minimal amount.  This means that in
           haifa-sched.c:ready_sort(), only 'priority' (critical path)
           considerations precede dispatch-slot restriction considerations.  */
        return (priority + 1);
    }

  if (rs6000_cpu == PROCESSOR_POWER6
      && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
          || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
    /* Attach highest priority to insn if the scheduler has just issued two
       stores and this instruction is a load, or two loads and this instruction
       is a store.  Power6 wants loads and stores scheduled alternately
       when possible.  */
    return current_sched_info->sched_max_insns_priority;

  return priority;
}
/* Return true if the instruction is nonpipelined on the Cell.  */
static bool
is_nonpipeline_insn (rtx_insn *insn)
{
  enum attr_type type;
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  type = get_attr_type (insn);
  if (type == TYPE_MUL
      || type == TYPE_DIV
      || type == TYPE_SDIV
      || type == TYPE_DDIV
      || type == TYPE_SSQRT
      || type == TYPE_DSQRT
      || type == TYPE_MFCR
      || type == TYPE_MFCRF
      || type == TYPE_MFJMPR)
    return true;

  return false;
}
/* Return how many instructions the machine can issue per cycle.  */

static int
rs6000_issue_rate (void)
{
  /* Unless scheduling for register pressure, use issue rate of 1 for
     first scheduling pass to decrease degradation.  */
  if (!reload_completed && !flag_sched_pressure)
    return 1;

  switch (rs6000_cpu_attr) {
  case CPU_PPC601: /* ? */

  case CPU_PPCE300C2:
  case CPU_PPCE300C3:
  case CPU_PPCE500MC:
  case CPU_PPCE500MC64:

  default:
    return 1;
  }
}

/* Return how many instructions to look ahead for better insn
   scheduling.  */

static int
rs6000_use_sched_lookahead (void)
{
  switch (rs6000_cpu_attr)
    {
    case CPU_CELL:
      return (reload_completed ? 8 : 0);

    default:
      return 0;
    }
}
/* We are choosing insn from the ready queue.  Return zero if INSN can be
   chosen.  */

static int
rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
{
  if (ready_index == 0)
    return 0;

  if (rs6000_cpu_attr != CPU_CELL)
    return 0;

  gcc_assert (insn != NULL_RTX && INSN_P (insn));

  if (!reload_completed
      || is_nonpipeline_insn (insn)
      || is_microcoded_insn (insn))
    return 1;

  return 0;
}
/* Determine if PAT refers to memory.  If so, set MEM_REF to the MEM rtx
   and return true.  */

static bool
find_mem_ref (rtx pat, rtx *mem_ref)
{
  const char *fmt;
  int i, j;

  /* stack_tie does not produce any real memory traffic.  */
  if (tie_operand (pat, VOIDmode))
    return false;

  if (GET_CODE (pat) == MEM)
    {
      *mem_ref = pat;
      return true;
    }

  /* Recursively process the pattern.  */
  fmt = GET_RTX_FORMAT (GET_CODE (pat));

  for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (find_mem_ref (XEXP (pat, i), mem_ref))
            return true;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
          {
            if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
              return true;
          }
    }

  return false;
}
/* Determine if PAT is a PATTERN of a load insn.  */

static bool
is_load_insn1 (rtx pat, rtx *load_mem)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return find_mem_ref (SET_SRC (pat), load_mem);

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
        if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
          return true;
    }

  return false;
}

/* Determine if INSN loads from memory.  */

static bool
is_load_insn (rtx insn, rtx *load_mem)
{
  if (!insn || !INSN_P (insn))
    return false;

  return is_load_insn1 (PATTERN (insn), load_mem);
}
/* Determine if PAT is a PATTERN of a store insn.  */

static bool
is_store_insn1 (rtx pat, rtx *str_mem)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return find_mem_ref (SET_DEST (pat), str_mem);

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
        if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
          return true;
    }

  return false;
}

/* Determine if INSN stores to memory.  */

static bool
is_store_insn (rtx insn, rtx *str_mem)
{
  if (!insn || !INSN_P (insn))
    return false;

  return is_store_insn1 (PATTERN (insn), str_mem);
}
/* Returns whether the dependence between INSN and NEXT is considered
   costly by the given target.  */

static bool
rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
{
  rtx insn;
  rtx next;
  rtx load_mem, str_mem;

  /* If the flag is not enabled - no dependence is considered costly;
     allow all dependent insns in the same group.
     This is the most aggressive option.  */
  if (rs6000_sched_costly_dep == no_dep_costly)
    return false;

  /* If the flag is set to 1 - a dependence is always considered costly;
     do not allow dependent instructions in the same group.
     This is the most conservative option.  */
  if (rs6000_sched_costly_dep == all_deps_costly)
    return true;

  insn = DEP_PRO (dep);
  next = DEP_CON (dep);

  if (rs6000_sched_costly_dep == store_to_load_dep_costly
      && is_load_insn (next, &load_mem)
      && is_store_insn (insn, &str_mem))
    /* Prevent load after store in the same group.  */
    return true;

  if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
      && is_load_insn (next, &load_mem)
      && is_store_insn (insn, &str_mem)
      && DEP_TYPE (dep) == REG_DEP_TRUE
      && mem_locations_overlap (str_mem, load_mem))
    /* Prevent load after store in the same group if it is a true
       dependence.  */
    return true;

  /* The flag is set to X; dependences with latency >= X are considered costly,
     and will not be scheduled in the same group.  */
  if (rs6000_sched_costly_dep <= max_dep_latency
      && ((cost - distance) >= (int) rs6000_sched_costly_dep))
    return true;

  return false;
}
/* Return the next insn after INSN that is found before TAIL is reached,
   skipping any "non-active" insns - insns that will not actually occupy
   an issue slot.  Return NULL_RTX if such an insn is not found.  */

static rtx_insn *
get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
{
  if (insn == NULL_RTX || insn == tail)
    return NULL;

  while (1)
    {
      insn = NEXT_INSN (insn);
      if (insn == NULL_RTX || insn == tail)
        return NULL;

      if (CALL_P (insn)
          || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
          || (NONJUMP_INSN_P (insn)
              && GET_CODE (PATTERN (insn)) != USE
              && GET_CODE (PATTERN (insn)) != CLOBBER
              && INSN_CODE (insn) != CODE_FOR_stack_tie))
        break;
    }
  return insn;
}
/* We are about to begin issuing insns for this clock cycle.  */

static int
rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
                      rtx_insn **ready ATTRIBUTE_UNUSED,
                      int *pn_ready ATTRIBUTE_UNUSED,
                      int clock_var ATTRIBUTE_UNUSED)
{
  int n_ready = *pn_ready;

  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder :\n");

  /* Reorder the ready list, if the second to last ready insn
     is a nonpipeline insn.  */
  if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
    {
      if (is_nonpipeline_insn (ready[n_ready - 1])
          && (recog_memoized (ready[n_ready - 2]) > 0))
        /* Simply swap first two insns.  */
        {
          rtx_insn *tmp = ready[n_ready - 1];
          ready[n_ready - 1] = ready[n_ready - 2];
          ready[n_ready - 2] = tmp;
        }
    }

  if (rs6000_cpu == PROCESSOR_POWER6)
    load_store_pendulum = 0;

  return rs6000_issue_rate ();
}
/* Like rs6000_sched_reorder, but called after issuing each insn.  */

static int
rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
                       int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
{
  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder2 :\n");

  /* For Power6, we need to handle some special cases to try and keep the
     store queue from overflowing and triggering expensive flushes.

     This code monitors how load and store instructions are being issued
     and skews the ready list one way or the other to increase the likelihood
     that a desired instruction is issued at the proper time.

     A couple of things are done.  First, we maintain a "load_store_pendulum"
     to track the current state of load/store issue.

       - If the pendulum is at zero, then no loads or stores have been
         issued in the current cycle so we do nothing.

       - If the pendulum is 1, then a single load has been issued in this
         cycle and we attempt to locate another load in the ready list to
         issue with it.

       - If the pendulum is -2, then two stores have already been
         issued in this cycle, so we increase the priority of the first load
         in the ready list to increase its likelihood of being chosen first
         in the next cycle.

       - If the pendulum is -1, then a single store has been issued in this
         cycle and we attempt to locate another store in the ready list to
         issue with it, preferring a store to an adjacent memory location to
         facilitate store pairing in the store queue.

       - If the pendulum is 2, then two loads have already been
         issued in this cycle, so we increase the priority of the first store
         in the ready list to increase its likelihood of being chosen first
         in the next cycle.

       - If the pendulum < -2 or > 2, then do nothing.

     Note: This code covers the most common scenarios.  There exist non
           load/store instructions which make use of the LSU and which
           would need to be accounted for to strictly model the behavior
           of the machine.  Those instructions are currently unaccounted
           for to help minimize compile time overhead of this code.  */
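  /* Illustrative walk-through (not part of the original comment), using the
     update rule implemented just below: starting from 0, issuing one store
     moves the pendulum to -1, a second store moves it to -2, and at -2 the
     first load on the ready list gets its priority bumped so that the next
     cycle is more likely to begin with a load.  */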
  if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
    {
      int pos;
      int i;
      rtx_insn *tmp;
      rtx load_mem, str_mem;

      if (is_store_insn (last_scheduled_insn, &str_mem))
        /* Issuing a store, swing the load_store_pendulum to the left */
        load_store_pendulum--;
      else if (is_load_insn (last_scheduled_insn, &load_mem))
        /* Issuing a load, swing the load_store_pendulum to the right */
        load_store_pendulum++;
      else
        return cached_can_issue_more;

      /* If the pendulum is balanced, or there is only one instruction on
         the ready list, then all is well, so return.  */
      if ((load_store_pendulum == 0) || (*pn_ready <= 1))
        return cached_can_issue_more;

      if (load_store_pendulum == 1)
        {
          /* A load has been issued in this cycle.  Scan the ready list
             for another load to issue with it.  */

          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_load_insn (ready[pos], &load_mem))
                {
                  /* Found a load.  Move it to the head of the ready list,
                     and adjust its priority so that it is more likely to
                     stay there.  */
                  tmp = ready[pos];
                  for (i = pos; i < *pn_ready - 1; i++)
                    ready[i] = ready[i + 1];
                  ready[*pn_ready - 1] = tmp;

                  if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                    INSN_PRIORITY (tmp)++;
                  break;
                }
              pos--;
            }
        }
      else if (load_store_pendulum == -2)
        {
          /* Two stores have been issued in this cycle.  Increase the
             priority of the first load in the ready list to favor it for
             issuing in the next cycle.  */

          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_load_insn (ready[pos], &load_mem)
                  && !sel_sched_p ()
                  && INSN_PRIORITY_KNOWN (ready[pos]))
                {
                  INSN_PRIORITY (ready[pos])++;

                  /* Adjust the pendulum to account for the fact that a load
                     was found and increased in priority.  This is to prevent
                     increasing the priority of multiple loads.  */
                  load_store_pendulum--;

                  break;
                }
              pos--;
            }
        }
      else if (load_store_pendulum == -1)
        {
          /* A store has been issued in this cycle.  Scan the ready list for
             another store to issue with it, preferring a store to an adjacent
             memory location.  */
          int first_store_pos = -1;

          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_store_insn (ready[pos], &str_mem))
                {
                  rtx str_mem2;
                  /* Maintain the index of the first store found on the
                     ready list.  */
                  if (first_store_pos == -1)
                    first_store_pos = pos;

                  if (is_store_insn (last_scheduled_insn, &str_mem2)
                      && adjacent_mem_locations (str_mem, str_mem2))
                    {
                      /* Found an adjacent store.  Move it to the head of the
                         ready list, and adjust its priority so that it is
                         more likely to stay there.  */
                      tmp = ready[pos];
                      for (i = pos; i < *pn_ready - 1; i++)
                        ready[i] = ready[i + 1];
                      ready[*pn_ready - 1] = tmp;

                      if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                        INSN_PRIORITY (tmp)++;

                      first_store_pos = -1;

                      break;
                    }
                }
              pos--;
            }

          if (first_store_pos >= 0)
            {
              /* An adjacent store wasn't found, but a non-adjacent store was,
                 so move the non-adjacent store to the front of the ready
                 list, and adjust its priority so that it is more likely to
                 stay there.  */
              tmp = ready[first_store_pos];
              for (i = first_store_pos; i < *pn_ready - 1; i++)
                ready[i] = ready[i + 1];
              ready[*pn_ready - 1] = tmp;
              if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                INSN_PRIORITY (tmp)++;
            }
        }
      else if (load_store_pendulum == 2)
        {
          /* Two loads have been issued in this cycle.  Increase the priority
             of the first store in the ready list to favor it for issuing in
             the next cycle.  */

          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_store_insn (ready[pos], &str_mem)
                  && !sel_sched_p ()
                  && INSN_PRIORITY_KNOWN (ready[pos]))
                {
                  INSN_PRIORITY (ready[pos])++;

                  /* Adjust the pendulum to account for the fact that a store
                     was found and increased in priority.  This is to prevent
                     increasing the priority of multiple stores.  */
                  load_store_pendulum++;

                  break;
                }
              pos--;
            }
        }
    }

  return cached_can_issue_more;
}
/* Return whether the presence of INSN causes a dispatch group termination
   of group WHICH_GROUP.

   If WHICH_GROUP == current_group, this function will return true if INSN
   causes the termination of the current group (i.e, the dispatch group to
   which INSN belongs).  This means that INSN will be the last insn in the
   group it belongs to.

   If WHICH_GROUP == previous_group, this function will return true if INSN
   causes the termination of the previous group (i.e, the dispatch group that
   precedes the group to which INSN belongs).  This means that INSN will be
   the first insn in the group it belongs to).  */

static bool
insn_terminates_group_p (rtx_insn *insn, enum group_termination which_group)
{
  bool first, last;

  if (! insn)
    return false;

  first = insn_must_be_first_in_group (insn);
  last = insn_must_be_last_in_group (insn);

  if (first && last)
    return true;

  if (which_group == current_group)
    return last;
  else if (which_group == previous_group)
    return first;

  return false;
}
static bool
insn_must_be_first_in_group (rtx_insn *insn)
{
  enum attr_type type;

  if (!insn
      || DEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_cpu)
    {
    case PROCESSOR_POWER5:
      if (is_cracked_insn (insn))
        return true;
    case PROCESSOR_POWER4:
      if (is_microcoded_insn (insn))
        return true;

      if (!rs6000_sched_groups)
        return false;

      type = get_attr_type (insn);

        case TYPE_DELAYED_CR:
        case TYPE_CR_LOGICAL:
          return true;

    case PROCESSOR_POWER6:
      type = get_attr_type (insn);

        case TYPE_FPCOMPARE:
          return true;

          if (get_attr_dot (insn) == DOT_NO
              || get_attr_var_shift (insn) == VAR_SHIFT_NO)
            return true;

          if (get_attr_size (insn) == SIZE_32)
            return true;

          if (get_attr_update (insn) == UPDATE_YES)
            return true;

    case PROCESSOR_POWER7:
      type = get_attr_type (insn);

        case TYPE_CR_LOGICAL:
          return true;

          if (get_attr_dot (insn) == DOT_YES)
            return true;

          if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
              || get_attr_update (insn) == UPDATE_YES)
            return true;

          if (get_attr_update (insn) == UPDATE_YES)
            return true;

    case PROCESSOR_POWER8:
      type = get_attr_type (insn);

        case TYPE_CR_LOGICAL:
        case TYPE_DELAYED_CR:
        case TYPE_VECSTORE:
          return true;

          if (get_attr_dot (insn) == DOT_YES)
            return true;

          if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
              || get_attr_update (insn) == UPDATE_YES)
            return true;

          if (get_attr_update (insn) == UPDATE_YES
              && get_attr_indexed (insn) == INDEXED_YES)
            return true;

    default:
      break;
    }

  return false;
}
static bool
insn_must_be_last_in_group (rtx_insn *insn)
{
  enum attr_type type;

  if (!insn
      || DEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_cpu) {
  case PROCESSOR_POWER4:
  case PROCESSOR_POWER5:
    if (is_microcoded_insn (insn))
      return true;

    if (is_branch_slot_insn (insn))
      return true;

    break;
  case PROCESSOR_POWER6:
    type = get_attr_type (insn);

      case TYPE_FPCOMPARE:
        return true;

        if (get_attr_dot (insn) == DOT_NO
            || get_attr_var_shift (insn) == VAR_SHIFT_NO)
          return true;

        if (get_attr_size (insn) == SIZE_32)
          return true;

    break;
  case PROCESSOR_POWER7:
    type = get_attr_type (insn);

        if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
            && get_attr_update (insn) == UPDATE_YES)
          return true;

        if (get_attr_update (insn) == UPDATE_YES
            && get_attr_indexed (insn) == INDEXED_YES)
          return true;

    break;
  case PROCESSOR_POWER8:
    type = get_attr_type (insn);

        if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
            && get_attr_update (insn) == UPDATE_YES)
          return true;

        if (get_attr_update (insn) == UPDATE_YES
            && get_attr_indexed (insn) == INDEXED_YES)
          return true;

    break;
  default:
    break;
  }

  return false;
}
/* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
   dispatch group) from the insns in GROUP_INSNS.  Return false otherwise.  */

static bool
is_costly_group (rtx *group_insns, rtx next_insn)
{
  int i;
  int issue_rate = rs6000_issue_rate ();

  for (i = 0; i < issue_rate; i++)
    {
      sd_iterator_def sd_it;
      dep_t dep;
      rtx insn = group_insns[i];

      if (!insn)
        continue;

      FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
        {
          rtx next = DEP_CON (dep);

          if (next == next_insn
              && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
            return true;
        }
    }

  return false;
}
/* Utility of the function redefine_groups.
   Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
   in the same dispatch group.  If so, insert nops before NEXT_INSN, in order
   to keep it "far" (in a separate group) from GROUP_INSNS, following
   one of the following schemes, depending on the value of the flag
   -minsert_sched_nops = X:
   (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
       in order to force NEXT_INSN into a separate group.
   (2) X < sched_finish_regroup_exact: insert exactly X nops.
   GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
   insertion (has a group just ended, how many vacant issue slots remain in
   the last group, and how many dispatch groups were encountered so far).  */
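/* Worked example (illustration only): with scheme (1) on a 4-issue CPU,
   if 3 issue slots are still vacant and NEXT_INSN is not a branch, the
   code below emits can_issue_more - 1 = 2 nops; if NEXT_INSN is a branch
   it emits all 3, so the last nop starts a new group and the branch is
   pushed into that new group.  */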
static int
force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
                 rtx_insn *next_insn, bool *group_end, int can_issue_more,
                 int *group_count)
{
  rtx nop;
  bool force;
  int issue_rate = rs6000_issue_rate ();
  bool end = *group_end;
  int i;

  if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
    return can_issue_more;

  if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
    return can_issue_more;

  force = is_costly_group (group_insns, next_insn);
  if (!force)
    return can_issue_more;

  if (sched_verbose > 6)
    fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
             *group_count, can_issue_more);

  if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
    {
      if (*group_end)
        can_issue_more = 0;

      /* Since only a branch can be issued in the last issue_slot, it is
         sufficient to insert 'can_issue_more - 1' nops if next_insn is not
         a branch.  If next_insn is a branch, we insert 'can_issue_more' nops;
         in this case the last nop will start a new group and the branch
         will be forced to the new group.  */
      if (can_issue_more && !is_branch_slot_insn (next_insn))
        can_issue_more--;

      /* Do we have a special group ending nop? */
      if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
          || rs6000_cpu_attr == CPU_POWER8)
        {
          nop = gen_group_ending_nop ();
          emit_insn_before (nop, next_insn);
          can_issue_more = 0;
        }
      else
        while (can_issue_more > 0)
          {
            nop = gen_nop ();
            emit_insn_before (nop, next_insn);
            can_issue_more--;
          }

      *group_end = true;
      return 0;
    }

  if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
    {
      int n_nops = rs6000_sched_insert_nops;

      /* Nops can't be issued from the branch slot, so the effective
         issue_rate for nops is 'issue_rate - 1'.  */
      if (can_issue_more == 0)
        can_issue_more = issue_rate;
      can_issue_more--;
      if (can_issue_more == 0)
        {
          can_issue_more = issue_rate - 1;
          (*group_count)++;
          end = true;
          for (i = 0; i < issue_rate; i++)
            group_insns[i] = 0;
        }

      while (n_nops > 0)
        {
          nop = gen_nop ();
          emit_insn_before (nop, next_insn);
          if (can_issue_more == issue_rate - 1) /* new group begins */
            end = false;
          can_issue_more--;
          if (can_issue_more == 0)
            {
              can_issue_more = issue_rate - 1;
              (*group_count)++;
              end = true;
              for (i = 0; i < issue_rate; i++)
                group_insns[i] = 0;
            }
          n_nops--;
        }

      /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1').  */
      can_issue_more++;

      /* Is next_insn going to start a new group?  */
      *group_end
        = (end
           || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
           || (can_issue_more <= 2 && is_cracked_insn (next_insn))
           || (can_issue_more < issue_rate &&
               insn_terminates_group_p (next_insn, previous_group)));
      if (*group_end && end)
        (*group_count)--;

      if (sched_verbose > 6)
        fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
                 *group_count, can_issue_more);

      return can_issue_more;
    }

  return can_issue_more;
}
/* This function tries to synch the dispatch groups that the compiler "sees"
   with the dispatch groups that the processor dispatcher is expected to
   form in practice.  It tries to achieve this synchronization by forcing the
   estimated processor grouping on the compiler (as opposed to the function
   'pad_groups' which tries to force the scheduler's grouping on the
   processor).

   The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
   examines the (estimated) dispatch groups that will be formed by the
   processor dispatcher.  It marks these group boundaries to reflect the
   estimated processor grouping, overriding the grouping that the scheduler
   had marked.  Depending on the value of the flag '-minsert-sched-nops'
   this function can force certain insns into separate groups or force a
   certain distance between them by inserting nops, for example, if there
   exists a "costly dependence" between the insns.

   The function estimates the group boundaries that the processor will form
   as follows:  It keeps track of how many vacant issue slots are available
   after each insn.  A subsequent insn will start a new group if one of the
   following occurs:
   - no more vacant issue slots remain in the current dispatch group.
   - only the last issue slot, which is the branch slot, is vacant, but the
     next insn is not a branch.
   - only the last 2 or less issue slots, including the branch slot, are
     vacant, which means that a cracked insn (which occupies two issue slots)
     can't be issued in this group.
   - less than 'issue_rate' slots are vacant, and the next insn always needs
     to start a new group.  */
static int
redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
                 rtx_insn *tail)
{
  rtx_insn *insn, *next_insn;
  int issue_rate;
  int can_issue_more;
  int slot, i;
  bool group_end;
  int group_count = 0;
  rtx *group_insns;

  /* Initialize.  */
  issue_rate = rs6000_issue_rate ();
  group_insns = XALLOCAVEC (rtx, issue_rate);
  for (i = 0; i < issue_rate; i++)
    {
      group_insns[i] = 0;
    }
  can_issue_more = issue_rate;
  slot = 0;
  insn = get_next_active_insn (prev_head_insn, tail);
  group_end = false;

  while (insn != NULL_RTX)
    {
      slot = (issue_rate - can_issue_more);
      group_insns[slot] = insn;
      can_issue_more =
        rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
      if (insn_terminates_group_p (insn, current_group))
        can_issue_more = 0;

      next_insn = get_next_active_insn (insn, tail);
      if (next_insn == NULL_RTX)
        return group_count + 1;

      /* Is next_insn going to start a new group?  */
      group_end
        = (can_issue_more == 0
           || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
           || (can_issue_more <= 2 && is_cracked_insn (next_insn))
           || (can_issue_more < issue_rate &&
               insn_terminates_group_p (next_insn, previous_group)));

      can_issue_more = force_new_group (sched_verbose, dump, group_insns,
                                        next_insn, &group_end, can_issue_more,
                                        &group_count);

      if (group_end)
        {
          group_count++;
          can_issue_more = 0;
          for (i = 0; i < issue_rate; i++)
            {
              group_insns[i] = 0;
            }
        }

      if (GET_MODE (next_insn) == TImode && can_issue_more)
        PUT_MODE (next_insn, VOIDmode);
      else if (!can_issue_more && GET_MODE (next_insn) != TImode)
        PUT_MODE (next_insn, TImode);

      insn = next_insn;
      if (can_issue_more == 0)
        can_issue_more = issue_rate;
    }

  return group_count;
}
/* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
   dispatch group boundaries that the scheduler had marked.  Pad with nops
   any dispatch groups which have vacant issue slots, in order to force the
   scheduler's grouping on the processor dispatcher.  The function
   returns the number of dispatch groups found.  */
static int
pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
            rtx_insn *tail)
{
  rtx_insn *insn, *next_insn;
  rtx nop;
  int issue_rate;
  int can_issue_more;
  int group_end;
  int group_count = 0;

  /* Initialize issue_rate.  */
  issue_rate = rs6000_issue_rate ();
  can_issue_more = issue_rate;

  insn = get_next_active_insn (prev_head_insn, tail);
  next_insn = get_next_active_insn (insn, tail);

  while (insn != NULL_RTX)
    {
      can_issue_more =
        rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);

      group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);

      if (next_insn == NULL_RTX)
        break;

      if (group_end)
        {
          /* If the scheduler had marked group termination at this location
             (between insn and next_insn), and neither insn nor next_insn will
             force group termination, pad the group with nops to force group
             termination.  */
          if (can_issue_more
              && (rs6000_sched_insert_nops == sched_finish_pad_groups)
              && !insn_terminates_group_p (insn, current_group)
              && !insn_terminates_group_p (next_insn, previous_group))
            {
              if (!is_branch_slot_insn (next_insn))
                can_issue_more--;

              while (can_issue_more)
                {
                  nop = gen_nop ();
                  emit_insn_before (nop, next_insn);
                  can_issue_more--;
                }
            }

          can_issue_more = issue_rate;
          group_count++;
        }

      insn = next_insn;
      next_insn = get_next_active_insn (insn, tail);
    }

  return group_count;
}
/* We're beginning a new block.  Initialize data structures as necessary.  */

static void
rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
                   int sched_verbose ATTRIBUTE_UNUSED,
                   int max_ready ATTRIBUTE_UNUSED)
{
  last_scheduled_insn = NULL_RTX;
  load_store_pendulum = 0;
}
/* The following function is called at the end of scheduling BB.
   After reload, it inserts nops at insn group bundling.  */

static void
rs6000_sched_finish (FILE *dump, int sched_verbose)
{
  int n_groups;

  if (sched_verbose)
    fprintf (dump, "=== Finishing schedule.\n");

  if (reload_completed && rs6000_sched_groups)
    {
      /* Do not run sched_finish hook when selective scheduling enabled.  */
      if (sel_sched_p ())
        return;

      if (rs6000_sched_insert_nops == sched_finish_none)
        return;

      if (rs6000_sched_insert_nops == sched_finish_pad_groups)
        n_groups = pad_groups (dump, sched_verbose,
                               current_sched_info->prev_head,
                               current_sched_info->next_tail);
      else
        n_groups = redefine_groups (dump, sched_verbose,
                                    current_sched_info->prev_head,
                                    current_sched_info->next_tail);

      if (sched_verbose >= 6)
        {
          fprintf (dump, "ngroups = %d\n", n_groups);
          print_rtl (dump, current_sched_info->prev_head);
          fprintf (dump, "Done finish_sched\n");
        }
    }
}
struct _rs6000_sched_context
{
  short cached_can_issue_more;
  rtx last_scheduled_insn;
  int load_store_pendulum;
};

typedef struct _rs6000_sched_context rs6000_sched_context_def;
typedef rs6000_sched_context_def *rs6000_sched_context_t;
/* Allocate store for new scheduling context.  */

static void *
rs6000_alloc_sched_context (void)
{
  return xmalloc (sizeof (rs6000_sched_context_def));
}
/* If CLEAN_P is true then initializes _SC with clean data,
   and from the global context otherwise.  */

static void
rs6000_init_sched_context (void *_sc, bool clean_p)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  if (clean_p)
    {
      sc->cached_can_issue_more = 0;
      sc->last_scheduled_insn = NULL_RTX;
      sc->load_store_pendulum = 0;
    }
  else
    {
      sc->cached_can_issue_more = cached_can_issue_more;
      sc->last_scheduled_insn = last_scheduled_insn;
      sc->load_store_pendulum = load_store_pendulum;
    }
}
/* Sets the global scheduling context to the one pointed to by _SC.  */

static void
rs6000_set_sched_context (void *_sc)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  gcc_assert (sc != NULL);

  cached_can_issue_more = sc->cached_can_issue_more;
  last_scheduled_insn = sc->last_scheduled_insn;
  load_store_pendulum = sc->load_store_pendulum;
}
/* Free _SC.  */

static void
rs6000_free_sched_context (void *_sc)
{
  gcc_assert (_sc != NULL);

  free (_sc);
}
/* Length in units of the trampoline for entering a nested function.  */

static int
rs6000_trampoline_size (void)
{
  int ret = 0;

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_AIX:
      ret = (TARGET_32BIT) ? 12 : 24;
      break;

    case ABI_ELFv2:
      gcc_assert (!TARGET_32BIT);
      ret = 32;
      break;

    case ABI_DARWIN:
    case ABI_V4:
      ret = (TARGET_32BIT) ? 40 : 48;
      break;
    }

  return ret;
}
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */
static void
rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
{
  int regsize = (TARGET_32BIT) ? 4 : 8;
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx ctx_reg = force_reg (Pmode, cxt);
  rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    /* Under AIX, just build the 3 word function descriptor */
    case ABI_AIX:
      {
        rtx fnmem, fn_reg, toc_reg;

        if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
          error ("You cannot take the address of a nested function if you use "
                 "the -mno-pointers-to-nested-functions option.");

        fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
        fn_reg = gen_reg_rtx (Pmode);
        toc_reg = gen_reg_rtx (Pmode);

  /* Macro to shorten the code expansions below.  */
# define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)

        m_tramp = replace_equiv_address (m_tramp, addr);

        emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
        emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
        emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
        emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
        emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);

# undef MEM_PLUS
      }
      break;

    /* Under V.4/eabi/darwin, __trampoline_setup does the real work.  */
    case ABI_ELFv2:
    case ABI_DARWIN:
    case ABI_V4:
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
                         LCT_NORMAL, VOIDmode, 4,
                         addr, Pmode,
                         GEN_INT (rs6000_trampoline_size ()), SImode,
                         fnaddr, Pmode,
                         ctx_reg, Pmode);
      break;
    }
}
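/* For illustration only (a sketch of the layout built above, not a structure
   GCC itself declares): in the AIX case the trampoline words written by the
   MEM_PLUS stores form a function descriptor followed by the static chain,
   each field regsize (4 or 8) bytes wide.

     struct aix_trampoline_sketch
     {
       void *code_addr;       at offset 0,          copied from the target's descriptor
       void *toc_value;       at offset regsize,    copied from the target's descriptor
       void *static_chain;    at offset 2*regsize,  the CXT argument
     };
*/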
/* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
   identifier as an argument, so the front end shouldn't look it up.  */

static bool
rs6000_attribute_takes_identifier_p (const_tree attr_id)
{
  return is_attribute_p ("altivec", attr_id);
}
/* Handle the "altivec" attribute.  The attribute may have
   arguments as follows:

     __attribute__((altivec(vector__)))
     __attribute__((altivec(pixel__)))   (always followed by 'unsigned short')
     __attribute__((altivec(bool__)))    (always followed by 'unsigned')

   and may appear more than once (e.g., 'vector bool char') in a
   given declaration.  */
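/* For illustration only (a usage sketch; the handling of the 'vector'
   keyword by the front ends is an assumption, not something defined in this
   file): a declaration such as

     __attribute__((altivec(vector__))) unsigned int vui;

   is processed by the handler below exactly like 'vector unsigned int vui;',
   and the bool/pixel spellings combine with it in the same way, so
   'vector bool char' arrives here as two applications of this attribute.  */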
28331 rs6000_handle_altivec_attribute (tree
*node
,
28332 tree name ATTRIBUTE_UNUSED
,
28334 int flags ATTRIBUTE_UNUSED
,
28335 bool *no_add_attrs
)
28337 tree type
= *node
, result
= NULL_TREE
;
28338 enum machine_mode mode
;
28341 = ((args
&& TREE_CODE (args
) == TREE_LIST
&& TREE_VALUE (args
)
28342 && TREE_CODE (TREE_VALUE (args
)) == IDENTIFIER_NODE
)
28343 ? *IDENTIFIER_POINTER (TREE_VALUE (args
))
28346 while (POINTER_TYPE_P (type
)
28347 || TREE_CODE (type
) == FUNCTION_TYPE
28348 || TREE_CODE (type
) == METHOD_TYPE
28349 || TREE_CODE (type
) == ARRAY_TYPE
)
28350 type
= TREE_TYPE (type
);
28352 mode
= TYPE_MODE (type
);
28354 /* Check for invalid AltiVec type qualifiers. */
28355 if (type
== long_double_type_node
)
28356 error ("use of %<long double%> in AltiVec types is invalid");
28357 else if (type
== boolean_type_node
)
28358 error ("use of boolean types in AltiVec types is invalid");
28359 else if (TREE_CODE (type
) == COMPLEX_TYPE
)
28360 error ("use of %<complex%> in AltiVec types is invalid");
28361 else if (DECIMAL_FLOAT_MODE_P (mode
))
28362 error ("use of decimal floating point types in AltiVec types is invalid");
28363 else if (!TARGET_VSX
)
28365 if (type
== long_unsigned_type_node
|| type
== long_integer_type_node
)
28368 error ("use of %<long%> in AltiVec types is invalid for "
28369 "64-bit code without -mvsx");
28370 else if (rs6000_warn_altivec_long
)
28371 warning (0, "use of %<long%> in AltiVec types is deprecated; "
28374 else if (type
== long_long_unsigned_type_node
28375 || type
== long_long_integer_type_node
)
28376 error ("use of %<long long%> in AltiVec types is invalid without "
28378 else if (type
== double_type_node
)
28379 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
28382 switch (altivec_type
)
28385 unsigned_p
= TYPE_UNSIGNED (type
);
28389 result
= (unsigned_p
? unsigned_V1TI_type_node
: V1TI_type_node
);
28392 result
= (unsigned_p
? unsigned_V2DI_type_node
: V2DI_type_node
);
28395 result
= (unsigned_p
? unsigned_V4SI_type_node
: V4SI_type_node
);
28398 result
= (unsigned_p
? unsigned_V8HI_type_node
: V8HI_type_node
);
28401 result
= (unsigned_p
? unsigned_V16QI_type_node
: V16QI_type_node
);
28403 case SFmode
: result
= V4SF_type_node
; break;
28404 case DFmode
: result
= V2DF_type_node
; break;
28405 /* If the user says 'vector int bool', we may be handed the 'bool'
28406 attribute _before_ the 'vector' attribute, and so select the
28407 proper type in the 'b' case below. */
28408 case V4SImode
: case V8HImode
: case V16QImode
: case V4SFmode
:
28409 case V2DImode
: case V2DFmode
:
28417 case DImode
: case V2DImode
: result
= bool_V2DI_type_node
; break;
28418 case SImode
: case V4SImode
: result
= bool_V4SI_type_node
; break;
28419 case HImode
: case V8HImode
: result
= bool_V8HI_type_node
; break;
28420 case QImode
: case V16QImode
: result
= bool_V16QI_type_node
;
28427 case V8HImode
: result
= pixel_V8HI_type_node
;
28433 /* Propagate qualifiers attached to the element type
28434 onto the vector type. */
28435 if (result
&& result
!= type
&& TYPE_QUALS (type
))
28436 result
= build_qualified_type (result
, TYPE_QUALS (type
));
28438 *no_add_attrs
= true; /* No need to hang on to the attribute. */
28441 *node
= lang_hooks
.types
.reconstruct_complex_type (*node
, result
);
/* AltiVec defines four built-in scalar types that serve as vector
   elements; we must teach the compiler how to mangle them.  */

static const char *
rs6000_mangle_type (const_tree type)
{
  type = TYPE_MAIN_VARIANT (type);

  if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
      && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
    return NULL;

  if (type == bool_char_type_node) return "U6__boolc";
  if (type == bool_short_type_node) return "U6__bools";
  if (type == pixel_type_node) return "u7__pixel";
  if (type == bool_int_type_node) return "U6__booli";
  if (type == bool_long_type_node) return "U6__booll";

  /* Mangle IBM extended float long double as `g' (__float128) on
     powerpc*-linux where long-double-64 previously was the default.  */
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_ELF
      && TARGET_LONG_DOUBLE_128
      && !TARGET_IEEEQUAD)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
/* Handle a "longcall" or "shortcall" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_longcall_attribute (tree *node, tree name,
                                  tree args ATTRIBUTE_UNUSED,
                                  int flags ATTRIBUTE_UNUSED,
                                  bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Set longcall attributes on all functions declared when
   rs6000_default_long_calls is true.  */

static void
rs6000_set_default_type_attributes (tree type)
{
  if (rs6000_default_long_calls
      && (TREE_CODE (type) == FUNCTION_TYPE
          || TREE_CODE (type) == METHOD_TYPE))
    TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
                                        NULL_TREE,
                                        TYPE_ATTRIBUTES (type));

#if TARGET_MACHO
  darwin_set_default_type_attributes (type);
#endif
}
/* Return a reference suitable for calling a function with the
   longcall attribute.  */

rtx
rs6000_longcall_ref (rtx call_ref)
{
  const char *call_name;
  tree node;

  if (GET_CODE (call_ref) != SYMBOL_REF)
    return call_ref;

  /* System V adds '.' to the internal name, so skip them.  */
  call_name = XSTR (call_ref, 0);
  if (*call_name == '.')
    {
      while (*call_name == '.')
        call_name++;

      node = get_identifier (call_name);
      call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
    }

  return force_reg (Pmode, call_ref);
}
#ifndef TARGET_USE_MS_BITFIELD_LAYOUT
#define TARGET_USE_MS_BITFIELD_LAYOUT 0
#endif

/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
   struct attribute_spec.handler.  */
static tree
rs6000_handle_struct_attribute (tree *node, tree name,
                                tree args ATTRIBUTE_UNUSED,
                                int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree *type = NULL;
  if (DECL_P (*node))
    {
      if (TREE_CODE (*node) == TYPE_DECL)
        type = &TREE_TYPE (*node);
    }
  else
    type = node;

  if (!(type && (TREE_CODE (*type) == RECORD_TYPE
                 || TREE_CODE (*type) == UNION_TYPE)))
    {
      warning (OPT_Wattributes, "%qE attribute ignored", name);
      *no_add_attrs = true;
    }

  else if ((is_attribute_p ("ms_struct", name)
            && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
           || ((is_attribute_p ("gcc_struct", name)
                && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
    {
      warning (OPT_Wattributes, "%qE incompatible attribute ignored",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
static bool
rs6000_ms_bitfield_layout_p (const_tree record_type)
{
  return (TARGET_USE_MS_BITFIELD_LAYOUT &&
          !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
    || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
}
#ifdef USING_ELFOS_H

/* A get_unnamed_section callback, used for switching to toc_section.  */
28593 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED
)
28595 if ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
28596 && TARGET_MINIMAL_TOC
28597 && !TARGET_RELOCATABLE
)
28599 if (!toc_initialized
)
28601 toc_initialized
= 1;
28602 fprintf (asm_out_file
, "%s\n", TOC_SECTION_ASM_OP
);
28603 (*targetm
.asm_out
.internal_label
) (asm_out_file
, "LCTOC", 0);
28604 fprintf (asm_out_file
, "\t.tc ");
28605 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1[TC],");
28606 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1");
28607 fprintf (asm_out_file
, "\n");
28609 fprintf (asm_out_file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
28610 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1");
28611 fprintf (asm_out_file
, " = .+32768\n");
28614 fprintf (asm_out_file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
28616 else if ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
28617 && !TARGET_RELOCATABLE
)
28618 fprintf (asm_out_file
, "%s\n", TOC_SECTION_ASM_OP
);
28621 fprintf (asm_out_file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
28622 if (!toc_initialized
)
28624 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file
, "LCTOC1");
28625 fprintf (asm_out_file
, " = .+32768\n");
28626 toc_initialized
= 1;
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_elf_asm_init_sections (void)
{
  toc_section
    = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);

  sdata2_section
    = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
                           SDATA2_SECTION_ASM_OP);
}
/* Implement TARGET_SELECT_RTX_SECTION.  */

static section *
rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
                               unsigned HOST_WIDE_INT align)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}
/* For a SYMBOL_REF, set generic flags and then perform some
   target-specific processing.

   When the AIX ABI is requested on a non-AIX system, replace the
   function name with the real name (with a leading .) rather than the
   function descriptor name.  This saves a lot of overriding code to
   read the prefixes.  */

static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;

static void
rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (first
      && TREE_CODE (decl) == FUNCTION_DECL
      && !TARGET_AIX
      && DEFAULT_ABI == ABI_AIX)
    {
      rtx sym_ref = XEXP (rtl, 0);
      size_t len = strlen (XSTR (sym_ref, 0));
      char *str = XALLOCAVEC (char, len + 2);
      str[0] = '.';
      memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
      XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
    }
}
static bool
compare_section_name (const char *section, const char *templ)
{
  int len;

  len = strlen (templ);
  return (strncmp (section, templ, len) == 0
          && (section[len] == 0 || section[len] == '.'));
}
static bool
rs6000_elf_in_small_data_p (const_tree decl)
{
  if (rs6000_sdata == SDATA_NONE)
    return false;

  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (decl) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
    {
      const char *section = DECL_SECTION_NAME (decl);
      if (compare_section_name (section, ".sdata")
          || compare_section_name (section, ".sdata2")
          || compare_section_name (section, ".gnu.linkonce.s")
          || compare_section_name (section, ".sbss")
          || compare_section_name (section, ".sbss2")
          || compare_section_name (section, ".gnu.linkonce.sb")
          || strcmp (section, ".PPC.EMB.sdata0") == 0
          || strcmp (section, ".PPC.EMB.sbss0") == 0)
        return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));

      if (size > 0
          && size <= g_switch_value
          /* If it's not public, and we're not going to reference it there,
             there's no need to put it in the small data section.  */
          && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
        return true;
    }

  return false;
}

#endif /* USING_ELFOS_H */
/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  */

static bool
rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
{
  return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
}

/* Do not place thread-local symbols refs in the object blocks.  */

static bool
rs6000_use_blocks_for_decl_p (const_tree decl)
{
  return !DECL_THREAD_LOCAL_P (decl);
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.

   r0 is special and we must not select it as an address
   register by this routine since our caller will try to
   increment the returned register via an "la" instruction.  */

rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG
          && REGNO (XEXP (addr, 0)) != 0)
        addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG
               && REGNO (XEXP (addr, 1)) != 0)
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
        addr = XEXP (addr, 0);
      else
        gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
  return addr;
}

void
rs6000_fatal_bad_address (rtx op)
{
  fatal_insn ("bad address", op);
}
typedef struct branch_island_d
{
  tree function_name;
  tree label_name;
  int line_number;
} branch_island;

static vec<branch_island, va_gc> *branch_islands;
/* Remember to generate a branch island for far calls to the given
   function.  */

static void
add_compiler_branch_island (tree label_name, tree function_name,
                            int line_number)
{
  branch_island bi = {function_name, label_name, line_number};
  vec_safe_push (branch_islands, bi);
}
/* Generate far-jump branch islands for everything recorded in
   branch_islands.  Invoked immediately after the last instruction of
   the epilogue has been emitted; the branch islands must be appended
   to, and contiguous with, the function body.  Mach-O stubs are
   generated in machopic_output_stub().  */
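/* For illustration only (a sketch of what the non-PIC, non-link-stack path
   below emits; the label and symbol names are made up): a branch island
   'L42' for a far call to 'foo' looks roughly like

     L42:
             lis   r12,hi16(foo)
             ori   r12,r12,lo16(foo)
             mtctr r12
             bctr

   so a short 'bl L42' in the caller still reaches 'foo', via the counter
   register.  */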
28818 macho_branch_islands (void)
28822 while (!vec_safe_is_empty (branch_islands
))
28824 branch_island
*bi
= &branch_islands
->last ();
28825 const char *label
= IDENTIFIER_POINTER (bi
->label_name
);
28826 const char *name
= IDENTIFIER_POINTER (bi
->function_name
);
28827 char name_buf
[512];
28828 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
28829 if (name
[0] == '*' || name
[0] == '&')
28830 strcpy (name_buf
, name
+1);
28834 strcpy (name_buf
+1, name
);
28836 strcpy (tmp_buf
, "\n");
28837 strcat (tmp_buf
, label
);
28838 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
28839 if (write_symbols
== DBX_DEBUG
|| write_symbols
== XCOFF_DEBUG
)
28840 dbxout_stabd (N_SLINE
, bi
->line_number
);
28841 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
28844 if (TARGET_LINK_STACK
)
28847 get_ppc476_thunk_name (name
);
28848 strcat (tmp_buf
, ":\n\tmflr r0\n\tbl ");
28849 strcat (tmp_buf
, name
);
28850 strcat (tmp_buf
, "\n");
28851 strcat (tmp_buf
, label
);
28852 strcat (tmp_buf
, "_pic:\n\tmflr r11\n");
28856 strcat (tmp_buf
, ":\n\tmflr r0\n\tbcl 20,31,");
28857 strcat (tmp_buf
, label
);
28858 strcat (tmp_buf
, "_pic\n");
28859 strcat (tmp_buf
, label
);
28860 strcat (tmp_buf
, "_pic:\n\tmflr r11\n");
28863 strcat (tmp_buf
, "\taddis r11,r11,ha16(");
28864 strcat (tmp_buf
, name_buf
);
28865 strcat (tmp_buf
, " - ");
28866 strcat (tmp_buf
, label
);
28867 strcat (tmp_buf
, "_pic)\n");
28869 strcat (tmp_buf
, "\tmtlr r0\n");
28871 strcat (tmp_buf
, "\taddi r12,r11,lo16(");
28872 strcat (tmp_buf
, name_buf
);
28873 strcat (tmp_buf
, " - ");
28874 strcat (tmp_buf
, label
);
28875 strcat (tmp_buf
, "_pic)\n");
28877 strcat (tmp_buf
, "\tmtctr r12\n\tbctr\n");
28881 strcat (tmp_buf
, ":\nlis r12,hi16(");
28882 strcat (tmp_buf
, name_buf
);
28883 strcat (tmp_buf
, ")\n\tori r12,r12,lo16(");
28884 strcat (tmp_buf
, name_buf
);
28885 strcat (tmp_buf
, ")\n\tmtctr r12\n\tbctr");
28887 output_asm_insn (tmp_buf
, 0);
28888 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
28889 if (write_symbols
== DBX_DEBUG
|| write_symbols
== XCOFF_DEBUG
)
28890 dbxout_stabd (N_SLINE
, bi
->line_number
);
28891 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
28892 branch_islands
->pop ();
/* NO_PREVIOUS_DEF checks in the link list whether the function name is
   already there or not.  */

static bool
no_previous_def (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return false;
  return true;
}
/* GET_PREV_LABEL gets the label name from the previous definition of
   the function.  */

static tree
get_prev_label (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return bi->label_name;
  return NULL_TREE;
}
/* INSN is either a function call or a millicode call.  It may have an
   unconditional jump in its delay slot.

   CALL_DEST is the routine we are calling.  */
28932 output_call (rtx_insn
*insn
, rtx
*operands
, int dest_operand_number
,
28933 int cookie_operand_number
)
28935 static char buf
[256];
28936 if (darwin_emit_branch_islands
28937 && GET_CODE (operands
[dest_operand_number
]) == SYMBOL_REF
28938 && (INTVAL (operands
[cookie_operand_number
]) & CALL_LONG
))
28941 tree funname
= get_identifier (XSTR (operands
[dest_operand_number
], 0));
28943 if (no_previous_def (funname
))
28945 rtx label_rtx
= gen_label_rtx ();
28946 char *label_buf
, temp_buf
[256];
28947 ASM_GENERATE_INTERNAL_LABEL (temp_buf
, "L",
28948 CODE_LABEL_NUMBER (label_rtx
));
28949 label_buf
= temp_buf
[0] == '*' ? temp_buf
+ 1 : temp_buf
;
28950 labelname
= get_identifier (label_buf
);
28951 add_compiler_branch_island (labelname
, funname
, insn_line (insn
));
28954 labelname
= get_prev_label (funname
);
28956 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
28957 instruction will reach 'foo', otherwise link as 'bl L42'".
28958 "L42" should be a 'branch island', that will do a far jump to
28959 'foo'. Branch islands are generated in
28960 macho_branch_islands(). */
28961 sprintf (buf
, "jbsr %%z%d,%.246s",
28962 dest_operand_number
, IDENTIFIER_POINTER (labelname
));
28965 sprintf (buf
, "bl %%z%d", dest_operand_number
);
28969 /* Generate PIC and indirect symbol stubs. */
28972 machopic_output_stub (FILE *file
, const char *symb
, const char *stub
)
28974 unsigned int length
;
28975 char *symbol_name
, *lazy_ptr_name
;
28976 char *local_label_0
;
28977 static int label
= 0;
28979 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
28980 symb
= (*targetm
.strip_name_encoding
) (symb
);
28983 length
= strlen (symb
);
28984 symbol_name
= XALLOCAVEC (char, length
+ 32);
28985 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name
, symb
, length
);
28987 lazy_ptr_name
= XALLOCAVEC (char, length
+ 32);
28988 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name
, symb
, length
);
28991 switch_to_section (darwin_sections
[machopic_picsymbol_stub1_section
]);
28993 switch_to_section (darwin_sections
[machopic_symbol_stub1_section
]);
28997 fprintf (file
, "\t.align 5\n");
28999 fprintf (file
, "%s:\n", stub
);
29000 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
29003 local_label_0
= XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
29004 sprintf (local_label_0
, "\"L%011d$spb\"", label
);
29006 fprintf (file
, "\tmflr r0\n");
29007 if (TARGET_LINK_STACK
)
29010 get_ppc476_thunk_name (name
);
29011 fprintf (file
, "\tbl %s\n", name
);
29012 fprintf (file
, "%s:\n\tmflr r11\n", local_label_0
);
29016 fprintf (file
, "\tbcl 20,31,%s\n", local_label_0
);
29017 fprintf (file
, "%s:\n\tmflr r11\n", local_label_0
);
29019 fprintf (file
, "\taddis r11,r11,ha16(%s-%s)\n",
29020 lazy_ptr_name
, local_label_0
);
29021 fprintf (file
, "\tmtlr r0\n");
29022 fprintf (file
, "\t%s r12,lo16(%s-%s)(r11)\n",
29023 (TARGET_64BIT
? "ldu" : "lwzu"),
29024 lazy_ptr_name
, local_label_0
);
29025 fprintf (file
, "\tmtctr r12\n");
29026 fprintf (file
, "\tbctr\n");
29030 fprintf (file
, "\t.align 4\n");
29032 fprintf (file
, "%s:\n", stub
);
29033 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
29035 fprintf (file
, "\tlis r11,ha16(%s)\n", lazy_ptr_name
);
29036 fprintf (file
, "\t%s r12,lo16(%s)(r11)\n",
29037 (TARGET_64BIT
? "ldu" : "lwzu"),
29039 fprintf (file
, "\tmtctr r12\n");
29040 fprintf (file
, "\tbctr\n");
29043 switch_to_section (darwin_sections
[machopic_lazy_symbol_ptr_section
]);
29044 fprintf (file
, "%s:\n", lazy_ptr_name
);
29045 fprintf (file
, "\t.indirect_symbol %s\n", symbol_name
);
29046 fprintf (file
, "%sdyld_stub_binding_helper\n",
29047 (TARGET_64BIT
? DOUBLE_INT_ASM_OP
: "\t.long\t"));
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go into a reg.  This is REG if non
   zero, otherwise we allocate register(s) as necessary.  */

#define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
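/* For illustration only: SMALL_INT accepts exactly the values that fit in a
   signed 16-bit displacement, e.g.

     SMALL_INT (GEN_INT (-32768))   is true    (the unsigned sum wraps to 0)
     SMALL_INT (GEN_INT (32767))    is true    (0x7fff + 0x8000 == 0xffff)
     SMALL_INT (GEN_INT (32768))    is false   (0x8000 + 0x8000 == 0x10000)
*/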
29058 rs6000_machopic_legitimize_pic_address (rtx orig
, enum machine_mode mode
,
29063 if (reg
== NULL
&& ! reload_in_progress
&& ! reload_completed
)
29064 reg
= gen_reg_rtx (Pmode
);
29066 if (GET_CODE (orig
) == CONST
)
29070 if (GET_CODE (XEXP (orig
, 0)) == PLUS
29071 && XEXP (XEXP (orig
, 0), 0) == pic_offset_table_rtx
)
29074 gcc_assert (GET_CODE (XEXP (orig
, 0)) == PLUS
);
29076 /* Use a different reg for the intermediate value, as
29077 it will be marked UNCHANGING. */
29078 reg_temp
= !can_create_pseudo_p () ? reg
: gen_reg_rtx (Pmode
);
29079 base
= rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig
, 0), 0),
29082 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig
, 0), 1),
29085 if (GET_CODE (offset
) == CONST_INT
)
29087 if (SMALL_INT (offset
))
29088 return plus_constant (Pmode
, base
, INTVAL (offset
));
29089 else if (! reload_in_progress
&& ! reload_completed
)
29090 offset
= force_reg (Pmode
, offset
);
29093 rtx mem
= force_const_mem (Pmode
, orig
);
29094 return machopic_legitimize_pic_address (mem
, Pmode
, reg
);
29097 return gen_rtx_PLUS (Pmode
, base
, offset
);
29100 /* Fall back on generic machopic code. */
29101 return machopic_legitimize_pic_address (orig
, mode
, reg
);
/* Output a .machine directive for the Darwin assembler, and call
   the generic start_file routine.  */
29108 rs6000_darwin_file_start (void)
29110 static const struct
29114 HOST_WIDE_INT if_set
;
29116 { "ppc64", "ppc64", MASK_64BIT
},
29117 { "970", "ppc970", MASK_PPC_GPOPT
| MASK_MFCRF
| MASK_POWERPC64
},
29118 { "power4", "ppc970", 0 },
29119 { "G5", "ppc970", 0 },
29120 { "7450", "ppc7450", 0 },
29121 { "7400", "ppc7400", MASK_ALTIVEC
},
29122 { "G4", "ppc7400", 0 },
29123 { "750", "ppc750", 0 },
29124 { "740", "ppc750", 0 },
29125 { "G3", "ppc750", 0 },
29126 { "604e", "ppc604e", 0 },
29127 { "604", "ppc604", 0 },
29128 { "603e", "ppc603", 0 },
29129 { "603", "ppc603", 0 },
29130 { "601", "ppc601", 0 },
29131 { NULL
, "ppc", 0 } };
29132 const char *cpu_id
= "";
29135 rs6000_file_start ();
29136 darwin_file_start ();
29138 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
29140 if (rs6000_default_cpu
!= 0 && rs6000_default_cpu
[0] != '\0')
29141 cpu_id
= rs6000_default_cpu
;
29143 if (global_options_set
.x_rs6000_cpu_index
)
29144 cpu_id
= processor_target_table
[rs6000_cpu_index
].name
;
29146 /* Look through the mapping array. Pick the first name that either
29147 matches the argument, has a bit set in IF_SET that is also set
29148 in the target flags, or has a NULL name. */
29151 while (mapping
[i
].arg
!= NULL
29152 && strcmp (mapping
[i
].arg
, cpu_id
) != 0
29153 && (mapping
[i
].if_set
& rs6000_isa_flags
) == 0)
29156 fprintf (asm_out_file
, "\t.machine %s\n", mapping
[i
].name
);
29159 #endif /* TARGET_MACHO */
29163 rs6000_elf_reloc_rw_mask (void)
29167 else if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   This differs from default_named_section_asm_out_constructor in
   that we have special handling for -mrelocatable.  */
29180 static void rs6000_elf_asm_out_constructor (rtx
, int) ATTRIBUTE_UNUSED
;
29182 rs6000_elf_asm_out_constructor (rtx symbol
, int priority
)
29184 const char *section
= ".ctors";
29187 if (priority
!= DEFAULT_INIT_PRIORITY
)
29189 sprintf (buf
, ".ctors.%.5u",
29190 /* Invert the numbering so the linker puts us in the proper
29191 order; constructors are run from right to left, and the
29192 linker sorts in increasing order. */
29193 MAX_INIT_PRIORITY
- priority
);
29197 switch_to_section (get_section (section
, SECTION_WRITE
, NULL
));
29198 assemble_align (POINTER_SIZE
);
29200 if (TARGET_RELOCATABLE
)
29202 fputs ("\t.long (", asm_out_file
);
29203 output_addr_const (asm_out_file
, symbol
);
29204 fputs (")@fixup\n", asm_out_file
);
29207 assemble_integer (symbol
, POINTER_SIZE
/ BITS_PER_UNIT
, POINTER_SIZE
, 1);
29210 static void rs6000_elf_asm_out_destructor (rtx
, int) ATTRIBUTE_UNUSED
;
29212 rs6000_elf_asm_out_destructor (rtx symbol
, int priority
)
29214 const char *section
= ".dtors";
29217 if (priority
!= DEFAULT_INIT_PRIORITY
)
29219 sprintf (buf
, ".dtors.%.5u",
29220 /* Invert the numbering so the linker puts us in the proper
29221 order; constructors are run from right to left, and the
29222 linker sorts in increasing order. */
29223 MAX_INIT_PRIORITY
- priority
);
29227 switch_to_section (get_section (section
, SECTION_WRITE
, NULL
));
29228 assemble_align (POINTER_SIZE
);
29230 if (TARGET_RELOCATABLE
)
29232 fputs ("\t.long (", asm_out_file
);
29233 output_addr_const (asm_out_file
, symbol
);
29234 fputs (")@fixup\n", asm_out_file
);
29237 assemble_integer (symbol
, POINTER_SIZE
/ BITS_PER_UNIT
, POINTER_SIZE
, 1);
29241 rs6000_elf_declare_function_name (FILE *file
, const char *name
, tree decl
)
29243 if (TARGET_64BIT
&& DEFAULT_ABI
!= ABI_ELFv2
)
29245 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file
);
29246 ASM_OUTPUT_LABEL (file
, name
);
29247 fputs (DOUBLE_INT_ASM_OP
, file
);
29248 rs6000_output_function_entry (file
, name
);
29249 fputs (",.TOC.@tocbase,0\n\t.previous\n", file
);
29252 fputs ("\t.size\t", file
);
29253 assemble_name (file
, name
);
29254 fputs (",24\n\t.type\t.", file
);
29255 assemble_name (file
, name
);
29256 fputs (",@function\n", file
);
29257 if (TREE_PUBLIC (decl
) && ! DECL_WEAK (decl
))
29259 fputs ("\t.globl\t.", file
);
29260 assemble_name (file
, name
);
29265 ASM_OUTPUT_TYPE_DIRECTIVE (file
, name
, "function");
29266 ASM_DECLARE_RESULT (file
, DECL_RESULT (decl
));
29267 rs6000_output_function_entry (file
, name
);
29268 fputs (":\n", file
);
29272 if (TARGET_RELOCATABLE
29273 && !TARGET_SECURE_PLT
29274 && (get_pool_size () != 0 || crtl
->profile
)
29279 (*targetm
.asm_out
.internal_label
) (file
, "LCL", rs6000_pic_labelno
);
29281 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCTOC", 1);
29282 fprintf (file
, "\t.long ");
29283 assemble_name (file
, buf
);
29285 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCF", rs6000_pic_labelno
);
29286 assemble_name (file
, buf
);
29290 ASM_OUTPUT_TYPE_DIRECTIVE (file
, name
, "function");
29291 ASM_DECLARE_RESULT (file
, DECL_RESULT (decl
));
29293 if (DEFAULT_ABI
== ABI_AIX
)
29295 const char *desc_name
, *orig_name
;
29297 orig_name
= (*targetm
.strip_name_encoding
) (name
);
29298 desc_name
= orig_name
;
29299 while (*desc_name
== '.')
29302 if (TREE_PUBLIC (decl
))
29303 fprintf (file
, "\t.globl %s\n", desc_name
);
29305 fprintf (file
, "%s\n", MINIMAL_TOC_SECTION_ASM_OP
);
29306 fprintf (file
, "%s:\n", desc_name
);
29307 fprintf (file
, "\t.long %s\n", orig_name
);
29308 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file
);
29309 fputs ("\t.long 0\n", file
);
29310 fprintf (file
, "\t.previous\n");
29312 ASM_OUTPUT_LABEL (file
, name
);
29315 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED
;
29317 rs6000_elf_file_end (void)
29319 #ifdef HAVE_AS_GNU_ATTRIBUTE
29320 if (TARGET_32BIT
&& DEFAULT_ABI
== ABI_V4
)
29322 if (rs6000_passes_float
)
29323 fprintf (asm_out_file
, "\t.gnu_attribute 4, %d\n",
29324 ((TARGET_HARD_FLOAT
&& TARGET_FPRS
&& TARGET_DOUBLE_FLOAT
) ? 1
29325 : (TARGET_HARD_FLOAT
&& TARGET_FPRS
&& TARGET_SINGLE_FLOAT
) ? 3
29327 if (rs6000_passes_vector
)
29328 fprintf (asm_out_file
, "\t.gnu_attribute 8, %d\n",
29329 (TARGET_ALTIVEC_ABI
? 2
29330 : TARGET_SPE_ABI
? 3
29332 if (rs6000_returns_struct
)
29333 fprintf (asm_out_file
, "\t.gnu_attribute 12, %d\n",
29334 aix_struct_return
? 2 : 1);
29337 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
29338 if (TARGET_32BIT
|| DEFAULT_ABI
== ABI_ELFv2
)
29339 file_end_indicate_exec_stack ();
29346 rs6000_xcoff_asm_output_anchor (rtx symbol
)
29350 sprintf (buffer
, "$ + " HOST_WIDE_INT_PRINT_DEC
,
29351 SYMBOL_REF_BLOCK_OFFSET (symbol
));
29352 fprintf (asm_out_file
, "%s", SET_ASM_OP
);
29353 RS6000_OUTPUT_BASENAME (asm_out_file
, XSTR (symbol
, 0));
29354 fprintf (asm_out_file
, ",");
29355 RS6000_OUTPUT_BASENAME (asm_out_file
, buffer
);
29356 fprintf (asm_out_file
, "\n");
29360 rs6000_xcoff_asm_globalize_label (FILE *stream
, const char *name
)
29362 fputs (GLOBAL_ASM_OP
, stream
);
29363 RS6000_OUTPUT_BASENAME (stream
, name
);
29364 putc ('\n', stream
);
29367 /* A get_unnamed_decl callback, used for read-only sections. PTR
29368 points to the section string variable. */
29371 rs6000_xcoff_output_readonly_section_asm_op (const void *directive
)
29373 fprintf (asm_out_file
, "\t.csect %s[RO],%s\n",
29374 *(const char *const *) directive
,
29375 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR
);
29378 /* Likewise for read-write sections. */
29381 rs6000_xcoff_output_readwrite_section_asm_op (const void *directive
)
29383 fprintf (asm_out_file
, "\t.csect %s[RW],%s\n",
29384 *(const char *const *) directive
,
29385 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR
);
29389 rs6000_xcoff_output_tls_section_asm_op (const void *directive
)
29391 fprintf (asm_out_file
, "\t.csect %s[TL],%s\n",
29392 *(const char *const *) directive
,
29393 XCOFF_CSECT_DEFAULT_ALIGNMENT_STR
);
29396 /* A get_unnamed_section callback, used for switching to toc_section. */
29399 rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED
)
29401 if (TARGET_MINIMAL_TOC
)
29403 /* toc_section is always selected at least once from
29404 rs6000_xcoff_file_start, so this is guaranteed to
29405 always be defined once and only once in each file. */
29406 if (!toc_initialized
)
29408 fputs ("\t.toc\nLCTOC..1:\n", asm_out_file
);
29409 fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file
);
29410 toc_initialized
= 1;
29412 fprintf (asm_out_file
, "\t.csect toc_table[RW]%s\n",
29413 (TARGET_32BIT
? "" : ",3"));
29416 fputs ("\t.toc\n", asm_out_file
);
29419 /* Implement TARGET_ASM_INIT_SECTIONS. */
29422 rs6000_xcoff_asm_init_sections (void)
29424 read_only_data_section
29425 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op
,
29426 &xcoff_read_only_section_name
);
29428 private_data_section
29429 = get_unnamed_section (SECTION_WRITE
,
29430 rs6000_xcoff_output_readwrite_section_asm_op
,
29431 &xcoff_private_data_section_name
);
29434 = get_unnamed_section (SECTION_TLS
,
29435 rs6000_xcoff_output_tls_section_asm_op
,
29436 &xcoff_tls_data_section_name
);
29438 tls_private_data_section
29439 = get_unnamed_section (SECTION_TLS
,
29440 rs6000_xcoff_output_tls_section_asm_op
,
29441 &xcoff_private_data_section_name
);
29443 read_only_private_data_section
29444 = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op
,
29445 &xcoff_private_data_section_name
);
29448 = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op
, NULL
);
29450 readonly_data_section
= read_only_data_section
;
29451 exception_section
= data_section
;
29455 rs6000_xcoff_reloc_rw_mask (void)
29461 rs6000_xcoff_asm_named_section (const char *name
, unsigned int flags
,
29462 tree decl ATTRIBUTE_UNUSED
)
29465 static const char * const suffix
[4] = { "PR", "RO", "RW", "TL" };
29467 if (flags
& SECTION_CODE
)
29469 else if (flags
& SECTION_TLS
)
29471 else if (flags
& SECTION_WRITE
)
29476 fprintf (asm_out_file
, "\t.csect %s%s[%s],%u\n",
29477 (flags
& SECTION_CODE
) ? "." : "",
29478 name
, suffix
[smclass
], flags
& SECTION_ENTSIZE
);
29481 #define IN_NAMED_SECTION(DECL) \
29482 ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
29483 && DECL_SECTION_NAME (DECL) != NULL)
29486 rs6000_xcoff_select_section (tree decl
, int reloc
,
29487 unsigned HOST_WIDE_INT align
)
29489 /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
29491 if (align
> BIGGEST_ALIGNMENT
)
29493 resolve_unique_section (decl
, reloc
, true);
29494 if (IN_NAMED_SECTION (decl
))
29495 return get_named_section (decl
, NULL
, reloc
);
29498 if (decl_readonly_section (decl
, reloc
))
29500 if (TREE_PUBLIC (decl
))
29501 return read_only_data_section
;
29503 return read_only_private_data_section
;
29508 if (TREE_CODE (decl
) == VAR_DECL
&& DECL_THREAD_LOCAL_P (decl
))
29510 if (TREE_PUBLIC (decl
))
29511 return tls_data_section
;
29512 else if (bss_initializer_p (decl
))
29514 /* Convert to COMMON to emit in BSS. */
29515 DECL_COMMON (decl
) = 1;
29516 return tls_comm_section
;
29519 return tls_private_data_section
;
29523 if (TREE_PUBLIC (decl
))
29524 return data_section
;
29526 return private_data_section
;
29531 rs6000_xcoff_unique_section (tree decl
, int reloc ATTRIBUTE_UNUSED
)
29535 /* Use select_section for private data and uninitialized data with
29536 alignment <= BIGGEST_ALIGNMENT. */
29537 if (!TREE_PUBLIC (decl
)
29538 || DECL_COMMON (decl
)
29539 || (DECL_INITIAL (decl
) == NULL_TREE
29540 && DECL_ALIGN (decl
) <= BIGGEST_ALIGNMENT
)
29541 || DECL_INITIAL (decl
) == error_mark_node
29542 || (flag_zero_initialized_in_bss
29543 && initializer_zerop (DECL_INITIAL (decl
))))
29546 name
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
));
29547 name
= (*targetm
.strip_name_encoding
) (name
);
29548 set_decl_section_name (decl
, name
);
/* Select section for constant in constant pool.

   On RS/6000, all constants are in the private read-only data area.
   However, if this is being placed in the TOC it must be output as a
   toc entry.  */
29558 rs6000_xcoff_select_rtx_section (enum machine_mode mode
, rtx x
,
29559 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED
)
29561 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x
, mode
))
29562 return toc_section
;
29564 return read_only_private_data_section
;
29567 /* Remove any trailing [DS] or the like from the symbol name. */
29569 static const char *
29570 rs6000_xcoff_strip_name_encoding (const char *name
)
29575 len
= strlen (name
);
29576 if (name
[len
- 1] == ']')
29577 return ggc_alloc_string (name
, len
- 4);
29582 /* Section attributes. AIX is always PIC. */
29584 static unsigned int
29585 rs6000_xcoff_section_type_flags (tree decl
, const char *name
, int reloc
)
29587 unsigned int align
;
29588 unsigned int flags
= default_section_type_flags (decl
, name
, reloc
);
29590 /* Align to at least UNIT size. */
29591 if ((flags
& SECTION_CODE
) != 0 || !decl
|| !DECL_P (decl
))
29592 align
= MIN_UNITS_PER_WORD
;
29594 /* Increase alignment of large objects if not already stricter. */
29595 align
= MAX ((DECL_ALIGN (decl
) / BITS_PER_UNIT
),
29596 int_size_in_bytes (TREE_TYPE (decl
)) > MIN_UNITS_PER_WORD
29597 ? UNITS_PER_FP_WORD
: MIN_UNITS_PER_WORD
);
29599 return flags
| (exact_log2 (align
) & SECTION_ENTSIZE
);
/* Output at beginning of assembler file.

   Initialize the section names for the RS/6000 at this point.

   Specify filename, including full path, to assembler.

   We want to go into the TOC section so at least one .toc will be emitted.
   Also, in order to output proper .bs/.es pairs, we need at least one static
   [RW] section emitted.

   Finally, declare mcount when profiling to make the assembler happy.  */
29615 rs6000_xcoff_file_start (void)
29617 rs6000_gen_section_name (&xcoff_bss_section_name
,
29618 main_input_filename
, ".bss_");
29619 rs6000_gen_section_name (&xcoff_private_data_section_name
,
29620 main_input_filename
, ".rw_");
29621 rs6000_gen_section_name (&xcoff_read_only_section_name
,
29622 main_input_filename
, ".ro_");
29623 rs6000_gen_section_name (&xcoff_tls_data_section_name
,
29624 main_input_filename
, ".tls_");
29625 rs6000_gen_section_name (&xcoff_tbss_section_name
,
29626 main_input_filename
, ".tbss_[UL]");
29628 fputs ("\t.file\t", asm_out_file
);
29629 output_quoted_string (asm_out_file
, main_input_filename
);
29630 fputc ('\n', asm_out_file
);
29631 if (write_symbols
!= NO_DEBUG
)
29632 switch_to_section (private_data_section
);
29633 switch_to_section (text_section
);
29635 fprintf (asm_out_file
, "\t.extern %s\n", RS6000_MCOUNT
);
29636 rs6000_file_start ();
/* Output at end of assembler file.
   On the RS/6000, referencing data should automatically pull in text.  */
29643 rs6000_xcoff_file_end (void)
29645 switch_to_section (text_section
);
29646 fputs ("_section_.text:\n", asm_out_file
);
29647 switch_to_section (data_section
);
29648 fputs (TARGET_32BIT
29649 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
29653 struct declare_alias_data
29656 bool function_descriptor
;
29659 /* Declare alias N. A helper function for for_node_and_aliases. */
29662 rs6000_declare_alias (struct symtab_node
*n
, void *d
)
29664 struct declare_alias_data
*data
= (struct declare_alias_data
*)d
;
29665 /* Main symbol is output specially, because varasm machinery does part of
29666 the job for us - we do not need to declare .globl/lglobs and such. */
29667 if (!n
->alias
|| n
->weakref
)
29670 if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n
->decl
)))
29673 /* Prevent assemble_alias from trying to use .set pseudo operation
29674 that does not behave as expected by the middle-end. */
29675 TREE_ASM_WRITTEN (n
->decl
) = true;
29677 const char *name
= IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n
->decl
));
29678 char *buffer
= (char *) alloca (strlen (name
) + 2);
29680 int dollar_inside
= 0;
29682 strcpy (buffer
, name
);
29683 p
= strchr (buffer
, '$');
29687 p
= strchr (p
+ 1, '$');
29689 if (TREE_PUBLIC (n
->decl
))
29691 if (!RS6000_WEAK
|| !DECL_WEAK (n
->decl
))
29693 if (dollar_inside
) {
29694 if (data
->function_descriptor
)
29695 fprintf(data
->file
, "\t.rename .%s,\".%s\"\n", buffer
, name
);
29697 fprintf(data
->file
, "\t.rename %s,\"%s\"\n", buffer
, name
);
29699 if (data
->function_descriptor
)
29700 fputs ("\t.globl .", data
->file
);
29702 fputs ("\t.globl ", data
->file
);
29703 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
29704 putc ('\n', data
->file
);
29706 else if (DECL_WEAK (n
->decl
) && !data
->function_descriptor
)
29707 ASM_WEAKEN_DECL (data
->file
, n
->decl
, name
, NULL
);
29713 if (data
->function_descriptor
)
29714 fprintf(data
->file
, "\t.rename %s,\"%s\"\n", buffer
, name
);
29716 fprintf(data
->file
, "\t.rename .%s,\".%s\"\n", buffer
, name
);
29718 if (data
->function_descriptor
)
29719 fputs ("\t.lglobl .", data
->file
);
29721 fputs ("\t.lglobl ", data
->file
);
29722 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
29723 putc ('\n', data
->file
);
29725 if (data
->function_descriptor
)
29726 fputs (".", data
->file
);
29727 RS6000_OUTPUT_BASENAME (data
->file
, buffer
);
29728 fputs (":\n", data
->file
);
/* This macro produces the initial definition of a function name.
   On the RS/6000, we need to place an extra '.' in the function name and
   output the function descriptor.
   Dollar signs are converted to underscores.

   The csect for the function will have already been created when
   text_section was selected.  We do have to go back to that csect, however.

   The third and fourth parameters to the .function pseudo-op (16 and 044)
   are placeholders which no longer have any use.

   Because AIX assembler's .set command has unexpected semantics, we output
   all aliases as alternative labels in front of the definition.  */
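/* For illustration only (a sketch of the 32-bit output produced below for a
   public function 'foo' with no aliases and no '$' in its name):

             .globl .foo
             .csect foo[DS]
     foo:
             .long .foo, TOC[tc0], 0
             .csect .text[PR]
     .foo:

   i.e. the descriptor csect 'foo[DS]' holds the entry point, the TOC anchor
   and a zero environment word, while the code itself is emitted under the
   '.foo' label in the text csect.  */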
29747 rs6000_xcoff_declare_function_name (FILE *file
, const char *name
, tree decl
)
29749 char *buffer
= (char *) alloca (strlen (name
) + 1);
29751 int dollar_inside
= 0;
29752 struct declare_alias_data data
= {file
, false};
29754 strcpy (buffer
, name
);
29755 p
= strchr (buffer
, '$');
29759 p
= strchr (p
+ 1, '$');
29761 if (TREE_PUBLIC (decl
))
29763 if (!RS6000_WEAK
|| !DECL_WEAK (decl
))
29765 if (dollar_inside
) {
29766 fprintf(file
, "\t.rename .%s,\".%s\"\n", buffer
, name
);
29767 fprintf(file
, "\t.rename %s,\"%s\"\n", buffer
, name
);
29769 fputs ("\t.globl .", file
);
29770 RS6000_OUTPUT_BASENAME (file
, buffer
);
29776 if (dollar_inside
) {
29777 fprintf(file
, "\t.rename .%s,\".%s\"\n", buffer
, name
);
29778 fprintf(file
, "\t.rename %s,\"%s\"\n", buffer
, name
);
29780 fputs ("\t.lglobl .", file
);
29781 RS6000_OUTPUT_BASENAME (file
, buffer
);
29784 fputs ("\t.csect ", file
);
29785 RS6000_OUTPUT_BASENAME (file
, buffer
);
29786 fputs (TARGET_32BIT
? "[DS]\n" : "[DS],3\n", file
);
29787 RS6000_OUTPUT_BASENAME (file
, buffer
);
29788 fputs (":\n", file
);
29789 symtab_node::get (decl
)->call_for_symbol_and_aliases (rs6000_declare_alias
, &data
, true);
29790 fputs (TARGET_32BIT
? "\t.long ." : "\t.llong .", file
);
29791 RS6000_OUTPUT_BASENAME (file
, buffer
);
29792 fputs (", TOC[tc0], 0\n", file
);
29794 switch_to_section (function_section (decl
));
29796 RS6000_OUTPUT_BASENAME (file
, buffer
);
29797 fputs (":\n", file
);
29798 data
.function_descriptor
= true;
29799 symtab_node::get (decl
)->call_for_symbol_and_aliases (rs6000_declare_alias
, &data
, true);
29800 if (write_symbols
!= NO_DEBUG
&& !DECL_IGNORED_P (decl
))
29801 xcoffout_declare_function (file
, decl
, buffer
);
/* This macro produces the initial definition of an object (variable) name.
   Because AIX assembler's .set command has unexpected semantics, we output
   all aliases as alternative labels in front of the definition.  */
29810 rs6000_xcoff_declare_object_name (FILE *file
, const char *name
, tree decl
)
29812 struct declare_alias_data data
= {file
, false};
29813 RS6000_OUTPUT_BASENAME (file
, name
);
29814 fputs (":\n", file
);
29815 symtab_node::get (decl
)->call_for_symbol_and_aliases (rs6000_declare_alias
, &data
, true);
29820 rs6000_xcoff_encode_section_info (tree decl
, rtx rtl
, int first
)
29825 default_encode_section_info (decl
, rtl
, first
);
29827 /* Careful not to prod global register variables. */
29830 symbol
= XEXP (rtl
, 0);
29831 if (GET_CODE (symbol
) != SYMBOL_REF
)
29834 flags
= SYMBOL_REF_FLAGS (symbol
);
29836 if (TREE_CODE (decl
) == VAR_DECL
&& DECL_THREAD_LOCAL_P (decl
))
29837 flags
&= ~SYMBOL_FLAG_HAS_BLOCK_INFO
;
29839 SYMBOL_REF_FLAGS (symbol
) = flags
;
29841 #endif /* HAVE_AS_TLS */
29842 #endif /* TARGET_XCOFF */
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */
29849 rs6000_rtx_costs (rtx x
, int code
, int outer_code
, int opno ATTRIBUTE_UNUSED
,
29850 int *total
, bool speed
)
29852 enum machine_mode mode
= GET_MODE (x
);
29856 /* On the RS/6000, if it is valid in the insn, it is free. */
29858 if (((outer_code
== SET
29859 || outer_code
== PLUS
29860 || outer_code
== MINUS
)
29861 && (satisfies_constraint_I (x
)
29862 || satisfies_constraint_L (x
)))
29863 || (outer_code
== AND
29864 && (satisfies_constraint_K (x
)
29866 ? satisfies_constraint_L (x
)
29867 : satisfies_constraint_J (x
))
29868 || mask_operand (x
, mode
)
29870 && mask64_operand (x
, DImode
))))
29871 || ((outer_code
== IOR
|| outer_code
== XOR
)
29872 && (satisfies_constraint_K (x
)
29874 ? satisfies_constraint_L (x
)
29875 : satisfies_constraint_J (x
))))
29876 || outer_code
== ASHIFT
29877 || outer_code
== ASHIFTRT
29878 || outer_code
== LSHIFTRT
29879 || outer_code
== ROTATE
29880 || outer_code
== ROTATERT
29881 || outer_code
== ZERO_EXTRACT
29882 || (outer_code
== MULT
29883 && satisfies_constraint_I (x
))
29884 || ((outer_code
== DIV
|| outer_code
== UDIV
29885 || outer_code
== MOD
|| outer_code
== UMOD
)
29886 && exact_log2 (INTVAL (x
)) >= 0)
29887 || (outer_code
== COMPARE
29888 && (satisfies_constraint_I (x
)
29889 || satisfies_constraint_K (x
)))
29890 || ((outer_code
== EQ
|| outer_code
== NE
)
29891 && (satisfies_constraint_I (x
)
29892 || satisfies_constraint_K (x
)
29894 ? satisfies_constraint_L (x
)
29895 : satisfies_constraint_J (x
))))
29896 || (outer_code
== GTU
29897 && satisfies_constraint_I (x
))
29898 || (outer_code
== LTU
29899 && satisfies_constraint_P (x
)))
29904 else if ((outer_code
== PLUS
29905 && reg_or_add_cint_operand (x
, VOIDmode
))
29906 || (outer_code
== MINUS
29907 && reg_or_sub_cint_operand (x
, VOIDmode
))
29908 || ((outer_code
== SET
29909 || outer_code
== IOR
29910 || outer_code
== XOR
)
29912 & ~ (unsigned HOST_WIDE_INT
) 0xffffffff) == 0))
29914 *total
= COSTS_N_INSNS (1);
29920 case CONST_WIDE_INT
:
29925 /* When optimizing for size, MEM should be slightly more expensive
29926 than generating address, e.g., (plus (reg) (const)).
29927 L1 cache latency is about two instructions. */
29928 *total
= !speed
? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
29937 if (FLOAT_MODE_P (mode
))
29938 *total
= rs6000_cost
->fp
;
29940 *total
= COSTS_N_INSNS (1);
29944 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
29945 && satisfies_constraint_I (XEXP (x
, 1)))
29947 if (INTVAL (XEXP (x
, 1)) >= -256
29948 && INTVAL (XEXP (x
, 1)) <= 255)
29949 *total
= rs6000_cost
->mulsi_const9
;
29951 *total
= rs6000_cost
->mulsi_const
;
29953 else if (mode
== SFmode
)
29954 *total
= rs6000_cost
->fp
;
29955 else if (FLOAT_MODE_P (mode
))
29956 *total
= rs6000_cost
->dmul
;
29957 else if (mode
== DImode
)
29958 *total
= rs6000_cost
->muldi
;
29960 *total
= rs6000_cost
->mulsi
;
29964 if (mode
== SFmode
)
29965 *total
= rs6000_cost
->fp
;
29967 *total
= rs6000_cost
->dmul
;
29972 if (FLOAT_MODE_P (mode
))
29974 *total
= mode
== DFmode
? rs6000_cost
->ddiv
29975 : rs6000_cost
->sdiv
;
29982 if (GET_CODE (XEXP (x
, 1)) == CONST_INT
29983 && exact_log2 (INTVAL (XEXP (x
, 1))) >= 0)
29985 if (code
== DIV
|| code
== MOD
)
29987 *total
= COSTS_N_INSNS (2);
29990 *total
= COSTS_N_INSNS (1);
29994 if (GET_MODE (XEXP (x
, 1)) == DImode
)
29995 *total
= rs6000_cost
->divdi
;
29997 *total
= rs6000_cost
->divsi
;
29999 /* Add in shift and subtract for MOD. */
30000 if (code
== MOD
|| code
== UMOD
)
30001 *total
+= COSTS_N_INSNS (2);
30006 *total
= COSTS_N_INSNS (4);
30010 *total
= COSTS_N_INSNS (TARGET_POPCNTD
? 1 : 6);
30014 *total
= COSTS_N_INSNS (TARGET_CMPB
? 2 : 6);
30018 if (outer_code
== AND
|| outer_code
== IOR
|| outer_code
== XOR
)
30030 *total
= COSTS_N_INSNS (1);
30038 /* Handle mul_highpart. */
30039 if (outer_code
== TRUNCATE
30040 && GET_CODE (XEXP (x
, 0)) == MULT
)
30042 if (mode
== DImode
)
30043 *total
= rs6000_cost
->muldi
;
30045 *total
= rs6000_cost
->mulsi
;
30048 else if (outer_code
== AND
)
30051 *total
= COSTS_N_INSNS (1);
30056 if (GET_CODE (XEXP (x
, 0)) == MEM
)
30059 *total
= COSTS_N_INSNS (1);
30065 if (!FLOAT_MODE_P (mode
))
30067 *total
= COSTS_N_INSNS (1);
30073 case UNSIGNED_FLOAT
:
30076 case FLOAT_TRUNCATE
:
30077 *total
= rs6000_cost
->fp
;
30081 if (mode
== DFmode
)
30084 *total
= rs6000_cost
->fp
;
30088 switch (XINT (x
, 1))
30091 *total
= rs6000_cost
->fp
;
30103 *total
= COSTS_N_INSNS (1);
30106 else if (FLOAT_MODE_P (mode
)
30107 && TARGET_PPC_GFXOPT
&& TARGET_HARD_FLOAT
&& TARGET_FPRS
)
30109 *total
= rs6000_cost
->fp
;
30118 /* Carry bit requires mode == Pmode.
30119 NEG or PLUS already counted so only add one. */
30121 && (outer_code
== NEG
|| outer_code
== PLUS
))
30123 *total
= COSTS_N_INSNS (1);
30126 if (outer_code
== SET
)
30128 if (XEXP (x
, 1) == const0_rtx
)
30130 if (TARGET_ISEL
&& !TARGET_MFCRF
)
30131 *total
= COSTS_N_INSNS (8);
30133 *total
= COSTS_N_INSNS (2);
30136 else if (mode
== Pmode
)
30138 *total
= COSTS_N_INSNS (3);
30147 if (outer_code
== SET
&& (XEXP (x
, 1) == const0_rtx
))
30149 if (TARGET_ISEL
&& !TARGET_MFCRF
)
30150 *total
= COSTS_N_INSNS (8);
30152 *total
= COSTS_N_INSNS (2);
30156 if (outer_code
== COMPARE
)
/* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost.  */
30173 rs6000_debug_rtx_costs (rtx x
, int code
, int outer_code
, int opno
, int *total
,
30176 bool ret
= rs6000_rtx_costs (x
, code
, outer_code
, opno
, total
, speed
);
30179 "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
30180 "opno = %d, total = %d, speed = %s, x:\n",
30181 ret
? "complete" : "scan inner",
30182 GET_RTX_NAME (code
),
30183 GET_RTX_NAME (outer_code
),
30186 speed
? "true" : "false");
30193 /* Debug form of ADDRESS_COST that is selected if -mdebug=cost. */
30196 rs6000_debug_address_cost (rtx x
, enum machine_mode mode
,
30197 addr_space_t as
, bool speed
)
30199 int ret
= TARGET_ADDRESS_COST (x
, mode
, as
, speed
);
30201 fprintf (stderr
, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
30202 ret
, speed
? "true" : "false");
/* A C expression returning the cost of moving data from a register of class
   CLASS1 to one of CLASS2.  */
30213 rs6000_register_move_cost (enum machine_mode mode
,
30214 reg_class_t from
, reg_class_t to
)
30218 if (TARGET_DEBUG_COST
)
30221 /* Moves from/to GENERAL_REGS. */
30222 if (reg_classes_intersect_p (to
, GENERAL_REGS
)
30223 || reg_classes_intersect_p (from
, GENERAL_REGS
))
30225 reg_class_t rclass
= from
;
30227 if (! reg_classes_intersect_p (to
, GENERAL_REGS
))
30230 if (rclass
== FLOAT_REGS
|| rclass
== ALTIVEC_REGS
|| rclass
== VSX_REGS
)
30231 ret
= (rs6000_memory_move_cost (mode
, rclass
, false)
30232 + rs6000_memory_move_cost (mode
, GENERAL_REGS
, false));
30234 /* It's more expensive to move CR_REGS than CR0_REGS because of the
30236 else if (rclass
== CR_REGS
)
30239 /* For those processors that have slow LR/CTR moves, make them more
30240 expensive than memory in order to bias spills to memory .*/
30241 else if ((rs6000_cpu
== PROCESSOR_POWER6
30242 || rs6000_cpu
== PROCESSOR_POWER7
30243 || rs6000_cpu
== PROCESSOR_POWER8
)
30244 && reg_classes_intersect_p (rclass
, LINK_OR_CTR_REGS
))
30245 ret
= 6 * hard_regno_nregs
[0][mode
];
30248 /* A move will cost one instruction per GPR moved. */
30249 ret
= 2 * hard_regno_nregs
[0][mode
];
30252 /* If we have VSX, we can easily move between FPR or Altivec registers. */
30253 else if (VECTOR_MEM_VSX_P (mode
)
30254 && reg_classes_intersect_p (to
, VSX_REGS
)
30255 && reg_classes_intersect_p (from
, VSX_REGS
))
30256 ret
= 2 * hard_regno_nregs
[32][mode
];
30258 /* Moving between two similar registers is just one instruction. */
30259 else if (reg_classes_intersect_p (to
, from
))
30260 ret
= (mode
== TFmode
|| mode
== TDmode
) ? 4 : 2;
30262 /* Everything else has to go through GENERAL_REGS. */
30264 ret
= (rs6000_register_move_cost (mode
, GENERAL_REGS
, to
)
30265 + rs6000_register_move_cost (mode
, from
, GENERAL_REGS
));
30267 if (TARGET_DEBUG_COST
)
30269 if (dbg_cost_ctrl
== 1)
30271 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
30272 ret
, GET_MODE_NAME (mode
), reg_class_names
[from
],
30273 reg_class_names
[to
]);
/* A C expression returning the cost of moving data of MODE from a register to

rs6000_memory_move_cost (enum machine_mode mode, reg_class_t rclass,
                         bool in ATTRIBUTE_UNUSED)
  if (TARGET_DEBUG_COST)

  if (reg_classes_intersect_p (rclass, GENERAL_REGS))
    ret = 4 * hard_regno_nregs[0][mode];
  else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
            || reg_classes_intersect_p (rclass, VSX_REGS)))
    ret = 4 * hard_regno_nregs[32][mode];
  else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
    ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
    ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);

  if (TARGET_DEBUG_COST)
      if (dbg_cost_ctrl == 1)
                 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
                 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
/* Returns a code for a target-specific builtin that implements
   reciprocal of the function, or NULL_TREE if not available.  */

rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
                           bool sqrt ATTRIBUTE_UNUSED)
  if (optimize_insn_for_size_p ())

      case VSX_BUILTIN_XVSQRTDP:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
        return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];

      case VSX_BUILTIN_XVSQRTSP:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
        return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];

      case BUILT_IN_SQRT:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
        return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];

      case BUILT_IN_SQRTF:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
        return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];
/* Load up a constant.  If the mode is a vector mode, splat the value across
   all of the vector elements.  */

rs6000_load_constant_and_splat (enum machine_mode mode, REAL_VALUE_TYPE dconst)

  if (mode == SFmode || mode == DFmode)
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
      reg = force_reg (mode, d);
  else if (mode == V4SFmode)
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
      rtvec v = gen_rtvec (4, d, d, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
  else if (mode == V2DFmode)
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
      rtvec v = gen_rtvec (2, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    gcc_unreachable ();
/* Generate an FMA instruction.  */

rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
  enum machine_mode mode = GET_MODE (target);

  dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
  gcc_assert (dst != NULL);

    emit_move_insn (target, dst);
/* Generate a FMSUB instruction: dst = fma(m1, m2, -a).  */

rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
  enum machine_mode mode = GET_MODE (target);

  /* Altivec does not support fms directly;
     generate in terms of fma in that case.  */
  if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
    dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
      a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
      dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
  gcc_assert (dst != NULL);

    emit_move_insn (target, dst);
/* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a).  */

rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
  enum machine_mode mode = GET_MODE (dst);

  /* This is a tad more complicated, since the fnma_optab is for
     a different expression: fma(-m1, m2, a), which is the same
     thing except in the case of signed zeros.

     Fortunately we know that if FMA is supported that FNMSUB is
     also supported in the ISA.  Just expand it directly.  */

  gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);

  r = gen_rtx_NEG (mode, a);
  r = gen_rtx_FMA (mode, m1, m2, r);
  r = gen_rtx_NEG (mode, r);
  emit_insn (gen_rtx_SET (VOIDmode, dst, r));
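/* As the comment above notes, dst = -fma (m1, m2, -a); algebraically this is
   -(m1 * m2 - a) = a - m1 * m2, i.e. a fused negative multiply-subtract.  */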
/* Newton-Raphson approximation of floating point divide DST = N/D.  If NOTE_P,
   add a reg_note saying that this was a division.  Support both scalar and
   vector divide.  Assumes no trapping math and finite arguments.  */

rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
  enum machine_mode mode = GET_MODE (dst);
  rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;

  /* Low precision estimates guarantee 5 bits of accuracy.  High
     precision estimates guarantee 14 bits of accuracy.  SFmode
     requires 23 bits of accuracy.  DFmode requires 52 bits of
     accuracy.  Each pass at least doubles the accuracy, leading
     to the following.  */
  int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
  if (mode == DFmode || mode == V2DFmode)

  enum insn_code code = optab_handler (smul_optab, mode);
  insn_gen_fn gen_mul = GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  one = rs6000_load_constant_and_splat (mode, dconst1);

  /* x0 = 1./d estimate */
  x0 = gen_reg_rtx (mode);
  emit_insn (gen_rtx_SET (VOIDmode, x0,
                          gen_rtx_UNSPEC (mode, gen_rtvec (1, d),

  /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i).  */

      /* e0 = 1. - d * x0  */
      e0 = gen_reg_rtx (mode);
      rs6000_emit_nmsub (e0, d, x0, one);

      /* x1 = x0 + e0 * x0  */
      x1 = gen_reg_rtx (mode);
      rs6000_emit_madd (x1, e0, x0, x0);

      for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
           ++i, xprev = xnext, eprev = enext) {

          /* enext = eprev * eprev  */
          enext = gen_reg_rtx (mode);
          emit_insn (gen_mul (enext, eprev, eprev));

          /* xnext = xprev + enext * xprev  */
          xnext = gen_reg_rtx (mode);
          rs6000_emit_madd (xnext, enext, xprev, xprev);

  /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i).  */

  /* u = n * xprev  */
  u = gen_reg_rtx (mode);
  emit_insn (gen_mul (u, n, xprev));

  /* v = n - (d * u)  */
  v = gen_reg_rtx (mode);
  rs6000_emit_nmsub (v, d, u, n);

  /* dst = (v * xprev) + u  */
  rs6000_emit_madd (dst, v, xprev, u);

    add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
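/* Worked example of the pass count above: a high-precision (14-bit) estimate
   reaches at least 28 correct bits after one pass, covering SFmode's 23 bits;
   a low-precision (5-bit) estimate needs three passes (5 -> 10 -> 20 -> 40
   bits), since each pass at least doubles the accuracy.  */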
/* Newton-Raphson approximation of single/double-precision floating point
   rsqrt.  Assumes no trapping math and finite arguments.  */

rs6000_emit_swrsqrt (rtx dst, rtx src)
  enum machine_mode mode = GET_MODE (src);
  rtx x0 = gen_reg_rtx (mode);
  rtx y = gen_reg_rtx (mode);

  /* Low precision estimates guarantee 5 bits of accuracy.  High
     precision estimates guarantee 14 bits of accuracy.  SFmode
     requires 23 bits of accuracy.  DFmode requires 52 bits of
     accuracy.  Each pass at least doubles the accuracy, leading
     to the following.  */
  int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
  if (mode == DFmode || mode == V2DFmode)

  REAL_VALUE_TYPE dconst3_2;

  enum insn_code code = optab_handler (smul_optab, mode);
  insn_gen_fn gen_mul = GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  /* Load up the constant 1.5 either as a scalar, or as a vector.  */
  real_from_integer (&dconst3_2, VOIDmode, 3, SIGNED);
  SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);

  halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);

  /* x0 = rsqrt estimate */
  emit_insn (gen_rtx_SET (VOIDmode, x0,
                          gen_rtx_UNSPEC (mode, gen_rtvec (1, src),

  /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
  rs6000_emit_msub (y, src, halfthree, src);

  for (i = 0; i < passes; i++)
      rtx x1 = gen_reg_rtx (mode);
      rtx u = gen_reg_rtx (mode);
      rtx v = gen_reg_rtx (mode);

      /* x1 = x0 * (1.5 - y * (x0 * x0)) */
      emit_insn (gen_mul (u, x0, x0));
      rs6000_emit_nmsub (v, y, u, halfthree);
      emit_insn (gen_mul (x1, x0, v));

  emit_move_insn (dst, x0);
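/* Note how the constant 1.5 is built without a memory load above: dconst3_2
   starts as the integer 3 (binary 1.1 x 2^1), and dropping its exponent by
   one gives 1.1 x 2^0 = 1.5, which is then splatted for vector modes.  */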
/* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
   (Power7) targets.  DST is the target, and SRC is the argument operand.  */

rs6000_emit_popcount (rtx dst, rtx src)
  enum machine_mode mode = GET_MODE (dst);

  /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can.  */
  if (TARGET_POPCNTD)
      if (mode == SImode)
        emit_insn (gen_popcntdsi2 (dst, src));
        emit_insn (gen_popcntddi2 (dst, src));

  tmp1 = gen_reg_rtx (mode);

  if (mode == SImode)
      emit_insn (gen_popcntbsi2 (tmp1, src));
      tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
      tmp2 = force_reg (SImode, tmp2);
      emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));

      emit_insn (gen_popcntbdi2 (tmp1, src));
      tmp2 = expand_mult (DImode, tmp1,
                          GEN_INT ((HOST_WIDE_INT)
                                   0x01010101 << 32 | 0x01010101),
      tmp2 = force_reg (DImode, tmp2);
      emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
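/* For illustration, the SImode fallback above computes the scalar equivalent
   of

     tmp1 = per-byte population counts of src   (popcntb)
     dst  = (tmp1 * 0x01010101) >> 24;

   the multiply accumulates the four byte counts into the most significant
   byte and the shift extracts it; the DImode variant uses the eight-byte
   constant and a shift by 56.  */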
/* Emit parity intrinsic on TARGET_POPCNTB targets.  DST is the
   target, and SRC is the argument operand.  */

rs6000_emit_parity (rtx dst, rtx src)
  enum machine_mode mode = GET_MODE (dst);

  tmp = gen_reg_rtx (mode);

  /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can.  */
      if (mode == SImode)
          emit_insn (gen_popcntbsi2 (tmp, src));
          emit_insn (gen_paritysi2_cmpb (dst, tmp));
          emit_insn (gen_popcntbdi2 (tmp, src));
          emit_insn (gen_paritydi2_cmpb (dst, tmp));

  if (mode == SImode)
      /* Is mult+shift >= shift+xor+shift+xor?  */
      if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
          rtx tmp1, tmp2, tmp3, tmp4;

          tmp1 = gen_reg_rtx (SImode);
          emit_insn (gen_popcntbsi2 (tmp1, src));

          tmp2 = gen_reg_rtx (SImode);
          emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
          tmp3 = gen_reg_rtx (SImode);
          emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));

          tmp4 = gen_reg_rtx (SImode);
          emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
          emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));

        rs6000_emit_popcount (tmp, src);
      emit_insn (gen_andsi3 (dst, tmp, const1_rtx));

      /* Is mult+shift >= shift+xor+shift+xor+shift+xor?  */
      if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
          rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;

          tmp1 = gen_reg_rtx (DImode);
          emit_insn (gen_popcntbdi2 (tmp1, src));

          tmp2 = gen_reg_rtx (DImode);
          emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
          tmp3 = gen_reg_rtx (DImode);
          emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));

          tmp4 = gen_reg_rtx (DImode);
          emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
          tmp5 = gen_reg_rtx (DImode);
          emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));

          tmp6 = gen_reg_rtx (DImode);
          emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
          emit_insn (gen_xordi3 (tmp, tmp5, tmp6));

        rs6000_emit_popcount (tmp, src);
      emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
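/* The shift/xor fallback above folds the per-byte counts from popcntb in
   halves (by 16 then 8 bits for SImode; by 32, 16, then 8 bits for DImode);
   since xor preserves parity, ANDing the final byte with 1 yields the parity
   of the whole word.  */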
/* Expand an Altivec constant permutation for little endian mode.
   There are two issues: First, the two input operands must be
   swapped so that together they form a double-wide array in LE
   order.  Second, the vperm instruction has surprising behavior
   in LE mode:  it interprets the elements of the source vectors
   in BE mode ("left to right") and interprets the elements of
   the destination vector in LE mode ("right to left").  To
   correct for this, we must subtract each element of the permute
   control vector from 31.

   For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
   with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
   We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
   serve as the permute control vector.  Then, in BE mode,

   places the desired result in vr9.  However, in LE mode the
   vector contents will be

     vr10 = 00000003 00000002 00000001 00000000
     vr11 = 00000007 00000006 00000005 00000004

   The result of the vperm using the same permute control vector is

     vr9  = 05000000 07000000 01000000 03000000

   That is, the leftmost 4 bytes of vr10 are interpreted as the
   source for the rightmost 4 bytes of vr9, and so on.

   If we change the permute control vector to

     vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}

     vr9  = 00000006 00000004 00000002 00000000.  */

altivec_expand_vec_perm_const_le (rtx operands[4])

  rtx constv, unspec;
  rtx target = operands[0];
  rtx op0 = operands[1];
  rtx op1 = operands[2];
  rtx sel = operands[3];

  /* Unpack and adjust the constant selector.  */
  for (i = 0; i < 16; ++i)
      rtx e = XVECEXP (sel, 0, i);
      unsigned int elt = 31 - (INTVAL (e) & 31);
      perm[i] = GEN_INT (elt);

  /* Expand to a permute, swapping the inputs and using the
     adjusted selector.  */
    op0 = force_reg (V16QImode, op0);
    op1 = force_reg (V16QImode, op1);

  constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
  constv = force_reg (V16QImode, constv);
  unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),

  if (!REG_P (target))
      rtx tmp = gen_reg_rtx (V16QImode);
      emit_move_insn (tmp, unspec);

    emit_move_insn (target, unspec);
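/* For example, selector element 1 in the BE numbering is adjusted to
   31 - 1 = 30, which is how the control vector {0,1,2,3,...} in the comment
   preceding this function becomes {31,30,29,28,...}.  */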
/* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
   permute control vector.  But here it's not a constant, so we must
   generate a vector NAND or NOR to do the adjustment.  */

altivec_expand_vec_perm_le (rtx operands[4])
  rtx notx, iorx, unspec;
  rtx target = operands[0];
  rtx op0 = operands[1];
  rtx op1 = operands[2];
  rtx sel = operands[3];

  rtx norreg = gen_reg_rtx (V16QImode);
  enum machine_mode mode = GET_MODE (target);

  /* Get everything in regs so the pattern matches.  */
    op0 = force_reg (mode, op0);
    op1 = force_reg (mode, op1);
    sel = force_reg (V16QImode, sel);
  if (!REG_P (target))
    tmp = gen_reg_rtx (mode);

  /* Invert the selector with a VNAND if available, else a VNOR.
     The VNAND is preferred for future fusion opportunities.  */
  notx = gen_rtx_NOT (V16QImode, sel);
  iorx = (TARGET_P8_VECTOR
          ? gen_rtx_IOR (V16QImode, notx, notx)
          : gen_rtx_AND (V16QImode, notx, notx));
  emit_insn (gen_rtx_SET (VOIDmode, norreg, iorx));

  /* Permute with operands reversed and adjusted selector.  */
  unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),

  /* Copy into target, possibly by way of a register.  */
  if (!REG_P (target))
      emit_move_insn (tmp, unspec);

    emit_move_insn (target, unspec);
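/* The NAND/NOR trick works because only the low five bits of each selector
   byte matter to vperm: 31 - e is congruent to ~e modulo 32, so a bitwise
   complement of the selector performs the same adjustment as the constant
   case above.  */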
/* Expand an Altivec constant permutation.  Return true if we match
   an efficient implementation; false to fall back to VPERM.  */

altivec_expand_vec_perm_const (rtx operands[4])
  struct altivec_perm_insn {
    HOST_WIDE_INT mask;
    enum insn_code impl;
    unsigned char perm[16];
  static const struct altivec_perm_insn patterns[] = {
    { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
      {  1,  3,  5,  7,  9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
    { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
      {  2,  3,  6,  7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
       : CODE_FOR_altivec_vmrglb_direct),
      {  0, 16,  1, 17,  2, 18,  3, 19,  4, 20,  5, 21,  6, 22,  7, 23 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
       : CODE_FOR_altivec_vmrglh_direct),
      {  0,  1, 16, 17,  2,  3, 18, 19,  4,  5, 20, 21,  6,  7, 22, 23 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
       : CODE_FOR_altivec_vmrglw_direct),
      {  0,  1,  2,  3, 16, 17, 18, 19,  4,  5,  6,  7, 20, 21, 22, 23 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
       : CODE_FOR_altivec_vmrghb_direct),
      {  8, 24,  9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
       : CODE_FOR_altivec_vmrghh_direct),
      {  8,  9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
       : CODE_FOR_altivec_vmrghw_direct),
      {  8,  9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
    { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew,
      {  0,  1,  2,  3, 16, 17, 18, 19,  8,  9, 10, 11, 24, 25, 26, 27 } },
    { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgow,
      {  4,  5,  6,  7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }

  unsigned int i, j, elt, which;
  unsigned char perm[16];
  rtx target, op0, op1, sel, x;

  target = operands[0];

  /* Unpack the constant selector.  */
  for (i = which = 0; i < 16; ++i)
      rtx e = XVECEXP (sel, 0, i);
      elt = INTVAL (e) & 31;
      which |= (elt < 16 ? 1 : 2);

  /* Simplify the constant selector based on operands.  */
      gcc_unreachable ();

      if (!rtx_equal_p (op0, op1))
        for (i = 0; i < 16; ++i)

  /* Look for splat patterns.  */
      for (i = 0; i < 16; ++i)
        if (perm[i] != elt)
          if (!BYTES_BIG_ENDIAN)
          emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));

      for (i = 0; i < 16; i += 2)
        if (perm[i] != elt || perm[i + 1] != elt + 1)
          int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
          x = gen_reg_rtx (V8HImode);
          emit_insn (gen_altivec_vsplth_direct (x, gen_lowpart (V8HImode, op0),
          emit_move_insn (target, gen_lowpart (V16QImode, x));

      for (i = 0; i < 16; i += 4)
            || perm[i + 1] != elt + 1
            || perm[i + 2] != elt + 2
            || perm[i + 3] != elt + 3)
          int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
          x = gen_reg_rtx (V4SImode);
          emit_insn (gen_altivec_vspltw_direct (x, gen_lowpart (V4SImode, op0),
          emit_move_insn (target, gen_lowpart (V16QImode, x));

  /* Look for merge and pack patterns.  */
  for (j = 0; j < ARRAY_SIZE (patterns); ++j)

      if ((patterns[j].mask & rs6000_isa_flags) == 0)

      elt = patterns[j].perm[0];
      if (perm[0] == elt)
      else if (perm[0] == elt + 16)

      for (i = 1; i < 16; ++i)
          elt = patterns[j].perm[i];
            elt = (elt >= 16 ? elt - 16 : elt + 16);
          else if (one_vec && elt >= 16)
          if (perm[i] != elt)

          enum insn_code icode = patterns[j].impl;
          enum machine_mode omode = insn_data[icode].operand[0].mode;
          enum machine_mode imode = insn_data[icode].operand[1].mode;

          /* For little-endian, don't use vpkuwum and vpkuhum if the
             underlying vector type is not V4SI and V8HI, respectively.
             For example, using vpkuwum with a V8HI picks up the even
             halfwords (BE numbering) when the even halfwords (LE
             numbering) are what we need.  */
          if (!BYTES_BIG_ENDIAN
              && icode == CODE_FOR_altivec_vpkuwum_direct
              && ((GET_CODE (op0) == REG
                   && GET_MODE (op0) != V4SImode)
                  || (GET_CODE (op0) == SUBREG
                      && GET_MODE (XEXP (op0, 0)) != V4SImode)))
          if (!BYTES_BIG_ENDIAN
              && icode == CODE_FOR_altivec_vpkuhum_direct
              && ((GET_CODE (op0) == REG
                   && GET_MODE (op0) != V8HImode)
                  || (GET_CODE (op0) == SUBREG
                      && GET_MODE (XEXP (op0, 0)) != V8HImode)))

          /* For little-endian, the two input operands must be swapped
             (or swapped back) to ensure proper right-to-left numbering
          if (swapped ^ !BYTES_BIG_ENDIAN)
            x = op0, op0 = op1, op1 = x;
          if (imode != V16QImode)
              op0 = gen_lowpart (imode, op0);
              op1 = gen_lowpart (imode, op1);
          if (omode == V16QImode)
            x = gen_reg_rtx (omode);
          emit_insn (GEN_FCN (icode) (x, op0, op1));
          if (omode != V16QImode)
            emit_move_insn (target, gen_lowpart (V16QImode, x));

  if (!BYTES_BIG_ENDIAN)
      altivec_expand_vec_perm_const_le (operands);
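/* For example, the first entry in the pattern table (vpkuhum, selector
   {1,3,5,...,31}) matches a permutation that keeps the odd-numbered bytes of
   the 32-byte concatenation of the two inputs, i.e. packing unsigned
   halfwords down to bytes.  */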
/* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
   Return true if we match an efficient implementation.  */

rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
                                unsigned char perm0, unsigned char perm1)

  /* If both selectors come from the same operand, fold to single op.  */
  if ((perm0 & 2) == (perm1 & 2))

  /* If both operands are equal, fold to simpler permutation.  */
  if (rtx_equal_p (op0, op1))
      perm1 = (perm1 & 1) + 2;
  /* If the first selector comes from the second operand, swap.  */
  else if (perm0 & 2)
      x = op0, op0 = op1, op1 = x;
  /* If the second selector does not come from the second operand, fail.  */
  else if ((perm1 & 2) == 0)

  if (target != NULL)
      enum machine_mode vmode, dmode;

      vmode = GET_MODE (target);
      gcc_assert (GET_MODE_NUNITS (vmode) == 2);
      dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
      x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
      v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
      x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
      emit_insn (gen_rtx_SET (VOIDmode, target, x));
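/* Each selector here is a 2-bit value: bit 1 picks the operand (op0 or op1)
   and bit 0 picks the element within it, so e.g. perm0 = 1, perm1 = 2 selects
   element 1 of op0 followed by element 0 of op1.  */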
rs6000_expand_vec_perm_const (rtx operands[4])
  rtx target, op0, op1, sel;
  unsigned char perm0, perm1;

  target = operands[0];

  /* Unpack the constant selector.  */
  perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
  perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;

  return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);

/* Test whether a constant permutation is supported.  */

rs6000_vectorize_vec_perm_const_ok (enum machine_mode vmode,
                                    const unsigned char *sel)
  /* AltiVec (and thus VSX) can handle arbitrary permutations.  */
  if (TARGET_ALTIVEC)

  /* Check for ps_merge* or evmerge* insns.  */
  if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
      || (TARGET_SPE && vmode == V2SImode))
      rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
      rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
      return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
/* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.  */

rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
                           enum machine_mode vmode, unsigned nelt, rtx perm[])
  enum machine_mode imode;

  if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
      imode = GET_MODE_INNER (vmode);
      imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
      imode = mode_for_vector (imode, nelt);

  x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
  x = expand_vec_perm (vmode, op0, op1, x, target);
    emit_move_insn (target, x);
/* Expand an extract even operation.  */

rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
  enum machine_mode vmode = GET_MODE (target);
  unsigned i, nelt = GET_MODE_NUNITS (vmode);

  for (i = 0; i < nelt; i++)
    perm[i] = GEN_INT (i * 2);

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
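/* E.g. for a 4-element vector mode this builds the selector {0, 2, 4, 6},
   taking the even elements of the op0/op1 concatenation.  */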
/* Expand a vector interleave operation.  */

rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
  enum machine_mode vmode = GET_MODE (target);
  unsigned i, high, nelt = GET_MODE_NUNITS (vmode);

  high = (highp ? 0 : nelt / 2);
  for (i = 0; i < nelt / 2; i++)
      perm[i * 2] = GEN_INT (i + high);
      perm[i * 2 + 1] = GEN_INT (i + nelt + high);

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
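/* E.g. for a 4-element vector mode, highp builds the selector {0, 4, 1, 5}
   and !highp builds {2, 6, 3, 7}, where element numbers index the
   concatenation of op0 and op1.  */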
/* Scale a V2DF vector SRC by two to the SCALE and place in TGT.  */
rs6000_scale_v2df (rtx tgt, rtx src, int scale)
  HOST_WIDE_INT hwi_scale (scale);
  REAL_VALUE_TYPE r_pow;
  rtvec v = rtvec_alloc (2);

  rtx scale_vec = gen_reg_rtx (V2DFmode);
  (void)real_powi (&r_pow, DFmode, &dconst2, hwi_scale);
  elt = CONST_DOUBLE_FROM_REAL_VALUE (r_pow, DFmode);
  RTVEC_ELT (v, 0) = elt;
  RTVEC_ELT (v, 1) = elt;
  rs6000_expand_vector_init (scale_vec, gen_rtx_PARALLEL (V2DFmode, v));
  emit_insn (gen_mulv2df3 (tgt, src, scale_vec));
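/* E.g. a SCALE of 3 builds the splat vector {8.0, 8.0} and multiplies SRC by
   it, scaling both doubleword lanes by 2^3.  */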
/* Return an RTX representing where to find the function value of a
   function returning MODE.  */
rs6000_complex_function_value (enum machine_mode mode)
  unsigned int regno;

  enum machine_mode inner = GET_MODE_INNER (mode);
  unsigned int inner_bytes = GET_MODE_SIZE (inner);

  if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    regno = FP_ARG_RETURN;

      regno = GP_ARG_RETURN;

      /* 32-bit is OK since it'll go in r3/r4.  */
      if (TARGET_32BIT && inner_bytes >= 4)
        return gen_rtx_REG (mode, regno);

  if (inner_bytes >= 8)
    return gen_rtx_REG (mode, regno);

  r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
  r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
                          GEN_INT (inner_bytes));
  return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
/* Target hook for TARGET_FUNCTION_VALUE.

   On the SPE, both FPs and vectors are returned in r3.

   On RS/6000 an integer value is in r3 and a floating-point value is in
   fp1, unless -msoft-float.  */

rs6000_function_value (const_tree valtype,
                       const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
                       bool outgoing ATTRIBUTE_UNUSED)
  enum machine_mode mode;
  unsigned int regno;
  enum machine_mode elt_mode;

  /* Special handling for structs in darwin64.  */
      && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
      CUMULATIVE_ARGS valcum;

      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed as
         an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);

      /* Otherwise fall through to standard ABI rules.  */

  /* The ELFv2 ABI returns homogeneous VFP aggregates in registers.  */
  if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (valtype), valtype,
                                             &elt_mode, &n_elts))
      int first_reg, n_regs, i;

      if (SCALAR_FLOAT_MODE_P (elt_mode))
          /* _Decimal128 must use even/odd register pairs.  */
          first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
          n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;

          first_reg = ALTIVEC_ARG_RETURN;

      par = gen_rtx_PARALLEL (TYPE_MODE (valtype), rtvec_alloc (n_elts));
      for (i = 0; i < n_elts; i++)
          rtx r = gen_rtx_REG (elt_mode, first_reg + i * n_regs);
          rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
          XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);

  if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
      /* Long long return values need to be split in -mpowerpc64, 32bit ABI.  */
      return gen_rtx_PARALLEL (DImode,
          gen_rtx_EXPR_LIST (VOIDmode,
                             gen_rtx_REG (SImode, GP_ARG_RETURN),
          gen_rtx_EXPR_LIST (VOIDmode,
                             gen_rtx_REG (SImode,
                                          GP_ARG_RETURN + 1),

  if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
    return gen_rtx_PARALLEL (DCmode,
        gen_rtx_EXPR_LIST (VOIDmode,
                           gen_rtx_REG (SImode, GP_ARG_RETURN),
        gen_rtx_EXPR_LIST (VOIDmode,
                           gen_rtx_REG (SImode,
                                        GP_ARG_RETURN + 1),
        gen_rtx_EXPR_LIST (VOIDmode,
                           gen_rtx_REG (SImode,
                                        GP_ARG_RETURN + 2),
        gen_rtx_EXPR_LIST (VOIDmode,
                           gen_rtx_REG (SImode,
                                        GP_ARG_RETURN + 3),

  mode = TYPE_MODE (valtype);
  if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
      || POINTER_TYPE_P (valtype))
    mode = TARGET_32BIT ? SImode : DImode;

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
           && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
    regno = FP_ARG_RETURN;
  else if (TREE_CODE (valtype) == COMPLEX_TYPE
           && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if (TREE_CODE (valtype) == VECTOR_TYPE
           && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
           && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
    regno = ALTIVEC_ARG_RETURN;
  else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
           && (mode == DFmode || mode == DCmode
               || mode == TFmode || mode == TCmode))
    return spe_build_register_parallel (mode, GP_ARG_RETURN);
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
/* Define how to find the value returned by a library function
   assuming the value has mode MODE.  */
rs6000_libcall_value (enum machine_mode mode)
  unsigned int regno;

  if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
      /* Long long return values need to be split in -mpowerpc64, 32bit ABI.  */
      return gen_rtx_PARALLEL (DImode,
          gen_rtx_EXPR_LIST (VOIDmode,
                             gen_rtx_REG (SImode, GP_ARG_RETURN),
          gen_rtx_EXPR_LIST (VOIDmode,
                             gen_rtx_REG (SImode,
                                          GP_ARG_RETURN + 1),

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_MODE_P (mode)
           && TARGET_HARD_FLOAT && TARGET_FPRS
           && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
    regno = FP_ARG_RETURN;
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
           && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
    regno = ALTIVEC_ARG_RETURN;
  else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
           && (mode == DFmode || mode == DCmode
               || mode == TFmode || mode == TCmode))
    return spe_build_register_parallel (mode, GP_ARG_RETURN);
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
/* Return true if we use LRA instead of reload pass.  */
rs6000_lra_p (void)
  return rs6000_lra_flag;

/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.

   For the RS/6000, if frame pointer elimination is being done, we would like
   to convert ap into fp, not sp.

   We need r30 if -mminimal-toc was specified, and there are constant pool

rs6000_can_eliminate (const int from, const int to)
  return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
          ? ! frame_pointer_needed
          : from == RS6000_PIC_OFFSET_TABLE_REGNUM
          ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */
rs6000_initial_elimination_offset (int from, int to)
  rs6000_stack_t *info = rs6000_stack_info ();
  HOST_WIDE_INT offset;

  if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? 0 : -info->total_size;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
      offset = info->push_p ? 0 : -info->total_size;
      if (FRAME_GROWS_DOWNWARD)
        offset += info->fixed_size + info->vars_size + info->parm_size;
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = FRAME_GROWS_DOWNWARD
             ? info->fixed_size + info->vars_size + info->parm_size
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = info->total_size;
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? info->total_size : 0;
  else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
    gcc_unreachable ();
rs6000_dwarf_register_span (rtx reg)
  unsigned regno = REGNO (reg);
  enum machine_mode mode = GET_MODE (reg);

      && (SPE_VECTOR_MODE (GET_MODE (reg))
          || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
              && mode != SFmode && mode != SDmode && mode != SCmode)))

  regno = REGNO (reg);

  /* The duality of the SPE register size wreaks all kinds of havoc.
     This is a way of distinguishing r0 in 32-bits from r0 in
  words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
  gcc_assert (words <= 4);
  for (i = 0; i < words; i++, regno++)
      if (BYTES_BIG_ENDIAN)
          parts[2 * i] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
          parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
          parts[2 * i] = gen_rtx_REG (SImode, regno);
          parts[2 * i + 1] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);

  return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
/* Fill in sizes for SPE register high parts in table used by unwinder.  */

rs6000_init_dwarf_reg_sizes_extra (tree address)
      enum machine_mode mode = TYPE_MODE (char_type_node);
      rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
      rtx mem = gen_rtx_MEM (BLKmode, addr);
      rtx value = gen_int_mode (4, mode);

      for (i = FIRST_SPE_HIGH_REGNO; i < LAST_SPE_HIGH_REGNO+1; i++)
          int column = DWARF_REG_TO_UNWIND_COLUMN
                        (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
          HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);

          emit_move_insn (adjust_address (mem, mode, offset), value);

  if (TARGET_MACHO && ! TARGET_ALTIVEC)
      enum machine_mode mode = TYPE_MODE (char_type_node);
      rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
      rtx mem = gen_rtx_MEM (BLKmode, addr);
      rtx value = gen_int_mode (16, mode);

      /* On Darwin, libgcc may be built to run on both G3 and G4/5.
         The unwinder still needs to know the size of Altivec registers.  */

      for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
          int column = DWARF_REG_TO_UNWIND_COLUMN
                        (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
          HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);

          emit_move_insn (adjust_address (mem, mode, offset), value);
/* Map internal gcc register numbers to debug format register numbers.
   FORMAT specifies the type of debug register number to use:
     0 -- debug information, except for frame-related sections
     1 -- DWARF .debug_frame section
     2 -- DWARF .eh_frame section  */

rs6000_dbx_register_number (unsigned int regno, unsigned int format)
  /* We never use the GCC internal number for SPE high registers.
     Those are mapped to the 1200..1231 range for all debug formats.  */
  if (SPE_HIGH_REGNO_P (regno))
    return regno - FIRST_SPE_HIGH_REGNO + 1200;

  /* Except for the above, we use the internal number for non-DWARF
     debug information, and also for .eh_frame.  */
  if ((format == 0 && write_symbols != DWARF2_DEBUG) || format == 2)

  /* On some platforms, we use the standard DWARF register
     numbering for .debug_info and .debug_frame.  */
#ifdef RS6000_USE_DWARF_NUMBERING
  if (regno == LR_REGNO)
  if (regno == CTR_REGNO)
  /* Special handling for CR for .debug_frame: rs6000_emit_prologue has
     translated any combination of CR2, CR3, CR4 saves to a save of CR2.
     The actual code emitted saves the whole of CR, so we map CR2_REGNO
     to the DWARF reg for CR.  */
  if (format == 1 && regno == CR2_REGNO)
  if (CR_REGNO_P (regno))
    return regno - CR0_REGNO + 86;
  if (regno == CA_REGNO)
    return 101;  /* XER */
  if (ALTIVEC_REGNO_P (regno))
    return regno - FIRST_ALTIVEC_REGNO + 1124;
  if (regno == VRSAVE_REGNO)
  if (regno == VSCR_REGNO)
  if (regno == SPE_ACC_REGNO)
  if (regno == SPEFSCR_REGNO)
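/* With RS6000_USE_DWARF_NUMBERING, e.g., CR field N maps to 86 + N, the
   AltiVec registers map to 1124..1155, and the SPE high registers always map
   to 1200..1231 regardless of format.  */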
/* target hook eh_return_filter_mode */
static enum machine_mode
rs6000_eh_return_filter_mode (void)
  return TARGET_32BIT ? SImode : word_mode;

/* Target hook for scalar_mode_supported_p.  */
rs6000_scalar_mode_supported_p (enum machine_mode mode)
  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();
    return default_scalar_mode_supported_p (mode);

/* Target hook for vector_mode_supported_p.  */
rs6000_vector_mode_supported_p (enum machine_mode mode)
  if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))

  if (TARGET_SPE && SPE_VECTOR_MODE (mode))

  else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
/* Target hook for invalid_arg_for_unprototyped_fn.  */
static const char *
invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
  return (!rs6000_darwin64_abi
          && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
          && (funcdecl == NULL_TREE
              || (TREE_CODE (funcdecl) == FUNCTION_DECL
                  && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
         ? N_("AltiVec argument passed to unprototyped function")

/* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
   setup by using __stack_chk_fail_local hidden function instead of
   calling __stack_chk_fail directly.  Otherwise it is better to call
   __stack_chk_fail directly.  */

static tree ATTRIBUTE_UNUSED
rs6000_stack_protect_fail (void)
  return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
         ? default_hidden_stack_protect_fail ()
         : default_external_stack_protect_fail ();
rs6000_final_prescan_insn (rtx_insn *insn, rtx *operand ATTRIBUTE_UNUSED,
                           int num_operands ATTRIBUTE_UNUSED)
  if (rs6000_warn_cell_microcode)
      int insn_code_number = recog_memoized (insn);
      location_t location = INSN_LOCATION (insn);

      /* Punt on insns we cannot recognize.  */
      if (insn_code_number < 0)

      temp = get_insn_template (insn_code_number, insn);

      if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
        warning_at (location, OPT_mwarn_cell_microcode,
                    "emitting microcode insn %s\t[%s] #%d",
                    temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
      else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
        warning_at (location, OPT_mwarn_cell_microcode,
                    "emitting conditional microcode insn %s\t[%s] #%d",
                    temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));

/* Implement the TARGET_ASAN_SHADOW_OFFSET hook.  */

static unsigned HOST_WIDE_INT
rs6000_asan_shadow_offset (void)
  return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
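/* That is, the ASAN shadow memory offset is 1 << 41 (0x20000000000) for
   64-bit code and 1 << 29 (0x20000000) for 32-bit code.  */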
/* Mask options that we want to support inside of attribute((target)) and
   #pragma GCC target operations.  Note, we do not include things like
   64/32-bit, endianness, hard/soft floating point, etc. that would have
   different calling sequences.  */

struct rs6000_opt_mask {
  const char *name;		/* option name */
  HOST_WIDE_INT mask;		/* mask to set */
  bool invert;			/* invert sense of mask */
  bool valid_target;		/* option is a target option */

static struct rs6000_opt_mask const rs6000_opt_masks[] =
  { "altivec",			OPTION_MASK_ALTIVEC,		false, true  },
  { "cmpb",			OPTION_MASK_CMPB,		false, true  },
  { "crypto",			OPTION_MASK_CRYPTO,		false, true  },
  { "direct-move",		OPTION_MASK_DIRECT_MOVE,	false, true  },
  { "dlmzb",			OPTION_MASK_DLMZB,		false, true  },
  { "fprnd",			OPTION_MASK_FPRND,		false, true  },
  { "hard-dfp",			OPTION_MASK_DFP,		false, true  },
  { "htm",			OPTION_MASK_HTM,		false, true  },
  { "isel",			OPTION_MASK_ISEL,		false, true  },
  { "mfcrf",			OPTION_MASK_MFCRF,		false, true  },
  { "mfpgpr",			OPTION_MASK_MFPGPR,		false, true  },
  { "mulhw",			OPTION_MASK_MULHW,		false, true  },
  { "multiple",			OPTION_MASK_MULTIPLE,		false, true  },
  { "popcntb",			OPTION_MASK_POPCNTB,		false, true  },
  { "popcntd",			OPTION_MASK_POPCNTD,		false, true  },
  { "power8-fusion",		OPTION_MASK_P8_FUSION,		false, true  },
  { "power8-fusion-sign",	OPTION_MASK_P8_FUSION_SIGN,	false, true  },
  { "power8-vector",		OPTION_MASK_P8_VECTOR,		false, true  },
  { "powerpc-gfxopt",		OPTION_MASK_PPC_GFXOPT,		false, true  },
  { "powerpc-gpopt",		OPTION_MASK_PPC_GPOPT,		false, true  },
  { "quad-memory",		OPTION_MASK_QUAD_MEMORY,	false, true  },
  { "quad-memory-atomic",	OPTION_MASK_QUAD_MEMORY_ATOMIC,	false, true  },
  { "recip-precision",		OPTION_MASK_RECIP_PRECISION,	false, true  },
  { "string",			OPTION_MASK_STRING,		false, true  },
  { "update",			OPTION_MASK_NO_UPDATE,		true , true  },
  { "upper-regs-df",		OPTION_MASK_UPPER_REGS_DF,	false, false },
  { "upper-regs-sf",		OPTION_MASK_UPPER_REGS_SF,	false, false },
  { "vsx",			OPTION_MASK_VSX,		false, true  },
  { "vsx-timode",		OPTION_MASK_VSX_TIMODE,		false, true  },
#ifdef OPTION_MASK_64BIT
  { "aix64",			OPTION_MASK_64BIT,		false, false },
  { "aix32",			OPTION_MASK_64BIT,		true,  false },
  { "64",			OPTION_MASK_64BIT,		false, false },
  { "32",			OPTION_MASK_64BIT,		true,  false },
#ifdef OPTION_MASK_EABI
  { "eabi",			OPTION_MASK_EABI,		false, false },
#ifdef OPTION_MASK_LITTLE_ENDIAN
  { "little",			OPTION_MASK_LITTLE_ENDIAN,	false, false },
  { "big",			OPTION_MASK_LITTLE_ENDIAN,	true,  false },
#ifdef OPTION_MASK_RELOCATABLE
  { "relocatable",		OPTION_MASK_RELOCATABLE,	false, false },
#ifdef OPTION_MASK_STRICT_ALIGN
  { "strict-align",		OPTION_MASK_STRICT_ALIGN,	false, false },
  { "soft-float",		OPTION_MASK_SOFT_FLOAT,		false, false },
  { "string",			OPTION_MASK_STRING,		false, false },
/* Builtin mask mapping for printing the flags.  */
static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
  { "altivec",		 RS6000_BTM_ALTIVEC,	false, false },
  { "vsx",		 RS6000_BTM_VSX,	false, false },
  { "spe",		 RS6000_BTM_SPE,	false, false },
  { "paired",		 RS6000_BTM_PAIRED,	false, false },
  { "fre",		 RS6000_BTM_FRE,	false, false },
  { "fres",		 RS6000_BTM_FRES,	false, false },
  { "frsqrte",		 RS6000_BTM_FRSQRTE,	false, false },
  { "frsqrtes",		 RS6000_BTM_FRSQRTES,	false, false },
  { "popcntd",		 RS6000_BTM_POPCNTD,	false, false },
  { "cell",		 RS6000_BTM_CELL,	false, false },
  { "power8-vector",	 RS6000_BTM_P8_VECTOR,	false, false },
  { "crypto",		 RS6000_BTM_CRYPTO,	false, false },
  { "htm",		 RS6000_BTM_HTM,	false, false },
  { "hard-dfp",		 RS6000_BTM_DFP,	false, false },
  { "hard-float",	 RS6000_BTM_HARD_FLOAT,	false, false },
  { "long-double-128",	 RS6000_BTM_LDBL128,	false, false },
/* Option variables that we want to support inside attribute((target)) and
   #pragma GCC target operations.  */

struct rs6000_opt_var {
  const char *name;		/* option name */
  size_t global_offset;		/* offset of the option in global_options.  */
  size_t target_offset;		/* offset of the option in target options.  */

static struct rs6000_opt_var const rs6000_opt_vars[] =
    offsetof (struct gcc_options, x_TARGET_FRIZ),
    offsetof (struct cl_target_option, x_TARGET_FRIZ), },
  { "avoid-indexed-addresses",
    offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
    offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
    offsetof (struct gcc_options, x_rs6000_paired_float),
    offsetof (struct cl_target_option, x_rs6000_paired_float), },
    offsetof (struct gcc_options, x_rs6000_default_long_calls),
    offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
/* Inner function to handle attribute((target("..."))) and #pragma GCC target
   parsing.  Return true if there were no errors.  */

rs6000_inner_target_options (tree args, bool attr_p)

  if (args == NULL_TREE)

  else if (TREE_CODE (args) == STRING_CST)
      char *p = ASTRDUP (TREE_STRING_POINTER (args));

      while ((q = strtok (p, ",")) != NULL)
          bool error_p = false;
          bool not_valid_p = false;
          const char *cpu_opt = NULL;

          if (strncmp (q, "cpu=", 4) == 0)
              int cpu_index = rs6000_cpu_name_lookup (q+4);
              if (cpu_index >= 0)
                rs6000_cpu_index = cpu_index;
          else if (strncmp (q, "tune=", 5) == 0)
              int tune_index = rs6000_cpu_name_lookup (q+5);
              if (tune_index >= 0)
                rs6000_tune_index = tune_index;

              bool invert = false;

              if (strncmp (r, "no-", 3) == 0)

              for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
                if (strcmp (r, rs6000_opt_masks[i].name) == 0)
                    HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;

                    if (!rs6000_opt_masks[i].valid_target)
                      not_valid_p = true;
                        rs6000_isa_flags_explicit |= mask;

                        /* VSX needs altivec, so -mvsx automagically sets
                        if (mask == OPTION_MASK_VSX && !invert)
                          mask |= OPTION_MASK_ALTIVEC;

                        if (rs6000_opt_masks[i].invert)

                          rs6000_isa_flags &= ~mask;
                          rs6000_isa_flags |= mask;

              if (error_p && !not_valid_p)
                  for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
                    if (strcmp (r, rs6000_opt_vars[i].name) == 0)
                        size_t j = rs6000_opt_vars[i].global_offset;
                        *((int *) ((char *)&global_options + j)) = !invert;

              const char *eprefix, *esuffix;

                  eprefix = "__attribute__((__target__(";
                  eprefix = "#pragma GCC target ";

                error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
              else if (not_valid_p)
                error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
                error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);

  else if (TREE_CODE (args) == TREE_LIST)
          tree value = TREE_VALUE (args);
              bool ret2 = rs6000_inner_target_options (value, attr_p);
          args = TREE_CHAIN (args);
        while (args != NULL_TREE);

    gcc_unreachable ();
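/* For example, an attribute string such as "cpu=power7,no-vsx" or a
   #pragma GCC target ("altivec,popcntd") is tokenized at commas here:
   "cpu=" and "tune=" entries select a processor, and the remaining names
   (with an optional "no-" prefix) are looked up in rs6000_opt_masks and
   rs6000_opt_vars.  */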
/* Print out the target options as a list for -mdebug=target.  */

rs6000_debug_target_options (tree args, const char *prefix)
  if (args == NULL_TREE)
    fprintf (stderr, "%s<NULL>", prefix);

  else if (TREE_CODE (args) == STRING_CST)
      char *p = ASTRDUP (TREE_STRING_POINTER (args));

      while ((q = strtok (p, ",")) != NULL)
          fprintf (stderr, "%s\"%s\"", prefix, q);

  else if (TREE_CODE (args) == TREE_LIST)
          tree value = TREE_VALUE (args);
            rs6000_debug_target_options (value, prefix);
          args = TREE_CHAIN (args);
        while (args != NULL_TREE);

    gcc_unreachable ();
/* Hook to validate attribute((target("..."))).  */

rs6000_valid_attribute_p (tree fndecl,
                          tree ARG_UNUSED (name),

  struct cl_target_option cur_target;

  tree old_optimize = build_optimization_node (&global_options);
  tree new_target, new_optimize;
  tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);

  gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));

  if (TARGET_DEBUG_TARGET)
      tree tname = DECL_NAME (fndecl);
      fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
        fprintf (stderr, "function: %.*s\n",
                 (int) IDENTIFIER_LENGTH (tname),
                 IDENTIFIER_POINTER (tname));
        fprintf (stderr, "function: unknown\n");

      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

        fprintf (stderr, "flags: 0x%x\n", flags);

      fprintf (stderr, "--------------------\n");

  old_optimize = build_optimization_node (&global_options);
  func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);

  /* If the function changed the optimization levels as well as setting target
     options, start with the optimizations specified.  */
  if (func_optimize && func_optimize != old_optimize)
    cl_optimization_restore (&global_options,
                             TREE_OPTIMIZATION (func_optimize));

  /* The target attributes may also change some optimization flags, so update
     the optimization options if necessary.  */
  cl_target_option_save (&cur_target, &global_options);
  rs6000_cpu_index = rs6000_tune_index = -1;
  ret = rs6000_inner_target_options (args, true);

  /* Set up any additional state.  */
      ret = rs6000_option_override_internal (false);
      new_target = build_target_option_node (&global_options);

  new_optimize = build_optimization_node (&global_options);

      DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;

      if (old_optimize != new_optimize)
        DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;

  cl_target_option_restore (&global_options, &cur_target);

  if (old_optimize != new_optimize)
    cl_optimization_restore (&global_options,
                             TREE_OPTIMIZATION (old_optimize));
/* Hook to validate the current #pragma GCC target and set the state, and
   update the macros based on what was changed.  If ARGS is NULL, then
   POP_TARGET is used to reset the options.  */

rs6000_pragma_target_parse (tree args, tree pop_target)
  tree prev_tree = build_target_option_node (&global_options);

  struct cl_target_option *prev_opt, *cur_opt;
  HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
  HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;

  if (TARGET_DEBUG_TARGET)
      fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
      fprintf (stderr, "args:");
      rs6000_debug_target_options (args, " ");
      fprintf (stderr, "\n");

          fprintf (stderr, "pop_target:\n");
          debug_tree (pop_target);
        fprintf (stderr, "pop_target: <NULL>\n");

      fprintf (stderr, "--------------------\n");

      cur_tree = ((pop_target)
                  : target_option_default_node);
      cl_target_option_restore (&global_options,
                                TREE_TARGET_OPTION (cur_tree));

      rs6000_cpu_index = rs6000_tune_index = -1;
      if (!rs6000_inner_target_options (args, false)
          || !rs6000_option_override_internal (false)
          || (cur_tree = build_target_option_node (&global_options))
          if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
            fprintf (stderr, "invalid pragma\n");

  target_option_current_node = cur_tree;

  /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
     change the macros that are defined.  */
  if (rs6000_target_modify_macros_ptr)
      prev_opt    = TREE_TARGET_OPTION (prev_tree);
      prev_bumask = prev_opt->x_rs6000_builtin_mask;
      prev_flags  = prev_opt->x_rs6000_isa_flags;

      cur_opt    = TREE_TARGET_OPTION (cur_tree);
      cur_flags  = cur_opt->x_rs6000_isa_flags;
      cur_bumask = cur_opt->x_rs6000_builtin_mask;

      diff_bumask = (prev_bumask ^ cur_bumask);
      diff_flags  = (prev_flags ^ cur_flags);

      if ((diff_flags != 0) || (diff_bumask != 0))
          /* Delete old macros.  */
          rs6000_target_modify_macros_ptr (false,
                                           prev_flags & diff_flags,
                                           prev_bumask & diff_bumask);

          /* Define new macros.  */
          rs6000_target_modify_macros_ptr (true,
                                           cur_flags & diff_flags,
                                           cur_bumask & diff_bumask);
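/* The XOR of the previous and current masks gives the flags that changed in
   either direction; ANDing that difference with the previous masks selects
   the macros to delete, and with the current masks the macros to define.  */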
/* Remember the last target of rs6000_set_current_function.  */
static GTY(()) tree rs6000_previous_fndecl;

/* Establish appropriate back-end context for processing the function
   FNDECL.  The argument might be NULL to indicate processing at top
   level, outside of any function scope.  */
rs6000_set_current_function (tree fndecl)
  tree old_tree = (rs6000_previous_fndecl
                   ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)

  tree new_tree = (fndecl
                   ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)

  if (TARGET_DEBUG_TARGET)
      bool print_final = false;
      fprintf (stderr, "\n==================== rs6000_set_current_function");

        fprintf (stderr, ", fndecl %s (%p)",
                 (DECL_NAME (fndecl)
                  ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
                  : "<unknown>"), (void *)fndecl);

      if (rs6000_previous_fndecl)
        fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
      fprintf (stderr, "\n");

          fprintf (stderr, "\nnew fndecl target specific options:\n");
          debug_tree (new_tree);
          print_final = true;

          fprintf (stderr, "\nold fndecl target specific options:\n");
          debug_tree (old_tree);
          print_final = true;

        fprintf (stderr, "--------------------\n");

  /* Only change the context if the function changes.  This hook is called
     several times in the course of compiling a function, and we don't want to
     slow things down too much or call target_reinit when it isn't safe.  */
  if (fndecl && fndecl != rs6000_previous_fndecl)
      rs6000_previous_fndecl = fndecl;
      if (old_tree == new_tree)

          cl_target_option_restore (&global_options,
                                    TREE_TARGET_OPTION (new_tree));
          if (TREE_TARGET_GLOBALS (new_tree))
            restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
            TREE_TARGET_GLOBALS (new_tree)
              = save_target_globals_default_opts ();

          new_tree = target_option_current_node;
          cl_target_option_restore (&global_options,
                                    TREE_TARGET_OPTION (new_tree));
          if (TREE_TARGET_GLOBALS (new_tree))
            restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
          else if (new_tree == target_option_default_node)
            restore_target_globals (&default_target_globals);
            TREE_TARGET_GLOBALS (new_tree)
              = save_target_globals_default_opts ();
/* Save the current options */

rs6000_function_specific_save (struct cl_target_option *ptr,
                               struct gcc_options *opts)
  ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
  ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;

/* Restore the current options */

rs6000_function_specific_restore (struct gcc_options *opts,
                                  struct cl_target_option *ptr)

  opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
  opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
  (void) rs6000_option_override_internal (false);

/* Print the current options */

rs6000_function_specific_print (FILE *file, int indent,
                                struct cl_target_option *ptr)
  rs6000_print_isa_options (file, indent, "Isa options set",
                            ptr->x_rs6000_isa_flags);

  rs6000_print_isa_options (file, indent, "Isa options explicit",
                            ptr->x_rs6000_isa_flags_explicit);
/* Helper function to print the current isa or misc options on a line.  */

static void
rs6000_print_options_internal (FILE *file,
			       int indent,
			       const char *string,
			       HOST_WIDE_INT flags,
			       const char *prefix,
			       const struct rs6000_opt_mask *opts,
			       size_t num_elements)
{
  size_t i;
  size_t start_column = 0;
  size_t cur_column;
  size_t max_column = 76;
  const char *comma = "";
32355 start_column
+= fprintf (file
, "%*s", indent
, "");
32359 fprintf (stderr
, DEBUG_FMT_S
, string
, "<none>");
32363 start_column
+= fprintf (stderr
, DEBUG_FMT_WX
, string
, flags
);
32365 /* Print the various mask options. */
32366 cur_column
= start_column
;
32367 for (i
= 0; i
< num_elements
; i
++)
32369 if ((flags
& opts
[i
].mask
) != 0)
32371 const char *no_str
= rs6000_opt_masks
[i
].invert
? "no-" : "";
32372 size_t len
= (strlen (comma
)
32375 + strlen (rs6000_opt_masks
[i
].name
));
32378 if (cur_column
> max_column
)
32380 fprintf (stderr
, ", \\\n%*s", (int)start_column
, "");
32381 cur_column
= start_column
+ len
;
32385 fprintf (file
, "%s%s%s%s", comma
, prefix
, no_str
,
32386 rs6000_opt_masks
[i
].name
);
32387 flags
&= ~ opts
[i
].mask
;
32392 fputs ("\n", file
);
/* Helper function to print the current isa options on a line.  */

static void
rs6000_print_isa_options (FILE *file, int indent, const char *string,
			  HOST_WIDE_INT flags)
{
  rs6000_print_options_internal (file, indent, string, flags, "-m",
				 &rs6000_opt_masks[0],
				 ARRAY_SIZE (rs6000_opt_masks));
}
/* Helper function to print the current builtin options on a line.  */

static void
rs6000_print_builtin_options (FILE *file, int indent, const char *string,
			      HOST_WIDE_INT flags)
{
  rs6000_print_options_internal (file, indent, string, flags, "",
				 &rs6000_builtin_mask_names[0],
				 ARRAY_SIZE (rs6000_builtin_mask_names));
}
/* Hook to determine if one function can safely inline another.  */

static bool
rs6000_can_inline_p (tree caller, tree callee)
{
  bool ret = false;
  tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
  tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
  /* If callee has no option attributes, then it is ok to inline.  */
  if (!callee_tree)
    ret = true;

  /* If caller has no option attributes, but callee does then it is not ok to
     inline.  */
  else if (!caller_tree)
    ret = false;
  else
    {
      struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
      struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);

      /* Callee's options should be a subset of the caller's, i.e. a vsx
	 function can inline an altivec function but a non-vsx function
	 can't inline a vsx function.  */
      if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
	  == callee_opts->x_rs6000_isa_flags)
	ret = true;
    }
  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
	     (DECL_NAME (caller) ? IDENTIFIER_POINTER (DECL_NAME (caller))
	      : "<unknown>"),
	     (DECL_NAME (callee) ? IDENTIFIER_POINTER (DECL_NAME (callee))
	      : "<unknown>"),
	     (ret ? "can" : "cannot"));

  return ret;
}
/* Allocate a stack temp and fixup the address so it meets the particular
   memory requirements (either offsettable or REG+REG addressing).  */

rtx
rs6000_allocate_stack_temp (enum machine_mode mode,
			    bool offsettable_p,
			    bool reg_reg_p)
{
  rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  rtx addr = XEXP (stack, 0);
  int strict_p = (reload_in_progress || reload_completed);

  if (!legitimate_indirect_address_p (addr, strict_p))
    {
      if (offsettable_p
	  && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
	stack = replace_equiv_address (stack, copy_addr_to_reg (addr));

      else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
	stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
    }

  return stack;
}
32485 /* Given a memory reference, if it is not a reg or reg+reg addressing, convert
32486 to such a form to deal with memory reference instructions like STFIWX that
32487 only take reg+reg addressing. */
32490 rs6000_address_for_fpconvert (rtx x
)
32492 int strict_p
= (reload_in_progress
|| reload_completed
);
32495 gcc_assert (MEM_P (x
));
32496 addr
= XEXP (x
, 0);
32497 if (! legitimate_indirect_address_p (addr
, strict_p
)
32498 && ! legitimate_indexed_address_p (addr
, strict_p
))
32500 if (GET_CODE (addr
) == PRE_INC
|| GET_CODE (addr
) == PRE_DEC
)
32502 rtx reg
= XEXP (addr
, 0);
32503 HOST_WIDE_INT size
= GET_MODE_SIZE (GET_MODE (x
));
32504 rtx size_rtx
= GEN_INT ((GET_CODE (addr
) == PRE_DEC
) ? -size
: size
);
32505 gcc_assert (REG_P (reg
));
32506 emit_insn (gen_add3_insn (reg
, reg
, size_rtx
));
32509 else if (GET_CODE (addr
) == PRE_MODIFY
)
32511 rtx reg
= XEXP (addr
, 0);
32512 rtx expr
= XEXP (addr
, 1);
32513 gcc_assert (REG_P (reg
));
32514 gcc_assert (GET_CODE (expr
) == PLUS
);
32515 emit_insn (gen_add3_insn (reg
, XEXP (expr
, 0), XEXP (expr
, 1)));
32519 x
= replace_equiv_address (x
, copy_addr_to_reg (addr
));
/* Given a memory reference, if it is not in the form for altivec memory
   reference instructions (i.e. reg or reg+reg addressing with AND of -16),
   convert to the altivec format.  */

rtx
rs6000_address_for_altivec (rtx x)
{
  gcc_assert (MEM_P (x));
  if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
    {
      rtx addr = XEXP (x, 0);
      int strict_p = (reload_in_progress || reload_completed);

      if (!legitimate_indexed_address_p (addr, strict_p)
	  && !legitimate_indirect_address_p (addr, strict_p))
	addr = copy_to_mode_reg (Pmode, addr);

      addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
      x = change_address (x, GET_MODE (x), addr);
    }

  return x;
}
/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   On the RS/6000, all integer constants are acceptable, most won't be valid
   for particular insns, though.  Only easy FP constants are acceptable.  */

static bool
rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
{
  if (TARGET_ELF && tls_referenced_p (x))
    return false;

  return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
	  || GET_MODE (x) == VOIDmode
	  || (TARGET_POWERPC64 && mode == DImode)
	  || easy_fp_constant (x, mode)
	  || easy_vector_constant (x, mode));
}
32569 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
32572 rs6000_call_aix (rtx value
, rtx func_desc
, rtx flag
, rtx cookie
)
32574 rtx toc_reg
= gen_rtx_REG (Pmode
, TOC_REGNUM
);
32575 rtx toc_load
= NULL_RTX
;
32576 rtx toc_restore
= NULL_RTX
;
32578 rtx abi_reg
= NULL_RTX
;
32583 /* Handle longcall attributes. */
32584 if (INTVAL (cookie
) & CALL_LONG
)
32585 func_desc
= rs6000_longcall_ref (func_desc
);
32587 /* Handle indirect calls. */
32588 if (GET_CODE (func_desc
) != SYMBOL_REF
32589 || (DEFAULT_ABI
== ABI_AIX
&& !SYMBOL_REF_FUNCTION_P (func_desc
)))
32591 /* Save the TOC into its reserved slot before the call,
32592 and prepare to restore it after the call. */
32593 rtx stack_ptr
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
32594 rtx stack_toc_offset
= GEN_INT (RS6000_TOC_SAVE_SLOT
);
32595 rtx stack_toc_mem
= gen_frame_mem (Pmode
,
32596 gen_rtx_PLUS (Pmode
, stack_ptr
,
32597 stack_toc_offset
));
32598 toc_restore
= gen_rtx_SET (VOIDmode
, toc_reg
, stack_toc_mem
);
32600 /* Can we optimize saving the TOC in the prologue or
32601 do we need to do it at every call? */
32602 if (TARGET_SAVE_TOC_INDIRECT
&& !cfun
->calls_alloca
)
32603 cfun
->machine
->save_toc_in_prologue
= true;
32606 MEM_VOLATILE_P (stack_toc_mem
) = 1;
32607 emit_move_insn (stack_toc_mem
, toc_reg
);
32610 if (DEFAULT_ABI
== ABI_ELFv2
)
32612 /* A function pointer in the ELFv2 ABI is just a plain address, but
32613 the ABI requires it to be loaded into r12 before the call. */
32614 func_addr
= gen_rtx_REG (Pmode
, 12);
32615 emit_move_insn (func_addr
, func_desc
);
32616 abi_reg
= func_addr
;
32620 /* A function pointer under AIX is a pointer to a data area whose
32621 first word contains the actual address of the function, whose
32622 second word contains a pointer to its TOC, and whose third word
32623 contains a value to place in the static chain register (r11).
32624 Note that if we load the static chain, our "trampoline" need
32625 not have any executable code. */
32627 /* Load up address of the actual function. */
32628 func_desc
= force_reg (Pmode
, func_desc
);
32629 func_addr
= gen_reg_rtx (Pmode
);
32630 emit_move_insn (func_addr
, gen_rtx_MEM (Pmode
, func_desc
));
32632 /* Prepare to load the TOC of the called function. Note that the
32633 TOC load must happen immediately before the actual call so
32634 that unwinding the TOC registers works correctly. See the
32635 comment in frob_update_context. */
32636 rtx func_toc_offset
= GEN_INT (GET_MODE_SIZE (Pmode
));
32637 rtx func_toc_mem
= gen_rtx_MEM (Pmode
,
32638 gen_rtx_PLUS (Pmode
, func_desc
,
32640 toc_load
= gen_rtx_USE (VOIDmode
, func_toc_mem
);
32642 /* If we have a static chain, load it up. */
32643 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS
)
32645 rtx sc_reg
= gen_rtx_REG (Pmode
, STATIC_CHAIN_REGNUM
);
32646 rtx func_sc_offset
= GEN_INT (2 * GET_MODE_SIZE (Pmode
));
32647 rtx func_sc_mem
= gen_rtx_MEM (Pmode
,
32648 gen_rtx_PLUS (Pmode
, func_desc
,
32650 emit_move_insn (sc_reg
, func_sc_mem
);
32657 /* Direct calls use the TOC: for local calls, the callee will
32658 assume the TOC register is set; for non-local calls, the
32659 PLT stub needs the TOC register. */
32661 func_addr
= func_desc
;
32664 /* Create the call. */
32665 call
[0] = gen_rtx_CALL (VOIDmode
, gen_rtx_MEM (SImode
, func_addr
), flag
);
32666 if (value
!= NULL_RTX
)
32667 call
[0] = gen_rtx_SET (VOIDmode
, value
, call
[0]);
32671 call
[n_call
++] = toc_load
;
32673 call
[n_call
++] = toc_restore
;
32675 call
[n_call
++] = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, LR_REGNO
));
32677 insn
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec_v (n_call
, call
));
32678 insn
= emit_call_insn (insn
);
  /* Mention all registers defined by the ABI to hold information
     as uses in CALL_INSN_FUNCTION_USAGE.  */
  if (abi_reg)
    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
}
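/* An illustrative, hypothetical sketch (struct and field names are examples
   only, not part of the GCC sources): the AIX-style function pointer handled
   in rs6000_call_aix above points at a three-word descriptor rather than at
   code.  The comment in that function describes a layout corresponding to a
   structure like the following.  */

struct aix_function_descriptor_example
{
  void *entry_point;	/* word 0: address of the function's code.  */
  void *toc_pointer;	/* word 1: TOC value for the callee, loaded into r2.  */
  void *static_chain;	/* word 2: value placed in the static chain reg (r11).  */
};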
/* Expand code to perform a sibling call under the AIX or ELFv2 ABI.  */

void
rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
{
  rtx call[2];
  rtx insn;

  gcc_assert (INTVAL (cookie) == 0);

  /* Create the call.  */
  call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
  if (value != NULL_RTX)
    call[0] = gen_rtx_SET (VOIDmode, value, call[0]);

  call[1] = simple_return_rtx;

  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
  insn = emit_call_insn (insn);

  /* Note use of the TOC register.  */
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
  /* We need to also mark a use of the link register since the function we
     sibling-call to will use it to return to our caller.  */
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, LR_REGNO));
}
/* Return whether we need to always update the saved TOC pointer when we update
   the stack pointer.  */

static bool
rs6000_save_toc_in_prologue_p (void)
{
  return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
}
#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif
/* Fills in the label name that should be used for a 476 link stack thunk.  */

void
get_ppc476_thunk_name (char name[32])
{
  gcc_assert (TARGET_LINK_STACK);

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__ppc476.get_thunk");
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
}
32741 /* This function emits the simple thunk routine that is used to preserve
32742 the link stack on the 476 cpu. */
32744 static void rs6000_code_end (void) ATTRIBUTE_UNUSED
;
32746 rs6000_code_end (void)
32751 if (!TARGET_LINK_STACK
)
32754 get_ppc476_thunk_name (name
);
32756 decl
= build_decl (BUILTINS_LOCATION
, FUNCTION_DECL
, get_identifier (name
),
32757 build_function_type_list (void_type_node
, NULL_TREE
));
32758 DECL_RESULT (decl
) = build_decl (BUILTINS_LOCATION
, RESULT_DECL
,
32759 NULL_TREE
, void_type_node
);
32760 TREE_PUBLIC (decl
) = 1;
32761 TREE_STATIC (decl
) = 1;
32764 if (USE_HIDDEN_LINKONCE
)
32766 cgraph_node::create (decl
)->set_comdat_group (DECL_ASSEMBLER_NAME (decl
));
32767 targetm
.asm_out
.unique_section (decl
, 0);
32768 switch_to_section (get_named_section (decl
, NULL
, 0));
32769 DECL_WEAK (decl
) = 1;
32770 ASM_WEAKEN_DECL (asm_out_file
, decl
, name
, 0);
32771 targetm
.asm_out
.globalize_label (asm_out_file
, name
);
32772 targetm
.asm_out
.assemble_visibility (decl
, VISIBILITY_HIDDEN
);
32773 ASM_DECLARE_FUNCTION_NAME (asm_out_file
, name
, decl
);
32778 switch_to_section (text_section
);
32779 ASM_OUTPUT_LABEL (asm_out_file
, name
);
32782 DECL_INITIAL (decl
) = make_node (BLOCK
);
32783 current_function_decl
= decl
;
32784 init_function_start (decl
);
32785 first_function_block_is_cold
= false;
32786 /* Make sure unwind info is emitted for the thunk if needed. */
32787 final_start_function (emit_barrier (), asm_out_file
, 1);
32789 fputs ("\tblr\n", asm_out_file
);
32791 final_end_function ();
32792 init_insn_lengths ();
32793 free_after_compilation (cfun
);
32795 current_function_decl
= NULL
;
/* Add r30 to hard reg set if the prologue sets it up and it is not
   pic_offset_table_rtx.  */

static void
rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
{
  if (!TARGET_SINGLE_PIC_BASE
      && TARGET_TOC
      && TARGET_MINIMAL_TOC
      && get_pool_size () != 0)
    add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
}
/* Helper function for rs6000_split_logical to emit a logical instruction after
   splitting the operation to single GPR registers.

   DEST is the destination register.
   OP1 and OP2 are the input source registers.
   CODE is the base operation (AND, IOR, XOR, NOT).
   MODE is the machine mode.
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.  */
static void
rs6000_split_logical_inner (rtx dest,
			    rtx op1,
			    rtx op2,
			    enum rtx_code code,
			    enum machine_mode mode,
			    bool complement_final_p,
			    bool complement_op1_p,
			    bool complement_op2_p)
{
  rtx bool_rtx;
32835 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
32836 if (op2
&& GET_CODE (op2
) == CONST_INT
32837 && (mode
== SImode
|| (mode
== DImode
&& TARGET_POWERPC64
))
32838 && !complement_final_p
&& !complement_op1_p
&& !complement_op2_p
)
32840 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
32841 HOST_WIDE_INT value
= INTVAL (op2
) & mask
;
32843 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
32848 emit_insn (gen_rtx_SET (VOIDmode
, dest
, const0_rtx
));
32852 else if (value
== mask
)
32854 if (!rtx_equal_p (dest
, op1
))
32855 emit_insn (gen_rtx_SET (VOIDmode
, dest
, op1
));
      /* Optimize IOR/XOR of 0 to be a simple move.  Split large operations
	 into separate ORI/ORIS or XORI/XORIS instructions.  */
32862 else if (code
== IOR
|| code
== XOR
)
32866 if (!rtx_equal_p (dest
, op1
))
32867 emit_insn (gen_rtx_SET (VOIDmode
, dest
, op1
));
32873 if (code
== AND
&& mode
== SImode
32874 && !complement_final_p
&& !complement_op1_p
&& !complement_op2_p
)
32876 emit_insn (gen_andsi3 (dest
, op1
, op2
));
32880 if (complement_op1_p
)
32881 op1
= gen_rtx_NOT (mode
, op1
);
32883 if (complement_op2_p
)
32884 op2
= gen_rtx_NOT (mode
, op2
);
32886 bool_rtx
= ((code
== NOT
)
32887 ? gen_rtx_NOT (mode
, op1
)
32888 : gen_rtx_fmt_ee (code
, mode
, op1
, op2
));
32890 if (complement_final_p
)
32891 bool_rtx
= gen_rtx_NOT (mode
, bool_rtx
);
32893 emit_insn (gen_rtx_SET (VOIDmode
, dest
, bool_rtx
));
/* Split a DImode AND/IOR/XOR with a constant on a 32-bit system.  These
   operations are split immediately during RTL generation to allow for more
   optimizations of the AND/IOR/XOR.

   OPERANDS is an array containing the destination and two input operands.
   CODE is the base operation (AND, IOR, XOR, NOT).
   MODE is the machine mode.
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
   CLOBBER_REG is either NULL or a scratch register of type CC to allow
   formation of the AND instructions.  */
static void
rs6000_split_logical_di (rtx operands[3],
			 enum rtx_code code,
			 bool complement_final_p,
			 bool complement_op1_p,
			 bool complement_op2_p)
{
  const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
  const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
  const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
  enum hi_lo { hi = 0, lo = 1 };
  rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
  size_t i;
32923 op0_hi_lo
[hi
] = gen_highpart (SImode
, operands
[0]);
32924 op1_hi_lo
[hi
] = gen_highpart (SImode
, operands
[1]);
32925 op0_hi_lo
[lo
] = gen_lowpart (SImode
, operands
[0]);
32926 op1_hi_lo
[lo
] = gen_lowpart (SImode
, operands
[1]);
32929 op2_hi_lo
[hi
] = op2_hi_lo
[lo
] = NULL_RTX
;
32932 if (GET_CODE (operands
[2]) != CONST_INT
)
32934 op2_hi_lo
[hi
] = gen_highpart_mode (SImode
, DImode
, operands
[2]);
32935 op2_hi_lo
[lo
] = gen_lowpart (SImode
, operands
[2]);
32939 HOST_WIDE_INT value
= INTVAL (operands
[2]);
32940 HOST_WIDE_INT value_hi_lo
[2];
32942 gcc_assert (!complement_final_p
);
32943 gcc_assert (!complement_op1_p
);
32944 gcc_assert (!complement_op2_p
);
32946 value_hi_lo
[hi
] = value
>> 32;
32947 value_hi_lo
[lo
] = value
& lower_32bits
;
32949 for (i
= 0; i
< 2; i
++)
32951 HOST_WIDE_INT sub_value
= value_hi_lo
[i
];
32953 if (sub_value
& sign_bit
)
32954 sub_value
|= upper_32bits
;
32956 op2_hi_lo
[i
] = GEN_INT (sub_value
);
32958 /* If this is an AND instruction, check to see if we need to load
32959 the value in a register. */
32960 if (code
== AND
&& sub_value
!= -1 && sub_value
!= 0
32961 && !and_operand (op2_hi_lo
[i
], SImode
))
32962 op2_hi_lo
[i
] = force_reg (SImode
, op2_hi_lo
[i
]);
32967 for (i
= 0; i
< 2; i
++)
32969 /* Split large IOR/XOR operations. */
32970 if ((code
== IOR
|| code
== XOR
)
32971 && GET_CODE (op2_hi_lo
[i
]) == CONST_INT
32972 && !complement_final_p
32973 && !complement_op1_p
32974 && !complement_op2_p
32975 && !logical_const_operand (op2_hi_lo
[i
], SImode
))
32977 HOST_WIDE_INT value
= INTVAL (op2_hi_lo
[i
]);
32978 HOST_WIDE_INT hi_16bits
= value
& HOST_WIDE_INT_C(0xffff0000);
32979 HOST_WIDE_INT lo_16bits
= value
& HOST_WIDE_INT_C(0x0000ffff);
32980 rtx tmp
= gen_reg_rtx (SImode
);
32982 /* Make sure the constant is sign extended. */
32983 if ((hi_16bits
& sign_bit
) != 0)
32984 hi_16bits
|= upper_32bits
;
32986 rs6000_split_logical_inner (tmp
, op1_hi_lo
[i
], GEN_INT (hi_16bits
),
32987 code
, SImode
, false, false, false);
	  rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
				      code, SImode, false, false, false);
	}
      else
	rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
				    code, SImode, complement_final_p,
				    complement_op1_p, complement_op2_p);
    }

  return;
}
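/* An illustrative, hypothetical sketch (helper name is an example only, not
   part of the GCC sources): the constant handling in rs6000_split_logical_di
   above splits a 64-bit value into two 32-bit halves and sign-extends each
   half so it can be used as an SImode operand.  The helper below shows the
   same arithmetic on plain integers; halves[0] is the high half and halves[1]
   the low half, matching the hi/lo enumeration above.  */

static inline void
split_di_constant_example (unsigned long long value, long long halves[2])
{
  const unsigned long long lower_32bits = 0xffffffffULL;
  const unsigned long long sign_bit = 0x80000000ULL;
  unsigned long long hi_half = (value >> 32) & lower_32bits;
  unsigned long long lo_half = value & lower_32bits;

  /* Mirror the "sub_value |= upper_32bits" step above: if bit 31 of a half is
     set, extend the sign into the upper 32 bits.  */
  halves[0] = (long long) ((hi_half & sign_bit) ? (hi_half | ~lower_32bits)
						 : hi_half);
  halves[1] = (long long) ((lo_half & sign_bit) ? (lo_half | ~lower_32bits)
						 : lo_half);
}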
/* Split the insns that make up boolean operations operating on multiple GPR
   registers.  The boolean MD patterns ensure that the inputs either are
   exactly the same as the output registers, or there is no overlap.

   OPERANDS is an array containing the destination and two input operands.
   CODE is the base operation (AND, IOR, XOR, NOT).
   If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
   If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
   If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.  */
33012 rs6000_split_logical (rtx operands
[3],
33013 enum rtx_code code
,
33014 bool complement_final_p
,
33015 bool complement_op1_p
,
33016 bool complement_op2_p
)
33018 enum machine_mode mode
= GET_MODE (operands
[0]);
33019 enum machine_mode sub_mode
;
33021 int sub_size
, regno0
, regno1
, nregs
, i
;
33023 /* If this is DImode, use the specialized version that can run before
33024 register allocation. */
33025 if (mode
== DImode
&& !TARGET_POWERPC64
)
33027 rs6000_split_logical_di (operands
, code
, complement_final_p
,
33028 complement_op1_p
, complement_op2_p
);
33034 op2
= (code
== NOT
) ? NULL_RTX
: operands
[2];
33035 sub_mode
= (TARGET_POWERPC64
) ? DImode
: SImode
;
33036 sub_size
= GET_MODE_SIZE (sub_mode
);
33037 regno0
= REGNO (op0
);
33038 regno1
= REGNO (op1
);
33040 gcc_assert (reload_completed
);
33041 gcc_assert (IN_RANGE (regno0
, FIRST_GPR_REGNO
, LAST_GPR_REGNO
));
33042 gcc_assert (IN_RANGE (regno1
, FIRST_GPR_REGNO
, LAST_GPR_REGNO
));
33044 nregs
= rs6000_hard_regno_nregs
[(int)mode
][regno0
];
33045 gcc_assert (nregs
> 1);
33047 if (op2
&& REG_P (op2
))
33048 gcc_assert (IN_RANGE (REGNO (op2
), FIRST_GPR_REGNO
, LAST_GPR_REGNO
));
33050 for (i
= 0; i
< nregs
; i
++)
33052 int offset
= i
* sub_size
;
33053 rtx sub_op0
= simplify_subreg (sub_mode
, op0
, mode
, offset
);
33054 rtx sub_op1
= simplify_subreg (sub_mode
, op1
, mode
, offset
);
33055 rtx sub_op2
= ((code
== NOT
)
33057 : simplify_subreg (sub_mode
, op2
, mode
, offset
));
33059 rs6000_split_logical_inner (sub_op0
, sub_op1
, sub_op2
, code
, sub_mode
,
33060 complement_final_p
, complement_op1_p
,
/* Return true if the peephole2 can combine a load involving a combination of
   an addis instruction and a load with an offset that can be fused together
   on a power8.  */

bool
fusion_gpr_load_p (rtx addis_reg,	/* register set via addis.  */
		   rtx addis_value,	/* addis value.  */
		   rtx target,		/* target register that is loaded.  */
		   rtx mem)		/* bottom part of the memory addr.  */
{
  rtx addr;
  rtx base_reg;
33082 if (!base_reg_operand (addis_reg
, GET_MODE (addis_reg
)))
33085 if (!base_reg_operand (target
, GET_MODE (target
)))
33088 if (!fusion_gpr_addis (addis_value
, GET_MODE (addis_value
)))
33091 /* Allow sign/zero extension. */
33092 if (GET_CODE (mem
) == ZERO_EXTEND
33093 || (GET_CODE (mem
) == SIGN_EXTEND
&& TARGET_P8_FUSION_SIGN
))
33094 mem
= XEXP (mem
, 0);
33099 if (!fusion_gpr_mem_load (mem
, GET_MODE (mem
)))
33102 addr
= XEXP (mem
, 0); /* either PLUS or LO_SUM. */
33103 if (GET_CODE (addr
) != PLUS
&& GET_CODE (addr
) != LO_SUM
)
33106 /* Validate that the register used to load the high value is either the
33107 register being loaded, or we can safely replace its use.
33109 This function is only called from the peephole2 pass and we assume that
33110 there are 2 instructions in the peephole (addis and load), so we want to
33111 check if the target register was not used in the memory address and the
33112 register to hold the addis result is dead after the peephole. */
33113 if (REGNO (addis_reg
) != REGNO (target
))
33115 if (reg_mentioned_p (target
, mem
))
33118 if (!peep2_reg_dead_p (2, addis_reg
))
33121 /* If the target register being loaded is the stack pointer, we must
33122 avoid loading any other value into it, even temporarily. */
33123 if (REG_P (target
) && REGNO (target
) == STACK_POINTER_REGNUM
)
33127 base_reg
= XEXP (addr
, 0);
33128 return REGNO (addis_reg
) == REGNO (base_reg
);
/* During the peephole2 pass, adjust and expand the insns for a load fusion
   sequence.  We adjust the addis register to use the target register.  If the
   load sign extends, we adjust the code to do the zero extending load, and an
   explicit sign extension later since the fusion only covers zero extending
   loads.

   The operands are:
	operands[0]	register set with addis (to be replaced with target)
	operands[1]	value set via addis
	operands[2]	target register being loaded
	operands[3]	D-form memory reference using operands[0].  */
33144 expand_fusion_gpr_load (rtx
*operands
)
33146 rtx addis_value
= operands
[1];
33147 rtx target
= operands
[2];
33148 rtx orig_mem
= operands
[3];
33149 rtx new_addr
, new_mem
, orig_addr
, offset
;
33150 enum rtx_code plus_or_lo_sum
;
33151 enum machine_mode target_mode
= GET_MODE (target
);
33152 enum machine_mode extend_mode
= target_mode
;
33153 enum machine_mode ptr_mode
= Pmode
;
33154 enum rtx_code extend
= UNKNOWN
;
33156 if (GET_CODE (orig_mem
) == ZERO_EXTEND
33157 || (TARGET_P8_FUSION_SIGN
&& GET_CODE (orig_mem
) == SIGN_EXTEND
))
33159 extend
= GET_CODE (orig_mem
);
33160 orig_mem
= XEXP (orig_mem
, 0);
33161 target_mode
= GET_MODE (orig_mem
);
33164 gcc_assert (MEM_P (orig_mem
));
33166 orig_addr
= XEXP (orig_mem
, 0);
33167 plus_or_lo_sum
= GET_CODE (orig_addr
);
33168 gcc_assert (plus_or_lo_sum
== PLUS
|| plus_or_lo_sum
== LO_SUM
);
33170 offset
= XEXP (orig_addr
, 1);
33171 new_addr
= gen_rtx_fmt_ee (plus_or_lo_sum
, ptr_mode
, addis_value
, offset
);
33172 new_mem
= replace_equiv_address_nv (orig_mem
, new_addr
, false);
33174 if (extend
!= UNKNOWN
)
33175 new_mem
= gen_rtx_fmt_e (ZERO_EXTEND
, extend_mode
, new_mem
);
33177 new_mem
= gen_rtx_UNSPEC (extend_mode
, gen_rtvec (1, new_mem
),
33178 UNSPEC_FUSION_GPR
);
33179 emit_insn (gen_rtx_SET (VOIDmode
, target
, new_mem
));
33181 if (extend
== SIGN_EXTEND
)
33183 int sub_off
= ((BYTES_BIG_ENDIAN
)
33184 ? GET_MODE_SIZE (extend_mode
) - GET_MODE_SIZE (target_mode
)
33187 = simplify_subreg (target_mode
, target
, extend_mode
, sub_off
);
33189 emit_insn (gen_rtx_SET (VOIDmode
, target
,
33190 gen_rtx_SIGN_EXTEND (extend_mode
, sign_reg
)));
33196 /* Return a string to fuse an addis instruction with a gpr load to the same
33197 register that we loaded up the addis instruction. The address that is used
33198 is the logical address that was formed during peephole2:
33199 (lo_sum (high) (low-part))
33201 The code is complicated, so we call output_asm_insn directly, and just
33205 emit_fusion_gpr_load (rtx target
, rtx mem
)
33211 const char *addis_str
= NULL
;
33212 const char *load_str
= NULL
;
33213 const char *mode_name
= NULL
;
33214 char insn_template
[80];
33215 enum machine_mode mode
;
33216 const char *comment_str
= ASM_COMMENT_START
;
33218 if (GET_CODE (mem
) == ZERO_EXTEND
)
33219 mem
= XEXP (mem
, 0);
33221 gcc_assert (REG_P (target
) && MEM_P (mem
));
33223 if (*comment_str
== ' ')
33226 addr
= XEXP (mem
, 0);
33227 if (GET_CODE (addr
) != PLUS
&& GET_CODE (addr
) != LO_SUM
)
33228 gcc_unreachable ();
33230 addis_value
= XEXP (addr
, 0);
33231 load_offset
= XEXP (addr
, 1);
33233 /* Now emit the load instruction to the same register. */
33234 mode
= GET_MODE (mem
);
33238 mode_name
= "char";
33243 mode_name
= "short";
33253 gcc_assert (TARGET_POWERPC64
);
33254 mode_name
= "long";
33259 gcc_unreachable ();
33262 /* Emit the addis instruction. */
33263 fuse_ops
[0] = target
;
33264 if (satisfies_constraint_L (addis_value
))
33266 fuse_ops
[1] = addis_value
;
33267 addis_str
= "lis %0,%v1";
33270 else if (GET_CODE (addis_value
) == PLUS
)
33272 rtx op0
= XEXP (addis_value
, 0);
33273 rtx op1
= XEXP (addis_value
, 1);
33275 if (REG_P (op0
) && CONST_INT_P (op1
)
33276 && satisfies_constraint_L (op1
))
33280 addis_str
= "addis %0,%1,%v2";
33284 else if (GET_CODE (addis_value
) == HIGH
)
33286 rtx value
= XEXP (addis_value
, 0);
33287 if (GET_CODE (value
) == UNSPEC
&& XINT (value
, 1) == UNSPEC_TOCREL
)
33289 fuse_ops
[1] = XVECEXP (value
, 0, 0); /* symbol ref. */
33290 fuse_ops
[2] = XVECEXP (value
, 0, 1); /* TOC register. */
33292 addis_str
= "addis %0,%2,%1@toc@ha";
33294 else if (TARGET_XCOFF
)
33295 addis_str
= "addis %0,%1@u(%2)";
33298 gcc_unreachable ();
33301 else if (GET_CODE (value
) == PLUS
)
33303 rtx op0
= XEXP (value
, 0);
33304 rtx op1
= XEXP (value
, 1);
33306 if (GET_CODE (op0
) == UNSPEC
33307 && XINT (op0
, 1) == UNSPEC_TOCREL
33308 && CONST_INT_P (op1
))
33310 fuse_ops
[1] = XVECEXP (op0
, 0, 0); /* symbol ref. */
33311 fuse_ops
[2] = XVECEXP (op0
, 0, 1); /* TOC register. */
33314 addis_str
= "addis %0,%2,%1+%3@toc@ha";
33316 else if (TARGET_XCOFF
)
33317 addis_str
= "addis %0,%1+%3@u(%2)";
33320 gcc_unreachable ();
33324 else if (satisfies_constraint_L (value
))
33326 fuse_ops
[1] = value
;
33327 addis_str
= "lis %0,%v1";
33330 else if (TARGET_ELF
&& !TARGET_POWERPC64
&& CONSTANT_P (value
))
33332 fuse_ops
[1] = value
;
33333 addis_str
= "lis %0,%1@ha";
33338 fatal_insn ("Could not generate addis value for fusion", addis_value
);
33340 sprintf (insn_template
, "%s\t\t%s gpr load fusion, type %s", addis_str
,
33341 comment_str
, mode_name
);
33342 output_asm_insn (insn_template
, fuse_ops
);
33344 /* Emit the D-form load instruction. */
33345 if (CONST_INT_P (load_offset
) && satisfies_constraint_I (load_offset
))
33347 sprintf (insn_template
, "%s %%0,%%1(%%0)", load_str
);
33348 fuse_ops
[1] = load_offset
;
33349 output_asm_insn (insn_template
, fuse_ops
);
33352 else if (GET_CODE (load_offset
) == UNSPEC
33353 && XINT (load_offset
, 1) == UNSPEC_TOCREL
)
33356 sprintf (insn_template
, "%s %%0,%%1@toc@l(%%0)", load_str
);
33358 else if (TARGET_XCOFF
)
33359 sprintf (insn_template
, "%s %%0,%%1@l(%%0)", load_str
);
33362 gcc_unreachable ();
33364 fuse_ops
[1] = XVECEXP (load_offset
, 0, 0);
33365 output_asm_insn (insn_template
, fuse_ops
);
33368 else if (GET_CODE (load_offset
) == PLUS
33369 && GET_CODE (XEXP (load_offset
, 0)) == UNSPEC
33370 && XINT (XEXP (load_offset
, 0), 1) == UNSPEC_TOCREL
33371 && CONST_INT_P (XEXP (load_offset
, 1)))
33373 rtx tocrel_unspec
= XEXP (load_offset
, 0);
33375 sprintf (insn_template
, "%s %%0,%%1+%%2@toc@l(%%0)", load_str
);
33377 else if (TARGET_XCOFF
)
33378 sprintf (insn_template
, "%s %%0,%%1+%%2@l(%%0)", load_str
);
33381 gcc_unreachable ();
33383 fuse_ops
[1] = XVECEXP (tocrel_unspec
, 0, 0);
33384 fuse_ops
[2] = XEXP (load_offset
, 1);
33385 output_asm_insn (insn_template
, fuse_ops
);
33388 else if (TARGET_ELF
&& !TARGET_POWERPC64
&& CONSTANT_P (load_offset
))
33390 sprintf (insn_template
, "%s %%0,%%1@l(%%0)", load_str
);
33392 fuse_ops
[1] = load_offset
;
33393 output_asm_insn (insn_template
, fuse_ops
);
33397 fatal_insn ("Unable to generate load offset for fusion", load_offset
);
/* Analyze vector computations and remove unnecessary doubleword
   swaps (xxswapdi instructions).  This pass is performed only
   for little-endian VSX code generation.

   For this specific case, loads and stores of 4x32 and 2x64 vectors
   are inefficient.  These are implemented using the lxvd2x and
   stxvd2x instructions, which invert the order of doublewords in
   a vector register.  Thus the code generation inserts an xxswapdi
   after each such load, and prior to each such store.  (For spill
   code after register assignment, an additional xxswapdi is inserted
   following each store in order to return a hard register to its
   unpermuted value.)

   The extra xxswapdi instructions reduce performance.  This can be
   particularly bad for vectorized code.  The purpose of this pass
   is to reduce the number of xxswapdi instructions required for
   correctness.

   The primary insight is that much code that operates on vectors
   does not care about the relative order of elements in a register,
   so long as the correct memory order is preserved.  If we have
   a computation where all input values are provided by lxvd2x/xxswapdi
   sequences, all outputs are stored using xxswapdi/stxvd2x sequences,
   and all intermediate computations are pure SIMD (independent of
   element order), then all the xxswapdi's associated with the loads
   and stores may be removed.

   This pass uses some of the infrastructure and logical ideas from
   the "web" pass in web.c.  We create maximal webs of computations
   fitting the description above using union-find.  Each such web is
   then optimized by removing its unnecessary xxswapdi instructions.

   The pass is placed prior to global optimization so that we can
   perform the optimization in the safest and simplest way possible;
   that is, by replacing each xxswapdi insn with a register copy insn.
   Subsequent forward propagation will remove copies where possible.

   There are some operations sensitive to element order for which we
   can still allow the operation, provided we modify those operations.
   These include CONST_VECTORs, for which we must swap the first and
   second halves of the constant vector; and SUBREGs, for which we
   must adjust the byte offset to account for the swapped doublewords.
   A remaining opportunity would be non-immediate-form splats, for
   which we should adjust the selected lane of the input.  We should
   also make code generation adjustments for sum-across operations,
   since this is a common vectorizer reduction.

   Because we run prior to the first split, we can see loads and stores
   here that match *vsx_le_perm_{load,store}_<mode>.  These are vanilla
   vector loads and stores that have not yet been split into a permuting
   load/store and a swap.  (One way this can happen is with a builtin
   call to vec_vsx_{ld,st}.)  We can handle these as well, but rather
   than deleting a swap, we convert the load/store into a permuting
   load/store (which effectively removes the swap).  */
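/* An illustrative, hypothetical sketch (helper name is an example only, not
   part of the GCC sources): the xxswapdi operation discussed above exchanges
   the two 64-bit doublewords of a 16-byte vector register.  The helper below
   performs the equivalent byte-level transformation on a 16-byte buffer; it
   is this element-order change that the pass tries to prove unnecessary and
   remove.  */

static inline void
xxswapdi_equivalent_example (unsigned char bytes[16])
{
  for (int i = 0; i < 8; i++)
    {
      unsigned char tmp = bytes[i];
      bytes[i] = bytes[i + 8];
      bytes[i + 8] = tmp;
    }
}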
/* Notes on Permutes

   We do not currently handle computations that contain permutes.  There
   is a general transformation that can be performed correctly, but it
   may introduce more expensive code than it replaces.  To handle these
   would require a cost model to determine when to perform the optimization.
   This commentary records how this could be done if desired.

   The most general permute is something like this (example for V16QI):

   (vec_select:V16QI (vec_concat:V32QI (op1:V16QI) (op2:V16QI))
                     (parallel [(const_int a0) (const_int a1)
                                  ...
                                (const_int a14) (const_int a15)]))

   where a0,...,a15 are in [0,31] and select elements from op1 and op2
   to produce in the result.

   Regardless of mode, we can convert the PARALLEL to a mask of 16
   byte-element selectors.  Let's call this M, with M[i] representing
   the ith byte-element selector value.  Then if we swap doublewords
   throughout the computation, we can get correct behavior by replacing
   M with M' as follows:

            { M[i+8]+8 : i < 8, M[i+8] in [0,7] U [16,23]
    M'[i] = { M[i+8]-8 : i < 8, M[i+8] in [8,15] U [24,31]
            { M[i-8]+8 : i >= 8, M[i-8] in [0,7] U [16,23]
            { M[i-8]-8 : i >= 8, M[i-8] in [8,15] U [24,31]

   This seems promising at first, since we are just replacing one mask
   with another.  But certain masks are preferable to others.  If M
   is a mask that matches a vmrghh pattern, for example, M' certainly
   will not.  Instead of a single vmrghh, we would generate a load of
   M' and a vperm.  So we would need to know how many xxswapd's we can
   remove as a result of this transformation to determine if it's
   profitable; and preferably the logic would need to be aware of all
   the special preferable masks.

   Another form of permute is an UNSPEC_VPERM, in which the mask is
   already in a register.  In some cases, this mask may be a constant
   that we can discover with ud-chains, in which case the above
   transformation is ok.  However, the common usage here is for the
   mask to be produced by an UNSPEC_LVSL, in which case the mask
   cannot be known at compile time.  In such a case we would have to
   generate several instructions to compute M' as above at run time,
   and a cost model is needed again.  */
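/* An illustrative, hypothetical sketch (helper name is an example only, not
   part of the GCC sources): the M -> M' rewrite described in the commentary
   above, written out for 16 byte-element selectors in [0,31].  Each selector
   moves to the opposite doubleword of the result and is redirected to the
   opposite doubleword of whichever input operand it selects.  */

static inline void
swap_permute_mask_example (const unsigned char m[16], unsigned char m_prime[16])
{
  for (int i = 0; i < 16; i++)
    {
      /* Read the selector from the other half of the mask.  */
      unsigned char sel = (i < 8) ? m[i + 8] : m[i - 8];

      /* Selectors 0-7 and 16-23 name the first doubleword of op1/op2;
	 selectors 8-15 and 24-31 name the second doubleword.  Flip to the
	 other doubleword of the same operand.  */
      if ((sel & 15) < 8)
	m_prime[i] = sel + 8;
      else
	m_prime[i] = sel - 8;
    }
}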
33504 /* This is based on the union-find logic in web.c. web_entry_base is
33505 defined in df.h. */
33506 class swap_web_entry
: public web_entry_base
33509 /* Pointer to the insn. */
33511 /* Set if insn contains a mention of a vector register. All other
33512 fields are undefined if this field is unset. */
33513 unsigned int is_relevant
: 1;
33514 /* Set if insn is a load. */
33515 unsigned int is_load
: 1;
33516 /* Set if insn is a store. */
33517 unsigned int is_store
: 1;
33518 /* Set if insn is a doubleword swap. This can either be a register swap
33519 or a permuting load or store (test is_load and is_store for this). */
33520 unsigned int is_swap
: 1;
33521 /* Set if the insn has a live-in use of a parameter register. */
33522 unsigned int is_live_in
: 1;
33523 /* Set if the insn has a live-out def of a return register. */
33524 unsigned int is_live_out
: 1;
33525 /* Set if the insn contains a subreg reference of a vector register. */
33526 unsigned int contains_subreg
: 1;
33527 /* Set if the insn contains a 128-bit integer operand. */
33528 unsigned int is_128_int
: 1;
33529 /* Set if this is a call-insn. */
33530 unsigned int is_call
: 1;
33531 /* Set if this insn does not perform a vector operation for which
33532 element order matters, or if we know how to fix it up if it does.
33533 Undefined if is_swap is set. */
33534 unsigned int is_swappable
: 1;
33535 /* A nonzero value indicates what kind of special handling for this
33536 insn is required if doublewords are swapped. Undefined if
33537 is_swappable is not set. */
33538 unsigned int special_handling
: 3;
33539 /* Set if the web represented by this entry cannot be optimized. */
33540 unsigned int web_not_optimizable
: 1;
33541 /* Set if this insn should be deleted. */
33542 unsigned int will_delete
: 1;
33545 enum special_handling_values
{
33555 /* Union INSN with all insns containing definitions that reach USE.
33556 Detect whether USE is live-in to the current function. */
33558 union_defs (swap_web_entry
*insn_entry
, rtx insn
, df_ref use
)
33560 struct df_link
*link
= DF_REF_CHAIN (use
);
33563 insn_entry
[INSN_UID (insn
)].is_live_in
= 1;
33567 if (DF_REF_IS_ARTIFICIAL (link
->ref
))
33568 insn_entry
[INSN_UID (insn
)].is_live_in
= 1;
33570 if (DF_REF_INSN_INFO (link
->ref
))
33572 rtx def_insn
= DF_REF_INSN (link
->ref
);
33573 (void)unionfind_union (insn_entry
+ INSN_UID (insn
),
33574 insn_entry
+ INSN_UID (def_insn
));
33581 /* Union INSN with all insns containing uses reached from DEF.
33582 Detect whether DEF is live-out from the current function. */
33584 union_uses (swap_web_entry
*insn_entry
, rtx insn
, df_ref def
)
33586 struct df_link
*link
= DF_REF_CHAIN (def
);
33589 insn_entry
[INSN_UID (insn
)].is_live_out
= 1;
33593 /* This could be an eh use or some other artificial use;
33594 we treat these all the same (killing the optimization). */
33595 if (DF_REF_IS_ARTIFICIAL (link
->ref
))
33596 insn_entry
[INSN_UID (insn
)].is_live_out
= 1;
33598 if (DF_REF_INSN_INFO (link
->ref
))
33600 rtx use_insn
= DF_REF_INSN (link
->ref
);
33601 (void)unionfind_union (insn_entry
+ INSN_UID (insn
),
33602 insn_entry
+ INSN_UID (use_insn
));
33609 /* Return 1 iff INSN is a load insn, including permuting loads that
33610 represent an lvxd2x instruction; else return 0. */
33611 static unsigned int
33612 insn_is_load_p (rtx insn
)
33614 rtx body
= PATTERN (insn
);
33616 if (GET_CODE (body
) == SET
)
33618 if (GET_CODE (SET_SRC (body
)) == MEM
)
33621 if (GET_CODE (SET_SRC (body
)) == VEC_SELECT
33622 && GET_CODE (XEXP (SET_SRC (body
), 0)) == MEM
)
33628 if (GET_CODE (body
) != PARALLEL
)
33631 rtx set
= XVECEXP (body
, 0, 0);
33633 if (GET_CODE (set
) == SET
&& GET_CODE (SET_SRC (set
)) == MEM
)
33639 /* Return 1 iff INSN is a store insn, including permuting stores that
33640 represent an stvxd2x instruction; else return 0. */
33641 static unsigned int
33642 insn_is_store_p (rtx insn
)
33644 rtx body
= PATTERN (insn
);
33645 if (GET_CODE (body
) == SET
&& GET_CODE (SET_DEST (body
)) == MEM
)
33647 if (GET_CODE (body
) != PARALLEL
)
33649 rtx set
= XVECEXP (body
, 0, 0);
33650 if (GET_CODE (set
) == SET
&& GET_CODE (SET_DEST (set
)) == MEM
)
33655 /* Return 1 iff INSN swaps doublewords. This may be a reg-reg swap,
33656 a permuting load, or a permuting store. */
33657 static unsigned int
33658 insn_is_swap_p (rtx insn
)
33660 rtx body
= PATTERN (insn
);
33661 if (GET_CODE (body
) != SET
)
33663 rtx rhs
= SET_SRC (body
);
33664 if (GET_CODE (rhs
) != VEC_SELECT
)
33666 rtx parallel
= XEXP (rhs
, 1);
33667 if (GET_CODE (parallel
) != PARALLEL
)
33669 unsigned int len
= XVECLEN (parallel
, 0);
33670 if (len
!= 2 && len
!= 4 && len
!= 8 && len
!= 16)
33672 for (unsigned int i
= 0; i
< len
/ 2; ++i
)
33674 rtx op
= XVECEXP (parallel
, 0, i
);
33675 if (GET_CODE (op
) != CONST_INT
|| INTVAL (op
) != len
/ 2 + i
)
33678 for (unsigned int i
= len
/ 2; i
< len
; ++i
)
33680 rtx op
= XVECEXP (parallel
, 0, i
);
33681 if (GET_CODE (op
) != CONST_INT
|| INTVAL (op
) != i
- len
/ 2)
33687 /* Return 1 iff OP is an operand that will not be affected by having
33688 vector doublewords swapped in memory. */
33689 static unsigned int
33690 rtx_is_swappable_p (rtx op
, unsigned int *special
)
33692 enum rtx_code code
= GET_CODE (op
);
33711 *special
= SH_CONST_VECTOR
;
33715 case VEC_DUPLICATE
:
33716 /* Opportunity: If XEXP (op, 0) has the same mode as the result,
33717 and XEXP (op, 1) is a PARALLEL with a single QImode const int,
33718 it represents a vector splat for which we can do special
33720 if (GET_CODE (XEXP (op
, 0)) == CONST_INT
)
33722 else if (GET_CODE (XEXP (op
, 0)) == REG
33723 && GET_MODE_INNER (GET_MODE (op
)) == GET_MODE (XEXP (op
, 0)))
33724 /* This catches V2DF and V2DI splat, at a minimum. */
33726 else if (GET_CODE (XEXP (op
, 0)) == VEC_SELECT
)
33727 /* If the duplicated item is from a select, defer to the select
33728 processing to see if we can change the lane for the splat. */
33729 return rtx_is_swappable_p (XEXP (op
, 0), special
);
33734 /* A vec_extract operation is ok if we change the lane. */
33735 if (GET_CODE (XEXP (op
, 0)) == REG
33736 && GET_MODE_INNER (GET_MODE (XEXP (op
, 0))) == GET_MODE (op
)
33737 && GET_CODE ((parallel
= XEXP (op
, 1))) == PARALLEL
33738 && XVECLEN (parallel
, 0) == 1
33739 && GET_CODE (XVECEXP (parallel
, 0, 0)) == CONST_INT
)
33741 *special
= SH_EXTRACT
;
33749 /* Various operations are unsafe for this optimization, at least
33750 without significant additional work. Permutes are obviously
33751 problematic, as both the permute control vector and the ordering
33752 of the target values are invalidated by doubleword swapping.
33753 Vector pack and unpack modify the number of vector lanes.
33754 Merge-high/low will not operate correctly on swapped operands.
33755 Vector shifts across element boundaries are clearly uncool,
33756 as are vector select and concatenate operations. Vector
33757 sum-across instructions define one operand with a specific
33758 order-dependent element, so additional fixup code would be
33759 needed to make those work. Vector set and non-immediate-form
33760 vector splat are element-order sensitive. A few of these
33761 cases might be workable with special handling if required. */
33762 int val
= XINT (op
, 1);
33767 case UNSPEC_VMRGH_DIRECT
:
33768 case UNSPEC_VMRGL_DIRECT
:
33769 case UNSPEC_VPACK_SIGN_SIGN_SAT
:
33770 case UNSPEC_VPACK_SIGN_UNS_SAT
:
33771 case UNSPEC_VPACK_UNS_UNS_MOD
:
33772 case UNSPEC_VPACK_UNS_UNS_MOD_DIRECT
:
33773 case UNSPEC_VPACK_UNS_UNS_SAT
:
33775 case UNSPEC_VPERM_UNS
:
33776 case UNSPEC_VPERMHI
:
33777 case UNSPEC_VPERMSI
:
33779 case UNSPEC_VSLDOI
:
33782 case UNSPEC_VSUM2SWS
:
33783 case UNSPEC_VSUM4S
:
33784 case UNSPEC_VSUM4UBS
:
33785 case UNSPEC_VSUMSWS
:
33786 case UNSPEC_VSUMSWS_DIRECT
:
33787 case UNSPEC_VSX_CONCAT
:
33788 case UNSPEC_VSX_SET
:
33789 case UNSPEC_VSX_SLDWI
:
33790 case UNSPEC_VUNPACK_HI_SIGN
:
33791 case UNSPEC_VUNPACK_HI_SIGN_DIRECT
:
33792 case UNSPEC_VUNPACK_LO_SIGN
:
33793 case UNSPEC_VUNPACK_LO_SIGN_DIRECT
:
33794 case UNSPEC_VUPKHPX
:
33795 case UNSPEC_VUPKHS_V4SF
:
33796 case UNSPEC_VUPKHU_V4SF
:
33797 case UNSPEC_VUPKLPX
:
33798 case UNSPEC_VUPKLS_V4SF
:
33799 case UNSPEC_VUPKLU_V4SF
:
33800 /* The following could be handled as an idiom with XXSPLTW.
33801 These place a scalar in BE element zero, but the XXSPLTW
33802 will currently expect it in BE element 2 in a swapped
33803 region. When one of these feeds an XXSPLTW with no other
33804 defs/uses either way, we can avoid the lane change for
33805 XXSPLTW and things will be correct. TBD. */
33806 case UNSPEC_VSX_CVDPSPN
:
33807 case UNSPEC_VSX_CVSPDP
:
33808 case UNSPEC_VSX_CVSPDPN
:
33810 case UNSPEC_VSPLT_DIRECT
:
33811 *special
= SH_SPLAT
;
33820 const char *fmt
= GET_RTX_FORMAT (code
);
33823 for (i
= 0; i
< GET_RTX_LENGTH (code
); ++i
)
33824 if (fmt
[i
] == 'e' || fmt
[i
] == 'u')
33826 unsigned int special_op
= SH_NONE
;
33827 ok
&= rtx_is_swappable_p (XEXP (op
, i
), &special_op
);
33828 /* Ensure we never have two kinds of special handling
33829 for the same insn. */
33830 if (*special
!= SH_NONE
&& special_op
!= SH_NONE
33831 && *special
!= special_op
)
33833 *special
= special_op
;
33835 else if (fmt
[i
] == 'E')
33836 for (j
= 0; j
< XVECLEN (op
, i
); ++j
)
33838 unsigned int special_op
= SH_NONE
;
33839 ok
&= rtx_is_swappable_p (XVECEXP (op
, i
, j
), &special_op
);
33840 /* Ensure we never have two kinds of special handling
33841 for the same insn. */
33842 if (*special
!= SH_NONE
&& special_op
!= SH_NONE
33843 && *special
!= special_op
)
33845 *special
= special_op
;
33851 /* Return 1 iff INSN is an operand that will not be affected by
33852 having vector doublewords swapped in memory (in which case
33853 *SPECIAL is unchanged), or that can be modified to be correct
33854 if vector doublewords are swapped in memory (in which case
33855 *SPECIAL is changed to a value indicating how). */
33856 static unsigned int
33857 insn_is_swappable_p (swap_web_entry
*insn_entry
, rtx insn
,
33858 unsigned int *special
)
33860 /* Calls are always bad. */
33861 if (GET_CODE (insn
) == CALL_INSN
)
33864 /* Loads and stores seen here are not permuting, but we can still
33865 fix them up by converting them to permuting ones. Exceptions:
33866 UNSPEC_LVE, UNSPEC_LVX, and UNSPEC_STVX, which have a PARALLEL
33867 body instead of a SET; and UNSPEC_STVE, which has an UNSPEC
33868 for the SET source. */
33869 rtx body
= PATTERN (insn
);
33870 int i
= INSN_UID (insn
);
33872 if (insn_entry
[i
].is_load
)
33874 if (GET_CODE (body
) == SET
)
33876 *special
= SH_NOSWAP_LD
;
33883 if (insn_entry
[i
].is_store
)
33885 if (GET_CODE (body
) == SET
&& GET_CODE (SET_SRC (body
)) != UNSPEC
)
33887 *special
= SH_NOSWAP_ST
;
33894 /* Otherwise check the operands for vector lane violations. */
33895 return rtx_is_swappable_p (body
, special
);
33898 enum chain_purpose
{ FOR_LOADS
, FOR_STORES
};
33900 /* Return true if the UD or DU chain headed by LINK is non-empty,
33901 and every entry on the chain references an insn that is a
33902 register swap. Furthermore, if PURPOSE is FOR_LOADS, each such
33903 register swap must have only permuting loads as reaching defs.
33904 If PURPOSE is FOR_STORES, each such register swap must have only
33905 register swaps or permuting stores as reached uses. */
33907 chain_contains_only_swaps (swap_web_entry
*insn_entry
, struct df_link
*link
,
33908 enum chain_purpose purpose
)
33913 for (; link
; link
= link
->next
)
33915 if (!VECTOR_MODE_P (GET_MODE (DF_REF_REG (link
->ref
))))
33918 if (DF_REF_IS_ARTIFICIAL (link
->ref
))
33921 rtx reached_insn
= DF_REF_INSN (link
->ref
);
33922 unsigned uid
= INSN_UID (reached_insn
);
33923 struct df_insn_info
*insn_info
= DF_INSN_INFO_GET (reached_insn
);
33925 if (!insn_entry
[uid
].is_swap
|| insn_entry
[uid
].is_load
33926 || insn_entry
[uid
].is_store
)
33929 if (purpose
== FOR_LOADS
)
33932 FOR_EACH_INSN_INFO_USE (use
, insn_info
)
33934 struct df_link
*swap_link
= DF_REF_CHAIN (use
);
33938 if (DF_REF_IS_ARTIFICIAL (link
->ref
))
33941 rtx swap_def_insn
= DF_REF_INSN (swap_link
->ref
);
33942 unsigned uid2
= INSN_UID (swap_def_insn
);
33944 /* Only permuting loads are allowed. */
33945 if (!insn_entry
[uid2
].is_swap
|| !insn_entry
[uid2
].is_load
)
33948 swap_link
= swap_link
->next
;
33952 else if (purpose
== FOR_STORES
)
33955 FOR_EACH_INSN_INFO_DEF (def
, insn_info
)
33957 struct df_link
*swap_link
= DF_REF_CHAIN (def
);
33961 if (DF_REF_IS_ARTIFICIAL (link
->ref
))
33964 rtx swap_use_insn
= DF_REF_INSN (swap_link
->ref
);
33965 unsigned uid2
= INSN_UID (swap_use_insn
);
33967 /* Permuting stores or register swaps are allowed. */
33968 if (!insn_entry
[uid2
].is_swap
|| insn_entry
[uid2
].is_load
)
33971 swap_link
= swap_link
->next
;
33980 /* Mark the xxswapdi instructions associated with permuting loads and
33981 stores for removal. Note that we only flag them for deletion here,
33982 as there is a possibility of a swap being reached from multiple
33985 mark_swaps_for_removal (swap_web_entry
*insn_entry
, unsigned int i
)
33987 rtx insn
= insn_entry
[i
].insn
;
33988 struct df_insn_info
*insn_info
= DF_INSN_INFO_GET (insn
);
33990 if (insn_entry
[i
].is_load
)
33993 FOR_EACH_INSN_INFO_DEF (def
, insn_info
)
33995 struct df_link
*link
= DF_REF_CHAIN (def
);
33997 /* We know by now that these are swaps, so we can delete
33998 them confidently. */
34001 rtx use_insn
= DF_REF_INSN (link
->ref
);
34002 insn_entry
[INSN_UID (use_insn
)].will_delete
= 1;
34007 else if (insn_entry
[i
].is_store
)
34010 FOR_EACH_INSN_INFO_USE (use
, insn_info
)
34012 /* Ignore uses for addressability. */
34013 enum machine_mode mode
= GET_MODE (DF_REF_REG (use
));
34014 if (!VECTOR_MODE_P (mode
))
34017 struct df_link
*link
= DF_REF_CHAIN (use
);
34019 /* We know by now that these are swaps, so we can delete
34020 them confidently. */
34023 rtx def_insn
= DF_REF_INSN (link
->ref
);
34024 insn_entry
[INSN_UID (def_insn
)].will_delete
= 1;
34031 /* OP is either a CONST_VECTOR or an expression containing one.
34032 Swap the first half of the vector with the second in the first
34033 case. Recurse to find it in the second. */
34035 swap_const_vector_halves (rtx op
)
34038 enum rtx_code code
= GET_CODE (op
);
34039 if (GET_CODE (op
) == CONST_VECTOR
)
34041 int half_units
= GET_MODE_NUNITS (GET_MODE (op
)) / 2;
34042 for (i
= 0; i
< half_units
; ++i
)
34044 rtx temp
= CONST_VECTOR_ELT (op
, i
);
34045 CONST_VECTOR_ELT (op
, i
) = CONST_VECTOR_ELT (op
, i
+ half_units
);
34046 CONST_VECTOR_ELT (op
, i
+ half_units
) = temp
;
34052 const char *fmt
= GET_RTX_FORMAT (code
);
34053 for (i
= 0; i
< GET_RTX_LENGTH (code
); ++i
)
34054 if (fmt
[i
] == 'e' || fmt
[i
] == 'u')
34055 swap_const_vector_halves (XEXP (op
, i
));
34056 else if (fmt
[i
] == 'E')
34057 for (j
= 0; j
< XVECLEN (op
, i
); ++j
)
34058 swap_const_vector_halves (XVECEXP (op
, i
, j
));
34062 /* Find all subregs of a vector expression that perform a narrowing,
34063 and adjust the subreg index to account for doubleword swapping. */
34065 adjust_subreg_index (rtx op
)
34067 enum rtx_code code
= GET_CODE (op
);
34069 && (GET_MODE_SIZE (GET_MODE (op
))
34070 < GET_MODE_SIZE (GET_MODE (XEXP (op
, 0)))))
34072 unsigned int index
= SUBREG_BYTE (op
);
34077 SUBREG_BYTE (op
) = index
;
34080 const char *fmt
= GET_RTX_FORMAT (code
);
34082 for (i
= 0; i
< GET_RTX_LENGTH (code
); ++i
)
34083 if (fmt
[i
] == 'e' || fmt
[i
] == 'u')
34084 adjust_subreg_index (XEXP (op
, i
));
34085 else if (fmt
[i
] == 'E')
34086 for (j
= 0; j
< XVECLEN (op
, i
); ++j
)
34087 adjust_subreg_index (XVECEXP (op
, i
, j
));
34090 /* Convert the non-permuting load INSN to a permuting one. */
34092 permute_load (rtx_insn
*insn
)
34094 rtx body
= PATTERN (insn
);
34095 rtx mem_op
= SET_SRC (body
);
34096 rtx tgt_reg
= SET_DEST (body
);
34097 enum machine_mode mode
= GET_MODE (tgt_reg
);
34098 int n_elts
= GET_MODE_NUNITS (mode
);
34099 int half_elts
= n_elts
/ 2;
34100 rtx par
= gen_rtx_PARALLEL (mode
, rtvec_alloc (n_elts
));
34102 for (i
= 0, j
= half_elts
; i
< half_elts
; ++i
, ++j
)
34103 XVECEXP (par
, 0, i
) = GEN_INT (j
);
34104 for (i
= half_elts
, j
= 0; j
< half_elts
; ++i
, ++j
)
34105 XVECEXP (par
, 0, i
) = GEN_INT (j
);
34106 rtx sel
= gen_rtx_VEC_SELECT (mode
, mem_op
, par
);
34107 SET_SRC (body
) = sel
;
34108 INSN_CODE (insn
) = -1; /* Force re-recognition. */
34109 df_insn_rescan (insn
);
34112 fprintf (dump_file
, "Replacing load %d with permuted load\n",
34116 /* Convert the non-permuting store INSN to a permuting one. */
34118 permute_store (rtx_insn
*insn
)
34120 rtx body
= PATTERN (insn
);
34121 rtx src_reg
= SET_SRC (body
);
34122 enum machine_mode mode
= GET_MODE (src_reg
);
34123 int n_elts
= GET_MODE_NUNITS (mode
);
34124 int half_elts
= n_elts
/ 2;
34125 rtx par
= gen_rtx_PARALLEL (mode
, rtvec_alloc (n_elts
));
34127 for (i
= 0, j
= half_elts
; i
< half_elts
; ++i
, ++j
)
34128 XVECEXP (par
, 0, i
) = GEN_INT (j
);
34129 for (i
= half_elts
, j
= 0; j
< half_elts
; ++i
, ++j
)
34130 XVECEXP (par
, 0, i
) = GEN_INT (j
);
34131 rtx sel
= gen_rtx_VEC_SELECT (mode
, src_reg
, par
);
34132 SET_SRC (body
) = sel
;
34133 INSN_CODE (insn
) = -1; /* Force re-recognition. */
34134 df_insn_rescan (insn
);
34137 fprintf (dump_file
, "Replacing store %d with permuted store\n",
34141 /* Given OP that contains a vector extract operation, adjust the index
34142 of the extracted lane to account for the doubleword swap. */
34144 adjust_extract (rtx_insn
*insn
)
34146 rtx src
= SET_SRC (PATTERN (insn
));
34147 /* The vec_select may be wrapped in a vec_duplicate for a splat, so
34148 account for that. */
34149 rtx sel
= GET_CODE (src
) == VEC_DUPLICATE
? XEXP (src
, 0) : src
;
34150 rtx par
= XEXP (sel
, 1);
34151 int half_elts
= GET_MODE_NUNITS (GET_MODE (XEXP (sel
, 0))) >> 1;
34152 int lane
= INTVAL (XVECEXP (par
, 0, 0));
34153 lane
= lane
>= half_elts
? lane
- half_elts
: lane
+ half_elts
;
34154 XVECEXP (par
, 0, 0) = GEN_INT (lane
);
34155 INSN_CODE (insn
) = -1; /* Force re-recognition. */
34156 df_insn_rescan (insn
);
34159 fprintf (dump_file
, "Changing lane for extract %d\n", INSN_UID (insn
));
34162 /* Given OP that contains a vector direct-splat operation, adjust the index
34163 of the source lane to account for the doubleword swap. */
34165 adjust_splat (rtx_insn
*insn
)
34167 rtx body
= PATTERN (insn
);
34168 rtx unspec
= XEXP (body
, 1);
34169 int half_elts
= GET_MODE_NUNITS (GET_MODE (unspec
)) >> 1;
34170 int lane
= INTVAL (XVECEXP (unspec
, 0, 1));
34171 lane
= lane
>= half_elts
? lane
- half_elts
: lane
+ half_elts
;
34172 XVECEXP (unspec
, 0, 1) = GEN_INT (lane
);
34173 INSN_CODE (insn
) = -1; /* Force re-recognition. */
34174 df_insn_rescan (insn
);
34177 fprintf (dump_file
, "Changing lane for splat %d\n", INSN_UID (insn
));
34180 /* The insn described by INSN_ENTRY[I] can be swapped, but only
34181 with special handling. Take care of that here. */
34183 handle_special_swappables (swap_web_entry
*insn_entry
, unsigned i
)
34185 rtx_insn
*insn
= insn_entry
[i
].insn
;
34186 rtx body
= PATTERN (insn
);
34188 switch (insn_entry
[i
].special_handling
)
34191 gcc_unreachable ();
34192 case SH_CONST_VECTOR
:
34194 /* A CONST_VECTOR will only show up somewhere in the RHS of a SET. */
34195 gcc_assert (GET_CODE (body
) == SET
);
34196 rtx rhs
= SET_SRC (body
);
34197 swap_const_vector_halves (rhs
);
34199 fprintf (dump_file
, "Swapping constant halves in insn %d\n", i
);
34203 /* A subreg of the same size is already safe. For subregs that
34204 select a smaller portion of a reg, adjust the index for
34205 swapped doublewords. */
34206 adjust_subreg_index (body
);
34208 fprintf (dump_file
, "Adjusting subreg in insn %d\n", i
);
34211 /* Convert a non-permuting load to a permuting one. */
34212 permute_load (insn
);
34215 /* Convert a non-permuting store to a permuting one. */
34216 permute_store (insn
);
34219 /* Change the lane on an extract operation. */
34220 adjust_extract (insn
);
34223 /* Change the lane on a direct-splat operation. */
34224 adjust_splat (insn
);
34229 /* Find the insn from the Ith table entry, which is known to be a
34230 register swap Y = SWAP(X). Replace it with a copy Y = X. */
34232 replace_swap_with_copy (swap_web_entry
*insn_entry
, unsigned i
)
34234 rtx_insn
*insn
= insn_entry
[i
].insn
;
34235 rtx body
= PATTERN (insn
);
34236 rtx src_reg
= XEXP (SET_SRC (body
), 0);
34237 rtx copy
= gen_rtx_SET (VOIDmode
, SET_DEST (body
), src_reg
);
34238 rtx_insn
*new_insn
= emit_insn_before (copy
, insn
);
34239 set_block_for_insn (new_insn
, BLOCK_FOR_INSN (insn
));
34240 df_insn_rescan (new_insn
);
34244 unsigned int new_uid
= INSN_UID (new_insn
);
34245 fprintf (dump_file
, "Replacing swap %d with copy %d\n", i
, new_uid
);
34248 df_insn_delete (insn
);
34249 remove_insn (insn
);
34250 insn
->set_deleted ();

/* Dump the swap table to DUMP_FILE.  */
static void
dump_swap_insn_table (swap_web_entry *insn_entry)
{
  int e = get_max_uid ();
  fprintf (dump_file, "\nRelevant insns with their flag settings\n\n");

  for (int i = 0; i < e; ++i)
    if (insn_entry[i].is_relevant)
      {
        swap_web_entry *pred_entry = (swap_web_entry *)insn_entry[i].pred ();
        fprintf (dump_file, "%6d %6d ", i,
                 pred_entry && pred_entry->insn
                 ? INSN_UID (pred_entry->insn) : 0);
        if (insn_entry[i].is_load)
          fputs ("load ", dump_file);
        if (insn_entry[i].is_store)
          fputs ("store ", dump_file);
        if (insn_entry[i].is_swap)
          fputs ("swap ", dump_file);
        if (insn_entry[i].is_live_in)
          fputs ("live-in ", dump_file);
        if (insn_entry[i].is_live_out)
          fputs ("live-out ", dump_file);
        if (insn_entry[i].contains_subreg)
          fputs ("subreg ", dump_file);
        if (insn_entry[i].is_128_int)
          fputs ("int128 ", dump_file);
        if (insn_entry[i].is_call)
          fputs ("call ", dump_file);
        if (insn_entry[i].is_swappable)
          {
            fputs ("swappable ", dump_file);
            if (insn_entry[i].special_handling == SH_CONST_VECTOR)
              fputs ("special:constvec ", dump_file);
            else if (insn_entry[i].special_handling == SH_SUBREG)
              fputs ("special:subreg ", dump_file);
            else if (insn_entry[i].special_handling == SH_NOSWAP_LD)
              fputs ("special:load ", dump_file);
            else if (insn_entry[i].special_handling == SH_NOSWAP_ST)
              fputs ("special:store ", dump_file);
            else if (insn_entry[i].special_handling == SH_EXTRACT)
              fputs ("special:extract ", dump_file);
            else if (insn_entry[i].special_handling == SH_SPLAT)
              fputs ("special:splat ", dump_file);
          }
        if (insn_entry[i].web_not_optimizable)
          fputs ("unoptimizable ", dump_file);
        if (insn_entry[i].will_delete)
          fputs ("delete ", dump_file);
        fputs ("\n", dump_file);
      }
  fputs ("\n", dump_file);
}
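
/* The analysis below runs in phases: gather per-insn facts about vector
   register mentions and union related insns into webs; mark webs that
   touch anything whose element order is visible outside the web
   (live-in/live-out values, calls, 128-bit integer operations, subregs
   other than the simple case, or insns that are neither swaps nor known
   to be swappable) as not optimizable; then, in each optimizable web,
   mark the doubleword swaps for removal and apply the special handling
   that some swappable insns require.  Finally the marked swaps are
   replaced by plain copies.  */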

/* Main entry point for this pass.  */
unsigned int
rs6000_analyze_swaps (function *fun)
{
  swap_web_entry *insn_entry;
  basic_block bb;
  rtx_insn *insn;

  /* Dataflow analysis for use-def chains.  */
  df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
  df_chain_add_problem (DF_DU_CHAIN | DF_UD_CHAIN);
  df_analyze ();
  df_set_flags (DF_DEFER_INSN_RESCAN);

  /* Allocate structure to represent webs of insns.  */
  insn_entry = XCNEWVEC (swap_web_entry, get_max_uid ());
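
  /* INSN_ENTRY is indexed by INSN_UID.  Each swap_web_entry also acts as a
     union-find node, so the union_defs/union_uses calls below knit related
     insns into webs whose combined properties are later read off the root
     entry.  */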

  /* Walk the insns to gather basic data.  */
  FOR_ALL_BB_FN (bb, fun)
    FOR_BB_INSNS (bb, insn)
    {
      unsigned int uid = INSN_UID (insn);
      if (NONDEBUG_INSN_P (insn))
        {
          insn_entry[uid].insn = insn;

          if (GET_CODE (insn) == CALL_INSN)
            insn_entry[uid].is_call = 1;

          /* Walk the uses and defs to see if we mention vector regs.
             Record any constraints on optimization of such mentions.  */
          struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
          df_ref mention;
          FOR_EACH_INSN_INFO_USE (mention, insn_info)
            {
              /* We use DF_REF_REAL_REG here to get inside any subregs.  */
              enum machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));

              /* If a use gets its value from a call insn, it will be
                 a hard register and will look like (reg:V4SI 3 3).
                 The df analysis creates two mentions for GPR3 and GPR4,
                 both DImode.  We must recognize this and treat it as a
                 vector mention to ensure the call is unioned with this
                 use.  */
              if (mode == DImode && DF_REF_INSN_INFO (mention))
                {
                  rtx feeder = DF_REF_INSN (mention);
                  /* FIXME: It is pretty hard to get from the df mention
                     to the mode of the use in the insn.  We arbitrarily
                     pick a vector mode here, even though the use might
                     be a real DImode.  We can be too conservative
                     (create a web larger than necessary) because of
                     this, so consider eventually fixing this.  */
                  if (GET_CODE (feeder) == CALL_INSN)
                    mode = V4SImode;
                }

              if (VECTOR_MODE_P (mode))
                {
                  insn_entry[uid].is_relevant = 1;
                  if (mode == TImode || mode == V1TImode)
                    insn_entry[uid].is_128_int = 1;
                  if (DF_REF_INSN_INFO (mention))
                    insn_entry[uid].contains_subreg
                      = !rtx_equal_p (DF_REF_REG (mention),
                                      DF_REF_REAL_REG (mention));
                  union_defs (insn_entry, insn, mention);
                }
            }
          FOR_EACH_INSN_INFO_DEF (mention, insn_info)
            {
              /* We use DF_REF_REAL_REG here to get inside any subregs.  */
              enum machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));

              /* If we're loading up a hard vector register for a call,
                 it looks like (set (reg:V4SI 9 9) (...)).  The df
                 analysis creates two mentions for GPR9 and GPR10, both
                 DImode.  So relying on the mode from the mentions
                 isn't sufficient to ensure we union the call into the
                 web with the parameter setup code.  Check the insn's
                 pattern instead.  */
              if (mode == DImode && GET_CODE (PATTERN (insn)) == SET
                  && VECTOR_MODE_P (GET_MODE (SET_DEST (PATTERN (insn)))))
                mode = GET_MODE (SET_DEST (PATTERN (insn)));

              if (VECTOR_MODE_P (mode))
                {
                  insn_entry[uid].is_relevant = 1;
                  if (mode == TImode || mode == V1TImode)
                    insn_entry[uid].is_128_int = 1;
                  if (DF_REF_INSN_INFO (mention))
                    insn_entry[uid].contains_subreg
                      = !rtx_equal_p (DF_REF_REG (mention),
                                      DF_REF_REAL_REG (mention));
                  /* REG_FUNCTION_VALUE_P is not valid for subregs.  */
                  else if (REG_FUNCTION_VALUE_P (DF_REF_REG (mention)))
                    insn_entry[uid].is_live_out = 1;
                  union_uses (insn_entry, insn, mention);
                }
            }

          if (insn_entry[uid].is_relevant)
            {
              /* Determine if this is a load or store.  */
              insn_entry[uid].is_load = insn_is_load_p (insn);
              insn_entry[uid].is_store = insn_is_store_p (insn);

              /* Determine if this is a doubleword swap.  If not,
                 determine whether it can legally be swapped.  */
              if (insn_is_swap_p (insn))
                insn_entry[uid].is_swap = 1;
              else
                {
                  unsigned int special = SH_NONE;
                  insn_entry[uid].is_swappable
                    = insn_is_swappable_p (insn_entry, insn, &special);
                  if (special != SH_NONE && insn_entry[uid].contains_subreg)
                    insn_entry[uid].is_swappable = 0;
                  else if (special != SH_NONE)
                    insn_entry[uid].special_handling = special;
                  else if (insn_entry[uid].contains_subreg)
                    insn_entry[uid].special_handling = SH_SUBREG;
                }
            }
        }
    }

  if (dump_file)
    {
      fprintf (dump_file, "\nSwap insn entry table when first built\n");
      dump_swap_insn_table (insn_entry);
    }

  /* Record unoptimizable webs.  */
  unsigned e = get_max_uid (), i;
  for (i = 0; i < e; ++i)
    {
      if (!insn_entry[i].is_relevant)
        continue;

      swap_web_entry *root
        = (swap_web_entry*)(&insn_entry[i])->unionfind_root ();
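
      /* A web is disqualified outright if any member insn is a call,
         handles a value that is live into or out of the function, operates
         on a 128-bit integer mode, contains a subreg other than the simple
         same-size (SH_SUBREG) case, or is neither a doubleword swap nor
         something we know how to adjust.  */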
      if (insn_entry[i].is_live_in || insn_entry[i].is_live_out
          || (insn_entry[i].contains_subreg
              && insn_entry[i].special_handling != SH_SUBREG)
          || insn_entry[i].is_128_int || insn_entry[i].is_call
          || !(insn_entry[i].is_swappable || insn_entry[i].is_swap))
        root->web_not_optimizable = 1;

      /* If we have loads or stores that aren't permuting then the
         optimization isn't appropriate.  */
      else if ((insn_entry[i].is_load || insn_entry[i].is_store)
               && !insn_entry[i].is_swap && !insn_entry[i].is_swappable)
        root->web_not_optimizable = 1;

      /* If we have permuting loads or stores that are not accompanied
         by a register swap, the optimization isn't appropriate.  */
      else if (insn_entry[i].is_load && insn_entry[i].is_swap)
        {
          rtx insn = insn_entry[i].insn;
          struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
          df_ref def;

          FOR_EACH_INSN_INFO_DEF (def, insn_info)
            {
              struct df_link *link = DF_REF_CHAIN (def);

              if (!chain_contains_only_swaps (insn_entry, link, FOR_LOADS))
                {
                  root->web_not_optimizable = 1;
                  break;
                }
            }
        }
      else if (insn_entry[i].is_store && insn_entry[i].is_swap)
        {
          rtx insn = insn_entry[i].insn;
          struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
          df_ref use;

          FOR_EACH_INSN_INFO_USE (use, insn_info)
            {
              struct df_link *link = DF_REF_CHAIN (use);

              if (!chain_contains_only_swaps (insn_entry, link, FOR_STORES))
                {
                  root->web_not_optimizable = 1;
                  break;
                }
            }
        }
    }

  if (dump_file)
    {
      fprintf (dump_file, "\nSwap insn entry table after web analysis\n");
      dump_swap_insn_table (insn_entry);
    }

  /* For each load and store in an optimizable web (which implies
     the loads and stores are permuting), find the associated
     register swaps and mark them for removal.  Due to various
     optimizations we may mark the same swap more than once.  Also
     perform special handling for swappable insns that require it.  */
  for (i = 0; i < e; ++i)
    if ((insn_entry[i].is_load || insn_entry[i].is_store)
        && insn_entry[i].is_swap)
      {
        swap_web_entry* root_entry
          = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
        if (!root_entry->web_not_optimizable)
          mark_swaps_for_removal (insn_entry, i);
      }
    else if (insn_entry[i].is_swappable && insn_entry[i].special_handling)
      {
        swap_web_entry* root_entry
          = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
        if (!root_entry->web_not_optimizable)
          handle_special_swappables (insn_entry, i);
      }

  /* Now delete the swaps marked for removal.  */
  for (i = 0; i < e; ++i)
    if (insn_entry[i].will_delete)
      replace_swap_with_copy (insn_entry, i);

  /* Clean up.  */
  free (insn_entry);
  return 0;
}

const pass_data pass_data_analyze_swaps =
{
  RTL_PASS, /* type */
  "swaps", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_analyze_swaps : public rtl_opt_pass
{
public:
  pass_analyze_swaps(gcc::context *ctxt)
    : rtl_opt_pass(pass_data_analyze_swaps, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *)
  {
    return (optimize > 0 && !BYTES_BIG_ENDIAN && TARGET_VSX
            && rs6000_optimize_swaps);
  }

  virtual unsigned int execute (function *fun)
  {
    return rs6000_analyze_swaps (fun);
  }

}; // class pass_analyze_swaps

rtl_opt_pass *
make_pass_analyze_swaps (gcc::context *ctxt)
{
  return new pass_analyze_swaps (ctxt);
}
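
/* Nothing in this file invokes the pass directly; the factory above is
   presumably called from the target's option-override code, which
   registers the pass with register_pass so that the pass manager runs it
   whenever the gate above allows.  */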

struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"