/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2013 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "print-tree.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "sched-int.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "tm-constrs.h"
#include "tree-vectorizer.h"
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#include "gstab.h"  /* for N_SLINE */
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)	((A) < (B) ? (A) : (B))
#define max(A,B)	((A) > (B) ? (A) : (B))
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack {
  int reload_completed;		/* stack info won't change from here on */
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs  */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int ehcr_offset;		/* offset to EH CR field data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in save_size */
  int vrsave_size;		/* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;	/* size of altivec alignment padding if
				   not in save_size */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
} rs6000_stack_t;
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
} machine_function;
/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section */
static int rs6000_pic_labelno;

/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;
#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif
/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;
/* Built in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized */
int toc_initialized;
char toc_label_name[10];
/* Cached value of rs6000_variable_issue.  This is cached in the
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];
/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];
/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV		= 0x001,	/* Use divide estimate.  */
  RECIP_DF_DIV		= 0x002,
  RECIP_V4SF_DIV	= 0x004,
  RECIP_V2DF_DIV	= 0x008,

  RECIP_SF_RSQRT	= 0x010,	/* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT	= 0x020,
  RECIP_V4SF_RSQRT	= 0x040,
  RECIP_V2DF_RSQRT	= 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE		= 0,
  RECIP_ALL		= (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
			   | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
			   | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION	= RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION	= (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};
/* -mrecip options.  */
static struct
{
  const char *string;		/* option name */
  unsigned int mask;		/* mask bits to set */
} recip_options[] = {
  { "all",	 RECIP_ALL },
  { "none",	 RECIP_NONE },
  { "div",	 (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
		  | RECIP_V2DF_DIV) },
  { "divf",	 (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",	 (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",	 (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
		  | RECIP_V2DF_RSQRT) },
  { "rsqrtf",	 (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",	 (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
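/* Illustrative note (not part of the original source): with -mrecip=rsqrtf,
   the "rsqrtf" entry above ORs RECIP_SF_RSQRT | RECIP_V4SF_RSQRT into the
   reciprocal mask, so only the single precision scalar and V4SF vector
   reciprocal square root estimates are enabled, while the double precision
   forms stay off.  */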
/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);
/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE,
  SPE_ACC_TYPE,
  SPEFSCR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];
/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
/* Register classes we care about in secondary reload or go if legitimate
   address.  We only need to worry about GPR, FPR, and Altivec registers here,
   along with an ANY field that is the OR of the 3 register classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,			/* General purpose registers.  */
  RELOAD_REG_FPR,			/* Traditional floating point regs.  */
  RELOAD_REG_VMX,			/* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,			/* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   3 classes.  */
#define FIRST_RELOAD_REG_CLASS	RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS	RELOAD_REG_VMX
/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;			/* Register class name.  */
  int reg;				/* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",	FIRST_GPR_REGNO },	/* RELOAD_REG_GPR.  */
  { "Fpr",	FIRST_FPR_REGNO },	/* RELOAD_REG_FPR.  */
  { "VMX",	FIRST_ALTIVEC_REGNO },	/* RELOAD_REG_VMX.  */
  { "Any",	-1 },			/* RELOAD_REG_ANY.  */
};
/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID	0x01	/* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE	0x02	/* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED	0x04	/* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET	0x08	/* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC	0x10	/* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY	0x20	/* PRE_MODIFY valid.  */
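/* Illustrative example (not part of the original source): a mode whose mask
   is (RELOAD_REG_VALID | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET) is valid in
   the register class and accepts both reg+reg and reg+offset addresses, but
   none of the auto-update (PRE_INC/PRE_DEC/PRE_MODIFY) forms.  */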
/* Register type masks based on the type, of valid addressing modes.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;		/* INSN to reload for loading.  */
  enum insn_code reload_store;		/* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;	/* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;	/* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;	/* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];
/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (enum machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
	  != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (enum machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
	  != 0);
}
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};

const struct processor_costs *rs6000_cost;
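/* Illustrative note (not part of the original source): rs6000_cost is pointed
   at one of the per-processor tables below, so e.g. rs6000_cost->sdiv gives
   the cost of an SFmode divide, relative to an add, on the selected CPU.  */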
/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
static struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
};

/* Instruction size costs on 64bit processors.  */
static struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
};

/* Instruction costs on RS64A processors.  */
static struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,			/* cache line size */
};

/* Instruction costs on MPCCORE processors.  */
static struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC403 processors.  */
static struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC405 processors.  */
static struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC440 processors.  */
static struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC476 processors.  */
static struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* l1 cache line size */
};

/* Instruction costs on PPC601 processors.  */
static struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC603 processors.  */
static struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC604 processors.  */
static struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC604e processors.  */
static struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC620 processors.  */
static struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,			/* cache line size */
};

/* Instruction costs on PPC630 processors.  */
static struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,			/* cache line size */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),    /* mulsi_const */
  COSTS_N_INSNS (6/2),    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),   /* divsi */
  COSTS_N_INSNS (70/2),   /* divdi */
  COSTS_N_INSNS (10/2),   /* fp */
  COSTS_N_INSNS (10/2),   /* dmul */
  COSTS_N_INSNS (74/2),   /* sdiv */
  COSTS_N_INSNS (74/2),   /* ddiv */
  128,			/* cache line size */
};
/* Instruction costs on PPC750 and PPC7400 processors.  */
static struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC7450 processors.  */
static struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,			/* cache line size */
};

/* Instruction costs on PPC8540 processors.  */
static struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,			/* cache line size */
  1,			/* prefetch streams */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE500MC processors.  */
static struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
};
/* Instruction costs on PPCE500MC64 processors.  */
static struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE5500 processors.  */
static struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
};

/* Instruction costs on PPCE6500 processors.  */
static struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  1,			/* prefetch streams */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,			/* cache line size */
  1,			/* prefetch streams */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,			/* cache line size */
  8,			/* prefetch streams */
};
/* Instruction costs on POWER6 processors.  */
static struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,			/* cache line size */
  16,			/* prefetch streams */
};

/* Instruction costs on POWER7 processors.  */
static struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),	/* mulsi */
  COSTS_N_INSNS (2),	/* mulsi_const */
  COSTS_N_INSNS (2),	/* mulsi_const9 */
  COSTS_N_INSNS (2),	/* muldi */
  COSTS_N_INSNS (18),	/* divsi */
  COSTS_N_INSNS (34),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (13),	/* sdiv */
  COSTS_N_INSNS (16),	/* ddiv */
  128,			/* cache line size */
  12,			/* prefetch streams */
};

/* Instruction costs on POWER8 processors.  */
static struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),	/* mulsi */
  COSTS_N_INSNS (3),	/* mulsi_const */
  COSTS_N_INSNS (3),	/* mulsi_const9 */
  COSTS_N_INSNS (3),	/* muldi */
  COSTS_N_INSNS (19),	/* divsi */
  COSTS_N_INSNS (35),	/* divdi */
  COSTS_N_INSNS (3),	/* fp */
  COSTS_N_INSNS (3),	/* dmul */
  COSTS_N_INSNS (14),	/* sdiv */
  COSTS_N_INSNS (17),	/* ddiv */
  128,			/* cache line size */
  12,			/* prefetch streams */
};

/* Instruction costs on POWER A2 processors.  */
static struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),    /* mulsi */
  COSTS_N_INSNS (16),    /* mulsi_const */
  COSTS_N_INSNS (16),    /* mulsi_const9 */
  COSTS_N_INSNS (16),   /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (59),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  16,			/* prefetch streams */
};
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);

static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function *rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, enum machine_mode, addr_space_t,
				      bool);
static int rs6000_debug_adjust_cost (rtx, rtx, rtx, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static void altivec_init_builtins (void);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
				   enum machine_mode, enum machine_mode,
				   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static void htm_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static int rs6000_tls_symbol_ref_1 (rtx *, void *);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
				       int, bool);
static void macho_branch_islands (void);
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
					     int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
						    int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
						     enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
							    enum machine_mode,
							    rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
							    enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
					    enum machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
						   enum reg_class,
						   enum machine_mode);
static bool rs6000_cannot_change_mode_class (enum machine_mode,
					     enum machine_mode,
					     enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
						    enum machine_mode,
						    enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);
rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
					     int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
						     enum machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
					    enum machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
					     enum machine_mode,
					     enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
				      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
					  HOST_WIDE_INT);
static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
					  enum rs6000_reg_type,
					  enum machine_mode,
					  secondary_reload_info *,
					  bool);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;
/* Hash table to keep track of the argument types for builtin functions.  */
struct GTY(()) builtin_hash_struct
{
  tree type;			/* builtin type to use.  */
  enum machine_mode mode[4];	/* return value + 3 arguments.  */
  unsigned char uns_p[4];	/* and whether the types are unsigned.  */
};

static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;
/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr","ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "ca",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9",  "10", "11", "12", "13", "14", "15",
      "16", "17", "18", "19", "20", "21", "22", "23",
      "24", "25", "26", "27", "28", "29", "30", "31",
      "vrsave", "vscr",
      /* SPE registers.  */
      "spe_acc", "spefscr",
      /* Soft frame pointer.  */
      "sfp",
      /* HTM SPR registers.  */
      "tfhar", "tfiar", "texasr"
};
#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",  "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",  "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",  "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",  "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",   "lr",  "ctr",   "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
   "ca",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr"
};
#endif
/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
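/* Illustrative example (not part of the original source): per the comment
   above, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (%v0 in the
   most significant bit) and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is
   0x00000001 (%v31 in the least significant bit).  */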
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
#undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
#define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF

/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"

#else

#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"

#endif
/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer

#if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif
#undef TARGET_SET_UP_BY_PROLOGUE
#define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem

#undef TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address

#undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
#define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
#define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
  rs6000_builtin_support_vector_misalignment
#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
  rs6000_builtin_vectorization_cost
#undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
#define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
  rs6000_preferred_simd_mode
#undef TARGET_VECTORIZE_INIT_COST
#define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
#undef TARGET_VECTORIZE_ADD_STMT_COST
#define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
#undef TARGET_VECTORIZE_FINISH_COST
#define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
#undef TARGET_VECTORIZE_DESTROY_COST_DATA
#define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins
#undef TARGET_BUILTIN_DECL
#define TARGET_BUILTIN_DECL rs6000_builtin_decl

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif
#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk
/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB rs6000_return_in_msb

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG rs6000_function_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
#define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE rs6000_option_override

#undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
#define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
  rs6000_builtin_vectorized_function

#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true

#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,low

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
#undef TARGET_USE_BLOCKS_FOR_DECL_P
#define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD rs6000_secondary_reload

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p

#undef TARGET_LRA_P
#define TARGET_LRA_P rs6000_lra_p
#undef TARGET_CAN_ELIMINATE
#define TARGET_CAN_ELIMINATE rs6000_can_eliminate

#undef TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage

#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init

#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE rs6000_function_value

#undef TARGET_OPTION_VALID_ATTRIBUTE_P
#define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p

#undef TARGET_OPTION_SAVE
#define TARGET_OPTION_SAVE rs6000_function_specific_save

#undef TARGET_OPTION_RESTORE
#define TARGET_OPTION_RESTORE rs6000_function_specific_restore

#undef TARGET_OPTION_PRINT
#define TARGET_OPTION_PRINT rs6000_function_specific_print

#undef TARGET_CAN_INLINE_P
#define TARGET_CAN_INLINE_P rs6000_can_inline_p

#undef TARGET_SET_CURRENT_FUNCTION
#define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p

#undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
#define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok

#undef TARGET_CAN_USE_DOLOOP_P
#define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
/* Processor table.  */
struct rs6000_ptt
{
  const char *const name;		/* Canonical processor name.  */
  const enum processor_type processor;	/* Processor type enum value.  */
  const HOST_WIDE_INT target_enable;	/* Target flags to enable.  */
};

static struct rs6000_ptt const processor_target_table[] =
{
#define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
#include "rs6000-cpus.def"
#undef RS6000_CPU
};
/* Look up a processor name for -mcpu=xxx and -mtune=xxx.  Return -1 if the
   name is invalid.  */

static int
rs6000_cpu_name_lookup (const char *name)
{
  size_t i;

  if (name != NULL)
    {
      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (! strcmp (name, processor_target_table[i].name))
	  return (int)i;
    }

  return -1;
}
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   For the SPE, GPRs are 64 bits but only 32 bits are visible in
   scalar instructions.  The upper 32 bits are only available to the
   SIMD instructions.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */

static int
rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
{
  unsigned HOST_WIDE_INT reg_size;

  /* TF/TD modes are special in that they always take 2 registers.  */
  if (FP_REGNO_P (regno))
    reg_size = ((VECTOR_MEM_VSX_P (mode) && mode != TDmode && mode != TFmode)
		? UNITS_PER_VSX_WORD
		: UNITS_PER_FP_WORD);

  else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    reg_size = UNITS_PER_SPE_WORD;

  else if (ALTIVEC_REGNO_P (regno))
    reg_size = UNITS_PER_ALTIVEC_WORD;

  /* The value returned for SCmode in the E500 double case is 2 for
     ABI compatibility; storing an SCmode value in a single register
     would require function_arg and rs6000_spe_function_arg to handle
     SCmode so as to pass the value correctly in a pair of
     registers.  */
  else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
	   && !DECIMAL_FLOAT_MODE_P (mode))
    reg_size = UNITS_PER_FP_WORD;

  else
    reg_size = UNITS_PER_WORD;

  return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
}
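/* Illustrative example (not part of the original source): for TFmode
   (16 bytes) in a floating point register, reg_size is UNITS_PER_FP_WORD
   (8 bytes), so the computation above yields 2 registers, matching the
   comment that TF/TD modes always take 2 registers.  */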
/* Value is 1 if hard register REGNO can hold a value of machine-mode
   MODE.  */

static int
rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
{
  int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;

  /* PTImode can only go in GPRs.  Quad word memory operations require even/odd
     register combinations, and use PTImode where we need to deal with quad
     word memory operations.  Don't allow quad words in the argument or frame
     pointer registers, just registers 0..31.  */
  if (mode == PTImode)
    return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
	    && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
	    && ((regno & 1) == 0));

  /* VSX registers that overlap the FPR registers are larger than for non-VSX
     implementations.  Don't allow an item to be split between a FP register
     and an Altivec register.  Allow TImode in all VSX registers if the user
     asked for it.  */
  if (TARGET_VSX && VSX_REGNO_P (regno)
      && (VECTOR_MEM_VSX_P (mode)
	  || (TARGET_VSX_SCALAR_FLOAT && mode == SFmode)
	  || (TARGET_VSX_SCALAR_DOUBLE && (mode == DFmode || mode == DImode))
	  || (TARGET_VSX_TIMODE && mode == TImode)))
    {
      if (FP_REGNO_P (regno))
	return FP_REGNO_P (last_regno);

      if (ALTIVEC_REGNO_P (regno))
	{
	  if (mode == SFmode && !TARGET_UPPER_REGS_SF)
	    return 0;

	  if ((mode == DFmode || mode == DImode) && !TARGET_UPPER_REGS_DF)
	    return 0;

	  return ALTIVEC_REGNO_P (last_regno);
	}
    }

  /* The GPRs can hold any mode, but values bigger than one register
     cannot go past R31.  */
  if (INT_REGNO_P (regno))
    return INT_REGNO_P (last_regno);

  /* The float registers (except for VSX vector modes) can only hold floating
     modes and DImode.  */
  if (FP_REGNO_P (regno))
    {
      if (SCALAR_FLOAT_MODE_P (mode)
	  && (mode != TDmode || (regno % 2) == 0)
	  && FP_REGNO_P (last_regno))
	return 1;

      if (GET_MODE_CLASS (mode) == MODE_INT
	  && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
	return 1;

      if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
	  && PAIRED_VECTOR_MODE (mode))
	return 1;

      return 0;
    }

  /* The CR register can only hold CC modes.  */
  if (CR_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;

  if (CA_REGNO_P (regno))
    return mode == BImode;

  /* AltiVec only in AltiVec registers.  */
  if (ALTIVEC_REGNO_P (regno))
    return VECTOR_MEM_ALTIVEC_OR_VSX_P (mode);

  /* ...but GPRs can hold SIMD data on the SPE in one register.  */
  if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    return 1;

  /* We cannot put non-VSX TImode or PTImode anywhere except the general
     registers, and it must be able to fit within the register set.  */
  return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
}
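
/* Illustrative consequence of the checks above (editor's example): a TDmode
   value may only start at an even-numbered FPR, and a PTImode value only at an
   even-numbered GPR, so both always occupy an aligned even/odd register
   pair.  */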
/* Print interesting facts about registers.  */
static void
rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
{
  for (r = first_regno; r <= last_regno; ++r)
    {
      const char *comma = "";

      if (first_regno == last_regno)
	fprintf (stderr, "%s:\t", reg_name);
      else
	fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
	  {
	    fprintf (stderr, ",\n\t");
	    if (rs6000_hard_regno_nregs[m][r] > 1)
	      len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
			      rs6000_hard_regno_nregs[m][r]);
	    else
	      len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
	  }

      if (call_used_regs[r])
	{
	  fprintf (stderr, ",\n\t");
	  len += fprintf (stderr, "%s%s", comma, "call-used");
	}

      fprintf (stderr, ",\n\t");
      len += fprintf (stderr, "%s%s", comma, "fixed");

      fprintf (stderr, ",\n\t");
      len += fprintf (stderr, "%sreg-class = %s", comma,
		      reg_class_names[(int)rs6000_regno_regclass[r]]);

      fprintf (stderr, ",\n\t");
      fprintf (stderr, "%sregno = %d\n", comma, r);
    }
}
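
/* Example of the resulting -mdebug=reg output for one register (illustrative
   only; the exact mode list and punctuation vary by configuration):

     gr3:	SI DI ... call-used, reg-class = BASE_REGS, regno = 3  */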
static const char *
rs6000_debug_vector_unit (enum rs6000_vector v)
{
  const char *ret;

  switch (v)
    {
    case VECTOR_NONE:      ret = "none";      break;
    case VECTOR_ALTIVEC:   ret = "altivec";   break;
    case VECTOR_VSX:       ret = "vsx";       break;
    case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
    case VECTOR_PAIRED:    ret = "paired";    break;
    case VECTOR_SPE:       ret = "spe";       break;
    case VECTOR_OTHER:     ret = "other";     break;
    default:               ret = "unknown";   break;
    }

  return ret;
}
/* Print the address masks in a human readable fashion.  */
static void
rs6000_debug_print_mode (ssize_t m)
{
  ssize_t rc;

  fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
  for (rc = 0; rc < N_RELOAD_REG; rc++)
    {
      addr_mask_type mask = reg_addr[m].addr_mask[rc];
      fprintf (stderr,
	       " %s: %c%c%c%c%c%c",
	       reload_reg_map[rc].name,
	       (mask & RELOAD_REG_VALID)      != 0 ? 'v' : ' ',
	       (mask & RELOAD_REG_MULTIPLE)   != 0 ? 'm' : ' ',
	       (mask & RELOAD_REG_INDEXED)    != 0 ? 'i' : ' ',
	       (mask & RELOAD_REG_OFFSET)     != 0 ? 'o' : ' ',
	       (mask & RELOAD_REG_PRE_INCDEC) != 0 ? '+' : ' ',
	       (mask & RELOAD_REG_PRE_MODIFY) != 0 ? '+' : ' ');
    }

  if (rs6000_vector_unit[m] != VECTOR_NONE
      || rs6000_vector_mem[m] != VECTOR_NONE
      || (reg_addr[m].reload_store != CODE_FOR_nothing)
      || (reg_addr[m].reload_load != CODE_FOR_nothing))
    fprintf (stderr,
	     "  Vector-arith=%-10s Vector-mem=%-10s Reload=%c%c",
	     rs6000_debug_vector_unit (rs6000_vector_unit[m]),
	     rs6000_debug_vector_unit (rs6000_vector_mem[m]),
	     (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
	     (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*');

  fputs ("\n", stderr);
}
#define DEBUG_FMT_ID "%-32s= "
#define DEBUG_FMT_D   DEBUG_FMT_ID "%d\n"
#define DEBUG_FMT_WX  DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
#define DEBUG_FMT_S   DEBUG_FMT_ID "%s\n"
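
/* Usage example (illustrative): a call such as
     fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
   prints the label left-justified in a 32-character field followed by "= "
   and the decimal value, e.g. "tls_size                        = 32".  */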
/* Print various interesting information with -mdebug=reg.  */
static void
rs6000_debug_reg_global (void)
{
  static const char *const tf[2] = { "false", "true" };
  const char *nl = (const char *)0;
  char costly_num[20];
  char nop_num[20];
  char flags_buffer[40];
  const char *costly_str;
  const char *nop_str;
  const char *trace_str;
  const char *abi_str;
  const char *cmodel_str;
  struct cl_target_option cl_opts;

  /* Modes we want tieable information on.  */
  static const enum machine_mode print_tieable_modes[] = {

  /* Virtual regs we are interested in.  */
  const static struct {
    int regno;			/* register number.  */
    const char *name;		/* register name.  */
  } virtual_regs[] = {
    { STACK_POINTER_REGNUM,			"stack pointer:" },
    { TOC_REGNUM,				"toc: " },
    { STATIC_CHAIN_REGNUM,			"static chain: " },
    { RS6000_PIC_OFFSET_TABLE_REGNUM,		"pic offset: " },
    { HARD_FRAME_POINTER_REGNUM,		"hard frame: " },
    { ARG_POINTER_REGNUM,			"arg pointer: " },
    { FRAME_POINTER_REGNUM,			"frame pointer:" },
    { FIRST_PSEUDO_REGISTER,			"first pseudo: " },
    { FIRST_VIRTUAL_REGISTER,			"first virtual:" },
    { VIRTUAL_INCOMING_ARGS_REGNUM,		"incoming_args:" },
    { VIRTUAL_STACK_VARS_REGNUM,		"stack_vars: " },
    { VIRTUAL_STACK_DYNAMIC_REGNUM,		"stack_dynamic:" },
    { VIRTUAL_OUTGOING_ARGS_REGNUM,		"outgoing_args:" },
    { VIRTUAL_CFA_REGNUM,			"cfa (frame): " },
    { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM,	"stack boundary:" },
    { LAST_VIRTUAL_REGISTER,			"last virtual: " },
  };

  fputs ("\nHard register information:\n", stderr);
  rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
  rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
  rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO, LAST_ALTIVEC_REGNO, "vs");
  rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
  rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
  rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
  rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
  rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
  rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
  rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
  rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");

  fputs ("\nVirtual/stack/frame registers:\n", stderr);
  for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
    fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name,
	     virtual_regs[v].regno);
  fprintf (stderr,
	   "\n"
	   "d reg_class = %s\n"
	   "f reg_class = %s\n"
	   "v reg_class = %s\n"
	   "wa reg_class = %s\n"
	   "wd reg_class = %s\n"
	   "wf reg_class = %s\n"
	   "wg reg_class = %s\n"
	   "wl reg_class = %s\n"
	   "wm reg_class = %s\n"
	   "wr reg_class = %s\n"
	   "ws reg_class = %s\n"
	   "wt reg_class = %s\n"
	   "wu reg_class = %s\n"
	   "wv reg_class = %s\n"
	   "ww reg_class = %s\n"
	   "wx reg_class = %s\n"
	   "wy reg_class = %s\n"
	   "wz reg_class = %s\n"
	   "\n",
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
	   reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]]);
  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    rs6000_debug_print_mode (m);

  fputs ("\n", stderr);

  for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
    {
      enum machine_mode mode1 = print_tieable_modes[m1];
      bool first_time = true;

      nl = (const char *)0;
      for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
	{
	  enum machine_mode mode2 = print_tieable_modes[m2];
	  if (mode1 != mode2 && MODES_TIEABLE_P (mode1, mode2))
	    {
	      if (first_time)
		{
		  fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
		  first_time = false;
		}

	      fprintf (stderr, " %s", GET_MODE_NAME (mode2));
	    }
	}

      fputs ("\n", stderr);
    }

  if (rs6000_recip_control)
    {
      fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	if (rs6000_recip_bits[m])
	  fprintf (stderr,
		   "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
		   GET_MODE_NAME (m),
		   (RS6000_RECIP_AUTO_RE_P (m)
		    ? "auto"
		    : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
		   (RS6000_RECIP_AUTO_RSQRTE_P (m)
		    ? "auto"
		    : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));

      fputs ("\n", stderr);
    }
  if (rs6000_cpu_index >= 0)
    {
      const char *name = processor_target_table[rs6000_cpu_index].name;
      HOST_WIDE_INT flags
	= processor_target_table[rs6000_cpu_index].target_enable;

      sprintf (flags_buffer, "-mcpu=%s flags", name);
      rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
    }
  else
    fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");

  if (rs6000_tune_index >= 0)
    {
      const char *name = processor_target_table[rs6000_tune_index].name;
      HOST_WIDE_INT flags
	= processor_target_table[rs6000_tune_index].target_enable;

      sprintf (flags_buffer, "-mtune=%s flags", name);
      rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
    }
  else
    fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");

  cl_target_option_save (&cl_opts, &global_options);
  rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
			    rs6000_isa_flags);

  rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
			    rs6000_isa_flags_explicit);

  rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
				rs6000_builtin_mask);

  rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);

  fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
	   OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
  switch (rs6000_sched_costly_dep)
    {
    case max_dep_latency:
      costly_str = "max_dep_latency";
      break;

    case no_dep_costly:
      costly_str = "no_dep_costly";
      break;

    case all_deps_costly:
      costly_str = "all_deps_costly";
      break;

    case true_store_to_load_dep_costly:
      costly_str = "true_store_to_load_dep_costly";
      break;

    case store_to_load_dep_costly:
      costly_str = "store_to_load_dep_costly";
      break;

    default:
      costly_str = costly_num;
      sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
      break;
    }

  fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);

  switch (rs6000_sched_insert_nops)
    {
    case sched_finish_regroup_exact:
      nop_str = "sched_finish_regroup_exact";
      break;

    case sched_finish_pad_groups:
      nop_str = "sched_finish_pad_groups";
      break;

    case sched_finish_none:
      nop_str = "sched_finish_none";
      break;

    default:
      nop_str = nop_num;
      sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
      break;
    }

  fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);

  switch (rs6000_sdata)
    {
    case SDATA_NONE:
      break;

    case SDATA_DATA:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
      break;

    case SDATA_SYSV:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
      break;

    case SDATA_EABI:
      fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
      break;
    }

  switch (rs6000_traceback)
    {
    case traceback_default:	trace_str = "default"; break;
    case traceback_none:	trace_str = "none";    break;
    case traceback_part:	trace_str = "part";    break;
    case traceback_full:	trace_str = "full";    break;
    default:			trace_str = "unknown"; break;
    }

  fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);

  switch (rs6000_current_cmodel)
    {
    case CMODEL_SMALL:	cmodel_str = "small";	break;
    case CMODEL_MEDIUM:	cmodel_str = "medium";	break;
    case CMODEL_LARGE:	cmodel_str = "large";	break;
    default:		cmodel_str = "unknown";	break;
    }

  fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
  switch (rs6000_current_abi)
    {
    case ABI_NONE:	abi_str = "none";	break;
    case ABI_AIX:	abi_str = "aix";	break;
    case ABI_ELFv2:	abi_str = "ELFv2";	break;
    case ABI_V4:	abi_str = "V4";		break;
    case ABI_DARWIN:	abi_str = "darwin";	break;
    default:		abi_str = "unknown";	break;
    }

  fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);

  if (rs6000_altivec_abi)
    fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");

  if (rs6000_spe_abi)
    fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");

  if (rs6000_darwin64_abi)
    fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");

  if (rs6000_float_gprs)
    fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");

  if (TARGET_LINK_STACK)
    fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");

  if (targetm.lra_p ())
    fprintf (stderr, DEBUG_FMT_S, "lra", "true");

  if (TARGET_P8_FUSION)
    fprintf (stderr, DEBUG_FMT_S, "p8 fusion",
	     (TARGET_P8_FUSION_SIGN) ? "zero+sign" : "zero");

  fprintf (stderr, DEBUG_FMT_S, "plt-format",
	   TARGET_SECURE_PLT ? "secure" : "bss");
  fprintf (stderr, DEBUG_FMT_S, "struct-return",
	   aix_struct_return ? "aix" : "sysv");
  fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
  fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
  fprintf (stderr, DEBUG_FMT_S, "align_branch",
	   tf[!!rs6000_align_branch_targets]);
  fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
  fprintf (stderr, DEBUG_FMT_D, "long_double_size",
	   rs6000_long_double_type_size);
  fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
	   (int)rs6000_sched_restricted_insns_priority);
  fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
	   (int)END_BUILTINS);
  fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
	   (int)RS6000_BUILTIN_COUNT);
/* Update the addr mask bits in reg_addr to help secondary reload and the
   legitimate address support figure out the appropriate addressing to use.  */

static void
rs6000_setup_reg_addr_masks (void)
{
  ssize_t rc, reg, m, nregs;
  addr_mask_type any_addr_mask, addr_mask;

  for (m = 0; m < NUM_MACHINE_MODES; ++m)
    {
      /* SDmode is special in that we want to access it only via REG+REG
	 addressing on power7 and above, since we want to use the LFIWZX and
	 STFIWZX instructions to load it.  */
      bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);

      any_addr_mask = 0;
      for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
	{
	  addr_mask = 0;
	  reg = reload_reg_map[rc].reg;

	  /* Can mode values go in the GPR/FPR/Altivec registers?  */
	  if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
	    {
	      nregs = rs6000_hard_regno_nregs[m][reg];
	      addr_mask |= RELOAD_REG_VALID;

	      /* Indicate if the mode takes more than 1 physical register.  If
		 it takes a single register, indicate it can do REG+REG
		 addressing.  */
	      if (nregs > 1 || m == BLKmode)
		addr_mask |= RELOAD_REG_MULTIPLE;
	      else
		addr_mask |= RELOAD_REG_INDEXED;

	      /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
		 addressing.  Restrict addressing on SPE for 64-bit types
		 because of the SUBREG hackery used to address 64-bit floats in
		 '32-bit' GPRs.  To simplify secondary reload, don't allow
		 update forms on scalar floating point types that can go in the
		 upper registers.  */
	      if (TARGET_UPDATE
		  && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
		  && GET_MODE_SIZE (m) <= 8
		  && !VECTOR_MODE_P (m)
		  && !COMPLEX_MODE_P (m)
		  && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (m) == 8)
		  && !(m == DFmode && TARGET_UPPER_REGS_DF)
		  && !(m == SFmode && TARGET_UPPER_REGS_SF))
		{
		  addr_mask |= RELOAD_REG_PRE_INCDEC;

		  /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
		     we don't allow PRE_MODIFY for some multi-register
		     operations.  */
		  switch (m)
		    {
		    default:
		      addr_mask |= RELOAD_REG_PRE_MODIFY;
		      break;

		    case DImode:
		      if (TARGET_POWERPC64)
			addr_mask |= RELOAD_REG_PRE_MODIFY;
		      break;

		    case DFmode:
		    case DDmode:
		      if (!TARGET_E500_DOUBLE)
			addr_mask |= RELOAD_REG_PRE_MODIFY;
		      break;
		    }
		}
	    }

	  /* GPR and FPR registers can do REG+OFFSET addressing, except
	     possibly for SDmode.  */
	  if ((addr_mask != 0) && !indexed_only_p
	      && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR))
	    addr_mask |= RELOAD_REG_OFFSET;

	  reg_addr[m].addr_mask[rc] = addr_mask;
	  any_addr_mask |= addr_mask;
	}

      reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
    }
}
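
/* Illustrative reading of one entry (editor's example, not output of the
   original code): for DFmode in the FPR reload class on a typical server
   configuration with the upper VSX registers enabled, the mask built above
   has RELOAD_REG_VALID, RELOAD_REG_INDEXED and RELOAD_REG_OFFSET set, i.e.
   the "v", "i" and "o" columns printed by rs6000_debug_print_mode.  */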
/* Initialize the various global tables that are based on register size.  */
static void
rs6000_init_hard_regno_mode_ok (bool global_init_p)
{
  ssize_t r, m, c;
  int align64, align32;

  /* Precalculate REGNO_REG_CLASS.  */
  rs6000_regno_regclass[0] = GENERAL_REGS;
  for (r = 1; r < 32; ++r)
    rs6000_regno_regclass[r] = BASE_REGS;

  for (r = 32; r < 64; ++r)
    rs6000_regno_regclass[r] = FLOAT_REGS;

  for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
    rs6000_regno_regclass[r] = NO_REGS;

  for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
    rs6000_regno_regclass[r] = ALTIVEC_REGS;

  rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
  for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
    rs6000_regno_regclass[r] = CR_REGS;

  rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
  rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
  rs6000_regno_regclass[CA_REGNO] = CA_REGS;
  rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
  rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
  rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
  rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
  rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
  rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
  /* Precalculate register class to simpler reload register class.  We don't
     need all of the register classes that are combinations of different
     classes, just the simple ones that have constraint letters.  */
  for (c = 0; c < N_REG_CLASSES; c++)
    reg_class_to_reg_type[c] = NO_REG_TYPE;

  reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
  reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
  reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
  reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
  reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
  reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
  reg_class_to_reg_type[(int)SPE_ACC_REGS] = SPE_ACC_TYPE;
  reg_class_to_reg_type[(int)SPEFSCR_REGS] = SPEFSCR_REG_TYPE;

  if (TARGET_VSX)
    {
      reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
      reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
    }
  else
    {
      reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
      reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
    }
  /* Precalculate the valid memory formats as well as the vector information,
     this must be set up before the rs6000_hard_regno_nregs_internal calls
     below.  */
  gcc_assert ((int)VECTOR_NONE == 0);
  memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
  memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_mem));

  gcc_assert ((int)CODE_FOR_nothing == 0);
  memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));

  gcc_assert ((int)NO_REGS == 0);
  memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));

  /* The VSX hardware allows native alignment for vectors, but we control
     whether the compiler believes it can use native alignment or still uses
     128-bit alignment.  */
  if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
    {
      align64 = 64;
      align32 = 32;
    }
  else
    {
      align64 = 128;
      align32 = 128;
    }

  /* V2DF mode, VSX only.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
      rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
      rs6000_vector_align[V2DFmode] = align64;
    }

  /* V4SF mode, either VSX or Altivec.  */
  if (TARGET_VSX)
    {
      rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
      rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
      rs6000_vector_align[V4SFmode] = align32;
    }
  else if (TARGET_ALTIVEC)
    {
      rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
      rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
      rs6000_vector_align[V4SFmode] = align32;
    }

  /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
     and stores.  */
  if (TARGET_ALTIVEC)
    {
      rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
      rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
      rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
      rs6000_vector_align[V4SImode] = align32;
      rs6000_vector_align[V8HImode] = align32;
      rs6000_vector_align[V16QImode] = align32;

      if (TARGET_VSX)
	{
	  rs6000_vector_mem[V4SImode] = VECTOR_VSX;
	  rs6000_vector_mem[V8HImode] = VECTOR_VSX;
	  rs6000_vector_mem[V16QImode] = VECTOR_VSX;
	}
      else
	{
	  rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
	  rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
	  rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
	}
    }

  /* V2DImode, full mode depends on ISA 2.07 vector mode.  Allow under VSX to
     do insert/splat/extract.  Altivec doesn't have 64-bit integer support.  */
  if (TARGET_VSX)
    {
      rs6000_vector_mem[V2DImode] = VECTOR_VSX;
      rs6000_vector_unit[V2DImode]
	= (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
      rs6000_vector_align[V2DImode] = align64;
    }

  /* DFmode, see if we want to use the VSX unit.  */
  if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
    {
      rs6000_vector_unit[DFmode] = VECTOR_VSX;
      rs6000_vector_mem[DFmode]
	= (TARGET_UPPER_REGS_DF ? VECTOR_VSX : VECTOR_NONE);
      rs6000_vector_align[DFmode] = align64;
    }

  /* Allow TImode in VSX register and set the VSX memory macros.  */
  if (TARGET_VSX && TARGET_VSX_TIMODE)
    {
      rs6000_vector_mem[TImode] = VECTOR_VSX;
      rs6000_vector_align[TImode] = align64;
    }

  /* TODO add SPE and paired floating point vector support.  */
  /* Register class constraints for the constraints that depend on compile
     switches.  When the VSX code was added, different constraints were added
     based on the type (DFmode, V2DFmode, V4SFmode).  For the vector types, all
     of the VSX registers are used.  The register classes for scalar floating
     point types is set, based on whether we allow that type into the upper
     (Altivec) registers.  GCC has register classes to target the Altivec
     registers for load/store operations, to select using a VSX memory
     operation instead of the traditional floating point operation.  The
     constraints are:

	d  - Register class to use with traditional DFmode instructions.
	f  - Register class to use with traditional SFmode instructions.
	v  - Altivec register.
	wa - Any VSX register.
	wd - Preferred register class for V2DFmode.
	wf - Preferred register class for V4SFmode.
	wg - Float register for power6x move insns.
	wl - Float register if we can do 32-bit signed int loads.
	wm - VSX register for ISA 2.07 direct move operations.
	wr - GPR if 64-bit mode is permitted.
	ws - Register class to do ISA 2.06 DF operations.
	wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
	wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
	wt - VSX register for TImode in VSX registers.
	ww - Register class to do SF conversions in with VSX operations.
	wx - Float register if we can do 32-bit int stores.
	wy - Register class to do ISA 2.07 SF operations.
	wz - Float register if we can do 32-bit unsigned int loads.  */

  if (TARGET_HARD_FLOAT && TARGET_FPRS)
    rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS;

  if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
    rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS;

  if (TARGET_VSX)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS;

      if (TARGET_VSX_TIMODE)
	rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS;

      if (TARGET_UPPER_REGS_DF)
	{
	  rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS;
	  rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;
	}
      else
	rs6000_constraints[RS6000_CONSTRAINT_ws] = FLOAT_REGS;
    }

  /* Add conditional constraints based on various options, to allow us to
     collapse multiple insn patterns.  */
  if (TARGET_ALTIVEC)
    rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;

  if (TARGET_MFPGPR)
    rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;

  if (TARGET_LFIWAX)
    rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS;

  if (TARGET_DIRECT_MOVE)
    rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;

  if (TARGET_POWERPC64)
    rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;

  if (TARGET_P8_VECTOR && TARGET_UPPER_REGS_SF)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
    }
  else if (TARGET_P8_VECTOR)
    {
      rs6000_constraints[RS6000_CONSTRAINT_wy] = FLOAT_REGS;
      rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
    }
  else if (TARGET_VSX)
    rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;

  if (TARGET_STFIWX)
    rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS;

  if (TARGET_LFIWZX)
    rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS;
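
  /* Illustrative note (not part of the original source): these constraint
     letters are usable from the machine description and from inline asm.
     For example, assuming a target where VSX is enabled so that "wa" above
     maps to VSX_REGS, a double-precision vector add can be written as:

	 __vector double
	 vsx_add (__vector double a, __vector double b)
	 {
	   __vector double r;
	   __asm__ ("xvadddp %x0,%x1,%x2" : "=wa" (r) : "wa" (a), "wa" (b));
	   return r;
	 }

     The register class recorded in rs6000_constraints[] is what reload uses
     when it has to pick a hard register for a "wa" operand.  */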
  /* Set up the reload helper and direct move functions.  */
  if (TARGET_VSX || TARGET_ALTIVEC)
    {
      if (TARGET_64BIT)
	{
	  reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
	  reg_addr[V16QImode].reload_load  = CODE_FOR_reload_v16qi_di_load;
	  reg_addr[V8HImode].reload_store  = CODE_FOR_reload_v8hi_di_store;
	  reg_addr[V8HImode].reload_load   = CODE_FOR_reload_v8hi_di_load;
	  reg_addr[V4SImode].reload_store  = CODE_FOR_reload_v4si_di_store;
	  reg_addr[V4SImode].reload_load   = CODE_FOR_reload_v4si_di_load;
	  reg_addr[V2DImode].reload_store  = CODE_FOR_reload_v2di_di_store;
	  reg_addr[V2DImode].reload_load   = CODE_FOR_reload_v2di_di_load;
	  reg_addr[V4SFmode].reload_store  = CODE_FOR_reload_v4sf_di_store;
	  reg_addr[V4SFmode].reload_load   = CODE_FOR_reload_v4sf_di_load;
	  reg_addr[V2DFmode].reload_store  = CODE_FOR_reload_v2df_di_store;
	  reg_addr[V2DFmode].reload_load   = CODE_FOR_reload_v2df_di_load;
	  if (TARGET_VSX && TARGET_UPPER_REGS_DF)
	    {
	      reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
	      reg_addr[DFmode].reload_load  = CODE_FOR_reload_df_di_load;
	      reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
	      reg_addr[DDmode].reload_load  = CODE_FOR_reload_dd_di_load;
	    }
	  if (TARGET_P8_VECTOR)
	    {
	      reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
	      reg_addr[SFmode].reload_load  = CODE_FOR_reload_sf_di_load;
	      reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
	      reg_addr[SDmode].reload_load  = CODE_FOR_reload_sd_di_load;
	    }
	  if (TARGET_VSX_TIMODE)
	    {
	      reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
	      reg_addr[TImode].reload_load  = CODE_FOR_reload_ti_di_load;
	    }
	  if (TARGET_DIRECT_MOVE)
	    {
	      if (TARGET_POWERPC64)
		{
		  reg_addr[TImode].reload_gpr_vsx    = CODE_FOR_reload_gpr_from_vsxti;
		  reg_addr[V2DFmode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv2df;
		  reg_addr[V2DImode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv2di;
		  reg_addr[V4SFmode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv4sf;
		  reg_addr[V4SImode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv4si;
		  reg_addr[V8HImode].reload_gpr_vsx  = CODE_FOR_reload_gpr_from_vsxv8hi;
		  reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
		  reg_addr[SFmode].reload_gpr_vsx    = CODE_FOR_reload_gpr_from_vsxsf;

		  reg_addr[TImode].reload_vsx_gpr    = CODE_FOR_reload_vsx_from_gprti;
		  reg_addr[V2DFmode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv2df;
		  reg_addr[V2DImode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv2di;
		  reg_addr[V4SFmode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv4sf;
		  reg_addr[V4SImode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv4si;
		  reg_addr[V8HImode].reload_vsx_gpr  = CODE_FOR_reload_vsx_from_gprv8hi;
		  reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
		  reg_addr[SFmode].reload_vsx_gpr    = CODE_FOR_reload_vsx_from_gprsf;
		}
	      else
		{
		  reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
		  reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
		  reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
		}
	    }
	}
      else
	{
	  reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
	  reg_addr[V16QImode].reload_load  = CODE_FOR_reload_v16qi_si_load;
	  reg_addr[V8HImode].reload_store  = CODE_FOR_reload_v8hi_si_store;
	  reg_addr[V8HImode].reload_load   = CODE_FOR_reload_v8hi_si_load;
	  reg_addr[V4SImode].reload_store  = CODE_FOR_reload_v4si_si_store;
	  reg_addr[V4SImode].reload_load   = CODE_FOR_reload_v4si_si_load;
	  reg_addr[V2DImode].reload_store  = CODE_FOR_reload_v2di_si_store;
	  reg_addr[V2DImode].reload_load   = CODE_FOR_reload_v2di_si_load;
	  reg_addr[V4SFmode].reload_store  = CODE_FOR_reload_v4sf_si_store;
	  reg_addr[V4SFmode].reload_load   = CODE_FOR_reload_v4sf_si_load;
	  reg_addr[V2DFmode].reload_store  = CODE_FOR_reload_v2df_si_store;
	  reg_addr[V2DFmode].reload_load   = CODE_FOR_reload_v2df_si_load;
	  if (TARGET_VSX && TARGET_UPPER_REGS_DF)
	    {
	      reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
	      reg_addr[DFmode].reload_load  = CODE_FOR_reload_df_si_load;
	      reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
	      reg_addr[DDmode].reload_load  = CODE_FOR_reload_dd_si_load;
	    }
	  if (TARGET_P8_VECTOR)
	    {
	      reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
	      reg_addr[SFmode].reload_load  = CODE_FOR_reload_sf_si_load;
	      reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
	      reg_addr[SDmode].reload_load  = CODE_FOR_reload_sd_si_load;
	    }
	  if (TARGET_VSX_TIMODE)
	    {
	      reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
	      reg_addr[TImode].reload_load  = CODE_FOR_reload_ti_si_load;
	    }
	}
    }
  /* Precalculate HARD_REGNO_NREGS.  */
  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      rs6000_hard_regno_nregs[m][r]
	= rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);

  /* Precalculate HARD_REGNO_MODE_OK.  */
  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
	rs6000_hard_regno_mode_ok_p[m][r] = true;

  /* Precalculate CLASS_MAX_NREGS sizes.  */
  for (c = 0; c < LIM_REG_CLASSES; ++c)
    {
      int reg_size;

      if (TARGET_VSX && VSX_REG_CLASS_P (c))
	reg_size = UNITS_PER_VSX_WORD;

      else if (c == ALTIVEC_REGS)
	reg_size = UNITS_PER_ALTIVEC_WORD;

      else if (c == FLOAT_REGS)
	reg_size = UNITS_PER_FP_WORD;

      else
	reg_size = UNITS_PER_WORD;

      for (m = 0; m < NUM_MACHINE_MODES; ++m)
	{
	  int reg_size2 = reg_size;

	  /* TFmode/TDmode always takes 2 registers, even in VSX.  */
	  if (TARGET_VSX && VSX_REG_CLASS_P (c)
	      && (m == TDmode || m == TFmode))
	    reg_size2 = UNITS_PER_FP_WORD;

	  rs6000_class_max_nregs[m][c]
	    = (GET_MODE_SIZE (m) + reg_size2 - 1) / reg_size2;
	}
    }

  if (TARGET_E500_DOUBLE)
    rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
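
  /* Worked example (illustrative): with UNITS_PER_ALTIVEC_WORD == 16 and
     UNITS_PER_FP_WORD == 8, a 16-byte V4SFmode value needs 1 register in
     ALTIVEC_REGS but 2 registers in FLOAT_REGS, while TFmode always gets
     (16 + 8 - 1) / 8 == 2 registers even in the VSX classes.  */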
  /* Calculate which modes to automatically generate code to use the
     reciprocal divide and square root instructions.  In the future, possibly
     automatically generate the instructions even if the user did not specify
     -mrecip.  The older machines' double precision reciprocal sqrt estimate is
     not accurate enough.  */
  memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
  if (TARGET_FRES)
    rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (TARGET_FRE)
    rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;

  if (TARGET_FRSQRTES)
    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (TARGET_FRSQRTE)
    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
  if (VECTOR_UNIT_VSX_P (V2DFmode))
    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;

  if (rs6000_recip_control)
    {
      if (!flag_finite_math_only)
	warning (0, "-mrecip requires -ffinite-math or -ffast-math");
      if (flag_trapping_math)
	warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
      if (!flag_reciprocal_math)
	warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
      if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
	{
	  if (RS6000_RECIP_HAVE_RE_P (SFmode)
	      && (rs6000_recip_control & RECIP_SF_DIV) != 0)
	    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (DFmode)
	      && (rs6000_recip_control & RECIP_DF_DIV) != 0)
	    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
	      && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
	    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
	      && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
	    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
	      && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
	    rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
	      && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
	    rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
	      && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
	    rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;

	  if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
	      && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
	    rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
	}
    }
  /* Update the addr mask bits in reg_addr to help secondary reload and the
     legitimate address support figure out the appropriate addressing to
     use.  */
  rs6000_setup_reg_addr_masks ();

  if (global_init_p || TARGET_DEBUG_TARGET)
    {
      if (TARGET_DEBUG_REG)
	rs6000_debug_reg_global ();

      if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
	fprintf (stderr,
		 "SImode variable mult cost = %d\n"
		 "SImode constant mult cost = %d\n"
		 "SImode short constant mult cost = %d\n"
		 "DImode multiplication cost = %d\n"
		 "SImode division cost = %d\n"
		 "DImode division cost = %d\n"
		 "Simple fp operation cost = %d\n"
		 "DFmode multiplication cost = %d\n"
		 "SFmode division cost = %d\n"
		 "DFmode division cost = %d\n"
		 "cache line size = %d\n"
		 "l1 cache size = %d\n"
		 "l2 cache size = %d\n"
		 "simultaneous prefetches = %d\n"
		 "\n",
		 rs6000_cost->mulsi_const,
		 rs6000_cost->mulsi_const9,
		 rs6000_cost->cache_line_size,
		 rs6000_cost->l1_cache_size,
		 rs6000_cost->l2_cache_size,
		 rs6000_cost->simultaneous_prefetches);
    }
/* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS.  */
static void
darwin_rs6000_override_options (void)
{
  /* The Darwin ABI always includes AltiVec, can't be (validly) turned
     off.  */
  rs6000_altivec_abi = 1;
  TARGET_ALTIVEC_VRSAVE = 1;
  rs6000_current_abi = ABI_DARWIN;

  if (DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    darwin_one_byte_bool = 1;

  if (TARGET_64BIT && ! TARGET_POWERPC64)
    {
      rs6000_isa_flags |= OPTION_MASK_POWERPC64;
      warning (0, "-m64 requires PowerPC64 architecture, enabling");
    }

  if (flag_mkernel)
    {
      rs6000_default_long_calls = 1;
      rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
    }

  /* Make -m64 imply -maltivec.  Darwin's 64-bit ABI includes
     altivec.  */
  if (!flag_mkernel && !flag_apple_kext
      && TARGET_64BIT
      && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
    rs6000_isa_flags |= OPTION_MASK_ALTIVEC;

  /* Unless the user (not the configurer) has explicitly overridden
     it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
     G4 unless targeting the kernel.  */
  if (!flag_mkernel
      && !flag_apple_kext
      && strverscmp (darwin_macosx_version_min, "10.5") >= 0
      && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
      && ! global_options_set.x_rs6000_cpu_index)
    {
      rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
    }
}
/* If not otherwise specified by a target, make 'long double' equivalent to
   'double'.  */

#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
#endif

/* Return the builtin mask of the various options used that could affect which
   builtins were used.  In the past we used target_flags, but we've run out of
   bits, and some options like SPE and PAIRED are no longer in
   target_flags.  */

HOST_WIDE_INT
rs6000_builtin_mask_calculate (void)
{
  return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
	  | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
	  | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
	  | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
	  | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
	  | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
	  | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
	  | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
	  | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
	  | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
	  | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
	  | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
	  | ((TARGET_HTM) ? RS6000_BTM_HTM : 0));
}
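
/* Example (illustrative): on a configuration with AltiVec, VSX, and the
   ISA 2.07 vector and crypto support enabled, the mask computed above would
   contain RS6000_BTM_ALTIVEC | RS6000_BTM_VSX | RS6000_BTM_P8_VECTOR
   | RS6000_BTM_CRYPTO, plus whichever of the FRE/FRES/FRSQRTE/FRSQRTES bits
   the selected CPU provides.  */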
/* Override command line options.  Mostly we process the processor type and
   sometimes adjust other TARGET_ options.  */

static bool
rs6000_option_override_internal (bool global_init_p)
{
  bool ret = true;
  bool have_cpu = false;

  /* The default cpu requested at configure time, if any.  */
  const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;

  HOST_WIDE_INT set_masks;
  int cpu_index, tune_index;

  struct cl_target_option *main_target_opt
    = ((global_init_p || target_option_default_node == NULL)
       ? NULL : TREE_TARGET_OPTION (target_option_default_node));

  /* Remember the explicit arguments.  */
  rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;

  /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
     library functions, so warn about it.  The flag may be useful for
     performance studies from time to time though, so don't disable it
     entirely.  */
  if (global_options_set.x_rs6000_alignment_flags
      && rs6000_alignment_flags == MASK_ALIGN_POWER
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    warning (0, "-malign-power is not supported for 64-bit Darwin;"
	     " it is incompatible with the installed C and C++ libraries");

  /* Numerous experiments show that IRA based loop pressure
     calculation works better for RTL loop invariant motion on targets
     with enough (>= 32) registers.  It is an expensive optimization.
     So it is on only for peak performance.  */
  if (optimize >= 3 && global_init_p)
    flag_ira_loop_pressure = 1;

  /* Set the pointer size.  */
  if (TARGET_64BIT)
    {
      rs6000_pmode = (int)DImode;
      rs6000_pointer_size = 64;
    }
  else
    {
      rs6000_pmode = (int)SImode;
      rs6000_pointer_size = 32;
    }

  /* Some OSs don't support saving the high part of 64-bit registers on context
     switch.  Other OSs don't support saving Altivec registers.  On those OSs,
     we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
     if the user wants either, the user must explicitly specify them and we
     won't interfere with the user's specification.  */
  set_masks = POWERPC_MASKS;
#ifdef OS_MISSING_POWERPC64
  if (OS_MISSING_POWERPC64)
    set_masks &= ~OPTION_MASK_POWERPC64;
#endif
#ifdef OS_MISSING_ALTIVEC
  if (OS_MISSING_ALTIVEC)
    set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX);
#endif

  /* Don't override by the processor default if given explicitly.  */
  set_masks &= ~rs6000_isa_flags_explicit;

  /* Process the -mcpu=<xxx> and -mtune=<xxx> argument.  If the user changed
     the cpu in a target attribute or pragma, but did not specify a tuning
     option, use the cpu for the tuning option rather than the option specified
     with -mtune on the command line.  Process a '--with-cpu' configuration
     request as an implicit --cpu.  */
  if (rs6000_cpu_index >= 0)
    {
      cpu_index = rs6000_cpu_index;
      have_cpu = true;
    }
  else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
    {
      rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
      have_cpu = true;
    }
  else if (implicit_cpu)
    {
      rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
      have_cpu = true;
    }
  else
    {
      const char *default_cpu = (TARGET_POWERPC64 ? "powerpc64" : "powerpc");
      rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
      have_cpu = false;
    }

  gcc_assert (cpu_index >= 0);
  /* If we have a cpu, either through an explicit -mcpu=<xxx> or if the
     compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
     with those from the cpu, except for options that were explicitly set.  If
     we don't have a cpu, do not override the target bits set in
     TARGET_DEFAULT.  */
  if (have_cpu)
    {
      rs6000_isa_flags &= ~set_masks;
      rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
			   & set_masks);
    }
  else
    rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
			 & ~rs6000_isa_flags_explicit);

  /* If no -mcpu=<xxx>, inherit any default options that were cleared via
     POWERPC_MASKS.  Originally, TARGET_DEFAULT was used to initialize
     target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook.  When we switched
     to using rs6000_isa_flags, we need to do the initialization here.  */
  if (!have_cpu)
    rs6000_isa_flags |= (TARGET_DEFAULT & ~rs6000_isa_flags_explicit);

  if (rs6000_tune_index >= 0)
    tune_index = rs6000_tune_index;
  else if (have_cpu)
    rs6000_tune_index = tune_index = cpu_index;
  else
    {
      enum processor_type tune_proc
	= (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);

      for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
	if (processor_target_table[i].processor == tune_proc)
	  {
	    rs6000_tune_index = tune_index = i;
	    break;
	  }
    }

  gcc_assert (tune_index >= 0);
  rs6000_cpu = processor_target_table[tune_index].processor;
  /* Pick defaults for SPE related control flags.  Do this early to make sure
     that the TARGET_ macros are representative ASAP.  */
  {
    int spe_capable_cpu =
      (rs6000_cpu == PROCESSOR_PPC8540
       || rs6000_cpu == PROCESSOR_PPC8548);

    if (!global_options_set.x_rs6000_spe_abi)
      rs6000_spe_abi = spe_capable_cpu;

    if (!global_options_set.x_rs6000_spe)
      rs6000_spe = spe_capable_cpu;

    if (!global_options_set.x_rs6000_float_gprs)
      rs6000_float_gprs =
	(rs6000_cpu == PROCESSOR_PPC8540 ? 1
	 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
	 : 0);
  }

  if (global_options_set.x_rs6000_spe_abi
      && rs6000_spe_abi
      && !TARGET_SPE_ABI)
    error ("not configured for SPE ABI");

  if (global_options_set.x_rs6000_spe
      && rs6000_spe
      && !TARGET_SPE)
    error ("not configured for SPE instruction set");

  if (main_target_opt != NULL
      && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
	  || (main_target_opt->x_rs6000_spe != rs6000_spe)
	  || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
    error ("target attribute or pragma changes SPE ABI");

  if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
      || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
      || rs6000_cpu == PROCESSOR_PPCE5500)
    {
      if (TARGET_ALTIVEC)
	error ("AltiVec not supported in this target");
      if (TARGET_SPE)
	error ("SPE not supported in this target");
    }
  if (rs6000_cpu == PROCESSOR_PPCE6500)
    {
      if (TARGET_SPE)
	error ("SPE not supported in this target");
    }
  /* Disable Cell microcode if we are optimizing for the Cell
     and not optimizing for size.  */
  if (rs6000_gen_cell_microcode == -1)
    rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
				  && !optimize_size);

  /* If we are optimizing big endian systems for space and it's OK to
     use instructions that would be microcoded on the Cell, use the
     load/store multiple and string instructions.  */
  if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
    rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
						      | OPTION_MASK_STRING);

  /* Don't allow -mmultiple or -mstring on little endian systems
     unless the cpu is a 750, because the hardware doesn't support the
     instructions used in little endian mode, and causes an alignment
     trap.  The 750 does not cause an alignment trap (except when the
     target is unaligned).  */

  if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
    {
      if (TARGET_MULTIPLE)
	{
	  rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
	    warning (0, "-mmultiple is not supported on little endian systems");
	}

      if (TARGET_STRING)
	{
	  rs6000_isa_flags &= ~OPTION_MASK_STRING;
	  if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
	    warning (0, "-mstring is not supported on little endian systems");
	}
    }

  /* If little-endian, default to -mstrict-align on older processors.
     Testing for htm matches power8 and later.  */
  if (!BYTES_BIG_ENDIAN
      && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
    rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
  /* Add some warnings for VSX.  */
  if (TARGET_VSX)
    {
      const char *msg = NULL;
      if (!TARGET_HARD_FLOAT || !TARGET_FPRS
	  || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
	{
	  if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
	    msg = N_("-mvsx requires hardware floating point");
	  else
	    {
	      rs6000_isa_flags &= ~ OPTION_MASK_VSX;
	      rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
	    }
	}
      else if (TARGET_PAIRED_FLOAT)
	msg = N_("-mvsx and -mpaired are incompatible");
      else if (TARGET_AVOID_XFORM > 0)
	msg = N_("-mvsx needs indexed addressing");
      else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
				   & OPTION_MASK_ALTIVEC))
	{
	  if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
	    msg = N_("-mvsx and -mno-altivec are incompatible");
	  else
	    msg = N_("-mno-altivec disables vsx");
	}

      if (msg)
	{
	  warning (0, msg);
	  rs6000_isa_flags &= ~ OPTION_MASK_VSX;
	  rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
	}
    }

  /* If hard-float/altivec/vsx were explicitly turned off then don't allow
     the -mcpu setting to enable options that conflict.  */
  if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
      && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
				       | OPTION_MASK_ALTIVEC
				       | OPTION_MASK_VSX)) != 0)
    rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
			   | OPTION_MASK_DIRECT_MOVE)
			  & ~rs6000_isa_flags_explicit);

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
  /* For the newer switches (vsx, dfp, etc.) set some of the older options,
     unless the user explicitly used the -mno-<option> to disable the code.  */
  if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
    rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~rs6000_isa_flags_explicit);
  else if (TARGET_VSX)
    rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~rs6000_isa_flags_explicit);
  else if (TARGET_POPCNTD)
    rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
  else if (TARGET_DFP)
    rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~rs6000_isa_flags_explicit);
  else if (TARGET_CMPB)
    rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
  else if (TARGET_FPRND)
    rs6000_isa_flags |= (ISA_2_4_MASKS & ~rs6000_isa_flags_explicit);
  else if (TARGET_POPCNTB)
    rs6000_isa_flags |= (ISA_2_2_MASKS & ~rs6000_isa_flags_explicit);
  else if (TARGET_ALTIVEC)
    rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~rs6000_isa_flags_explicit);

  if (TARGET_CRYPTO && !TARGET_ALTIVEC)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
	error ("-mcrypto requires -maltivec");
      rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
    }

  if (TARGET_DIRECT_MOVE && !TARGET_VSX)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
	error ("-mdirect-move requires -mvsx");
      rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
    }

  if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
	error ("-mpower8-vector requires -maltivec");
      rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
    }

  if (TARGET_P8_VECTOR && !TARGET_VSX)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
	error ("-mpower8-vector requires -mvsx");
      rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
    }

  if (TARGET_VSX_TIMODE && !TARGET_VSX)
    {
      if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE)
	error ("-mvsx-timode requires -mvsx");
      rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
    }

  /* The quad memory instructions only work in 64-bit mode.  In 32-bit mode,
     silently turn off quad memory mode.  */
  if (TARGET_QUAD_MEMORY && !TARGET_POWERPC64)
    {
      if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
	warning (0, N_("-mquad-memory requires 64-bit mode"));

      rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
    }

  /* Enable power8 fusion if we are tuning for power8, even if we aren't
     generating power8 instructions.  */
  if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
    rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
			 & OPTION_MASK_P8_FUSION);
  /* Power8 does not fuse sign extended loads with the addis.  If we are
     optimizing at high levels for speed, convert a sign extended load into a
     zero extending load, and an explicit sign extension.  */
  if (TARGET_P8_FUSION
      && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
      && optimize_function_for_speed_p (cfun)
      && optimize >= 3)
    rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);

  /* E500mc does "better" if we inline more aggressively.  Respect the
     user's opinion, though.  */
  if (rs6000_block_move_inline_limit == 0
      && (rs6000_cpu == PROCESSOR_PPCE500MC
	  || rs6000_cpu == PROCESSOR_PPCE500MC64
	  || rs6000_cpu == PROCESSOR_PPCE5500
	  || rs6000_cpu == PROCESSOR_PPCE6500))
    rs6000_block_move_inline_limit = 128;

  /* store_one_arg depends on expand_block_move to handle at least the
     size of reg_parm_stack_space.  */
  if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
    rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
  /* If the appropriate debug option is enabled, replace the target hooks
     with debug versions that call the real version and then print
     debugging information.  */
  if (TARGET_DEBUG_COST)
    {
      targetm.rtx_costs = rs6000_debug_rtx_costs;
      targetm.address_cost = rs6000_debug_address_cost;
      targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
    }

  if (TARGET_DEBUG_ADDR)
    {
      targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
      targetm.legitimize_address = rs6000_debug_legitimize_address;
      rs6000_secondary_reload_class_ptr
	= rs6000_debug_secondary_reload_class;
      rs6000_secondary_memory_needed_ptr
	= rs6000_debug_secondary_memory_needed;
      rs6000_cannot_change_mode_class_ptr
	= rs6000_debug_cannot_change_mode_class;
      rs6000_preferred_reload_class_ptr
	= rs6000_debug_preferred_reload_class;
      rs6000_legitimize_reload_address_ptr
	= rs6000_debug_legitimize_reload_address;
      rs6000_mode_dependent_address_ptr
	= rs6000_debug_mode_dependent_address;
    }

  if (rs6000_veclibabi_name)
    {
      if (strcmp (rs6000_veclibabi_name, "mass") == 0)
	rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
      else
	error ("unknown vectorization library ABI type (%s) for "
	       "-mveclibabi= switch", rs6000_veclibabi_name);
    }
  if (!global_options_set.x_rs6000_long_double_type_size)
    {
      if (main_target_opt != NULL
	  && (main_target_opt->x_rs6000_long_double_type_size
	      != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
	error ("target attribute or pragma changes long double size");
      else
	rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
    }

#if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
  if (!global_options_set.x_rs6000_ieeequad)
    rs6000_ieeequad = 1;
#endif

  /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
     target attribute or pragma which automatically enables both options,
     unless the altivec ABI was set.  This is set by default for 64-bit, but
     not for 32-bit.  */
  if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
    rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC)
			  & ~rs6000_isa_flags_explicit);

  /* Enable Altivec ABI for AIX -maltivec.  */
  if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
	error ("target attribute or pragma changes AltiVec ABI");
      else
	rs6000_altivec_abi = 1;
    }

  /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux.  For
     PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI.  It can
     be explicitly overridden in either case.  */
  if (!global_options_set.x_rs6000_altivec_abi
      && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
    {
      if (main_target_opt != NULL &&
	  !main_target_opt->x_rs6000_altivec_abi)
	error ("target attribute or pragma changes AltiVec ABI");
      else
	rs6000_altivec_abi = 1;
    }

  /* Set the Darwin64 ABI as default for 64-bit Darwin.
     So far, the only darwin64 targets are also MACH-O.  */
  if (TARGET_MACHO
      && DEFAULT_ABI == ABI_DARWIN
      && TARGET_64BIT)
    {
      if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
	error ("target attribute or pragma changes darwin64 ABI");
      else
	{
	  rs6000_darwin64_abi = 1;
	  /* Default to natural alignment, for better performance.  */
	  rs6000_alignment_flags = MASK_ALIGN_NATURAL;
	}
    }

  /* Place FP constants in the constant pool instead of TOC
     if section anchors enabled.  */
  if (flag_section_anchors
      && !global_options_set.x_TARGET_NO_FP_IN_TOC)
    TARGET_NO_FP_IN_TOC = 1;
  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);

#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
  SUBSUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUB3TARGET_OVERRIDE_OPTIONS
  SUB3TARGET_OVERRIDE_OPTIONS;
#endif

  if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
    rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);

  /* For the E500 family of cores, reset the single/double FP flags to let us
     check that they remain constant across attributes or pragmas.  Also,
     clear a possible request for string instructions, not supported and which
     we might have silently queried above for -Os.

     For other families, clear ISEL in case it was set implicitly.  */

  switch (rs6000_cpu)
    {
    case PROCESSOR_PPC8540:
    case PROCESSOR_PPC8548:
    case PROCESSOR_PPCE500MC:
    case PROCESSOR_PPCE500MC64:
    case PROCESSOR_PPCE5500:
    case PROCESSOR_PPCE6500:

      rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
      rs6000_double_float = TARGET_E500_DOUBLE;

      rs6000_isa_flags &= ~OPTION_MASK_STRING;

      break;

    default:

      if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
	rs6000_isa_flags &= ~OPTION_MASK_ISEL;

      break;
    }
3537 if (main_target_opt
)
3539 if (main_target_opt
->x_rs6000_single_float
!= rs6000_single_float
)
3540 error ("target attribute or pragma changes single precision floating "
3542 if (main_target_opt
->x_rs6000_double_float
!= rs6000_double_float
)
3543 error ("target attribute or pragma changes double precision floating "
  /* Detect invalid option combinations with E500.  */
  CHECK_E500_OPTIONS;

  rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
                        && rs6000_cpu != PROCESSOR_POWER5
                        && rs6000_cpu != PROCESSOR_POWER6
                        && rs6000_cpu != PROCESSOR_POWER7
                        && rs6000_cpu != PROCESSOR_POWER8
                        && rs6000_cpu != PROCESSOR_PPCA2
                        && rs6000_cpu != PROCESSOR_CELL
                        && rs6000_cpu != PROCESSOR_PPC476);
  rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
                         || rs6000_cpu == PROCESSOR_POWER5
                         || rs6000_cpu == PROCESSOR_POWER7
                         || rs6000_cpu == PROCESSOR_POWER8);
  rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
                                 || rs6000_cpu == PROCESSOR_POWER5
                                 || rs6000_cpu == PROCESSOR_POWER6
                                 || rs6000_cpu == PROCESSOR_POWER7
                                 || rs6000_cpu == PROCESSOR_POWER8
                                 || rs6000_cpu == PROCESSOR_PPCE500MC
                                 || rs6000_cpu == PROCESSOR_PPCE500MC64
                                 || rs6000_cpu == PROCESSOR_PPCE5500
                                 || rs6000_cpu == PROCESSOR_PPCE6500);
  /* Allow debug switches to override the above settings.  These are set to -1
     in rs6000.opt to indicate the user hasn't directly set the switch.  */
  if (TARGET_ALWAYS_HINT >= 0)
    rs6000_always_hint = TARGET_ALWAYS_HINT;

  if (TARGET_SCHED_GROUPS >= 0)
    rs6000_sched_groups = TARGET_SCHED_GROUPS;

  if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
    rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;

  rs6000_sched_restricted_insns_priority
    = (rs6000_sched_groups ? 1 : 0);
  /* Handle -msched-costly-dep option.  */
  rs6000_sched_costly_dep
    = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);

  if (rs6000_sched_costly_dep_str)
    {
      if (! strcmp (rs6000_sched_costly_dep_str, "no"))
        rs6000_sched_costly_dep = no_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
        rs6000_sched_costly_dep = all_deps_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
        rs6000_sched_costly_dep = true_store_to_load_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
        rs6000_sched_costly_dep = store_to_load_dep_costly;
      else
        rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
                                   atoi (rs6000_sched_costly_dep_str));
    }
  /* Handle -minsert-sched-nops option.  */
  rs6000_sched_insert_nops
    = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);

  if (rs6000_sched_insert_nops_str)
    {
      if (! strcmp (rs6000_sched_insert_nops_str, "no"))
        rs6000_sched_insert_nops = sched_finish_none;
      else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
        rs6000_sched_insert_nops = sched_finish_pad_groups;
      else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
        rs6000_sched_insert_nops = sched_finish_regroup_exact;
      else
        rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
                                    atoi (rs6000_sched_insert_nops_str));
    }
#ifdef TARGET_REGNAMES
  /* If the user desires alternate register names, copy in the
     alternate names now.  */
  if (TARGET_REGNAMES)
    memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
#endif

  /* Set aix_struct_return last, after the ABI is determined.
     If -maix-struct-return or -msvr4-struct-return was explicitly
     used, don't override with the ABI default.  */
  if (!global_options_set.x_aix_struct_return)
    aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);

  /* IBM XL compiler defaults to unsigned bitfields.  */
  if (TARGET_XL_COMPAT)
    flag_signed_bitfields = 0;

  if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
    REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
  ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);

  /* We can only guarantee the availability of DI pseudo-ops when
     assembling for 64-bit targets.  */
  if (!TARGET_64BIT)
    {
      targetm.asm_out.aligned_op.di = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }
  /* Set branch target alignment, if not optimizing for size.  */
  if (!optimize_size)
    {
      /* Cell wants to be aligned 8byte for dual issue.  Titan wants to be
         aligned 8byte to avoid misprediction by the branch predictor.  */
      if (rs6000_cpu == PROCESSOR_TITAN
          || rs6000_cpu == PROCESSOR_CELL)
        {
          if (align_functions <= 0)
            align_functions = 8;
          if (align_jumps <= 0)
            align_jumps = 8;
          if (align_loops <= 0)
            align_loops = 8;
        }
      if (rs6000_align_branch_targets)
        {
          if (align_functions <= 0)
            align_functions = 16;
          if (align_jumps <= 0)
            align_jumps = 16;
          if (align_loops <= 0)
            {
              can_override_loop_align = 1;
              align_loops = 16;
            }
        }
      if (align_jumps_max_skip <= 0)
        align_jumps_max_skip = 15;
      if (align_loops_max_skip <= 0)
        align_loops_max_skip = 15;
    }

  /* Arrange to save and restore machine status around nested functions.  */
  init_machine_status = rs6000_init_machine_status;
  /* We should always be splitting complex arguments, but we can't break
     Linux and Darwin ABIs at the moment.  For now, only AIX is fixed.  */
  if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
    targetm.calls.split_complex_arg = NULL;

  /* Initialize rs6000_cost with the appropriate target costs.  */
  if (optimize_size)
    rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
  else
    switch (rs6000_cpu)
      {
      case PROCESSOR_RS64A:
        rs6000_cost = &rs64a_cost;
        break;

      case PROCESSOR_MPCCORE:
        rs6000_cost = &mpccore_cost;
        break;

      case PROCESSOR_PPC403:
        rs6000_cost = &ppc403_cost;
        break;

      case PROCESSOR_PPC405:
        rs6000_cost = &ppc405_cost;
        break;

      case PROCESSOR_PPC440:
        rs6000_cost = &ppc440_cost;
        break;

      case PROCESSOR_PPC476:
        rs6000_cost = &ppc476_cost;
        break;

      case PROCESSOR_PPC601:
        rs6000_cost = &ppc601_cost;
        break;

      case PROCESSOR_PPC603:
        rs6000_cost = &ppc603_cost;
        break;

      case PROCESSOR_PPC604:
        rs6000_cost = &ppc604_cost;
        break;

      case PROCESSOR_PPC604e:
        rs6000_cost = &ppc604e_cost;
        break;

      case PROCESSOR_PPC620:
        rs6000_cost = &ppc620_cost;
        break;

      case PROCESSOR_PPC630:
        rs6000_cost = &ppc630_cost;
        break;

      case PROCESSOR_CELL:
        rs6000_cost = &ppccell_cost;
        break;

      case PROCESSOR_PPC750:
      case PROCESSOR_PPC7400:
        rs6000_cost = &ppc750_cost;
        break;

      case PROCESSOR_PPC7450:
        rs6000_cost = &ppc7450_cost;
        break;

      case PROCESSOR_PPC8540:
      case PROCESSOR_PPC8548:
        rs6000_cost = &ppc8540_cost;
        break;

      case PROCESSOR_PPCE300C2:
      case PROCESSOR_PPCE300C3:
        rs6000_cost = &ppce300c2c3_cost;
        break;

      case PROCESSOR_PPCE500MC:
        rs6000_cost = &ppce500mc_cost;
        break;

      case PROCESSOR_PPCE500MC64:
        rs6000_cost = &ppce500mc64_cost;
        break;

      case PROCESSOR_PPCE5500:
        rs6000_cost = &ppce5500_cost;
        break;

      case PROCESSOR_PPCE6500:
        rs6000_cost = &ppce6500_cost;
        break;

      case PROCESSOR_TITAN:
        rs6000_cost = &titan_cost;
        break;

      case PROCESSOR_POWER4:
      case PROCESSOR_POWER5:
        rs6000_cost = &power4_cost;
        break;

      case PROCESSOR_POWER6:
        rs6000_cost = &power6_cost;
        break;

      case PROCESSOR_POWER7:
        rs6000_cost = &power7_cost;
        break;

      case PROCESSOR_POWER8:
        rs6000_cost = &power8_cost;
        break;

      case PROCESSOR_PPCA2:
        rs6000_cost = &ppca2_cost;
        break;

      default:
        gcc_unreachable ();
      }
  maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
                         rs6000_cost->simultaneous_prefetches,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
                         rs6000_cost->cache_line_size,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
                         global_options.x_param_values,
                         global_options_set.x_param_values);

  /* Increase loop peeling limits based on performance analysis.  */
  maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
                         global_options.x_param_values,
                         global_options_set.x_param_values);
  /* If using typedef char *va_list, signal that
     __builtin_va_start (&ap, 0) can be optimized to
     ap = __builtin_next_arg (0).  */
  if (DEFAULT_ABI != ABI_V4)
    targetm.expand_builtin_va_start = NULL;

  /* Set up single/double float flags.
     If TARGET_HARD_FLOAT is set, but neither single or double is set,
     then set both flags.  */
  if (TARGET_HARD_FLOAT && TARGET_FPRS
      && rs6000_single_float == 0 && rs6000_double_float == 0)
    rs6000_single_float = rs6000_double_float = 1;

  /* If not explicitly specified via option, decide whether to generate indexed
     load/store instructions.  */
  if (TARGET_AVOID_XFORM == -1)
    /* Avoid indexed addressing when targeting Power6 in order to avoid the
       DERAT mispredict penalty.  However the LVE and STVE altivec instructions
       need indexed accesses and the type used is the scalar type of the element
       being loaded or stored.  */
    TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
                          && !TARGET_ALTIVEC);
3871 /* Set the -mrecip options. */
3872 if (rs6000_recip_name
)
3874 char *p
= ASTRDUP (rs6000_recip_name
);
3876 unsigned int mask
, i
;
3879 while ((q
= strtok (p
, ",")) != NULL
)
3890 if (!strcmp (q
, "default"))
3891 mask
= ((TARGET_RECIP_PRECISION
)
3892 ? RECIP_HIGH_PRECISION
: RECIP_LOW_PRECISION
);
3895 for (i
= 0; i
< ARRAY_SIZE (recip_options
); i
++)
3896 if (!strcmp (q
, recip_options
[i
].string
))
3898 mask
= recip_options
[i
].mask
;
3902 if (i
== ARRAY_SIZE (recip_options
))
3904 error ("unknown option for -mrecip=%s", q
);
3912 rs6000_recip_control
&= ~mask
;
3914 rs6000_recip_control
|= mask
;
  /* Set the builtin mask of the various options used that could affect which
     builtins were used.  In the past we used target_flags, but we've run out
     of bits, and some options like SPE and PAIRED are no longer in
     target_flags.  */
  rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
    {
      fprintf (stderr,
               "new builtin mask = " HOST_WIDE_INT_PRINT_HEX ", ",
               rs6000_builtin_mask);
      rs6000_print_builtin_options (stderr, 0, NULL, rs6000_builtin_mask);
    }

  /* Initialize all of the registers.  */
  rs6000_init_hard_regno_mode_ok (global_init_p);

  /* Save the initial options in case the user does function specific options */
  target_option_default_node = target_option_current_node
    = build_target_option_node (&global_options);

  /* If not explicitly specified via option, decide whether to generate the
     extra blr's required to preserve the link stack on some cpus (eg, 476).  */
  if (TARGET_LINK_STACK == -1)
    SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
/* Implement TARGET_OPTION_OVERRIDE.  On the RS/6000 this is used to
   define the target cpu type.  */

static void
rs6000_option_override (void)
{
  (void) rs6000_option_override_internal (true);
}


/* Implement targetm.vectorize.builtin_mask_for_load.  */
static tree
rs6000_builtin_mask_for_load (void)
{
  if (TARGET_ALTIVEC || TARGET_VSX)
    return altivec_builtin_mask_for_load;
  else
    return 0;
}
/* Implement LOOP_ALIGN.  */
int
rs6000_loop_align (rtx label)
{
  basic_block bb;
  int ninsns;

  /* Don't override loop alignment if -falign-loops was specified.  */
  if (!can_override_loop_align)
    return align_loops_log;

  bb = BLOCK_FOR_INSN (label);
  ninsns = num_loop_insns (bb->loop_father);

  /* Align small loops to 32 bytes to fit in an icache sector, otherwise return default.  */
  if (ninsns > 4 && ninsns <= 8
      && (rs6000_cpu == PROCESSOR_POWER4
          || rs6000_cpu == PROCESSOR_POWER5
          || rs6000_cpu == PROCESSOR_POWER6
          || rs6000_cpu == PROCESSOR_POWER7
          || rs6000_cpu == PROCESSOR_POWER8))
    return 5;
  else
    return align_loops_log;
}

/* Implement TARGET_LOOP_ALIGN_MAX_SKIP.  */
static int
rs6000_loop_align_max_skip (rtx label)
{
  return (1 << rs6000_loop_align (label)) - 1;
}
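
/* Illustrative sketch, not part of the original source: how a log2 loop
   alignment translates into the max-skip value returned above.  The helper
   name example_max_skip is hypothetical; the block is kept out of the build
   on purpose.  */
#if 0
#include <assert.h>

static int
example_max_skip (int align_log)
{
  /* Mirrors rs6000_loop_align_max_skip: never pad by more than the
     alignment minus one byte.  */
  return (1 << align_log) - 1;
}

int
main (void)
{
  assert (example_max_skip (5) == 31);   /* 32-byte aligned small loops */
  assert (example_max_skip (4) == 15);   /* 16-byte aligned loops */
  return 0;
}
#endif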
4000 /* Return true iff, data reference of TYPE can reach vector alignment (16)
4001 after applying N number of iterations. This routine does not determine
4002 how may iterations are required to reach desired alignment. */
4005 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED
, bool is_packed
)
4012 if (rs6000_alignment_flags
== MASK_ALIGN_NATURAL
)
4015 if (rs6000_alignment_flags
== MASK_ALIGN_POWER
)
4025 /* Assuming that all other types are naturally aligned. CHECKME! */
4030 /* Return true if the vector misalignment factor is supported by the
4033 rs6000_builtin_support_vector_misalignment (enum machine_mode mode
,
4040 /* Return if movmisalign pattern is not supported for this mode. */
4041 if (optab_handler (movmisalign_optab
, mode
) == CODE_FOR_nothing
)
4044 if (misalignment
== -1)
4046 /* Misalignment factor is unknown at compile time but we know
4047 it's word aligned. */
4048 if (rs6000_vector_alignment_reachable (type
, is_packed
))
4050 int element_size
= TREE_INT_CST_LOW (TYPE_SIZE (type
));
4052 if (element_size
== 64 || element_size
== 32)
4059 /* VSX supports word-aligned vector. */
4060 if (misalignment
% 4 == 0)
4066 /* Implement targetm.vectorize.builtin_vectorization_cost. */
4068 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost
,
4069 tree vectype
, int misalign
)
4074 switch (type_of_cost
)
4084 case cond_branch_not_taken
:
4093 case vec_promote_demote
:
4099 case cond_branch_taken
:
4102 case unaligned_load
:
4103 if (TARGET_VSX
&& TARGET_ALLOW_MOVMISALIGN
)
4105 elements
= TYPE_VECTOR_SUBPARTS (vectype
);
4107 /* Double word aligned. */
4115 /* Double word aligned. */
4119 /* Unknown misalignment. */
4132 /* Misaligned loads are not supported. */
4137 case unaligned_store
:
4138 if (TARGET_VSX
&& TARGET_ALLOW_MOVMISALIGN
)
4140 elements
= TYPE_VECTOR_SUBPARTS (vectype
);
4142 /* Double word aligned. */
4150 /* Double word aligned. */
4154 /* Unknown misalignment. */
4167 /* Misaligned stores are not supported. */
4173 elements
= TYPE_VECTOR_SUBPARTS (vectype
);
4174 elem_type
= TREE_TYPE (vectype
);
4175 /* 32-bit vectors loaded into registers are stored as double
4176 precision, so we need n/2 converts in addition to the usual
4177 n/2 merges to construct a vector of short floats from them. */
4178 if (SCALAR_FLOAT_TYPE_P (elem_type
)
4179 && TYPE_PRECISION (elem_type
) == 32)
4180 return elements
+ 1;
4182 return elements
/ 2 + 1;
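
/* Illustrative sketch, not part of the original source: the vec_construct
   cost formula used just above, evaluated for a couple of element counts.
   The helper name is hypothetical.  */
#if 0
#include <assert.h>
#include <stdbool.h>

static int
example_vec_construct_cost (int elements, bool elem_is_32bit_float)
{
  /* 32-bit floats are held internally as doubles, so building the vector
     needs elements/2 converts on top of the usual elements/2 merges.  */
  return elem_is_32bit_float ? elements + 1 : elements / 2 + 1;
}

int
main (void)
{
  assert (example_vec_construct_cost (4, true) == 5);   /* e.g. V4SF */
  assert (example_vec_construct_cost (4, false) == 3);  /* e.g. V4SI */
  return 0;
}
#endif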
4189 /* Implement targetm.vectorize.preferred_simd_mode. */
4191 static enum machine_mode
4192 rs6000_preferred_simd_mode (enum machine_mode mode
)
4201 if (TARGET_ALTIVEC
|| TARGET_VSX
)
4225 if (TARGET_PAIRED_FLOAT
4231 typedef struct _rs6000_cost_data
4233 struct loop
*loop_info
;
/* Test for likely overcommitment of vector hardware resources.  If a
   loop iteration is relatively large, and too large a percentage of
   instructions in the loop are vectorized, the cost model may not
   adequately reflect delays from unavailable vector resources.
   Penalize the loop body cost for this case.  */

static void
rs6000_density_test (rs6000_cost_data *data)
{
  const int DENSITY_PCT_THRESHOLD = 85;
  const int DENSITY_SIZE_THRESHOLD = 70;
  const int DENSITY_PENALTY = 10;
  struct loop *loop = data->loop_info;
  basic_block *bbs = get_loop_body (loop);
  int nbbs = loop->num_nodes;
  int vec_cost = data->cost[vect_body], not_vec_cost = 0;
  int i, density_pct;

  for (i = 0; i < nbbs; i++)
    {
      basic_block bb = bbs[i];
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple stmt = gsi_stmt (gsi);
          stmt_vec_info stmt_info = vinfo_for_stmt (stmt);

          if (!STMT_VINFO_RELEVANT_P (stmt_info)
              && !STMT_VINFO_IN_PATTERN_P (stmt_info))
            not_vec_cost++;
        }
    }

  free (bbs);
  density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);

  if (density_pct > DENSITY_PCT_THRESHOLD
      && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
    {
      data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
      if (dump_enabled_p ())
        dump_printf_loc (MSG_NOTE, vect_location,
                         "density %d%%, cost %d exceeds threshold, penalizing "
                         "loop body cost by %d%%", density_pct,
                         vec_cost + not_vec_cost, DENSITY_PENALTY);
    }
}
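
/* Illustrative sketch, not part of the original source: the density penalty
   arithmetic from rs6000_density_test, with the same thresholds, worked
   through on made-up costs.  */
#if 0
#include <assert.h>

int
main (void)
{
  const int pct_threshold = 85, size_threshold = 70, penalty = 10;
  int vec_cost = 90, not_vec_cost = 10;
  int density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);

  if (density_pct > pct_threshold
      && vec_cost + not_vec_cost > size_threshold)
    vec_cost = vec_cost * (100 + penalty) / 100;

  assert (density_pct == 90);
  assert (vec_cost == 99);   /* body cost 90 penalized by 10%  */
  return 0;
}
#endif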
/* Implement targetm.vectorize.init_cost.  */

static void *
rs6000_init_cost (struct loop *loop_info)
{
  rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
  data->loop_info = loop_info;
  data->cost[vect_prologue] = 0;
  data->cost[vect_body]     = 0;
  data->cost[vect_epilogue] = 0;
  return data;
}
/* Implement targetm.vectorize.add_stmt_cost.  */

static unsigned
rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
                      struct _stmt_vec_info *stmt_info, int misalign,
                      enum vect_cost_model_location where)
{
  rs6000_cost_data *cost_data = (rs6000_cost_data *) data;
  unsigned retval = 0;

  if (flag_vect_cost_model)
    {
      tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
      int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
                                                         misalign);
      /* Statements in an inner loop relative to the loop being
         vectorized are weighted more heavily.  The value here is
         arbitrary and could potentially be improved with analysis.  */
      if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
        count *= 50;  /* FIXME.  */

      retval = (unsigned) (count * stmt_cost);
      cost_data->cost[where] += retval;
    }

  return retval;
}
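
/* Illustrative sketch, not part of the original source: the inner-loop
   weighting used in rs6000_add_stmt_cost.  Values are made up for
   illustration.  */
#if 0
#include <assert.h>
#include <stdbool.h>

int
main (void)
{
  int count = 1, stmt_cost = 3;
  bool in_inner_loop = true;

  if (in_inner_loop)
    count *= 50;               /* same FIXME factor as above */

  assert ((unsigned) (count * stmt_cost) == 150);
  return 0;
}
#endif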
/* Implement targetm.vectorize.finish_cost.  */

static void
rs6000_finish_cost (void *data, unsigned *prologue_cost,
                    unsigned *body_cost, unsigned *epilogue_cost)
{
  rs6000_cost_data *cost_data = (rs6000_cost_data *) data;

  if (cost_data->loop_info)
    rs6000_density_test (cost_data);

  *prologue_cost = cost_data->cost[vect_prologue];
  *body_cost     = cost_data->cost[vect_body];
  *epilogue_cost = cost_data->cost[vect_epilogue];
}
4343 /* Implement targetm.vectorize.destroy_cost_data. */
4346 rs6000_destroy_cost_data (void *data
)
4351 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
4352 library with vectorized intrinsics. */
4355 rs6000_builtin_vectorized_libmass (tree fndecl
, tree type_out
, tree type_in
)
4358 const char *suffix
= NULL
;
4359 tree fntype
, new_fndecl
, bdecl
= NULL_TREE
;
4362 enum machine_mode el_mode
, in_mode
;
4365 /* Libmass is suitable for unsafe math only as it does not correctly support
4366 parts of IEEE with the required precision such as denormals. Only support
4367 it if we have VSX to use the simd d2 or f4 functions.
4368 XXX: Add variable length support. */
4369 if (!flag_unsafe_math_optimizations
|| !TARGET_VSX
)
4372 el_mode
= TYPE_MODE (TREE_TYPE (type_out
));
4373 n
= TYPE_VECTOR_SUBPARTS (type_out
);
4374 in_mode
= TYPE_MODE (TREE_TYPE (type_in
));
4375 in_n
= TYPE_VECTOR_SUBPARTS (type_in
);
4376 if (el_mode
!= in_mode
4380 if (DECL_BUILT_IN_CLASS (fndecl
) == BUILT_IN_NORMAL
)
4382 enum built_in_function fn
= DECL_FUNCTION_CODE (fndecl
);
4385 case BUILT_IN_ATAN2
:
4386 case BUILT_IN_HYPOT
:
4392 case BUILT_IN_ACOSH
:
4394 case BUILT_IN_ASINH
:
4396 case BUILT_IN_ATANH
:
4404 case BUILT_IN_EXPM1
:
4405 case BUILT_IN_LGAMMA
:
4406 case BUILT_IN_LOG10
:
4407 case BUILT_IN_LOG1P
:
4415 bdecl
= builtin_decl_implicit (fn
);
4416 suffix
= "d2"; /* pow -> powd2 */
4417 if (el_mode
!= DFmode
4423 case BUILT_IN_ATAN2F
:
4424 case BUILT_IN_HYPOTF
:
4429 case BUILT_IN_ACOSF
:
4430 case BUILT_IN_ACOSHF
:
4431 case BUILT_IN_ASINF
:
4432 case BUILT_IN_ASINHF
:
4433 case BUILT_IN_ATANF
:
4434 case BUILT_IN_ATANHF
:
4435 case BUILT_IN_CBRTF
:
4437 case BUILT_IN_COSHF
:
4439 case BUILT_IN_ERFCF
:
4440 case BUILT_IN_EXP2F
:
4442 case BUILT_IN_EXPM1F
:
4443 case BUILT_IN_LGAMMAF
:
4444 case BUILT_IN_LOG10F
:
4445 case BUILT_IN_LOG1PF
:
4446 case BUILT_IN_LOG2F
:
4449 case BUILT_IN_SINHF
:
4450 case BUILT_IN_SQRTF
:
4452 case BUILT_IN_TANHF
:
4453 bdecl
= builtin_decl_implicit (fn
);
4454 suffix
= "4"; /* powf -> powf4 */
4455 if (el_mode
!= SFmode
4468 gcc_assert (suffix
!= NULL
);
4469 bname
= IDENTIFIER_POINTER (DECL_NAME (bdecl
));
4473 strcpy (name
, bname
+ sizeof ("__builtin_") - 1);
4474 strcat (name
, suffix
);
4477 fntype
= build_function_type_list (type_out
, type_in
, NULL
);
4478 else if (n_args
== 2)
4479 fntype
= build_function_type_list (type_out
, type_in
, type_in
, NULL
);
4483 /* Build a function declaration for the vectorized function. */
4484 new_fndecl
= build_decl (BUILTINS_LOCATION
,
4485 FUNCTION_DECL
, get_identifier (name
), fntype
);
4486 TREE_PUBLIC (new_fndecl
) = 1;
4487 DECL_EXTERNAL (new_fndecl
) = 1;
4488 DECL_IS_NOVOPS (new_fndecl
) = 1;
4489 TREE_READONLY (new_fndecl
) = 1;
4494 /* Returns a function decl for a vectorized version of the builtin function
4495 with builtin function code FN and the result vector type TYPE, or NULL_TREE
4496 if it is not available. */
4499 rs6000_builtin_vectorized_function (tree fndecl
, tree type_out
,
4502 enum machine_mode in_mode
, out_mode
;
4505 if (TARGET_DEBUG_BUILTIN
)
4506 fprintf (stderr
, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
4507 IDENTIFIER_POINTER (DECL_NAME (fndecl
)),
4508 GET_MODE_NAME (TYPE_MODE (type_out
)),
4509 GET_MODE_NAME (TYPE_MODE (type_in
)));
4511 if (TREE_CODE (type_out
) != VECTOR_TYPE
4512 || TREE_CODE (type_in
) != VECTOR_TYPE
4513 || !TARGET_VECTORIZE_BUILTINS
)
4516 out_mode
= TYPE_MODE (TREE_TYPE (type_out
));
4517 out_n
= TYPE_VECTOR_SUBPARTS (type_out
);
4518 in_mode
= TYPE_MODE (TREE_TYPE (type_in
));
4519 in_n
= TYPE_VECTOR_SUBPARTS (type_in
);
4521 if (DECL_BUILT_IN_CLASS (fndecl
) == BUILT_IN_NORMAL
)
4523 enum built_in_function fn
= DECL_FUNCTION_CODE (fndecl
);
4526 case BUILT_IN_CLZIMAX
:
4527 case BUILT_IN_CLZLL
:
4530 if (TARGET_P8_VECTOR
&& in_mode
== out_mode
&& out_n
== in_n
)
4532 if (out_mode
== QImode
&& out_n
== 16)
4533 return rs6000_builtin_decls
[P8V_BUILTIN_VCLZB
];
4534 else if (out_mode
== HImode
&& out_n
== 8)
4535 return rs6000_builtin_decls
[P8V_BUILTIN_VCLZH
];
4536 else if (out_mode
== SImode
&& out_n
== 4)
4537 return rs6000_builtin_decls
[P8V_BUILTIN_VCLZW
];
4538 else if (out_mode
== DImode
&& out_n
== 2)
4539 return rs6000_builtin_decls
[P8V_BUILTIN_VCLZD
];
4542 case BUILT_IN_COPYSIGN
:
4543 if (VECTOR_UNIT_VSX_P (V2DFmode
)
4544 && out_mode
== DFmode
&& out_n
== 2
4545 && in_mode
== DFmode
&& in_n
== 2)
4546 return rs6000_builtin_decls
[VSX_BUILTIN_CPSGNDP
];
4548 case BUILT_IN_COPYSIGNF
:
4549 if (out_mode
!= SFmode
|| out_n
!= 4
4550 || in_mode
!= SFmode
|| in_n
!= 4)
4552 if (VECTOR_UNIT_VSX_P (V4SFmode
))
4553 return rs6000_builtin_decls
[VSX_BUILTIN_CPSGNSP
];
4554 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode
))
4555 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_COPYSIGN_V4SF
];
4557 case BUILT_IN_POPCOUNTIMAX
:
4558 case BUILT_IN_POPCOUNTLL
:
4559 case BUILT_IN_POPCOUNTL
:
4560 case BUILT_IN_POPCOUNT
:
4561 if (TARGET_P8_VECTOR
&& in_mode
== out_mode
&& out_n
== in_n
)
4563 if (out_mode
== QImode
&& out_n
== 16)
4564 return rs6000_builtin_decls
[P8V_BUILTIN_VPOPCNTB
];
4565 else if (out_mode
== HImode
&& out_n
== 8)
4566 return rs6000_builtin_decls
[P8V_BUILTIN_VPOPCNTH
];
4567 else if (out_mode
== SImode
&& out_n
== 4)
4568 return rs6000_builtin_decls
[P8V_BUILTIN_VPOPCNTW
];
4569 else if (out_mode
== DImode
&& out_n
== 2)
4570 return rs6000_builtin_decls
[P8V_BUILTIN_VPOPCNTD
];
4574 if (VECTOR_UNIT_VSX_P (V2DFmode
)
4575 && out_mode
== DFmode
&& out_n
== 2
4576 && in_mode
== DFmode
&& in_n
== 2)
4577 return rs6000_builtin_decls
[VSX_BUILTIN_XVSQRTDP
];
4579 case BUILT_IN_SQRTF
:
4580 if (VECTOR_UNIT_VSX_P (V4SFmode
)
4581 && out_mode
== SFmode
&& out_n
== 4
4582 && in_mode
== SFmode
&& in_n
== 4)
4583 return rs6000_builtin_decls
[VSX_BUILTIN_XVSQRTSP
];
4586 if (VECTOR_UNIT_VSX_P (V2DFmode
)
4587 && out_mode
== DFmode
&& out_n
== 2
4588 && in_mode
== DFmode
&& in_n
== 2)
4589 return rs6000_builtin_decls
[VSX_BUILTIN_XVRDPIP
];
4591 case BUILT_IN_CEILF
:
4592 if (out_mode
!= SFmode
|| out_n
!= 4
4593 || in_mode
!= SFmode
|| in_n
!= 4)
4595 if (VECTOR_UNIT_VSX_P (V4SFmode
))
4596 return rs6000_builtin_decls
[VSX_BUILTIN_XVRSPIP
];
4597 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode
))
4598 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRFIP
];
4600 case BUILT_IN_FLOOR
:
4601 if (VECTOR_UNIT_VSX_P (V2DFmode
)
4602 && out_mode
== DFmode
&& out_n
== 2
4603 && in_mode
== DFmode
&& in_n
== 2)
4604 return rs6000_builtin_decls
[VSX_BUILTIN_XVRDPIM
];
4606 case BUILT_IN_FLOORF
:
4607 if (out_mode
!= SFmode
|| out_n
!= 4
4608 || in_mode
!= SFmode
|| in_n
!= 4)
4610 if (VECTOR_UNIT_VSX_P (V4SFmode
))
4611 return rs6000_builtin_decls
[VSX_BUILTIN_XVRSPIM
];
4612 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode
))
4613 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRFIM
];
4616 if (VECTOR_UNIT_VSX_P (V2DFmode
)
4617 && out_mode
== DFmode
&& out_n
== 2
4618 && in_mode
== DFmode
&& in_n
== 2)
4619 return rs6000_builtin_decls
[VSX_BUILTIN_XVMADDDP
];
4622 if (VECTOR_UNIT_VSX_P (V4SFmode
)
4623 && out_mode
== SFmode
&& out_n
== 4
4624 && in_mode
== SFmode
&& in_n
== 4)
4625 return rs6000_builtin_decls
[VSX_BUILTIN_XVMADDSP
];
4626 else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode
)
4627 && out_mode
== SFmode
&& out_n
== 4
4628 && in_mode
== SFmode
&& in_n
== 4)
4629 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VMADDFP
];
4631 case BUILT_IN_TRUNC
:
4632 if (VECTOR_UNIT_VSX_P (V2DFmode
)
4633 && out_mode
== DFmode
&& out_n
== 2
4634 && in_mode
== DFmode
&& in_n
== 2)
4635 return rs6000_builtin_decls
[VSX_BUILTIN_XVRDPIZ
];
4637 case BUILT_IN_TRUNCF
:
4638 if (out_mode
!= SFmode
|| out_n
!= 4
4639 || in_mode
!= SFmode
|| in_n
!= 4)
4641 if (VECTOR_UNIT_VSX_P (V4SFmode
))
4642 return rs6000_builtin_decls
[VSX_BUILTIN_XVRSPIZ
];
4643 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode
))
4644 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRFIZ
];
4646 case BUILT_IN_NEARBYINT
:
4647 if (VECTOR_UNIT_VSX_P (V2DFmode
)
4648 && flag_unsafe_math_optimizations
4649 && out_mode
== DFmode
&& out_n
== 2
4650 && in_mode
== DFmode
&& in_n
== 2)
4651 return rs6000_builtin_decls
[VSX_BUILTIN_XVRDPI
];
4653 case BUILT_IN_NEARBYINTF
:
4654 if (VECTOR_UNIT_VSX_P (V4SFmode
)
4655 && flag_unsafe_math_optimizations
4656 && out_mode
== SFmode
&& out_n
== 4
4657 && in_mode
== SFmode
&& in_n
== 4)
4658 return rs6000_builtin_decls
[VSX_BUILTIN_XVRSPI
];
4661 if (VECTOR_UNIT_VSX_P (V2DFmode
)
4662 && !flag_trapping_math
4663 && out_mode
== DFmode
&& out_n
== 2
4664 && in_mode
== DFmode
&& in_n
== 2)
4665 return rs6000_builtin_decls
[VSX_BUILTIN_XVRDPIC
];
4667 case BUILT_IN_RINTF
:
4668 if (VECTOR_UNIT_VSX_P (V4SFmode
)
4669 && !flag_trapping_math
4670 && out_mode
== SFmode
&& out_n
== 4
4671 && in_mode
== SFmode
&& in_n
== 4)
4672 return rs6000_builtin_decls
[VSX_BUILTIN_XVRSPIC
];
4679 else if (DECL_BUILT_IN_CLASS (fndecl
) == BUILT_IN_MD
)
4681 enum rs6000_builtins fn
4682 = (enum rs6000_builtins
)DECL_FUNCTION_CODE (fndecl
);
4685 case RS6000_BUILTIN_RSQRTF
:
4686 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode
)
4687 && out_mode
== SFmode
&& out_n
== 4
4688 && in_mode
== SFmode
&& in_n
== 4)
4689 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRSQRTFP
];
4691 case RS6000_BUILTIN_RSQRT
:
4692 if (VECTOR_UNIT_VSX_P (V2DFmode
)
4693 && out_mode
== DFmode
&& out_n
== 2
4694 && in_mode
== DFmode
&& in_n
== 2)
4695 return rs6000_builtin_decls
[VSX_BUILTIN_RSQRT_2DF
];
4697 case RS6000_BUILTIN_RECIPF
:
4698 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode
)
4699 && out_mode
== SFmode
&& out_n
== 4
4700 && in_mode
== SFmode
&& in_n
== 4)
4701 return rs6000_builtin_decls
[ALTIVEC_BUILTIN_VRECIPFP
];
4703 case RS6000_BUILTIN_RECIP
:
4704 if (VECTOR_UNIT_VSX_P (V2DFmode
)
4705 && out_mode
== DFmode
&& out_n
== 2
4706 && in_mode
== DFmode
&& in_n
== 2)
4707 return rs6000_builtin_decls
[VSX_BUILTIN_RECIP_V2DF
];
4714 /* Generate calls to libmass if appropriate. */
4715 if (rs6000_veclib_handler
)
4716 return rs6000_veclib_handler (fndecl
, type_out
, type_in
);
4721 /* Default CPU string for rs6000*_file_start functions. */
4722 static const char *rs6000_default_cpu
;
4724 /* Do anything needed at the start of the asm file. */
4727 rs6000_file_start (void)
4730 const char *start
= buffer
;
4731 FILE *file
= asm_out_file
;
4733 rs6000_default_cpu
= TARGET_CPU_DEFAULT
;
4735 default_file_start ();
4737 if (flag_verbose_asm
)
4739 sprintf (buffer
, "\n%s rs6000/powerpc options:", ASM_COMMENT_START
);
4741 if (rs6000_default_cpu
!= 0 && rs6000_default_cpu
[0] != '\0')
4743 fprintf (file
, "%s --with-cpu=%s", start
, rs6000_default_cpu
);
4747 if (global_options_set
.x_rs6000_cpu_index
)
4749 fprintf (file
, "%s -mcpu=%s", start
,
4750 processor_target_table
[rs6000_cpu_index
].name
);
4754 if (global_options_set
.x_rs6000_tune_index
)
4756 fprintf (file
, "%s -mtune=%s", start
,
4757 processor_target_table
[rs6000_tune_index
].name
);
4761 if (PPC405_ERRATUM77
)
4763 fprintf (file
, "%s PPC405CR_ERRATUM77", start
);
4767 #ifdef USING_ELFOS_H
4768 switch (rs6000_sdata
)
4770 case SDATA_NONE
: fprintf (file
, "%s -msdata=none", start
); start
= ""; break;
4771 case SDATA_DATA
: fprintf (file
, "%s -msdata=data", start
); start
= ""; break;
4772 case SDATA_SYSV
: fprintf (file
, "%s -msdata=sysv", start
); start
= ""; break;
4773 case SDATA_EABI
: fprintf (file
, "%s -msdata=eabi", start
); start
= ""; break;
4776 if (rs6000_sdata
&& g_switch_value
)
4778 fprintf (file
, "%s -G %d", start
,
4788 if (DEFAULT_ABI
== ABI_ELFv2
)
4789 fprintf (file
, "\t.abiversion 2\n");
4791 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
4792 || (TARGET_ELF
&& flag_pic
== 2))
4794 switch_to_section (toc_section
);
4795 switch_to_section (text_section
);
/* Return nonzero if this function is known to have a null epilogue.  */

int
direct_return (void)
{
  if (reload_completed)
    {
      rs6000_stack_t *info = rs6000_stack_info ();

      if (info->first_gp_reg_save == 32
          && info->first_fp_reg_save == 64
          && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
          && ! info->lr_save_p
          && ! info->cr_save_p
          && info->vrsave_mask == 0
          && ! info->push_p)
        return 1;
    }

  return 0;
}
/* Return the number of instructions it takes to form a constant in an
   integer register.  */

static int
num_insns_constant_wide (HOST_WIDE_INT value)
{
  /* signed constant loadable with addi */
  if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
    return 1;

  /* constant loadable with addis */
  else if ((value & 0xffff) == 0
           && (value >> 31 == -1 || value >> 31 == 0))
    return 1;

  else if (TARGET_POWERPC64)
    {
      HOST_WIDE_INT low  = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
      HOST_WIDE_INT high = value >> 31;

      if (high == 0 || high == -1)
        return 2;

      high >>= 1;

      if (low == 0)
        return num_insns_constant_wide (high) + 1;
      else if (high == 0)
        return num_insns_constant_wide (low) + 1;
      else
        return (num_insns_constant_wide (high)
                + num_insns_constant_wide (low) + 1);
    }

  else
    return 2;
}
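
/* Illustrative sketch, not part of the original source: the two
   single-instruction checks from num_insns_constant_wide applied to 32-bit
   values.  The helper name is hypothetical.  */
#if 0
#include <assert.h>
#include <stdint.h>

static int
example_num_insns_32 (int64_t value)
{
  if ((uint64_t) (value + 0x8000) < 0x10000)
    return 1;                          /* li/addi suffices */
  if ((value & 0xffff) == 0
      && (value >> 31 == -1 || value >> 31 == 0))
    return 1;                          /* lis/addis suffices */
  return 2;                            /* lis + ori */
}

int
main (void)
{
  assert (example_num_insns_32 (-42) == 1);
  assert (example_num_insns_32 (0x12340000) == 1);
  assert (example_num_insns_32 (0x12345678) == 2);
  return 0;
}
#endif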
4861 num_insns_constant (rtx op
, enum machine_mode mode
)
4863 HOST_WIDE_INT low
, high
;
4865 switch (GET_CODE (op
))
4868 if ((INTVAL (op
) >> 31) != 0 && (INTVAL (op
) >> 31) != -1
4869 && mask64_operand (op
, mode
))
4872 return num_insns_constant_wide (INTVAL (op
));
4875 if (mode
== SFmode
|| mode
== SDmode
)
4880 REAL_VALUE_FROM_CONST_DOUBLE (rv
, op
);
4881 if (DECIMAL_FLOAT_MODE_P (mode
))
4882 REAL_VALUE_TO_TARGET_DECIMAL32 (rv
, l
);
4884 REAL_VALUE_TO_TARGET_SINGLE (rv
, l
);
4885 return num_insns_constant_wide ((HOST_WIDE_INT
) l
);
4891 REAL_VALUE_FROM_CONST_DOUBLE (rv
, op
);
4892 if (DECIMAL_FLOAT_MODE_P (mode
))
4893 REAL_VALUE_TO_TARGET_DECIMAL64 (rv
, l
);
4895 REAL_VALUE_TO_TARGET_DOUBLE (rv
, l
);
4896 high
= l
[WORDS_BIG_ENDIAN
== 0];
4897 low
= l
[WORDS_BIG_ENDIAN
!= 0];
4900 return (num_insns_constant_wide (low
)
4901 + num_insns_constant_wide (high
));
4904 if ((high
== 0 && low
>= 0)
4905 || (high
== -1 && low
< 0))
4906 return num_insns_constant_wide (low
);
4908 else if (mask64_operand (op
, mode
))
4912 return num_insns_constant_wide (high
) + 1;
4915 return (num_insns_constant_wide (high
)
4916 + num_insns_constant_wide (low
) + 1);
/* Interpret element ELT of the CONST_VECTOR OP as an integer value.
   If the mode of OP is MODE_VECTOR_INT, this simply returns the
   corresponding element of the vector, but for V4SFmode and V2SFmode,
   the corresponding "float" is interpreted as an SImode integer.  */

static HOST_WIDE_INT
const_vector_elt_as_int (rtx op, unsigned int elt)
{
  rtx tmp;

  /* We can't handle V2DImode and V2DFmode vector constants here yet.  */
  gcc_assert (GET_MODE (op) != V2DImode
              && GET_MODE (op) != V2DFmode);

  tmp = CONST_VECTOR_ELT (op, elt);
  if (GET_MODE (op) == V4SFmode
      || GET_MODE (op) == V2SFmode)
    tmp = gen_lowpart (SImode, tmp);
  return INTVAL (tmp);
}
4945 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
4946 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
4947 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
4948 all items are set to the same value and contain COPIES replicas of the
4949 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
4950 operand and the others are set to the value of the operand's msb. */
4953 vspltis_constant (rtx op
, unsigned step
, unsigned copies
)
4955 enum machine_mode mode
= GET_MODE (op
);
4956 enum machine_mode inner
= GET_MODE_INNER (mode
);
4964 HOST_WIDE_INT splat_val
;
4965 HOST_WIDE_INT msb_val
;
4967 if (mode
== V2DImode
|| mode
== V2DFmode
)
4970 nunits
= GET_MODE_NUNITS (mode
);
4971 bitsize
= GET_MODE_BITSIZE (inner
);
4972 mask
= GET_MODE_MASK (inner
);
4974 val
= const_vector_elt_as_int (op
, BYTES_BIG_ENDIAN
? nunits
- 1 : 0);
4976 msb_val
= val
> 0 ? 0 : -1;
4978 /* Construct the value to be splatted, if possible. If not, return 0. */
4979 for (i
= 2; i
<= copies
; i
*= 2)
4981 HOST_WIDE_INT small_val
;
4983 small_val
= splat_val
>> bitsize
;
4985 if (splat_val
!= ((small_val
<< bitsize
) | (small_val
& mask
)))
4987 splat_val
= small_val
;
4990 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
4991 if (EASY_VECTOR_15 (splat_val
))
4994 /* Also check if we can splat, and then add the result to itself. Do so if
4995 the value is positive, of if the splat instruction is using OP's mode;
4996 for splat_val < 0, the splat and the add should use the same mode. */
4997 else if (EASY_VECTOR_15_ADD_SELF (splat_val
)
4998 && (splat_val
>= 0 || (step
== 1 && copies
== 1)))
5001 /* Also check if are loading up the most significant bit which can be done by
5002 loading up -1 and shifting the value left by -1. */
5003 else if (EASY_VECTOR_MSB (splat_val
, inner
))
5009 /* Check if VAL is present in every STEP-th element, and the
5010 other elements are filled with its most significant bit. */
5011 for (i
= 1; i
< nunits
; ++i
)
5013 HOST_WIDE_INT desired_val
;
5014 unsigned elt
= BYTES_BIG_ENDIAN
? nunits
- 1 - i
: i
;
5015 if ((i
& (step
- 1)) == 0)
5018 desired_val
= msb_val
;
5020 if (desired_val
!= const_vector_elt_as_int (op
, elt
))
5028 /* Return true if OP is of the given MODE and can be synthesized
5029 with a vspltisb, vspltish or vspltisw. */
5032 easy_altivec_constant (rtx op
, enum machine_mode mode
)
5034 unsigned step
, copies
;
5036 if (mode
== VOIDmode
)
5037 mode
= GET_MODE (op
);
5038 else if (mode
!= GET_MODE (op
))
5041 /* V2DI/V2DF was added with VSX. Only allow 0 and all 1's as easy
5043 if (mode
== V2DFmode
)
5044 return zero_constant (op
, mode
);
5046 if (mode
== V2DImode
)
5048 /* In case the compiler is built 32-bit, CONST_DOUBLE constants are not
5050 if (GET_CODE (CONST_VECTOR_ELT (op
, 0)) != CONST_INT
5051 || GET_CODE (CONST_VECTOR_ELT (op
, 1)) != CONST_INT
)
5054 if (zero_constant (op
, mode
))
5057 if (INTVAL (CONST_VECTOR_ELT (op
, 0)) == -1
5058 && INTVAL (CONST_VECTOR_ELT (op
, 1)) == -1)
5064 /* Start with a vspltisw. */
5065 step
= GET_MODE_NUNITS (mode
) / 4;
5068 if (vspltis_constant (op
, step
, copies
))
5071 /* Then try with a vspltish. */
5077 if (vspltis_constant (op
, step
, copies
))
5080 /* And finally a vspltisb. */
5086 if (vspltis_constant (op
, step
, copies
))
5092 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
5093 result is OP. Abort if it is not possible. */
5096 gen_easy_altivec_constant (rtx op
)
5098 enum machine_mode mode
= GET_MODE (op
);
5099 int nunits
= GET_MODE_NUNITS (mode
);
5100 rtx val
= CONST_VECTOR_ELT (op
, BYTES_BIG_ENDIAN
? nunits
- 1 : 0);
5101 unsigned step
= nunits
/ 4;
5102 unsigned copies
= 1;
5104 /* Start with a vspltisw. */
5105 if (vspltis_constant (op
, step
, copies
))
5106 return gen_rtx_VEC_DUPLICATE (V4SImode
, gen_lowpart (SImode
, val
));
5108 /* Then try with a vspltish. */
5114 if (vspltis_constant (op
, step
, copies
))
5115 return gen_rtx_VEC_DUPLICATE (V8HImode
, gen_lowpart (HImode
, val
));
5117 /* And finally a vspltisb. */
5123 if (vspltis_constant (op
, step
, copies
))
5124 return gen_rtx_VEC_DUPLICATE (V16QImode
, gen_lowpart (QImode
, val
));
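
/* Illustrative sketch, not part of the original source: the "copies" check
   used by vspltis_constant.  A 32-bit lane holding 0x00070007 is two copies
   of the 16-bit immediate 7, so the vector can be generated with
   vspltish 7.  */
#if 0
#include <assert.h>
#include <stdint.h>

int
main (void)
{
  int64_t splat_val = 0x00070007;   /* one 32-bit lane of the constant */
  unsigned bitsize = 16;            /* width of the vspltish immediate */
  int64_t small_val = splat_val >> bitsize;
  uint64_t mask = 0xffff;

  /* Same test as in vspltis_constant: the lane must be the narrow value
     replicated in both halves...  */
  assert (splat_val == ((small_val << bitsize) | (small_val & mask)));
  /* ... and the narrow value must fit the 5-bit signed splat immediate
     (the EASY_VECTOR_15 range).  */
  assert (small_val >= -16 && small_val <= 15);
  return 0;
}
#endif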
5130 output_vec_const_move (rtx
*operands
)
5133 enum machine_mode mode
;
5138 mode
= GET_MODE (dest
);
5142 if (zero_constant (vec
, mode
))
5143 return "xxlxor %x0,%x0,%x0";
5145 if (mode
== V2DImode
5146 && INTVAL (CONST_VECTOR_ELT (vec
, 0)) == -1
5147 && INTVAL (CONST_VECTOR_ELT (vec
, 1)) == -1)
5148 return "vspltisw %0,-1";
5154 if (zero_constant (vec
, mode
))
5155 return "vxor %0,%0,%0";
5157 splat_vec
= gen_easy_altivec_constant (vec
);
5158 gcc_assert (GET_CODE (splat_vec
) == VEC_DUPLICATE
);
5159 operands
[1] = XEXP (splat_vec
, 0);
5160 if (!EASY_VECTOR_15 (INTVAL (operands
[1])))
5163 switch (GET_MODE (splat_vec
))
5166 return "vspltisw %0,%1";
5169 return "vspltish %0,%1";
5172 return "vspltisb %0,%1";
5179 gcc_assert (TARGET_SPE
);
5181 /* Vector constant 0 is handled as a splitter of V2SI, and in the
5182 pattern of V1DI, V4HI, and V2SF.
5184 FIXME: We should probably return # and add post reload
5185 splitters for these, but this way is so easy ;-). */
5186 cst
= INTVAL (CONST_VECTOR_ELT (vec
, 0));
5187 cst2
= INTVAL (CONST_VECTOR_ELT (vec
, 1));
5188 operands
[1] = CONST_VECTOR_ELT (vec
, 0);
5189 operands
[2] = CONST_VECTOR_ELT (vec
, 1);
5191 return "li %0,%1\n\tevmergelo %0,%0,%0";
5193 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
5196 /* Initialize TARGET of vector PAIRED to VALS. */
5199 paired_expand_vector_init (rtx target
, rtx vals
)
5201 enum machine_mode mode
= GET_MODE (target
);
5202 int n_elts
= GET_MODE_NUNITS (mode
);
5204 rtx x
, new_rtx
, tmp
, constant_op
, op1
, op2
;
5207 for (i
= 0; i
< n_elts
; ++i
)
5209 x
= XVECEXP (vals
, 0, i
);
5210 if (!(CONST_INT_P (x
)
5211 || GET_CODE (x
) == CONST_DOUBLE
5212 || GET_CODE (x
) == CONST_FIXED
))
5217 /* Load from constant pool. */
5218 emit_move_insn (target
, gen_rtx_CONST_VECTOR (mode
, XVEC (vals
, 0)));
5224 /* The vector is initialized only with non-constants. */
5225 new_rtx
= gen_rtx_VEC_CONCAT (V2SFmode
, XVECEXP (vals
, 0, 0),
5226 XVECEXP (vals
, 0, 1));
5228 emit_move_insn (target
, new_rtx
);
5232 /* One field is non-constant and the other one is a constant. Load the
5233 constant from the constant pool and use ps_merge instruction to
5234 construct the whole vector. */
5235 op1
= XVECEXP (vals
, 0, 0);
5236 op2
= XVECEXP (vals
, 0, 1);
5238 constant_op
= (CONSTANT_P (op1
)) ? op1
: op2
;
5240 tmp
= gen_reg_rtx (GET_MODE (constant_op
));
5241 emit_move_insn (tmp
, constant_op
);
5243 if (CONSTANT_P (op1
))
5244 new_rtx
= gen_rtx_VEC_CONCAT (V2SFmode
, tmp
, op2
);
5246 new_rtx
= gen_rtx_VEC_CONCAT (V2SFmode
, op1
, tmp
);
5248 emit_move_insn (target
, new_rtx
);
5252 paired_expand_vector_move (rtx operands
[])
5254 rtx op0
= operands
[0], op1
= operands
[1];
5256 emit_move_insn (op0
, op1
);
5259 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
5260 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
5261 operands for the relation operation COND. This is a recursive
5265 paired_emit_vector_compare (enum rtx_code rcode
,
5266 rtx dest
, rtx op0
, rtx op1
,
5267 rtx cc_op0
, rtx cc_op1
)
5269 rtx tmp
= gen_reg_rtx (V2SFmode
);
5272 gcc_assert (TARGET_PAIRED_FLOAT
);
5273 gcc_assert (GET_MODE (op0
) == GET_MODE (op1
));
5279 paired_emit_vector_compare (GE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
5283 emit_insn (gen_subv2sf3 (tmp
, cc_op0
, cc_op1
));
5284 emit_insn (gen_selv2sf4 (dest
, tmp
, op0
, op1
, CONST0_RTX (SFmode
)));
5288 paired_emit_vector_compare (GE
, dest
, op0
, op1
, cc_op1
, cc_op0
);
5291 paired_emit_vector_compare (LE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
5294 tmp1
= gen_reg_rtx (V2SFmode
);
5295 max
= gen_reg_rtx (V2SFmode
);
5296 min
= gen_reg_rtx (V2SFmode
);
5297 gen_reg_rtx (V2SFmode
);
5299 emit_insn (gen_subv2sf3 (tmp
, cc_op0
, cc_op1
));
5300 emit_insn (gen_selv2sf4
5301 (max
, tmp
, cc_op0
, cc_op1
, CONST0_RTX (SFmode
)));
5302 emit_insn (gen_subv2sf3 (tmp
, cc_op1
, cc_op0
));
5303 emit_insn (gen_selv2sf4
5304 (min
, tmp
, cc_op0
, cc_op1
, CONST0_RTX (SFmode
)));
5305 emit_insn (gen_subv2sf3 (tmp1
, min
, max
));
5306 emit_insn (gen_selv2sf4 (dest
, tmp1
, op0
, op1
, CONST0_RTX (SFmode
)));
5309 paired_emit_vector_compare (EQ
, dest
, op1
, op0
, cc_op0
, cc_op1
);
5312 paired_emit_vector_compare (LE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
5315 paired_emit_vector_compare (LT
, dest
, op1
, op0
, cc_op0
, cc_op1
);
5318 paired_emit_vector_compare (GE
, dest
, op1
, op0
, cc_op0
, cc_op1
);
5321 paired_emit_vector_compare (GT
, dest
, op1
, op0
, cc_op0
, cc_op1
);
5330 /* Emit vector conditional expression.
5331 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
5332 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
5335 paired_emit_vector_cond_expr (rtx dest
, rtx op1
, rtx op2
,
5336 rtx cond
, rtx cc_op0
, rtx cc_op1
)
5338 enum rtx_code rcode
= GET_CODE (cond
);
5340 if (!TARGET_PAIRED_FLOAT
)
5343 paired_emit_vector_compare (rcode
, dest
, op1
, op2
, cc_op0
, cc_op1
);
5348 /* Initialize vector TARGET to VALS. */
5351 rs6000_expand_vector_init (rtx target
, rtx vals
)
5353 enum machine_mode mode
= GET_MODE (target
);
5354 enum machine_mode inner_mode
= GET_MODE_INNER (mode
);
5355 int n_elts
= GET_MODE_NUNITS (mode
);
5356 int n_var
= 0, one_var
= -1;
5357 bool all_same
= true, all_const_zero
= true;
5361 for (i
= 0; i
< n_elts
; ++i
)
5363 x
= XVECEXP (vals
, 0, i
);
5364 if (!(CONST_INT_P (x
)
5365 || GET_CODE (x
) == CONST_DOUBLE
5366 || GET_CODE (x
) == CONST_FIXED
))
5367 ++n_var
, one_var
= i
;
5368 else if (x
!= CONST0_RTX (inner_mode
))
5369 all_const_zero
= false;
5371 if (i
> 0 && !rtx_equal_p (x
, XVECEXP (vals
, 0, 0)))
5377 rtx const_vec
= gen_rtx_CONST_VECTOR (mode
, XVEC (vals
, 0));
5378 bool int_vector_p
= (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
);
5379 if ((int_vector_p
|| TARGET_VSX
) && all_const_zero
)
5381 /* Zero register. */
5382 emit_insn (gen_rtx_SET (VOIDmode
, target
,
5383 gen_rtx_XOR (mode
, target
, target
)));
5386 else if (int_vector_p
&& easy_vector_constant (const_vec
, mode
))
5388 /* Splat immediate. */
5389 emit_insn (gen_rtx_SET (VOIDmode
, target
, const_vec
));
5394 /* Load from constant pool. */
5395 emit_move_insn (target
, const_vec
);
5400 /* Double word values on VSX can use xxpermdi or lxvdsx. */
5401 if (VECTOR_MEM_VSX_P (mode
) && (mode
== V2DFmode
|| mode
== V2DImode
))
5403 rtx op0
= XVECEXP (vals
, 0, 0);
5404 rtx op1
= XVECEXP (vals
, 0, 1);
5407 if (!MEM_P (op0
) && !REG_P (op0
))
5408 op0
= force_reg (inner_mode
, op0
);
5409 if (mode
== V2DFmode
)
5410 emit_insn (gen_vsx_splat_v2df (target
, op0
));
5412 emit_insn (gen_vsx_splat_v2di (target
, op0
));
5416 op0
= force_reg (inner_mode
, op0
);
5417 op1
= force_reg (inner_mode
, op1
);
5418 if (mode
== V2DFmode
)
5419 emit_insn (gen_vsx_concat_v2df (target
, op0
, op1
));
5421 emit_insn (gen_vsx_concat_v2di (target
, op0
, op1
));
5426 /* With single precision floating point on VSX, know that internally single
5427 precision is actually represented as a double, and either make 2 V2DF
5428 vectors, and convert these vectors to single precision, or do one
5429 conversion, and splat the result to the other elements. */
5430 if (mode
== V4SFmode
&& VECTOR_MEM_VSX_P (mode
))
5434 rtx freg
= gen_reg_rtx (V4SFmode
);
5435 rtx sreg
= force_reg (SFmode
, XVECEXP (vals
, 0, 0));
5436 rtx cvt
= ((TARGET_XSCVDPSPN
)
5437 ? gen_vsx_xscvdpspn_scalar (freg
, sreg
)
5438 : gen_vsx_xscvdpsp_scalar (freg
, sreg
));
5441 emit_insn (gen_vsx_xxspltw_v4sf (target
, freg
, const0_rtx
));
5445 rtx dbl_even
= gen_reg_rtx (V2DFmode
);
5446 rtx dbl_odd
= gen_reg_rtx (V2DFmode
);
5447 rtx flt_even
= gen_reg_rtx (V4SFmode
);
5448 rtx flt_odd
= gen_reg_rtx (V4SFmode
);
5449 rtx op0
= force_reg (SFmode
, XVECEXP (vals
, 0, 0));
5450 rtx op1
= force_reg (SFmode
, XVECEXP (vals
, 0, 1));
5451 rtx op2
= force_reg (SFmode
, XVECEXP (vals
, 0, 2));
5452 rtx op3
= force_reg (SFmode
, XVECEXP (vals
, 0, 3));
5454 emit_insn (gen_vsx_concat_v2sf (dbl_even
, op0
, op1
));
5455 emit_insn (gen_vsx_concat_v2sf (dbl_odd
, op2
, op3
));
5456 emit_insn (gen_vsx_xvcvdpsp (flt_even
, dbl_even
));
5457 emit_insn (gen_vsx_xvcvdpsp (flt_odd
, dbl_odd
));
5458 rs6000_expand_extract_even (target
, flt_even
, flt_odd
);
5463 /* Store value to stack temp. Load vector element. Splat. However, splat
5464 of 64-bit items is not supported on Altivec. */
5465 if (all_same
&& GET_MODE_SIZE (inner_mode
) <= 4)
5468 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (inner_mode
));
5469 emit_move_insn (adjust_address_nv (mem
, inner_mode
, 0),
5470 XVECEXP (vals
, 0, 0));
5471 x
= gen_rtx_UNSPEC (VOIDmode
,
5472 gen_rtvec (1, const0_rtx
), UNSPEC_LVE
);
5473 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
5475 gen_rtx_SET (VOIDmode
,
5478 field
= (BYTES_BIG_ENDIAN
? const0_rtx
5479 : GEN_INT (GET_MODE_NUNITS (mode
) - 1));
5480 x
= gen_rtx_VEC_SELECT (inner_mode
, target
,
5481 gen_rtx_PARALLEL (VOIDmode
,
5482 gen_rtvec (1, field
)));
5483 emit_insn (gen_rtx_SET (VOIDmode
, target
,
5484 gen_rtx_VEC_DUPLICATE (mode
, x
)));
5488 /* One field is non-constant. Load constant then overwrite
5492 rtx copy
= copy_rtx (vals
);
5494 /* Load constant part of vector, substitute neighboring value for
5496 XVECEXP (copy
, 0, one_var
) = XVECEXP (vals
, 0, (one_var
+ 1) % n_elts
);
5497 rs6000_expand_vector_init (target
, copy
);
5499 /* Insert variable. */
5500 rs6000_expand_vector_set (target
, XVECEXP (vals
, 0, one_var
), one_var
);
5504 /* Construct the vector in memory one field at a time
5505 and load the whole vector. */
5506 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (mode
));
5507 for (i
= 0; i
< n_elts
; i
++)
5508 emit_move_insn (adjust_address_nv (mem
, inner_mode
,
5509 i
* GET_MODE_SIZE (inner_mode
)),
5510 XVECEXP (vals
, 0, i
));
5511 emit_move_insn (target
, mem
);
5514 /* Set field ELT of TARGET to VAL. */
5517 rs6000_expand_vector_set (rtx target
, rtx val
, int elt
)
5519 enum machine_mode mode
= GET_MODE (target
);
5520 enum machine_mode inner_mode
= GET_MODE_INNER (mode
);
5521 rtx reg
= gen_reg_rtx (mode
);
5523 int width
= GET_MODE_SIZE (inner_mode
);
5526 if (VECTOR_MEM_VSX_P (mode
) && (mode
== V2DFmode
|| mode
== V2DImode
))
5528 rtx (*set_func
) (rtx
, rtx
, rtx
, rtx
)
5529 = ((mode
== V2DFmode
) ? gen_vsx_set_v2df
: gen_vsx_set_v2di
);
5530 emit_insn (set_func (target
, target
, val
, GEN_INT (elt
)));
5534 /* Load single variable value. */
5535 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (inner_mode
));
5536 emit_move_insn (adjust_address_nv (mem
, inner_mode
, 0), val
);
5537 x
= gen_rtx_UNSPEC (VOIDmode
,
5538 gen_rtvec (1, const0_rtx
), UNSPEC_LVE
);
5539 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
5541 gen_rtx_SET (VOIDmode
,
5545 /* Linear sequence. */
5546 mask
= gen_rtx_PARALLEL (V16QImode
, rtvec_alloc (16));
5547 for (i
= 0; i
< 16; ++i
)
5548 XVECEXP (mask
, 0, i
) = GEN_INT (i
);
5550 /* Set permute mask to insert element into target. */
5551 for (i
= 0; i
< width
; ++i
)
5552 XVECEXP (mask
, 0, elt
*width
+ i
)
5553 = GEN_INT (i
+ 0x10);
5554 x
= gen_rtx_CONST_VECTOR (V16QImode
, XVEC (mask
, 0));
5556 if (BYTES_BIG_ENDIAN
)
5557 x
= gen_rtx_UNSPEC (mode
,
5558 gen_rtvec (3, target
, reg
,
5559 force_reg (V16QImode
, x
)),
5563 /* Invert selector. */
5564 rtx splat
= gen_rtx_VEC_DUPLICATE (V16QImode
,
5565 gen_rtx_CONST_INT (QImode
, -1));
5566 rtx tmp
= gen_reg_rtx (V16QImode
);
5567 emit_move_insn (tmp
, splat
);
5568 x
= gen_rtx_MINUS (V16QImode
, tmp
, force_reg (V16QImode
, x
));
5569 emit_move_insn (tmp
, x
);
5571 /* Permute with operands reversed and adjusted selector. */
5572 x
= gen_rtx_UNSPEC (mode
, gen_rtvec (3, reg
, target
, tmp
),
5576 emit_insn (gen_rtx_SET (VOIDmode
, target
, x
));
5579 /* Extract field ELT from VEC into TARGET. */
5582 rs6000_expand_vector_extract (rtx target
, rtx vec
, int elt
)
5584 enum machine_mode mode
= GET_MODE (vec
);
5585 enum machine_mode inner_mode
= GET_MODE_INNER (mode
);
5588 if (VECTOR_MEM_VSX_P (mode
))
5595 emit_insn (gen_vsx_extract_v2df (target
, vec
, GEN_INT (elt
)));
5598 emit_insn (gen_vsx_extract_v2di (target
, vec
, GEN_INT (elt
)));
5601 emit_insn (gen_vsx_extract_v4sf (target
, vec
, GEN_INT (elt
)));
5606 /* Allocate mode-sized buffer. */
5607 mem
= assign_stack_temp (mode
, GET_MODE_SIZE (mode
));
5609 emit_move_insn (mem
, vec
);
5611 /* Add offset to field within buffer matching vector element. */
5612 mem
= adjust_address_nv (mem
, inner_mode
, elt
* GET_MODE_SIZE (inner_mode
));
5614 emit_move_insn (target
, adjust_address_nv (mem
, inner_mode
, 0));
/* Generates shifts and masks for a pair of rldicl or rldicr insns to
   implement ANDing by the mask IN.  */
void
build_mask64_2_operands (rtx in, rtx *out)
{
  unsigned HOST_WIDE_INT c, lsb, m1, m2;
  int shift;

  gcc_assert (GET_CODE (in) == CONST_INT);

  c = INTVAL (in);
  if (c & 1)
    {
      /* Assume c initially something like 0x00fff000000fffff.  The idea
         is to rotate the word so that the middle ^^^^^^ group of zeros
         is at the MS end and can be cleared with an rldicl mask.  We then
         rotate back and clear off the MS       ^^ group of zeros with a
         second rldicl.  */
      c = ~c;                   /*   c == 0xff000ffffff00000 */
      lsb = c & -c;             /* lsb == 0x0000000000100000 */
      m1 = -lsb;                /*  m1 == 0xfffffffffff00000 */
      c = ~c;                   /*   c == 0x00fff000000fffff */
      c &= -lsb;                /*   c == 0x00fff00000000000 */
      lsb = c & -c;             /* lsb == 0x0000100000000000 */
      c = ~c;                   /*   c == 0xff000fffffffffff */
      c &= -lsb;                /*   c == 0xff00000000000000 */
      shift = 0;
      while ((lsb >>= 1) != 0)
        shift++;                /* shift == 44 on exit from loop */
      m1 <<= 64 - shift;        /*  m1 == 0xffffff0000000000 */
      m1 = ~m1;                 /*  m1 == 0x000000ffffffffff */
      m2 = ~c;                  /*  m2 == 0x00ffffffffffffff */
    }
  else
    {
      /* Assume c initially something like 0xff000f0000000000.  The idea
         is to rotate the word so that the     ^^^  middle group of zeros
         is at the LS end and can be cleared with an rldicr mask.  We then
         rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
         a second rldicr.  */
      lsb = c & -c;             /* lsb == 0x0000010000000000 */
      m2 = -lsb;                /*  m2 == 0xffffff0000000000 */
      c = ~c;                   /*   c == 0x00fff0ffffffffff */
      c &= -lsb;                /*   c == 0x00fff00000000000 */
      lsb = c & -c;             /* lsb == 0x0000100000000000 */
      c = ~c;                   /*   c == 0xff000fffffffffff */
      c &= -lsb;                /*   c == 0xff00000000000000 */
      shift = 0;
      while ((lsb >>= 1) != 0)
        shift++;                /* shift == 44 on exit from loop */
      m1 = ~c;                  /*  m1 == 0x00ffffffffffffff */
      m1 >>= shift;             /*  m1 == 0x0000000000000fff */
      m1 = ~m1;                 /*  m1 == 0xfffffffffffff000 */
    }

  /* Note that when we only have two 0->1 and 1->0 transitions, one of the
     masks will be all 1's.  We are guaranteed more than one transition.  */
  out[0] = GEN_INT (64 - shift);
  out[1] = GEN_INT (m1);
  out[2] = GEN_INT (shift);
  out[3] = GEN_INT (m2);
}
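
/* Illustrative sketch, not part of the original source: an arithmetic check
   of the rotate/mask decomposition documented above, using the worked
   example constant from the comments.  It only checks the mask algebra, not
   the actual rldicl/rldicr encoding; rotl64 is a hypothetical helper.  */
#if 0
#include <assert.h>
#include <stdint.h>

static uint64_t
rotl64 (uint64_t x, unsigned n)
{
  return n ? (x << n) | (x >> (64 - n)) : x;
}

int
main (void)
{
  const uint64_t c  = 0x00fff000000fffffULL;   /* mask being ANDed */
  const unsigned shift = 44;                   /* out[2]; out[0] is 64 - shift */
  const uint64_t m1 = 0x000000ffffffffffULL;   /* out[1] */
  const uint64_t m2 = 0x00ffffffffffffffULL;   /* out[3] */
  const uint64_t x  = 0x123456789abcdef0ULL;   /* arbitrary test value */

  /* Rotate by 64-shift, clear with m1, rotate back, clear with m2.  */
  uint64_t r = rotl64 (rotl64 (x, 64 - shift) & m1, shift) & m2;

  assert (r == (x & c));
  return 0;
}
#endif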
5680 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
5683 invalid_e500_subreg (rtx op
, enum machine_mode mode
)
5685 if (TARGET_E500_DOUBLE
)
5687 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
5688 subreg:TI and reg:TF. Decimal float modes are like integer
5689 modes (only low part of each register used) for this
5691 if (GET_CODE (op
) == SUBREG
5692 && (mode
== SImode
|| mode
== DImode
|| mode
== TImode
5693 || mode
== DDmode
|| mode
== TDmode
|| mode
== PTImode
)
5694 && REG_P (SUBREG_REG (op
))
5695 && (GET_MODE (SUBREG_REG (op
)) == DFmode
5696 || GET_MODE (SUBREG_REG (op
)) == TFmode
))
5699 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
5701 if (GET_CODE (op
) == SUBREG
5702 && (mode
== DFmode
|| mode
== TFmode
)
5703 && REG_P (SUBREG_REG (op
))
5704 && (GET_MODE (SUBREG_REG (op
)) == DImode
5705 || GET_MODE (SUBREG_REG (op
)) == TImode
5706 || GET_MODE (SUBREG_REG (op
)) == PTImode
5707 || GET_MODE (SUBREG_REG (op
)) == DDmode
5708 || GET_MODE (SUBREG_REG (op
)) == TDmode
))
5713 && GET_CODE (op
) == SUBREG
5715 && REG_P (SUBREG_REG (op
))
5716 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op
))))
5722 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
5723 selects whether the alignment is abi mandated, optional, or
5724 both abi and optional alignment. */
5727 rs6000_data_alignment (tree type
, unsigned int align
, enum data_align how
)
5729 if (how
!= align_opt
)
5731 if (TREE_CODE (type
) == VECTOR_TYPE
)
5733 if ((TARGET_SPE
&& SPE_VECTOR_MODE (TYPE_MODE (type
)))
5734 || (TARGET_PAIRED_FLOAT
&& PAIRED_VECTOR_MODE (TYPE_MODE (type
))))
5739 else if (align
< 128)
5742 else if (TARGET_E500_DOUBLE
5743 && TREE_CODE (type
) == REAL_TYPE
5744 && TYPE_MODE (type
) == DFmode
)
5751 if (how
!= align_abi
)
5753 if (TREE_CODE (type
) == ARRAY_TYPE
5754 && TYPE_MODE (TREE_TYPE (type
)) == QImode
)
5756 if (align
< BITS_PER_WORD
)
5757 align
= BITS_PER_WORD
;
/* AIX increases natural record alignment to doubleword if the first
   field is an FP double while the FP fields remain word aligned.  */

unsigned int
rs6000_special_round_type_align (tree type, unsigned int computed,
                                 unsigned int specified)
{
  unsigned int align = MAX (computed, specified);
  tree field = TYPE_FIELDS (type);

  /* Skip all non field decls */
  while (field != NULL && TREE_CODE (field) != FIELD_DECL)
    field = DECL_CHAIN (field);

  if (field != NULL && field != type)
    {
      type = TREE_TYPE (field);
      while (TREE_CODE (type) == ARRAY_TYPE)
        type = TREE_TYPE (type);

      if (type != error_mark_node && TYPE_MODE (type) == DFmode)
        align = MAX (align, 64);
    }

  return align;
}
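
/* Illustrative sketch, not part of the original source: what the AIX rule
   above means for two simple records.  The alignments noted in the comments
   are what rs6000_special_round_type_align produces, not necessarily what
   the host compiler of this sketch would use.  */
#if 0
struct first_is_double { double d; int i; };  /* alignment raised to 64 bits */
struct double_later    { int i; double d; };  /* computed alignment kept */
#endif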
5791 /* Darwin increases record alignment to the natural alignment of
5795 darwin_rs6000_special_round_type_align (tree type
, unsigned int computed
,
5796 unsigned int specified
)
5798 unsigned int align
= MAX (computed
, specified
);
5800 if (TYPE_PACKED (type
))
5803 /* Find the first field, looking down into aggregates. */
5805 tree field
= TYPE_FIELDS (type
);
5806 /* Skip all non field decls */
5807 while (field
!= NULL
&& TREE_CODE (field
) != FIELD_DECL
)
5808 field
= DECL_CHAIN (field
);
5811 /* A packed field does not contribute any extra alignment. */
5812 if (DECL_PACKED (field
))
5814 type
= TREE_TYPE (field
);
5815 while (TREE_CODE (type
) == ARRAY_TYPE
)
5816 type
= TREE_TYPE (type
);
5817 } while (AGGREGATE_TYPE_P (type
));
5819 if (! AGGREGATE_TYPE_P (type
) && type
!= error_mark_node
)
5820 align
= MAX (align
, TYPE_ALIGN (type
));
5825 /* Return 1 for an operand in small memory on V.4/eabi. */
5828 small_data_operand (rtx op ATTRIBUTE_UNUSED
,
5829 enum machine_mode mode ATTRIBUTE_UNUSED
)
5834 if (rs6000_sdata
== SDATA_NONE
|| rs6000_sdata
== SDATA_DATA
)
5837 if (DEFAULT_ABI
!= ABI_V4
)
5840 /* Vector and float memory instructions have a limited offset on the
5841 SPE, so using a vector or float variable directly as an operand is
5844 && (SPE_VECTOR_MODE (mode
) || FLOAT_MODE_P (mode
)))
5847 if (GET_CODE (op
) == SYMBOL_REF
)
5850 else if (GET_CODE (op
) != CONST
5851 || GET_CODE (XEXP (op
, 0)) != PLUS
5852 || GET_CODE (XEXP (XEXP (op
, 0), 0)) != SYMBOL_REF
5853 || GET_CODE (XEXP (XEXP (op
, 0), 1)) != CONST_INT
)
5858 rtx sum
= XEXP (op
, 0);
5859 HOST_WIDE_INT summand
;
5861 /* We have to be careful here, because it is the referenced address
5862 that must be 32k from _SDA_BASE_, not just the symbol. */
5863 summand
= INTVAL (XEXP (sum
, 1));
5864 if (summand
< 0 || summand
> g_switch_value
)
5867 sym_ref
= XEXP (sum
, 0);
5870 return SYMBOL_REF_SMALL_P (sym_ref
);
/* Return true if either operand is a general purpose register.  */

bool
gpr_or_gpr_p (rtx op0, rtx op1)
{
  return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
          || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
}

/* Return true if this is a move direct operation between GPR registers and
   floating point/VSX registers.  */

static bool
direct_move_p (rtx op0, rtx op1)
{
  int regno0, regno1;

  if (!REG_P (op0) || !REG_P (op1))
    return false;

  if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
    return false;

  regno0 = REGNO (op0);
  regno1 = REGNO (op1);
  if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
    return false;

  if (INT_REGNO_P (regno0))
    return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);

  else if (INT_REGNO_P (regno1))
    {
      if (TARGET_MFPGPR && FP_REGNO_P (regno0))
        return true;

      else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
        return true;
    }

  return false;
}
5919 /* Return true if this is a load or store quad operation. */
5922 quad_load_store_p (rtx op0
, rtx op1
)
5926 if (!TARGET_QUAD_MEMORY
)
5929 else if (REG_P (op0
) && MEM_P (op1
))
5930 ret
= (quad_int_reg_operand (op0
, GET_MODE (op0
))
5931 && quad_memory_operand (op1
, GET_MODE (op1
))
5932 && !reg_overlap_mentioned_p (op0
, op1
));
5934 else if (MEM_P (op0
) && REG_P (op1
))
5935 ret
= (quad_memory_operand (op0
, GET_MODE (op0
))
5936 && quad_int_reg_operand (op1
, GET_MODE (op1
)));
5941 if (TARGET_DEBUG_ADDR
)
5943 fprintf (stderr
, "\n========== quad_load_store, return %s\n",
5944 ret
? "true" : "false");
5945 debug_rtx (gen_rtx_SET (VOIDmode
, op0
, op1
));
/* Given an address, return a constant offset term if one exists.  */
address_offset (rtx op)
  if (GET_CODE (op) == PRE_INC
      || GET_CODE (op) == PRE_DEC)
  else if (GET_CODE (op) == PRE_MODIFY
           || GET_CODE (op) == LO_SUM)

  if (GET_CODE (op) == CONST)

  if (GET_CODE (op) == PLUS)

  if (CONST_INT_P (op))

/* Return true if the MEM operand is a memory operand suitable for use
   with a (full width, possibly multiple) gpr load/store.  On
   powerpc64 this means the offset must be divisible by 4.
   Implements 'Y' constraint.

   Accept direct, indexed, offset, lo_sum and tocref.  Since this is
   a constraint function we know the operand has satisfied a suitable
   memory predicate.  Also accept some odd rtl generated by reload
   (see rs6000_legitimize_reload_address for various forms).  It is
   important that reload rtl be accepted by appropriate constraints
   but not by the operand predicate.

   Offsetting a lo_sum should not be allowed, except where we know by
   alignment that a 32k boundary is not crossed, but see the ???
   comment in rs6000_legitimize_reload_address.  Note that by
   "offsetting" here we mean a further offset to access parts of the
   MEM.  It's fine to have a lo_sum where the inner address is offset
   from a sym, since the same sym+offset will appear in the high part
   of the address calculation.  */
mem_operand_gpr (rtx op, enum machine_mode mode)
  unsigned HOST_WIDE_INT offset;

  rtx addr = XEXP (op, 0);
  op = address_offset (addr);

  offset = INTVAL (op);
  if (TARGET_POWERPC64 && (offset & 3) != 0)

  extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
  gcc_assert (extra >= 0);

  if (GET_CODE (addr) == LO_SUM)
    /* For lo_sum addresses, we must allow any offset except one that
       causes a wrap, so test only the low 16 bits.  */
    offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;

  return offset + 0x8000 < 0x10000u - extra;
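/* For example, EXTRA is the number of bytes of the access beyond the
   first word, so for a DImode access on a 32-bit target (extra = 4) an
   offset of 0x7ff8 passes the final check (0xfff8 < 0xfffc) while 0x7ffc
   does not, because the second word at offset+4 would need a displacement
   of 0x8000, which no longer fits the signed 16-bit field.  */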
/* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p.  */
reg_offset_addressing_ok_p (enum machine_mode mode)
  /* AltiVec/VSX vector modes.  Only reg+reg addressing is valid.  While
     TImode is not a vector mode, if we want to use the VSX registers to
     move it around, we need to restrict ourselves to reg+reg
     addressing.  */
  if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))

  /* Paired vector modes.  Only reg+reg addressing is valid.  */
  if (TARGET_PAIRED_FLOAT)

  /* If we can do direct load/stores of SDmode, restrict it to reg+reg
     addressing for the LFIWZX and STFIWX instructions.  */
  if (TARGET_NO_SDMODE_STACK)

virtual_stack_registers_memory_p (rtx op)
  if (GET_CODE (op) == REG)
    regnum = REGNO (op);
  else if (GET_CODE (op) == PLUS
           && GET_CODE (XEXP (op, 0)) == REG
           && GET_CODE (XEXP (op, 1)) == CONST_INT)
    regnum = REGNO (XEXP (op, 0));

  return (regnum >= FIRST_VIRTUAL_REGISTER
          && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
/* Return true if a MODE sized memory accesses to OP plus OFFSET
   is known to not straddle a 32k boundary.  */
offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
                             enum machine_mode mode)
  unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;

  if (GET_CODE (op) != SYMBOL_REF)

  dsize = GET_MODE_SIZE (mode);
  decl = SYMBOL_REF_DECL (op);

      /* -fsection-anchors loses the original SYMBOL_REF_DECL when
         replacing memory addresses with an anchor plus offset.  We
         could find the decl by rummaging around in the block->objects
         VEC for the given offset but that seems like too much work.  */
      dalign = BITS_PER_UNIT;
      if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
          && SYMBOL_REF_ANCHOR_P (op)
          && SYMBOL_REF_BLOCK (op) != NULL)
          struct object_block *block = SYMBOL_REF_BLOCK (op);

          dalign = block->alignment;
          offset += SYMBOL_REF_BLOCK_OFFSET (op);
      else if (CONSTANT_POOL_ADDRESS_P (op))
          /* It would be nice to have get_pool_align()..  */
          enum machine_mode cmode = get_pool_mode (op);

          dalign = GET_MODE_ALIGNMENT (cmode);
      else if (DECL_P (decl))
          dalign = DECL_ALIGN (decl);

          /* Allow BLKmode when the entire object is known to not
             cross a 32k boundary.  */
          if (!DECL_SIZE_UNIT (decl))

          if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))

          dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));

          return dalign / BITS_PER_UNIT >= dsize;

      type = TREE_TYPE (decl);

      dalign = TYPE_ALIGN (type);
      if (CONSTANT_CLASS_P (decl))
        dalign = CONSTANT_ALIGNMENT (decl, dalign);
      dalign = DATA_ALIGNMENT (decl, dalign);

      /* BLKmode, check the entire object.  */
      if (TREE_CODE (decl) == STRING_CST)
        dsize = TREE_STRING_LENGTH (decl);
      else if (TYPE_SIZE_UNIT (type)
               && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
        dsize = tree_to_uhwi (TYPE_SIZE_UNIT (type));

      return dalign / BITS_PER_UNIT >= dsize;

  /* Find how many bits of the alignment we know for this access.  */
  mask = dalign / BITS_PER_UNIT - 1;
  lsb = offset & -offset;

  return dalign >= dsize;
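/* For the bit twiddling above: offset & -offset isolates the lowest set
   bit of OFFSET (e.g. 0x1840 & -0x1840 == 0x40).  The alignment that can
   be guaranteed for op + offset is limited both by the symbol's alignment
   and by that lowest set bit, and the access is known not to cross a 32k
   boundary when the guaranteed alignment is at least dsize bytes, which
   is what the final dalign >= dsize comparison tests.  */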
constant_pool_expr_p (rtx op)
  split_const (op, &base, &offset);
  return (GET_CODE (base) == SYMBOL_REF
          && CONSTANT_POOL_ADDRESS_P (base)
          && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));

static const_rtx tocrel_base, tocrel_offset;

/* Return true if OP is a toc pointer relative address (the output
   of create_TOC_reference).  If STRICT, do not match high part or
   non-split -mcmodel=large/medium toc pointer relative addresses.  */
toc_relative_expr_p (const_rtx op, bool strict)
  if (TARGET_CMODEL != CMODEL_SMALL)
      /* Only match the low part.  */
      if (GET_CODE (op) == LO_SUM
          && REG_P (XEXP (op, 0))
          && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))

  tocrel_offset = const0_rtx;
  if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
      tocrel_base = XEXP (op, 0);
      tocrel_offset = XEXP (op, 1);

  return (GET_CODE (tocrel_base) == UNSPEC
          && XINT (tocrel_base, 1) == UNSPEC_TOCREL);

/* Return true if X is a constant pool address, and also for cmodel=medium
   if X is a toc-relative address known to be offsettable within MODE.  */
legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
  return (toc_relative_expr_p (x, strict)
          && (TARGET_CMODEL != CMODEL_MEDIUM
              || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
              || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
                                              INTVAL (tocrel_offset), mode)));
legitimate_small_data_p (enum machine_mode mode, rtx x)
  return (DEFAULT_ABI == ABI_V4
          && !flag_pic && !TARGET_TOC
          && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
          && small_data_operand (x, mode));

/* SPE offset addressing is limited to 5-bits worth of double words.  */
#define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)

rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x,
                                    bool strict, bool worst_case)
  unsigned HOST_WIDE_INT offset;

  if (GET_CODE (x) != PLUS)
  if (!REG_P (XEXP (x, 0)))
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
  if (!reg_offset_addressing_ok_p (mode))
    return virtual_stack_registers_memory_p (x);
  if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
  if (GET_CODE (XEXP (x, 1)) != CONST_INT)

  offset = INTVAL (XEXP (x, 1));

      /* SPE vector modes.  */
      return SPE_CONST_OFFSET_OK (offset);

      /* On e500v2, we may have:

           (subreg:DF (mem:DI (plus (reg) (const_int))) 0).

         Which gets addressed with evldd instructions.  */
      if (TARGET_E500_DOUBLE)
        return SPE_CONST_OFFSET_OK (offset);

      /* If we are using VSX scalar loads, restrict ourselves to reg+reg
         addressing.  */
      if (VECTOR_MEM_VSX_P (mode))

      if (!TARGET_POWERPC64)
      else if (offset & 3)

      if (TARGET_E500_DOUBLE)
        return (SPE_CONST_OFFSET_OK (offset)
                && SPE_CONST_OFFSET_OK (offset + 8));

      if (!TARGET_POWERPC64)
      else if (offset & 3)

  return offset < 0x10000 - extra;
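/* For reference: SPE_CONST_OFFSET_OK only accepts values whose set bits
   all lie within 0xf8, i.e. multiples of 8 from 0 to 248 -- five bits
   worth of double-word offsets, as the macro comment says.  0x48 passes;
   0x44 and 0x100 do not, and such addresses are rejected here.  */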
legitimate_indexed_address_p (rtx x, int strict)
  if (GET_CODE (x) != PLUS)

  /* Recognize the rtl generated by reload which we know will later be
     replaced with proper base and index regs.  */
      && reload_in_progress
      && (REG_P (op0) || GET_CODE (op0) == PLUS)

  return (REG_P (op0) && REG_P (op1)
          && ((INT_REG_OK_FOR_BASE_P (op0, strict)
               && INT_REG_OK_FOR_INDEX_P (op1, strict))
              || (INT_REG_OK_FOR_BASE_P (op1, strict)
                  && INT_REG_OK_FOR_INDEX_P (op0, strict))));

avoiding_indexed_address_p (enum machine_mode mode)
  /* Avoid indexed addressing for modes that have non-indexed
     load/store instruction forms.  */
  return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));

legitimate_indirect_address_p (rtx x, int strict)
  return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);

macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
  if (!TARGET_MACHO || !flag_pic
      || mode != SImode || GET_CODE (x) != MEM)
  if (GET_CODE (x) != LO_SUM)
  if (GET_CODE (XEXP (x, 0)) != REG)
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))

  return CONSTANT_P (x);

legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
  if (GET_CODE (x) != LO_SUM)
  if (GET_CODE (XEXP (x, 0)) != REG)
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))

  /* Restrict addressing for DI because of our SUBREG hackery.  */
  if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)

  if (TARGET_ELF || TARGET_MACHO)
      if (DEFAULT_ABI == ABI_V4 && flag_pic)

      /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS because it usually
         calls push_reload from reload pass code.  LEGITIMIZE_RELOAD_ADDRESS
         recognizes some LO_SUM addresses as valid although this
         function says the opposite.  In most cases LRA can generate
         correct code for address reloads through its own transformations;
         only some LO_SUM cases cannot be handled that way.  So we need
         code here analogous to the LO_SUM handling in
         rs6000_legitimize_reload_address, saying that some addresses
         are still valid.  */
      large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
                      && small_toc_ref (x, VOIDmode));
      if (TARGET_TOC && ! large_toc_ok)

      if (GET_MODE_NUNITS (mode) != 1)
      if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
          && !(/* ??? Assume floating point reg based on mode?  */
               TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
               && (mode == DFmode || mode == DDmode)))

      return CONSTANT_P (x) || large_toc_ok;
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This is used from only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was
   called.  In some cases it is useful to look at this to decide what
   needs to be done.

   It is always safe for this function to do nothing.  It exists to
   recognize opportunities to optimize the output.

   On RS/6000, first check for the sum of a register with a constant
   integer that is out of range.  If so, generate code to add the
   constant with the low-order 16 bits masked to the register and force
   this result into another register (this can be done with `cau').
   Then generate an address of REG+(CONST&0xffff), allowing for the
   possibility of bit 16 being a one.

   Then check for the sum of a register and something not constant, try to
   load the other things into a register and return the sum.  */
rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
                           enum machine_mode mode)
  if (!reg_offset_addressing_ok_p (mode))
      if (virtual_stack_registers_memory_p (x))

      /* In theory we should not be seeing addresses of the form reg+0,
         but just in case it is generated, optimize it away.  */
      if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
        return force_reg (Pmode, XEXP (x, 0));

      /* For TImode with load/store quad, restrict addresses to just a single
         pointer, so it works with both GPRs and VSX registers.  */
      /* Make sure both operands are registers.  */
      else if (GET_CODE (x) == PLUS
               && (mode != TImode || !TARGET_QUAD_MEMORY))
        return gen_rtx_PLUS (Pmode,
                             force_reg (Pmode, XEXP (x, 0)),
                             force_reg (Pmode, XEXP (x, 1)));

        return force_reg (Pmode, x);

  if (GET_CODE (x) == SYMBOL_REF)
      enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
        return rs6000_legitimize_tls_address (x, model);

      /* As in legitimate_offset_address_p we do not assume
         worst-case.  The mode here is just a hint as to the registers
         used.  A TImode is usually in gprs, but may actually be in
         fprs.  Leave worst-case scenario for reload to handle via
         insn constraints.  PTImode is only GPRs.  */

  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
      && !(SPE_VECTOR_MODE (mode)
           || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
      HOST_WIDE_INT high_int, low_int;

      low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
      if (low_int >= 0x8000 - extra)
      high_int = INTVAL (XEXP (x, 1)) - low_int;
      sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
                                         GEN_INT (high_int)), 0);
      return plus_constant (Pmode, sum, low_int);
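/* Worked example of the split above: for (plus (reg) (const_int 0x1fff8)),
   low_int becomes -8 and high_int 0x20000, so the high part is added to
   the register first (the cau/addis mentioned in the function comment)
   and the memory access keeps the small signed offset -8 -- the case of
   "bit 16 being a one".  */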
  else if (GET_CODE (x) == PLUS
           && GET_CODE (XEXP (x, 0)) == REG
           && GET_CODE (XEXP (x, 1)) != CONST_INT
           && GET_MODE_NUNITS (mode) == 1
           && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
               || (/* ??? Assume floating point reg based on mode?  */
                   (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
                   && (mode == DFmode || mode == DDmode)))
           && !avoiding_indexed_address_p (mode))
      return gen_rtx_PLUS (Pmode, XEXP (x, 0),
                           force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
  else if (SPE_VECTOR_MODE (mode)
           || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))

      /* We accept [reg + reg] and [reg + OFFSET].  */

      if (GET_CODE (x) == PLUS)
          rtx op1 = XEXP (x, 0);
          rtx op2 = XEXP (x, 1);

          op1 = force_reg (Pmode, op1);

          if (GET_CODE (op2) != REG
              && (GET_CODE (op2) != CONST_INT
                  || !SPE_CONST_OFFSET_OK (INTVAL (op2))
                  || (GET_MODE_SIZE (mode) > 8
                      && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
            op2 = force_reg (Pmode, op2);

          /* We can't always do [reg + reg] for these, because [reg +
             reg + offset] is not a legitimate addressing mode.  */
          y = gen_rtx_PLUS (Pmode, op1, op2);

          if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
            return force_reg (Pmode, y);

        return force_reg (Pmode, x);

  else if ((TARGET_ELF
            || !MACHO_DYNAMIC_NO_PIC_P
           && GET_CODE (x) != CONST_INT
           && GET_CODE (x) != CONST_DOUBLE
           && GET_MODE_NUNITS (mode) == 1
           && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
               || (/* ??? Assume floating point reg based on mode?  */
                   (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
                   && (mode == DFmode || mode == DDmode))))
      rtx reg = gen_reg_rtx (Pmode);
        emit_insn (gen_elf_high (reg, x));
        emit_insn (gen_macho_high (reg, x));
      return gen_rtx_LO_SUM (Pmode, reg, x);

           && GET_CODE (x) == SYMBOL_REF
           && constant_pool_expr_p (x)
           && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
    return create_TOC_reference (x, NULL_RTX);
/* Debug version of rs6000_legitimize_address.  */
rs6000_debug_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
  ret = rs6000_legitimize_address (x, oldx, mode);
  insns = get_insns ();

           "\nrs6000_legitimize_address: mode %s, old code %s, "
           "new code %s, modified\n",
           GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
           GET_RTX_NAME (GET_CODE (ret)));

      fprintf (stderr, "Original address:\n");
      fprintf (stderr, "oldx:\n");
      fprintf (stderr, "New address:\n");
          fprintf (stderr, "Insns added:\n");
          debug_rtx_list (insns, 20);

           "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
           GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */
static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
      fputs ("\t.long\t", file);
      fputs (DOUBLE_INT_ASM_OP, file);
  output_addr_const (file, x);
  fputs ("@dtprel+0x8000", file);

/* In the name of slightly smaller debug output, and to cater to
   general assembler lossage, recognize various UNSPEC sequences
   and turn them back into a direct symbol reference.  */
rs6000_delegitimize_address (rtx orig_x)
  orig_x = delegitimize_mem_from_attrs (orig_x);

  if (TARGET_CMODEL != CMODEL_SMALL
      && GET_CODE (y) == LO_SUM)

  if (GET_CODE (y) == PLUS
      && GET_MODE (y) == Pmode
      && CONST_INT_P (XEXP (y, 1)))
      offset = XEXP (y, 1);

  if (GET_CODE (y) == UNSPEC
      && XINT (y, 1) == UNSPEC_TOCREL)
#ifdef ENABLE_CHECKING
      if (REG_P (XVECEXP (y, 0, 1))
          && REGNO (XVECEXP (y, 0, 1)) == TOC_REGISTER)
      else if (GET_CODE (XVECEXP (y, 0, 1)) == DEBUG_EXPR)
        /* Weirdness alert.  df_note_compute can replace r2 with a
           debug_expr when this unspec is in a debug_insn.
           Seen in gcc.dg/pr51957-1.c  */

      y = XVECEXP (y, 0, 0);

      /* Do not associate thread-local symbols with the original
         constant pool symbol.  */
          && GET_CODE (y) == SYMBOL_REF
          && CONSTANT_POOL_ADDRESS_P (y)
          && SYMBOL_REF_TLS_MODEL (get_pool_constant (y)) >= TLS_MODEL_REAL)

      if (offset != NULL_RTX)
        y = gen_rtx_PLUS (Pmode, y, offset);
      if (!MEM_P (orig_x))
      return replace_equiv_address_nv (orig_x, y);

      && GET_CODE (orig_x) == LO_SUM
      && GET_CODE (XEXP (orig_x, 1)) == CONST)
      y = XEXP (XEXP (orig_x, 1), 0);
      if (GET_CODE (y) == UNSPEC
          && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
        return XVECEXP (y, 0, 0);
/* Return true if X shouldn't be emitted into the debug info.
   The linker doesn't like .toc section references from
   .debug_* sections, so reject .toc section symbols.  */
rs6000_const_not_ok_for_debug_p (rtx x)
  if (GET_CODE (x) == SYMBOL_REF
      && CONSTANT_POOL_ADDRESS_P (x))
      rtx c = get_pool_constant (x);
      enum machine_mode cmode = get_pool_mode (x);
      if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))

/* Construct the SYMBOL_REF for the tls_get_addr function.  */
static GTY(()) rtx rs6000_tls_symbol;
rs6000_tls_get_addr (void)
  if (!rs6000_tls_symbol)
    rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");

  return rs6000_tls_symbol;

/* Construct the SYMBOL_REF for TLS GOT references.  */
static GTY(()) rtx rs6000_got_symbol;
rs6000_got_sym (void)
  if (!rs6000_got_symbol)
      rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;

  return rs6000_got_symbol;
/* AIX Thread-Local Address support.  */
rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
  rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;

  name = XSTR (addr, 0);
  /* Append TLS CSECT qualifier, unless the symbol already is qualified
     or the symbol will be in TLS private data section.  */
  if (name[strlen (name) - 1] != ']'
      && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
          || bss_initializer_p (SYMBOL_REF_DECL (addr))))
      tlsname = XALLOCAVEC (char, strlen (name) + 4);
      strcpy (tlsname, name);
      strcat (tlsname,
              bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
      tlsaddr = copy_rtx (addr);
      XSTR (tlsaddr, 0) = ggc_strdup (tlsname);

  /* Place addr into TOC constant pool.  */
  sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);

  /* Output the TOC entry and create the MEM referencing the value.  */
  if (constant_pool_expr_p (XEXP (sym, 0))
      && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
      tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
      mem = gen_const_mem (Pmode, tocref);
      set_mem_alias_set (mem, get_TOC_alias_set ());

  /* Use global-dynamic for local-dynamic.  */
  if (model == TLS_MODEL_GLOBAL_DYNAMIC
      || model == TLS_MODEL_LOCAL_DYNAMIC)
      /* Create new TOC reference for @m symbol.  */
      name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
      tlsname = XALLOCAVEC (char, strlen (name) + 1);
      strcpy (tlsname, "*LCM");
      strcat (tlsname, name + 3);
      rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
      SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
      tocref = create_TOC_reference (modaddr, NULL_RTX);
      rtx modmem = gen_const_mem (Pmode, tocref);
      set_mem_alias_set (modmem, get_TOC_alias_set ());

      rtx modreg = gen_reg_rtx (Pmode);
      emit_insn (gen_rtx_SET (VOIDmode, modreg, modmem));

      tmpreg = gen_reg_rtx (Pmode);
      emit_insn (gen_rtx_SET (VOIDmode, tmpreg, mem));

      dest = gen_reg_rtx (Pmode);
        emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
        emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));

  /* Obtain TLS pointer: 32 bit call or 64 bit GPR 13.  */
  else if (TARGET_32BIT)
      tlsreg = gen_reg_rtx (SImode);
      emit_insn (gen_tls_get_tpointer (tlsreg));
    tlsreg = gen_rtx_REG (DImode, 13);

  /* Load the TOC value into temporary register.  */
  tmpreg = gen_reg_rtx (Pmode);
  emit_insn (gen_rtx_SET (VOIDmode, tmpreg, mem));
  set_unique_reg_note (get_last_insn (), REG_EQUAL,
                       gen_rtx_MINUS (Pmode, addr, tlsreg));

  /* Add TOC symbol value to TLS pointer.  */
  dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
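/* In outline, the AIX sequence above: the symbol (with its [TL]/[UL]
   CSECT qualifier) is placed in the TOC; for the global- and
   local-dynamic models a "*LCM..." module-handle TOC entry is loaded as
   well and the tls_get_addr pattern (gen_tls_get_addrsi/di) is emitted;
   otherwise the TOC value is simply added to the thread pointer (obtained
   by a call on 32-bit, or GPR 13 on 64-bit) to form the final address.  */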
/* ADDR contains a thread-local SYMBOL_REF.  Generate code to compute
   this (thread-local) address.  */
rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
    return rs6000_legitimize_tls_address_aix (addr, model);

  dest = gen_reg_rtx (Pmode);
  if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
          tlsreg = gen_rtx_REG (Pmode, 13);
          insn = gen_tls_tprel_64 (dest, tlsreg, addr);
          tlsreg = gen_rtx_REG (Pmode, 2);
          insn = gen_tls_tprel_32 (dest, tlsreg, addr);
  else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
      tmp = gen_reg_rtx (Pmode);
          tlsreg = gen_rtx_REG (Pmode, 13);
          insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
          tlsreg = gen_rtx_REG (Pmode, 2);
          insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
        insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
        insn = gen_tls_tprel_lo_32 (dest, tmp, addr);

      rtx r3, got, tga, tmp1, tmp2, call_insn;

      /* We currently use relocations like @got@tlsgd for tls, which
         means the linker will handle allocation of tls entries, placing
         them in the .got section.  So use a pointer to the .got section,
         not one to secondary TOC sections used by 64-bit -mminimal-toc,
         or to secondary GOT sections used by 32-bit -fPIC.  */
        got = gen_rtx_REG (Pmode, 2);
          got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
          rtx gsym = rs6000_got_sym ();
          got = gen_reg_rtx (Pmode);
            rs6000_emit_move (got, gsym, Pmode);

              tmp1 = gen_reg_rtx (Pmode);
              tmp2 = gen_reg_rtx (Pmode);
              mem = gen_const_mem (Pmode, tmp1);
              lab = gen_label_rtx ();
              emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
              emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
              if (TARGET_LINK_STACK)
                emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
              emit_move_insn (tmp2, mem);
              last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
              set_unique_reg_note (last, REG_EQUAL, gsym);
      if (model == TLS_MODEL_GLOBAL_DYNAMIC)
          tga = rs6000_tls_get_addr ();
          emit_library_call_value (tga, dest, LCT_CONST, Pmode,
                                   1, const0_rtx, Pmode);

          r3 = gen_rtx_REG (Pmode, 3);
          if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
              insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
              insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
          else if (DEFAULT_ABI == ABI_V4)
            insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
          call_insn = last_call_insn ();
          PATTERN (call_insn) = insn;
          if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
            use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
                     pic_offset_table_rtx);
      else if (model == TLS_MODEL_LOCAL_DYNAMIC)
          tga = rs6000_tls_get_addr ();
          tmp1 = gen_reg_rtx (Pmode);
          emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
                                   1, const0_rtx, Pmode);

          r3 = gen_rtx_REG (Pmode, 3);
          if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
              insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
              insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
          else if (DEFAULT_ABI == ABI_V4)
            insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
          call_insn = last_call_insn ();
          PATTERN (call_insn) = insn;
          if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
            use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
                     pic_offset_table_rtx);

          if (rs6000_tls_size == 16)
                insn = gen_tls_dtprel_64 (dest, tmp1, addr);
                insn = gen_tls_dtprel_32 (dest, tmp1, addr);
          else if (rs6000_tls_size == 32)
              tmp2 = gen_reg_rtx (Pmode);
                insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
                insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
                insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
                insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);

              tmp2 = gen_reg_rtx (Pmode);
                insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
                insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
              insn = gen_rtx_SET (Pmode, dest,
                                  gen_rtx_PLUS (Pmode, tmp2, tmp1));

          /* IE, or 64-bit offset LE.  */
          tmp2 = gen_reg_rtx (Pmode);
            insn = gen_tls_got_tprel_64 (tmp2, got, addr);
            insn = gen_tls_got_tprel_32 (tmp2, got, addr);
            insn = gen_tls_tls_64 (dest, tmp2, addr);
            insn = gen_tls_tls_32 (dest, tmp2, addr);
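/* Summary of the ELF cases above: local-exec uses tprel patterns against
   the thread pointer (GPR 13 for 64-bit, the r2-based 32-bit patterns
   otherwise); global-dynamic and local-dynamic emit a tls_gd/tls_ld call
   whose result is then adjusted with dtprel patterns; the remaining case
   uses got_tprel followed by tls_tls, covering initial-exec and the
   64-bit offset local-exec form noted in the "IE, or 64-bit offset LE"
   comment.  */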
/* Return 1 if X contains a thread-local symbol.  */
rs6000_tls_referenced_p (rtx x)
  if (! TARGET_HAVE_TLS)

  return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);

/* Implement TARGET_CANNOT_FORCE_CONST_MEM.  */
rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
  if (GET_CODE (x) == HIGH
      && GET_CODE (XEXP (x, 0)) == UNSPEC)

  /* A TLS symbol in the TOC cannot contain a sum.  */
  if (GET_CODE (x) == CONST
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)

  /* Do not place an ELF TLS symbol in the constant pool.  */
  return TARGET_ELF && rs6000_tls_referenced_p (x);

/* Return 1 if *X is a thread-local symbol.  This is the same as
   rs6000_tls_symbol_ref except for the type of the unused argument.  */
rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
  return RS6000_SYMBOL_REF_TLS_P (*x);

/* Return true iff the given SYMBOL_REF refers to a constant pool entry
   that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
   can be addressed relative to the toc pointer.  */
use_toc_relative_ref (rtx sym)
  return ((constant_pool_expr_p (sym)
           && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
                                               get_pool_mode (sym)))
          || (TARGET_CMODEL == CMODEL_MEDIUM
              && SYMBOL_REF_LOCAL_P (sym)));
/* Our implementation of LEGITIMIZE_RELOAD_ADDRESS.  Returns a value to
   replace the input X, or the original X if no replacement is called for.
   The output parameter *WIN is 1 if the calling macro should goto WIN,

   For RS/6000, we wish to handle large displacements off a base
   register by splitting the addend across an addiu/addis and the mem insn.
   This cuts number of extra insns needed from 3 to 1.

   On Darwin, we use this to generate code for floating point constants.
   A movsf_low is generated so we wind up with 2 instructions rather than 3.
   The Darwin code is inside #if TARGET_MACHO because only then are the
   machopic_* functions defined.  */
rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
                                  int opnum, int type,
                                  int ind_levels ATTRIBUTE_UNUSED, int *win)
  bool reg_offset_p = reg_offset_addressing_ok_p (mode);

  /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
     DFmode/DImode MEM.  */
      && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
          || (mode == DImode && recog_data.operand_mode[0] == V2DImode)))
    reg_offset_p = false;

  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);

  /* Likewise for (lo_sum (high ...) ...) output we have generated.  */
  if (GET_CODE (x) == LO_SUM
      && GET_CODE (XEXP (x, 0)) == HIGH)
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);

  if (DEFAULT_ABI == ABI_DARWIN && flag_pic
      && GET_CODE (x) == LO_SUM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
      && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
      && machopic_operand_p (XEXP (x, 1)))
      /* Result of previous invocation of this function on Darwin
         floating point constant.  */
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
  if (TARGET_CMODEL != CMODEL_SMALL
      && small_toc_ref (x, VOIDmode))
      rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
      x = gen_rtx_LO_SUM (Pmode, hi, x);
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);

  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && !SPE_VECTOR_MODE (mode)
      && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
      && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
        = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)

      /* Reload the high part into a base reg; leave the low part
         in the mem directly.  */
      x = gen_rtx_PLUS (GET_MODE (x),
                        gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
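/* Example of the high/low split just above: val = 0x12344 gives
   low = 0x2344 and high = 0x10000, while val = 0x18000 gives
   low = -0x8000 and high = 0x20000.  In both cases high + low == val,
   the high part can be reloaded into the base register, and the low
   part still fits the signed 16-bit displacement of the mem insn.  */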
  if (GET_CODE (x) == SYMBOL_REF
      && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
      && !SPE_VECTOR_MODE (mode)
      && DEFAULT_ABI == ABI_DARWIN
      && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
      && machopic_symbol_defined_p (x)
      && DEFAULT_ABI == ABI_V4
      /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
         The same goes for DImode without 64-bit gprs and DFmode and DDmode
         without fprs.
         ??? Assume floating point reg based on mode?  This assumption is
         violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
         where reload ends up doing a DFmode load of a constant from
         mem using two gprs.  Unfortunately, at this point reload
         hasn't yet selected regs so poking around in reload data
         won't help and even if we could figure out the regs reliably,
         we'd still want to allow this transformation when the mem is
         naturally aligned.  Since we say the address is good here, we
         can't disable offsets from LO_SUMs in mem_operand_gpr.
         FIXME: Allow offset from lo_sum for other modes too, when
         mem is sufficiently aligned.  */
      && (mode != TImode || !TARGET_VSX_TIMODE)
      && (mode != DImode || TARGET_POWERPC64)
      && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
          || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
          rtx offset = machopic_gen_offset (x);
          x = gen_rtx_LO_SUM (GET_MODE (x),
                              gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
                                            gen_rtx_HIGH (Pmode, offset)), offset);
        x = gen_rtx_LO_SUM (GET_MODE (x),
                            gen_rtx_HIGH (Pmode, x), x);

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);

  /* Reload an offset address wrapped by an AND that represents the
     masking of the lower bits.  Strip the outer AND and let reload
     convert the offset address into an indirect address.  For VSX,
     force reload to create the address with an AND in a separate
     register, because we can't guarantee an altivec register will
     be used.  */
  if (VECTOR_MEM_ALTIVEC_P (mode)
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -16)

      && GET_CODE (x) == SYMBOL_REF
      && use_toc_relative_ref (x))
      x = create_TOC_reference (x, NULL_RTX);
      if (TARGET_CMODEL != CMODEL_SMALL)
        push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                     BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
                     opnum, (enum reload_type) type);
/* Debug version of rs6000_legitimize_reload_address.  */
rs6000_debug_legitimize_reload_address (rtx x, enum machine_mode mode,
                                        int opnum, int type,
                                        int ind_levels, int *win)
  rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,

           "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
           "type = %d, ind_levels = %d, win = %d, original addr:\n",
           GET_MODE_NAME (mode), opnum, type, ind_levels, *win);

    fprintf (stderr, "Same address returned\n");
    fprintf (stderr, "NULL returned\n");
      fprintf (stderr, "New address:\n");
/* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
   that is a valid memory address for an instruction.
   The MODE argument is the machine mode for the MEM expression
   that wants to use this address.

   On the RS/6000, there are four valid addresses: a SYMBOL_REF that
   refers to a constant pool entry of an address (or the sum of it
   plus a constant), a short (16-bit signed) constant plus a register,
   the sum of two registers, or a register indirect, possibly with an
   auto-increment.  For DFmode, DDmode and DImode with a constant plus
   register, we must ensure that both words are addressable or PowerPC64
   with offset word aligned.

   For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
   32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
   because adjacent memory cells are accessed by adding word-sized offsets
   during assembly output.  */
rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
  bool reg_offset_p = reg_offset_addressing_ok_p (mode);

  /* If this is an unaligned stvx/ldvx type address, discard the outer AND.  */
  if (VECTOR_MEM_ALTIVEC_P (mode)
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -16)

  if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
  if (legitimate_indirect_address_p (x, reg_ok_strict))
      && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
      && mode_supports_pre_incdec_p (mode)
      && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
  if (virtual_stack_registers_memory_p (x))
  if (reg_offset_p && legitimate_small_data_p (mode, x))
      && legitimate_constant_pool_address_p (x, mode,
                                             reg_ok_strict || lra_in_progress))
  /* For TImode, if we have load/store quad and TImode in VSX registers, only
     allow register indirect addresses.  This will allow the values to go in
     either GPRs or VSX registers without reloading.  The vector types would
     tend to go into VSX registers, so we allow REG+REG, while TImode seems
     somewhat split, in that some uses are GPR based, and some VSX based.  */
  if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX_TIMODE)
  /* If not REG_OK_STRICT (before reload) let pass any stack offset.  */
      && GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && (XEXP (x, 0) == virtual_stack_vars_rtx
          || XEXP (x, 0) == arg_pointer_rtx)
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
  if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
      && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
          || (mode != DFmode && mode != DDmode)
          || (TARGET_E500_DOUBLE && mode != DDmode))
      && (TARGET_POWERPC64 || mode != DImode)
      && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
      && !avoiding_indexed_address_p (mode)
      && legitimate_indexed_address_p (x, reg_ok_strict))
  if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
      && mode_supports_pre_modify_p (mode)
      && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
      && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
                                              reg_ok_strict, false)
          || (!avoiding_indexed_address_p (mode)
              && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
      && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
  if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
/* Debug version of rs6000_legitimate_address_p.  */
rs6000_debug_legitimate_address_p (enum machine_mode mode, rtx x,
  bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);

           "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
           "strict = %d, reload = %s, code = %s\n",
           ret ? "true" : "false",
           GET_MODE_NAME (mode),
              : (reload_in_progress ? "progress" : "before")),
           GET_RTX_NAME (GET_CODE (x)));

/* Implement TARGET_MODE_DEPENDENT_ADDRESS_P.  */
rs6000_mode_dependent_address_p (const_rtx addr,
                                 addr_space_t as ATTRIBUTE_UNUSED)
  return rs6000_mode_dependent_address_ptr (addr);

/* Go to LABEL if ADDR (a legitimate address expression)
   has an effect that depends on the machine mode it is used for.

   On the RS/6000 this is true of all integral offsets (since AltiVec
   and VSX modes don't allow them) or is a pre-increment or decrement.

   ??? Except that due to conceptual problems in offsettable_address_p
   we can't really report the problems of integral offsets.  So leave
   this assuming that the adjustable offset must be valid for the
   sub-words of a TFmode operand, which is what we had before.  */
rs6000_mode_dependent_address (const_rtx addr)
  switch (GET_CODE (addr))
      /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
         is considered a legitimate address before reload, so there
         are no offset restrictions in that case.  Note that this
         condition is safe in strict mode because any address involving
         virtual_stack_vars_rtx or arg_pointer_rtx would already have
         been rejected as illegitimate.  */
      if (XEXP (addr, 0) != virtual_stack_vars_rtx
          && XEXP (addr, 0) != arg_pointer_rtx
          && GET_CODE (XEXP (addr, 1)) == CONST_INT)
          unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
          return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);

      /* Anything in the constant pool is sufficiently aligned that
         all bytes have the same high part address.  */
      return !legitimate_constant_pool_address_p (addr, QImode, false);

    /* Auto-increment cases are now treated generically in recog.c.  */
      return TARGET_UPDATE;

    /* AND is only allowed in Altivec loads.  */

/* Debug version of rs6000_mode_dependent_address.  */
rs6000_debug_mode_dependent_address (const_rtx addr)
  bool ret = rs6000_mode_dependent_address (addr);

  fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
           ret ? "true" : "false");
/* Implement FIND_BASE_TERM.  */
rs6000_find_base_term (rtx op)
  if (GET_CODE (base) == CONST)
    base = XEXP (base, 0);
  if (GET_CODE (base) == PLUS)
    base = XEXP (base, 0);
  if (GET_CODE (base) == UNSPEC)
    switch (XINT (base, 1))
      case UNSPEC_MACHOPIC_OFFSET:
        /* OP represents SYM [+ OFFSET] - ANCHOR.  SYM is the base term
           for aliasing purposes.  */
        return XVECEXP (base, 0, 0);

/* More elaborate version of recog's offsettable_memref_p predicate
   that works around the ??? note of rs6000_mode_dependent_address.
   In particular it accepts

     (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))

   in 32-bit mode, that the recog predicate rejects.  */
rs6000_offsettable_memref_p (rtx op, enum machine_mode reg_mode)
  /* First mimic offsettable_memref_p.  */
  if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))

  /* offsettable_address_p invokes rs6000_mode_dependent_address, but
     the latter predicate knows nothing about the mode of the memory
     reference and, therefore, assumes that it is the largest supported
     mode (TFmode).  As a consequence, legitimate offsettable memory
     references are rejected.  rs6000_legitimate_offset_address_p contains
     the correct logic for the PLUS case of rs6000_mode_dependent_address,
     at least with a little bit of help here given that we know the
     actual registers used.  */
  worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
                || GET_MODE_SIZE (reg_mode) == 4);
  return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
/* Change register usage conditional on target flags.  */
rs6000_conditional_register_usage (void)
  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_conditional_register_usage called\n");

  /* Set MQ register fixed (already call_used) so that it will not be
     allocated.  */

  /* 64-bit AIX and Linux reserve GPR13 for thread-private data.  */
    fixed_regs[13] = call_used_regs[13]
      = call_really_used_regs[13] = 1;

  /* Conditionally disable FPRs.  */
  if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
    for (i = 32; i < 64; i++)
      fixed_regs[i] = call_used_regs[i]
        = call_really_used_regs[i] = 1;

  /* The TOC register is not killed across calls in a way that is
     visible to the compiler.  */
  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    call_really_used_regs[2] = 0;

  if (DEFAULT_ABI == ABI_V4
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (DEFAULT_ABI == ABI_V4
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (DEFAULT_ABI == ABI_DARWIN
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (TARGET_TOC && TARGET_MINIMAL_TOC)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

      global_regs[SPEFSCR_REGNO] = 1;
      /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
         registers in prologues and epilogues.  We no longer use r14
         for FIXED_SCRATCH, but we're keeping r14 out of the allocation
         pool for link-compatibility with older versions of GCC.  Once
         "old" code has died out, we can return r14 to the allocation
         pool.  */
      fixed_regs[14]
        = call_used_regs[14]
        = call_really_used_regs[14] = 1;

  if (!TARGET_ALTIVEC && !TARGET_VSX)
      for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
        fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
      call_really_used_regs[VRSAVE_REGNO] = 1;

  if (TARGET_ALTIVEC || TARGET_VSX)
    global_regs[VSCR_REGNO] = 1;

  if (TARGET_ALTIVEC_ABI)
      for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
        call_used_regs[i] = call_really_used_regs[i] = 1;

      /* AIX reserves VR20:31 in non-extended ABI mode.  */
        for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
          fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
/* Try to output insns to set TARGET equal to the constant C if it can
   be done in less than N insns.  Do all computations in MODE.
   Returns the place where the output has been placed if it can be
   done and the insns have been emitted.  If it would take more than N
   insns, zero is returned and no insns are emitted.  */
rs6000_emit_set_const (rtx dest, enum machine_mode mode,
                       rtx source, int n ATTRIBUTE_UNUSED)
  rtx result, insn, set;
  HOST_WIDE_INT c0, c1;

          dest = gen_reg_rtx (mode);
        emit_insn (gen_rtx_SET (VOIDmode, dest, source));

      result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);

      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
                              GEN_INT (INTVAL (source)
                                       & (~ (HOST_WIDE_INT) 0xffff))));
      emit_insn (gen_rtx_SET (VOIDmode, dest,
                              gen_rtx_IOR (SImode, copy_rtx (result),
                                           GEN_INT (INTVAL (source) & 0xffff))));

      switch (GET_CODE (source))
          c0 = INTVAL (source);

      result = rs6000_emit_set_long_const (dest, c0, c1);

  insn = get_last_insn ();
  set = single_set (insn);
  if (! CONSTANT_P (SET_SRC (set)))
    set_unique_reg_note (insn, REG_EQUAL, source);
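/* The SImode path above is the classic two-instruction sequence: first
   set the register to the constant with its low 16 bits cleared (lis),
   then IOR in the low 16 bits (ori).  The REG_EQUAL note attached at the
   end lets later passes see the full constant value of the result.  */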
/* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
   fall back to a straight forward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with rs6000_emit_set_const.  */
rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
  if (!TARGET_POWERPC64)
      rtx operand1, operand2;

      operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
      operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
      emit_move_insn (operand1, GEN_INT (c1));
      emit_move_insn (operand2, GEN_INT (c2));

      HOST_WIDE_INT ud1, ud2, ud3, ud4;

      ud2 = (c1 & 0xffff0000) >> 16;
      ud4 = (c2 & 0xffff0000) >> 16;

      if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
          || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
        emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));

      else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
               || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
          emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),

      else if (ud3 == 0 && ud4 == 0)
          gcc_assert (ud2 & 0x8000);
          emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),
          emit_move_insn (copy_rtx (dest),
                          gen_rtx_ZERO_EXTEND (DImode,
                                               gen_lowpart (SImode,

      else if ((ud4 == 0xffff && (ud3 & 0x8000))
               || (ud4 == 0 && ! (ud3 & 0x8000)))
          emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),
          emit_move_insn (copy_rtx (dest),
                          gen_rtx_ASHIFT (DImode, copy_rtx (dest),
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),

          emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),
          emit_move_insn (copy_rtx (dest),
                          gen_rtx_ASHIFT (DImode, copy_rtx (dest),
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),
                                         GEN_INT (ud2 << 16)));
            emit_move_insn (copy_rtx (dest),
                            gen_rtx_IOR (DImode, copy_rtx (dest),
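/* ud1..ud4 above are the four 16-bit halfwords of the 64-bit constant,
   ud1 being the least significant.  The cascade of cases picks among a
   few fixed sequences depending on which halfwords are significant: a
   single sign-extending load when only the low halfwords matter, and
   otherwise a load of the upper halfwords followed by IOR/shift steps
   that splice in the remaining 16-bit pieces.  */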
/* Helper for the following.  Get rid of [r+r] memory refs
   in cases where it won't work (TImode, TFmode, TDmode, PTImode).  */
rs6000_eliminate_indexed_memrefs (rtx operands[2])
  if (reload_in_progress)

  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (XEXP (operands[0], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
                                               GET_MODE (operands[0]), false))
    operands[0]
      = replace_equiv_address (operands[0],
                               copy_addr_to_reg (XEXP (operands[0], 0)));

  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
                                               GET_MODE (operands[1]), false))
    operands[1]
      = replace_equiv_address (operands[1],
                               copy_addr_to_reg (XEXP (operands[1], 0)));
/* Generate a vector of constants to permute MODE for a little-endian
   storage operation by swapping the two halves of a vector.  */
rs6000_const_vec (enum machine_mode mode)
  v = rtvec_alloc (subparts);

  for (i = 0; i < subparts / 2; ++i)
    RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
  for (i = subparts / 2; i < subparts; ++i)
    RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);

/* Generate a permute rtx that represents an lxvd2x, stxvd2x, or xxpermdi
   for a VSX load or store operation.  */
rs6000_gen_le_vsx_permute (rtx source, enum machine_mode mode)
  rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
  return gen_rtx_VEC_SELECT (mode, source, par);

/* Emit a little-endian load from vector memory location SOURCE to VSX
   register DEST in mode MODE.  The load is done with two permuting
   insn's that represent an lxvd2x and xxpermdi.  */
rs6000_emit_le_vsx_load (rtx dest, rtx source, enum machine_mode mode)
  rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
  rtx permute_mem = rs6000_gen_le_vsx_permute (source, mode);
  rtx permute_reg = rs6000_gen_le_vsx_permute (tmp, mode);
  emit_insn (gen_rtx_SET (VOIDmode, tmp, permute_mem));
  emit_insn (gen_rtx_SET (VOIDmode, dest, permute_reg));

/* Emit a little-endian store to vector memory location DEST from VSX
   register SOURCE in mode MODE.  The store is done with two permuting
   insn's that represent an xxpermdi and an stxvd2x.  */
rs6000_emit_le_vsx_store (rtx dest, rtx source, enum machine_mode mode)
  rtx tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
  rtx permute_src = rs6000_gen_le_vsx_permute (source, mode);
  rtx permute_tmp = rs6000_gen_le_vsx_permute (tmp, mode);
  emit_insn (gen_rtx_SET (VOIDmode, tmp, permute_src));
  emit_insn (gen_rtx_SET (VOIDmode, dest, permute_tmp));

/* Emit a sequence representing a little-endian VSX load or store,
   moving data from SOURCE to DEST in mode MODE.  This is done
   separately from rs6000_emit_move to ensure it is called only
   during expand.  LE VSX loads and stores introduced later are
   handled with a split.  The expand-time RTL generation allows
   us to optimize away redundant pairs of register-permutes.  */
rs6000_emit_le_vsx_move (rtx dest, rtx source, enum machine_mode mode)
  gcc_assert (!BYTES_BIG_ENDIAN
              && VECTOR_MEM_VSX_P (mode)
              && !gpr_or_gpr_p (dest, source)
              && (MEM_P (source) ^ MEM_P (dest)));

      gcc_assert (REG_P (dest));
      rs6000_emit_le_vsx_load (dest, source, mode);

      if (!REG_P (source))
        source = force_reg (mode, source);
      rs6000_emit_le_vsx_store (dest, source, mode);
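/* The permute built by rs6000_const_vec selects element i + N/2 for the
   first N/2 lanes and i - N/2 for the rest, i.e. it swaps the two halves
   of the vector ({1,0} for two-element modes, {2,3,0,1} for four, and so
   on).  Applying it once on the memory side and once on the register side
   is how the lxvd2x/stxvd2x plus xxpermdi pair is modelled, and since two
   such half-swaps cancel out, expand can delete redundant permute pairs
   as the comment above describes.  */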
/* Emit a move from SOURCE to DEST in mode MODE.  */
rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
  operands[1] = source;

  if (TARGET_DEBUG_ADDR)
               "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
               "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
               GET_MODE_NAME (mode),
               can_create_pseudo_p ());
      fprintf (stderr, "source:\n");

  /* Sanity checks.  Check that we get CONST_DOUBLE only when we should.  */
  if (GET_CODE (operands[1]) == CONST_DOUBLE
      && ! FLOAT_MODE_P (mode)
      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
      /* FIXME.  This should never happen.  */
      /* Since it seems that it does, do the safe thing and convert
         to a CONST_INT.  */
      operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);

  gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
              || FLOAT_MODE_P (mode)
              || ((CONST_DOUBLE_HIGH (operands[1]) != 0
                   || CONST_DOUBLE_LOW (operands[1]) < 0)
                  && (CONST_DOUBLE_HIGH (operands[1]) != -1
                      || CONST_DOUBLE_LOW (operands[1]) >= 0)));

  /* Check if GCC is setting up a block move that will end up using FP
     registers as temporaries.  We must make sure this is acceptable.  */
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (operands[1]) == MEM
      && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
          || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
      && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
                                            ? 32 : MEM_ALIGN (operands[0])))
            || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
                                               : MEM_ALIGN (operands[1]))))
      && ! MEM_VOLATILE_P (operands[0])
      && ! MEM_VOLATILE_P (operands[1]))
      emit_move_insn (adjust_address (operands[0], SImode, 0),
                      adjust_address (operands[1], SImode, 0));
      emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
                      adjust_address (copy_rtx (operands[1]), SImode, 4));

  if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
      && !gpc_reg_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  /* Recognize the case where operand[1] is a reference to thread-local
     data and load its address to a register.  */
  if (rs6000_tls_referenced_p (operands[1]))
      enum tls_model model;
      rtx tmp = operands[1];

      if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
          addend = XEXP (XEXP (tmp, 0), 1);
          tmp = XEXP (XEXP (tmp, 0), 0);

      gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
      model = SYMBOL_REF_TLS_MODEL (tmp);
      gcc_assert (model != 0);

      tmp = rs6000_legitimize_tls_address (tmp, model);
          tmp = gen_rtx_PLUS (mode, tmp, addend);
          tmp = force_operand (tmp, operands[0]);
  /* Handle the case where reload calls us with an invalid address.  */
  if (reload_in_progress && mode == Pmode
      && (! general_operand (operands[1], mode)
	  || ! nonimmediate_operand (operands[0], mode)))
    goto emit_set;

  /* 128-bit constant floating-point values on Darwin should really be
     loaded as two parts.  */
  if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
      && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
			simplify_gen_subreg (DFmode, operands[1], mode, 0),
			DFmode);
      rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
					     GET_MODE_SIZE (DFmode)),
			simplify_gen_subreg (DFmode, operands[1], mode,
					     GET_MODE_SIZE (DFmode)),
			DFmode);
      return;
    }

  if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
    cfun->machine->sdmode_stack_slot =
      eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
  if (lra_in_progress
      && mode == SDmode
      && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
      && reg_preferred_class (REGNO (operands[0])) == NO_REGS
      && (REG_P (operands[1])
	  || (GET_CODE (operands[1]) == SUBREG
	      && REG_P (SUBREG_REG (operands[1])))))
    {
      int regno = REGNO (GET_CODE (operands[1]) == SUBREG
			 ? SUBREG_REG (operands[1]) : operands[1]);
      enum reg_class cl;

      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  cl = reg_preferred_class (regno);
	  gcc_assert (cl != NO_REGS);
	  regno = ira_class_hard_regs[cl][0];
	}
      if (FP_REGNO_P (regno))
	{
	  if (GET_MODE (operands[0]) != DDmode)
	    operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
	  emit_insn (gen_movsd_store (operands[0], operands[1]));
	}
      else if (INT_REGNO_P (regno))
	emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
      else
	gcc_unreachable ();
      return;
    }

  if (lra_in_progress
      && mode == SDmode
      && (REG_P (operands[0])
	  || (GET_CODE (operands[0]) == SUBREG
	      && REG_P (SUBREG_REG (operands[0]))))
      && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
      && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
    {
      int regno = REGNO (GET_CODE (operands[0]) == SUBREG
			 ? SUBREG_REG (operands[0]) : operands[0]);
      enum reg_class cl;

      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  cl = reg_preferred_class (regno);
	  gcc_assert (cl != NO_REGS);
	  regno = ira_class_hard_regs[cl][0];
	}
      if (FP_REGNO_P (regno))
	{
	  if (GET_MODE (operands[1]) != DDmode)
	    operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
	  emit_insn (gen_movsd_load (operands[0], operands[1]));
	}
      else if (INT_REGNO_P (regno))
	emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
      else
	gcc_unreachable ();
      return;
    }
  if (reload_in_progress
      && mode == SDmode
      && cfun->machine->sdmode_stack_slot != NULL_RTX
      && MEM_P (operands[0])
      && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
      && REG_P (operands[1]))
    {
      if (FP_REGNO_P (REGNO (operands[1])))
	{
	  rtx mem = adjust_address_nv (operands[0], DDmode, 0);
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  emit_insn (gen_movsd_store (mem, operands[1]));
	}
      else if (INT_REGNO_P (REGNO (operands[1])))
	{
	  rtx mem = operands[0];
	  if (BYTES_BIG_ENDIAN)
	    mem = adjust_address_nv (mem, mode, 4);
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  emit_insn (gen_movsd_hardfloat (mem, operands[1]));
	}
      else
	gcc_unreachable ();
      return;
    }

  if (reload_in_progress
      && mode == SDmode
      && REG_P (operands[0])
      && MEM_P (operands[1])
      && cfun->machine->sdmode_stack_slot != NULL_RTX
      && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
    {
      if (FP_REGNO_P (REGNO (operands[0])))
	{
	  rtx mem = adjust_address_nv (operands[1], DDmode, 0);
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  emit_insn (gen_movsd_load (operands[0], mem));
	}
      else if (INT_REGNO_P (REGNO (operands[0])))
	{
	  rtx mem = operands[1];
	  if (BYTES_BIG_ENDIAN)
	    mem = adjust_address_nv (mem, mode, 4);
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  emit_insn (gen_movsd_hardfloat (operands[0], mem));
	}
      else
	gcc_unreachable ();
      return;
    }
  /* FIXME:  In the long term, this switch statement should go away
     and be replaced by a sequence of tests based on things like
     mode == Pmode.  */
  switch (mode)
    {
    case HImode:
    case QImode:
      if (CONSTANT_P (operands[1])
	  && GET_CODE (operands[1]) != CONST_INT)
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case TFmode:
    case TDmode:
      rs6000_eliminate_indexed_memrefs (operands);
      /* fall through */

    case DFmode:
    case DDmode:
    case SFmode:
    case SDmode:
      if (CONSTANT_P (operands[1])
	  && ! easy_fp_constant (operands[1], mode))
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case V16QImode:
    case V8HImode:
    case V4SFmode:
    case V4SImode:
    case V4HImode:
    case V2SFmode:
    case V2SImode:
    case V1DImode:
    case V2DFmode:
    case V2DImode:
      if (CONSTANT_P (operands[1])
	  && !easy_vector_constant (operands[1], mode))
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case SImode:
    case DImode:
      /* Use default pattern for address of ELF small data */
      if (TARGET_ELF
	  && mode == Pmode
	  && DEFAULT_ABI == ABI_V4
	  && (GET_CODE (operands[1]) == SYMBOL_REF
	      || GET_CODE (operands[1]) == CONST)
	  && small_data_operand (operands[1], mode))
	{
	  emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
	  return;
	}

      if (DEFAULT_ABI == ABI_V4
	  && mode == Pmode && mode == SImode
	  && flag_pic == 1 && got_operand (operands[1], mode))
	{
	  emit_insn (gen_movsi_got (operands[0], operands[1]));
	  return;
	}
      if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
	  && TARGET_NO_TOC
	  && ! flag_pic
	  && mode == Pmode
	  && CONSTANT_P (operands[1])
	  && GET_CODE (operands[1]) != HIGH
	  && GET_CODE (operands[1]) != CONST_INT)
	{
	  rtx target = (!can_create_pseudo_p ()
			? operands[0]
			: gen_reg_rtx (mode));

	  /* If this is a function address on -mcall-aixdesc,
	     convert it to the address of the descriptor.  */
	  if (DEFAULT_ABI == ABI_AIX
	      && GET_CODE (operands[1]) == SYMBOL_REF
	      && XSTR (operands[1], 0)[0] == '.')
	    {
	      const char *name = XSTR (operands[1], 0);
	      rtx new_ref;
	      while (*name == '.')
		name++;
	      new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
	      CONSTANT_POOL_ADDRESS_P (new_ref)
		= CONSTANT_POOL_ADDRESS_P (operands[1]);
	      SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
	      SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
	      SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
	      operands[1] = new_ref;
	    }

	  if (DEFAULT_ABI == ABI_DARWIN)
	    {
#if TARGET_MACHO
	      if (MACHO_DYNAMIC_NO_PIC_P)
		{
		  /* Take care of any required data indirection.  */
		  operands[1] = rs6000_machopic_legitimize_pic_address (
				  operands[1], mode, operands[0]);
		  if (operands[0] != operands[1])
		    emit_insn (gen_rtx_SET (VOIDmode,
					    operands[0], operands[1]));
		  return;
		}
#endif
	      emit_insn (gen_macho_high (target, operands[1]));
	      emit_insn (gen_macho_low (operands[0], target, operands[1]));
	      return;
	    }

	  emit_insn (gen_elf_high (target, operands[1]));
	  emit_insn (gen_elf_low (operands[0], target, operands[1]));
	  return;
	}
      /* If this is a SYMBOL_REF that refers to a constant pool entry,
	 and we have put it in the TOC, we just need to make a TOC-relative
	 reference to it.  */
      if (TARGET_TOC
	  && GET_CODE (operands[1]) == SYMBOL_REF
	  && use_toc_relative_ref (operands[1]))
	operands[1] = create_TOC_reference (operands[1], operands[0]);
      else if (mode == Pmode
	       && CONSTANT_P (operands[1])
	       && GET_CODE (operands[1]) != HIGH
	       && ((GET_CODE (operands[1]) != CONST_INT
		    && ! easy_fp_constant (operands[1], mode))
		   || (GET_CODE (operands[1]) == CONST_INT
		       && (num_insns_constant (operands[1], mode)
			   > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
		   || (GET_CODE (operands[0]) == REG
		       && FP_REGNO_P (REGNO (operands[0]))))
	       && !toc_relative_expr_p (operands[1], false)
	       && (TARGET_CMODEL == CMODEL_SMALL
		   || can_create_pseudo_p ()
		   || (REG_P (operands[0])
		       && INT_REG_OK_FOR_BASE_P (operands[0], true))))
	{
#if TARGET_MACHO
	  /* Darwin uses a special PIC legitimizer.  */
	  if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
	    {
	      operands[1] =
		rs6000_machopic_legitimize_pic_address (operands[1], mode,
							operands[0]);
	      if (operands[0] != operands[1])
		emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
	      return;
	    }
#endif
	  /* If we are to limit the number of things we put in the TOC and
	     this is a symbol plus a constant we can add in one insn,
	     just put the symbol in the TOC and add the constant.  Don't do
	     this if reload is in progress.  */
	  if (GET_CODE (operands[1]) == CONST
	      && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
	      && GET_CODE (XEXP (operands[1], 0)) == PLUS
	      && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
	      && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
		  || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
	      && ! side_effects_p (operands[0]))
	    {
	      rtx sym =
		force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
	      rtx other = XEXP (XEXP (operands[1], 0), 1);

	      sym = force_reg (mode, sym);
	      emit_insn (gen_add3_insn (operands[0], sym, other));
	      return;
	    }
	  operands[1] = force_const_mem (mode, operands[1]);

	  if (TARGET_TOC
	      && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
	      && constant_pool_expr_p (XEXP (operands[1], 0))
	      && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
			get_pool_constant (XEXP (operands[1], 0)),
			get_pool_mode (XEXP (operands[1], 0))))
	    {
	      rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
						 operands[0]);
	      operands[1] = gen_const_mem (mode, tocref);
	      set_mem_alias_set (operands[1], get_TOC_alias_set ());
	    }
	}
      break;
    case TImode:
      if (!VECTOR_MEM_VSX_P (TImode))
	rs6000_eliminate_indexed_memrefs (operands);
      break;

    case PTImode:
      rs6000_eliminate_indexed_memrefs (operands);
      break;

    default:
      fatal_insn ("bad move", gen_rtx_SET (VOIDmode, dest, source));
    }
  /* Above, we may have called force_const_mem which may have returned
     an invalid address.  If we can, fix this up; otherwise, reload will
     have to deal with it.  */
  if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
    operands[1] = validize_mem (operands[1]);

 emit_set:
  emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
}
/* Return true if a structure, union or array containing FIELD should be
   accessed using `BLKMODE'.

   For the SPE, simd types are V2SI, and gcc can be tempted to put the
   entire thing in a DI and use subregs to access the internals.
   store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
   back-end.  Because a single GPR can hold a V2SI, but not a DI, the
   best thing to do is set structs to BLKmode and avoid Severe Tire
   Damage.

   On e500 v2, DF and DI modes suffer from the same anomaly.  DF can
   fit into 1, whereas DI still needs two.  */

static bool
rs6000_member_type_forces_blk (const_tree field, enum machine_mode mode)
{
  return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
	  || (TARGET_E500_DOUBLE && mode == DFmode));
}
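/* Example (illustrative only): with -mspe, a member such as

     typedef int v2si __attribute__ ((vector_size (8)));
     struct s { v2si v; };

   keeps the enclosing struct in BLKmode, so the V2SI field is never
   placed in a DImode pseudo and split with subregs.  Likewise, under
   e500 v2 double-float a DFmode member keeps its struct in BLKmode for
   the same reason.  */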
/* Nonzero if we can use a floating-point register to pass this arg.  */
#define USE_FP_FOR_ARG_P(CUM,MODE)		\
  (SCALAR_FLOAT_MODE_P (MODE)			\
   && (CUM)->fregno <= FP_ARG_MAX_REG		\
   && TARGET_HARD_FLOAT && TARGET_FPRS)

/* Nonzero if we can use an AltiVec register to pass this arg.  */
#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED)		\
  (ALTIVEC_OR_VSX_VECTOR_MODE (MODE)			\
   && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG		\
   && TARGET_ALTIVEC_ABI				\
   && (NAMED))
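/* Example (illustrative only): a named "double" argument satisfies
   USE_FP_FOR_ARG_P as long as a parameter FPR (up to FP_ARG_MAX_REG) is
   still free and hard float is enabled, while a named "vector float"
   argument satisfies USE_ALTIVEC_FOR_ARG_P when a parameter vector
   register (up to ALTIVEC_ARG_MAX_REG) is free under the AltiVec ABI.  */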
8488 /* Walk down the type tree of TYPE counting consecutive base elements.
8489 If *MODEP is VOIDmode, then set it to the first valid floating point
8490 or vector type. If a non-floating point or vector type is found, or
8491 if a floating point or vector type that doesn't match a non-VOIDmode
8492 *MODEP is found, then return -1, otherwise return the count in the
8496 rs6000_aggregate_candidate (const_tree type
, enum machine_mode
*modep
)
8498 enum machine_mode mode
;
8501 switch (TREE_CODE (type
))
8504 mode
= TYPE_MODE (type
);
8505 if (!SCALAR_FLOAT_MODE_P (mode
))
8508 if (*modep
== VOIDmode
)
8517 mode
= TYPE_MODE (TREE_TYPE (type
));
8518 if (!SCALAR_FLOAT_MODE_P (mode
))
8521 if (*modep
== VOIDmode
)
8530 if (!TARGET_ALTIVEC_ABI
|| !TARGET_ALTIVEC
)
8533 /* Use V4SImode as representative of all 128-bit vector types. */
8534 size
= int_size_in_bytes (type
);
8544 if (*modep
== VOIDmode
)
8547 /* Vector modes are considered to be opaque: two vectors are
8548 equivalent for the purposes of being homogeneous aggregates
8549 if they are the same size. */
8558 tree index
= TYPE_DOMAIN (type
);
8560 /* Can't handle incomplete types. */
8561 if (!COMPLETE_TYPE_P (type
))
8564 count
= rs6000_aggregate_candidate (TREE_TYPE (type
), modep
);
8567 || !TYPE_MAX_VALUE (index
)
8568 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index
))
8569 || !TYPE_MIN_VALUE (index
)
8570 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index
))
8574 count
*= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index
))
8575 - tree_to_uhwi (TYPE_MIN_VALUE (index
)));
8577 /* There must be no padding. */
8578 if (!tree_fits_uhwi_p (TYPE_SIZE (type
))
8579 || ((HOST_WIDE_INT
) tree_to_uhwi (TYPE_SIZE (type
))
8580 != count
* GET_MODE_BITSIZE (*modep
)))
8592 /* Can't handle incomplete types. */
8593 if (!COMPLETE_TYPE_P (type
))
8596 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
8598 if (TREE_CODE (field
) != FIELD_DECL
)
8601 sub_count
= rs6000_aggregate_candidate (TREE_TYPE (field
), modep
);
8607 /* There must be no padding. */
8608 if (!tree_fits_uhwi_p (TYPE_SIZE (type
))
8609 || ((HOST_WIDE_INT
) tree_to_uhwi (TYPE_SIZE (type
))
8610 != count
* GET_MODE_BITSIZE (*modep
)))
8617 case QUAL_UNION_TYPE
:
8619 /* These aren't very interesting except in a degenerate case. */
8624 /* Can't handle incomplete types. */
8625 if (!COMPLETE_TYPE_P (type
))
8628 for (field
= TYPE_FIELDS (type
); field
; field
= TREE_CHAIN (field
))
8630 if (TREE_CODE (field
) != FIELD_DECL
)
8633 sub_count
= rs6000_aggregate_candidate (TREE_TYPE (field
), modep
);
8636 count
= count
> sub_count
? count
: sub_count
;
8639 /* There must be no padding. */
8640 if (!tree_fits_uhwi_p (TYPE_SIZE (type
))
8641 || ((HOST_WIDE_INT
) tree_to_uhwi (TYPE_SIZE (type
))
8642 != count
* GET_MODE_BITSIZE (*modep
)))
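/* Worked example (illustrative only):

     struct hfa { double x, y, z; };       counts as 3 DFmode elements
     struct hva { vector float a, b; };    counts as 2 V4SImode elements
     struct bad { double x; float y; };    mixed element modes -> -1

   Under the ELFv2 ABI (see below), "hfa" needs 3 FP registers and
   "hva" 2 vector registers, both within the AGGR_ARG_NUM_REG limit,
   so they are treated as homogeneous aggregates.  */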
/* If an argument, whose type is described by TYPE and MODE, is a homogeneous
   float or vector aggregate that shall be passed in FP/vector registers
   according to the ELFv2 ABI, return the homogeneous element mode in
   *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.

   Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE.  */

static bool
rs6000_discover_homogeneous_aggregate (enum machine_mode mode, const_tree type,
				       enum machine_mode *elt_mode,
				       int *n_elts)
{
  /* Note that we do not accept complex types at the top level as
     homogeneous aggregates; these types are handled via the
     targetm.calls.split_complex_arg mechanism.  Complex types
     can be elements of homogeneous aggregates, however.  */
  if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
    {
      enum machine_mode field_mode = VOIDmode;
      int field_count = rs6000_aggregate_candidate (type, &field_mode);

      if (field_count > 0)
	{
	  int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
			(GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);

	  /* The ELFv2 ABI allows homogeneous aggregates to occupy
	     up to AGGR_ARG_NUM_REG registers.  */
	  if (field_count * n_regs <= AGGR_ARG_NUM_REG)
	    {
	      *elt_mode = field_mode;
	      *n_elts = field_count;
	      return true;
	    }
	}
    }

  *elt_mode = mode;
  *n_elts = 1;
  return false;
}
8701 /* Return a nonzero value to say to return the function value in
8702 memory, just as large structures are always returned. TYPE will be
8703 the data type of the value, and FNTYPE will be the type of the
8704 function doing the returning, or @code{NULL} for libcalls.
8706 The AIX ABI for the RS/6000 specifies that all structures are
8707 returned in memory. The Darwin ABI does the same.
8709 For the Darwin 64 Bit ABI, a function result can be returned in
8710 registers or in memory, depending on the size of the return data
8711 type. If it is returned in registers, the value occupies the same
8712 registers as it would if it were the first and only function
8713 argument. Otherwise, the function places its result in memory at
8714 the location pointed to by GPR3.
8716 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
8717 but a draft put them in memory, and GCC used to implement the draft
8718 instead of the final standard. Therefore, aix_struct_return
8719 controls this instead of DEFAULT_ABI; V.4 targets needing backward
8720 compatibility can change DRAFT_V4_STRUCT_RET to override the
8721 default, and -m switches get the final word. See
8722 rs6000_option_override_internal for more details.
8724 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
8725 long double support is enabled. These values are returned in memory.
8727 int_size_in_bytes returns -1 for variable size objects, which go in
8728 memory always. The cast to unsigned makes -1 > 8. */
8731 rs6000_return_in_memory (const_tree type
, const_tree fntype ATTRIBUTE_UNUSED
)
8733 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
8735 && rs6000_darwin64_abi
8736 && TREE_CODE (type
) == RECORD_TYPE
8737 && int_size_in_bytes (type
) > 0)
8739 CUMULATIVE_ARGS valcum
;
8743 valcum
.fregno
= FP_ARG_MIN_REG
;
8744 valcum
.vregno
= ALTIVEC_ARG_MIN_REG
;
8745 /* Do a trial code generation as if this were going to be passed
8746 as an argument; if any part goes in memory, we return NULL. */
8747 valret
= rs6000_darwin64_record_arg (&valcum
, type
, true, true);
8750 /* Otherwise fall through to more conventional ABI rules. */
8753 /* The ELFv2 ABI returns homogeneous VFP aggregates in registers */
8754 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type
), type
,
8758 /* The ELFv2 ABI returns aggregates up to 16B in registers */
8759 if (DEFAULT_ABI
== ABI_ELFv2
&& AGGREGATE_TYPE_P (type
)
8760 && (unsigned HOST_WIDE_INT
) int_size_in_bytes (type
) <= 16)
8763 if (AGGREGATE_TYPE_P (type
)
8764 && (aix_struct_return
8765 || (unsigned HOST_WIDE_INT
) int_size_in_bytes (type
) > 8))
8768 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
8769 modes only exist for GCC vector types if -maltivec. */
8770 if (TARGET_32BIT
&& !TARGET_ALTIVEC_ABI
8771 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type
)))
8774 /* Return synthetic vectors in memory. */
8775 if (TREE_CODE (type
) == VECTOR_TYPE
8776 && int_size_in_bytes (type
) > (TARGET_ALTIVEC_ABI
? 16 : 8))
8778 static bool warned_for_return_big_vectors
= false;
8779 if (!warned_for_return_big_vectors
)
8781 warning (0, "GCC vector returned by reference: "
8782 "non-standard ABI extension with no compatibility guarantee");
8783 warned_for_return_big_vectors
= true;
  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
    return true;

  return false;
}
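/* Examples (illustrative only): a 12-byte struct is returned in memory
   under the AIX/Darwin rules (aggregates larger than 8 bytes), but in
   GPRs under ELFv2 (aggregates up to 16 bytes); a homogeneous aggregate
   of doubles is returned in FPRs under ELFv2 up to the register limit,
   regardless of its byte size.  */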
/* Specify whether values returned in registers should be at the most
   significant end of a register.  We want aggregates returned by
   value to match the way aggregates are passed to functions.  */

static bool
rs6000_return_in_msb (const_tree valtype)
{
  return (DEFAULT_ABI == ABI_ELFv2
	  && BYTES_BIG_ENDIAN
	  && AGGREGATE_TYPE_P (valtype)
	  && FUNCTION_ARG_PADDING (TYPE_MODE (valtype), valtype) == upward);
}
8807 #ifdef HAVE_AS_GNU_ATTRIBUTE
8808 /* Return TRUE if a call to function FNDECL may be one that
8809 potentially affects the function calling ABI of the object file. */
8812 call_ABI_of_interest (tree fndecl
)
8814 if (cgraph_state
== CGRAPH_STATE_EXPANSION
)
8816 struct cgraph_node
*c_node
;
8818 /* Libcalls are always interesting. */
8819 if (fndecl
== NULL_TREE
)
8822 /* Any call to an external function is interesting. */
8823 if (DECL_EXTERNAL (fndecl
))
8826 /* Interesting functions that we are emitting in this object file. */
8827 c_node
= cgraph_get_node (fndecl
);
8828 c_node
= cgraph_function_or_thunk_node (c_node
, NULL
);
8829 return !cgraph_only_called_directly_p (c_node
);
8835 /* Initialize a variable CUM of type CUMULATIVE_ARGS
8836 for a call to a function whose data type is FNTYPE.
8837 For a library call, FNTYPE is 0 and RETURN_MODE the return value mode.
8839 For incoming args we set the number of arguments in the prototype large
8840 so we never return a PARALLEL. */
8843 init_cumulative_args (CUMULATIVE_ARGS
*cum
, tree fntype
,
8844 rtx libname ATTRIBUTE_UNUSED
, int incoming
,
8845 int libcall
, int n_named_args
,
8846 tree fndecl ATTRIBUTE_UNUSED
,
8847 enum machine_mode return_mode ATTRIBUTE_UNUSED
)
8849 static CUMULATIVE_ARGS zero_cumulative
;
8851 *cum
= zero_cumulative
;
8853 cum
->fregno
= FP_ARG_MIN_REG
;
8854 cum
->vregno
= ALTIVEC_ARG_MIN_REG
;
8855 cum
->prototype
= (fntype
&& prototype_p (fntype
));
8856 cum
->call_cookie
= ((DEFAULT_ABI
== ABI_V4
&& libcall
)
8857 ? CALL_LIBCALL
: CALL_NORMAL
);
8858 cum
->sysv_gregno
= GP_ARG_MIN_REG
;
8859 cum
->stdarg
= stdarg_p (fntype
);
8861 cum
->nargs_prototype
= 0;
8862 if (incoming
|| cum
->prototype
)
8863 cum
->nargs_prototype
= n_named_args
;
8865 /* Check for a longcall attribute. */
8866 if ((!fntype
&& rs6000_default_long_calls
)
8868 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype
))
8869 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype
))))
8870 cum
->call_cookie
|= CALL_LONG
;
8872 if (TARGET_DEBUG_ARG
)
8874 fprintf (stderr
, "\ninit_cumulative_args:");
8877 tree ret_type
= TREE_TYPE (fntype
);
8878 fprintf (stderr
, " ret code = %s,",
8879 get_tree_code_name (TREE_CODE (ret_type
)));
8882 if (cum
->call_cookie
& CALL_LONG
)
8883 fprintf (stderr
, " longcall,");
8885 fprintf (stderr
, " proto = %d, nargs = %d\n",
8886 cum
->prototype
, cum
->nargs_prototype
);
8889 #ifdef HAVE_AS_GNU_ATTRIBUTE
8890 if (DEFAULT_ABI
== ABI_V4
)
8892 cum
->escapes
= call_ABI_of_interest (fndecl
);
8899 return_type
= TREE_TYPE (fntype
);
8900 return_mode
= TYPE_MODE (return_type
);
8903 return_type
= lang_hooks
.types
.type_for_mode (return_mode
, 0);
8905 if (return_type
!= NULL
)
8907 if (TREE_CODE (return_type
) == RECORD_TYPE
8908 && TYPE_TRANSPARENT_AGGR (return_type
))
8910 return_type
= TREE_TYPE (first_field (return_type
));
8911 return_mode
= TYPE_MODE (return_type
);
8913 if (AGGREGATE_TYPE_P (return_type
)
8914 && ((unsigned HOST_WIDE_INT
) int_size_in_bytes (return_type
)
8916 rs6000_returns_struct
= true;
8918 if (SCALAR_FLOAT_MODE_P (return_mode
))
8919 rs6000_passes_float
= true;
8920 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode
)
8921 || SPE_VECTOR_MODE (return_mode
))
8922 rs6000_passes_vector
= true;
8929 && TARGET_ALTIVEC_ABI
8930 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype
))))
8932 error ("cannot return value in vector register because"
8933 " altivec instructions are disabled, use -maltivec"
8938 /* Return true if TYPE must be passed on the stack and not in registers. */
8941 rs6000_must_pass_in_stack (enum machine_mode mode
, const_tree type
)
8943 if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
|| TARGET_64BIT
)
8944 return must_pass_in_stack_var_size (mode
, type
);
8946 return must_pass_in_stack_var_size_or_pad (mode
, type
);
8949 /* If defined, a C expression which determines whether, and in which
8950 direction, to pad out an argument with extra space. The value
8951 should be of type `enum direction': either `upward' to pad above
8952 the argument, `downward' to pad below, or `none' to inhibit
8955 For the AIX ABI structs are always stored left shifted in their
8959 function_arg_padding (enum machine_mode mode
, const_tree type
)
8961 #ifndef AGGREGATE_PADDING_FIXED
8962 #define AGGREGATE_PADDING_FIXED 0
8964 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
8965 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
8968 if (!AGGREGATE_PADDING_FIXED
)
8970 /* GCC used to pass structures of the same size as integer types as
8971 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
8972 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
8973 passed padded downward, except that -mstrict-align further
8974 muddied the water in that multi-component structures of 2 and 4
8975 bytes in size were passed padded upward.
8977 The following arranges for best compatibility with previous
8978 versions of gcc, but removes the -mstrict-align dependency. */
8979 if (BYTES_BIG_ENDIAN
)
8981 HOST_WIDE_INT size
= 0;
8983 if (mode
== BLKmode
)
8985 if (type
&& TREE_CODE (TYPE_SIZE (type
)) == INTEGER_CST
)
8986 size
= int_size_in_bytes (type
);
8989 size
= GET_MODE_SIZE (mode
);
8991 if (size
== 1 || size
== 2 || size
== 4)
8997 if (AGGREGATES_PAD_UPWARD_ALWAYS
)
8999 if (type
!= 0 && AGGREGATE_TYPE_P (type
))
9003 /* Fall back to the default. */
9004 return DEFAULT_FUNCTION_ARG_PADDING (mode
, type
);
9007 /* If defined, a C expression that gives the alignment boundary, in bits,
9008 of an argument with the specified mode and type. If it is not defined,
9009 PARM_BOUNDARY is used for all arguments.
9011 V.4 wants long longs and doubles to be double word aligned. Just
9012 testing the mode size is a boneheaded way to do this as it means
9013 that other types such as complex int are also double word aligned.
9014 However, we're stuck with this because changing the ABI might break
9015 existing library interfaces.
9017 Doubleword align SPE vectors.
9018 Quadword align Altivec/VSX vectors.
9019 Quadword align large synthetic vector types. */
9022 rs6000_function_arg_boundary (enum machine_mode mode
, const_tree type
)
9024 enum machine_mode elt_mode
;
9027 rs6000_discover_homogeneous_aggregate (mode
, type
, &elt_mode
, &n_elts
);
9029 if (DEFAULT_ABI
== ABI_V4
9030 && (GET_MODE_SIZE (mode
) == 8
9031 || (TARGET_HARD_FLOAT
9033 && (mode
== TFmode
|| mode
== TDmode
))))
9035 else if (SPE_VECTOR_MODE (mode
)
9036 || (type
&& TREE_CODE (type
) == VECTOR_TYPE
9037 && int_size_in_bytes (type
) >= 8
9038 && int_size_in_bytes (type
) < 16))
9040 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode
)
9041 || (type
&& TREE_CODE (type
) == VECTOR_TYPE
9042 && int_size_in_bytes (type
) >= 16))
9044 else if (((TARGET_MACHO
&& rs6000_darwin64_abi
)
9045 || DEFAULT_ABI
== ABI_ELFv2
9046 || (DEFAULT_ABI
== ABI_AIX
&& !rs6000_compat_align_parm
))
9048 && type
&& TYPE_ALIGN (type
) > 64)
9051 return PARM_BOUNDARY
;
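/* Examples (illustrative only): under the V.4 ABI a "long long" or
   "double" argument is doubleword (64-bit) aligned; AltiVec/VSX vector
   arguments and 16-byte synthetic vectors are quadword (128-bit)
   aligned; anything else falls back to PARM_BOUNDARY.  */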
/* The offset in words to the start of the parameter save area.  */

static unsigned int
rs6000_parm_offset (void)
{
  return (DEFAULT_ABI == ABI_V4 ? 2
	  : DEFAULT_ABI == ABI_ELFv2 ? 4
	  : 6);
}
/* For a function parm of MODE and TYPE, return the starting word in
   the parameter area.  NWORDS of the parameter area are already used.  */

static unsigned int
rs6000_parm_start (enum machine_mode mode, const_tree type,
		   unsigned int nwords)
{
  unsigned int align;

  align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
  return nwords + (-(rs6000_parm_offset () + nwords) & align);
}
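/* Worked example (illustrative only): with a 6-word parameter-area
   offset (the non-V4, non-ELFv2 case above) and a quadword-aligned
   argument on a 32-bit target (align == 128/32 - 1 == 3), if nwords == 3
   words are already used the argument starts at word

       3 + (-(6 + 3) & 3) == 3 + 3 == 6

   i.e. it is padded up to the next 16-byte boundary of the save area.  */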
/* Compute the size (in words) of a function argument.  */

static unsigned long
rs6000_arg_size (enum machine_mode mode, const_tree type)
{
  unsigned long size;

  if (mode != BLKmode)
    size = GET_MODE_SIZE (mode);
  else
    size = int_size_in_bytes (type);

  if (TARGET_32BIT)
    return (size + 3) >> 2;
  else
    return (size + 7) >> 3;
}
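/* Worked example (illustrative only): a 10-byte BLKmode struct argument
   occupies (10 + 3) >> 2 == 3 words on a 32-bit target and
   (10 + 7) >> 3 == 2 doublewords on a 64-bit target.  */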
9095 /* Use this to flush pending int fields. */
9098 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS
*cum
,
9099 HOST_WIDE_INT bitpos
, int final
)
9101 unsigned int startbit
, endbit
;
9102 int intregs
, intoffset
;
9103 enum machine_mode mode
;
9105 /* Handle the situations where a float is taking up the first half
9106 of the GPR, and the other half is empty (typically due to
9107 alignment restrictions). We can detect this by a 8-byte-aligned
9108 int field, or by seeing that this is the final flush for this
9109 argument. Count the word and continue on. */
9110 if (cum
->floats_in_gpr
== 1
9111 && (cum
->intoffset
% 64 == 0
9112 || (cum
->intoffset
== -1 && final
)))
9115 cum
->floats_in_gpr
= 0;
9118 if (cum
->intoffset
== -1)
9121 intoffset
= cum
->intoffset
;
9122 cum
->intoffset
= -1;
9123 cum
->floats_in_gpr
= 0;
9125 if (intoffset
% BITS_PER_WORD
!= 0)
9127 mode
= mode_for_size (BITS_PER_WORD
- intoffset
% BITS_PER_WORD
,
9129 if (mode
== BLKmode
)
9131 /* We couldn't find an appropriate mode, which happens,
9132 e.g., in packed structs when there are 3 bytes to load.
9133 Back intoffset back to the beginning of the word in this
9135 intoffset
= intoffset
& -BITS_PER_WORD
;
9139 startbit
= intoffset
& -BITS_PER_WORD
;
9140 endbit
= (bitpos
+ BITS_PER_WORD
- 1) & -BITS_PER_WORD
;
9141 intregs
= (endbit
- startbit
) / BITS_PER_WORD
;
9142 cum
->words
+= intregs
;
9143 /* words should be unsigned. */
9144 if ((unsigned)cum
->words
< (endbit
/BITS_PER_WORD
))
9146 int pad
= (endbit
/BITS_PER_WORD
) - cum
->words
;
9151 /* The darwin64 ABI calls for us to recurse down through structs,
9152 looking for elements passed in registers. Unfortunately, we have
9153 to track int register count here also because of misalignments
9154 in powerpc alignment mode. */
9157 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS
*cum
,
9159 HOST_WIDE_INT startbitpos
)
9163 for (f
= TYPE_FIELDS (type
); f
; f
= DECL_CHAIN (f
))
9164 if (TREE_CODE (f
) == FIELD_DECL
)
9166 HOST_WIDE_INT bitpos
= startbitpos
;
9167 tree ftype
= TREE_TYPE (f
);
9168 enum machine_mode mode
;
9169 if (ftype
== error_mark_node
)
9171 mode
= TYPE_MODE (ftype
);
9173 if (DECL_SIZE (f
) != 0
9174 && tree_fits_uhwi_p (bit_position (f
)))
9175 bitpos
+= int_bit_position (f
);
9177 /* ??? FIXME: else assume zero offset. */
9179 if (TREE_CODE (ftype
) == RECORD_TYPE
)
9180 rs6000_darwin64_record_arg_advance_recurse (cum
, ftype
, bitpos
);
9181 else if (USE_FP_FOR_ARG_P (cum
, mode
))
9183 unsigned n_fpregs
= (GET_MODE_SIZE (mode
) + 7) >> 3;
9184 rs6000_darwin64_record_arg_advance_flush (cum
, bitpos
, 0);
9185 cum
->fregno
+= n_fpregs
;
9186 /* Single-precision floats present a special problem for
9187 us, because they are smaller than an 8-byte GPR, and so
9188 the structure-packing rules combined with the standard
9189 varargs behavior mean that we want to pack float/float
9190 and float/int combinations into a single register's
9191 space. This is complicated by the arg advance flushing,
9192 which works on arbitrarily large groups of int-type
9196 if (cum
->floats_in_gpr
== 1)
9198 /* Two floats in a word; count the word and reset
9201 cum
->floats_in_gpr
= 0;
9203 else if (bitpos
% 64 == 0)
9205 /* A float at the beginning of an 8-byte word;
9206 count it and put off adjusting cum->words until
9207 we see if a arg advance flush is going to do it
9209 cum
->floats_in_gpr
++;
9213 /* The float is at the end of a word, preceded
9214 by integer fields, so the arg advance flush
9215 just above has already set cum->words and
9216 everything is taken care of. */
9220 cum
->words
+= n_fpregs
;
9222 else if (USE_ALTIVEC_FOR_ARG_P (cum
, mode
, 1))
9224 rs6000_darwin64_record_arg_advance_flush (cum
, bitpos
, 0);
9228 else if (cum
->intoffset
== -1)
9229 cum
->intoffset
= bitpos
;
/* Check for an item that needs to be considered specially under the darwin 64
   bit ABI.  These are record types where the mode is BLK or the structure is
   8 bytes in size.  */
static int
rs6000_darwin64_struct_check_p (enum machine_mode mode, const_tree type)
{
  return rs6000_darwin64_abi
	 && ((mode == BLKmode
	      && TREE_CODE (type) == RECORD_TYPE
	      && int_size_in_bytes (type) > 0)
	     || (type && TREE_CODE (type) == RECORD_TYPE
		 && int_size_in_bytes (type) == 8)) ? 1 : 0;
}
9247 /* Update the data in CUM to advance over an argument
9248 of mode MODE and data type TYPE.
9249 (TYPE is null for libcalls where that information may not be available.)
9251 Note that for args passed by reference, function_arg will be called
9252 with MODE and TYPE set to that of the pointer to the arg, not the arg
9256 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS
*cum
, enum machine_mode mode
,
9257 const_tree type
, bool named
, int depth
)
9259 enum machine_mode elt_mode
;
9262 rs6000_discover_homogeneous_aggregate (mode
, type
, &elt_mode
, &n_elts
);
9264 /* Only tick off an argument if we're not recursing. */
9266 cum
->nargs_prototype
--;
9268 #ifdef HAVE_AS_GNU_ATTRIBUTE
9269 if (DEFAULT_ABI
== ABI_V4
9272 if (SCALAR_FLOAT_MODE_P (mode
))
9273 rs6000_passes_float
= true;
9274 else if (named
&& ALTIVEC_OR_VSX_VECTOR_MODE (mode
))
9275 rs6000_passes_vector
= true;
9276 else if (SPE_VECTOR_MODE (mode
)
9278 && cum
->sysv_gregno
<= GP_ARG_MAX_REG
)
9279 rs6000_passes_vector
= true;
9283 if (TARGET_ALTIVEC_ABI
9284 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode
)
9285 || (type
&& TREE_CODE (type
) == VECTOR_TYPE
9286 && int_size_in_bytes (type
) == 16)))
9290 if (USE_ALTIVEC_FOR_ARG_P (cum
, elt_mode
, named
))
9292 cum
->vregno
+= n_elts
;
9294 if (!TARGET_ALTIVEC
)
9295 error ("cannot pass argument in vector register because"
9296 " altivec instructions are disabled, use -maltivec"
9299 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
9300 even if it is going to be passed in a vector register.
9301 Darwin does the same for variable-argument functions. */
9302 if (((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
9304 || (cum
->stdarg
&& DEFAULT_ABI
!= ABI_V4
))
9314 /* Vector parameters must be 16-byte aligned. In 32-bit
9315 mode this means we need to take into account the offset
9316 to the parameter save area. In 64-bit mode, they just
9317 have to start on an even word, since the parameter save
9318 area is 16-byte aligned. */
9320 align
= -(rs6000_parm_offset () + cum
->words
) & 3;
9322 align
= cum
->words
& 1;
9323 cum
->words
+= align
+ rs6000_arg_size (mode
, type
);
9325 if (TARGET_DEBUG_ARG
)
9327 fprintf (stderr
, "function_adv: words = %2d, align=%d, ",
9329 fprintf (stderr
, "nargs = %4d, proto = %d, mode = %4s\n",
9330 cum
->nargs_prototype
, cum
->prototype
,
9331 GET_MODE_NAME (mode
));
9335 else if (TARGET_SPE_ABI
&& TARGET_SPE
&& SPE_VECTOR_MODE (mode
)
9337 && cum
->sysv_gregno
<= GP_ARG_MAX_REG
)
9340 else if (TARGET_MACHO
&& rs6000_darwin64_struct_check_p (mode
, type
))
9342 int size
= int_size_in_bytes (type
);
9343 /* Variable sized types have size == -1 and are
9344 treated as if consisting entirely of ints.
9345 Pad to 16 byte boundary if needed. */
9346 if (TYPE_ALIGN (type
) >= 2 * BITS_PER_WORD
9347 && (cum
->words
% 2) != 0)
9349 /* For varargs, we can just go up by the size of the struct. */
9351 cum
->words
+= (size
+ 7) / 8;
9354 /* It is tempting to say int register count just goes up by
9355 sizeof(type)/8, but this is wrong in a case such as
9356 { int; double; int; } [powerpc alignment]. We have to
9357 grovel through the fields for these too. */
9359 cum
->floats_in_gpr
= 0;
9360 rs6000_darwin64_record_arg_advance_recurse (cum
, type
, 0);
9361 rs6000_darwin64_record_arg_advance_flush (cum
,
9362 size
* BITS_PER_UNIT
, 1);
9364 if (TARGET_DEBUG_ARG
)
9366 fprintf (stderr
, "function_adv: words = %2d, align=%d, size=%d",
9367 cum
->words
, TYPE_ALIGN (type
), size
);
9369 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
9370 cum
->nargs_prototype
, cum
->prototype
,
9371 GET_MODE_NAME (mode
));
9374 else if (DEFAULT_ABI
== ABI_V4
)
9376 if (TARGET_HARD_FLOAT
&& TARGET_FPRS
9377 && ((TARGET_SINGLE_FLOAT
&& mode
== SFmode
)
9378 || (TARGET_DOUBLE_FLOAT
&& mode
== DFmode
)
9379 || (mode
== TFmode
&& !TARGET_IEEEQUAD
)
9380 || mode
== SDmode
|| mode
== DDmode
|| mode
== TDmode
))
9382 /* _Decimal128 must use an even/odd register pair. This assumes
9383 that the register number is odd when fregno is odd. */
9384 if (mode
== TDmode
&& (cum
->fregno
% 2) == 1)
9387 if (cum
->fregno
+ (mode
== TFmode
|| mode
== TDmode
? 1 : 0)
9388 <= FP_ARG_V4_MAX_REG
)
9389 cum
->fregno
+= (GET_MODE_SIZE (mode
) + 7) >> 3;
9392 cum
->fregno
= FP_ARG_V4_MAX_REG
+ 1;
9393 if (mode
== DFmode
|| mode
== TFmode
9394 || mode
== DDmode
|| mode
== TDmode
)
9395 cum
->words
+= cum
->words
& 1;
9396 cum
->words
+= rs6000_arg_size (mode
, type
);
9401 int n_words
= rs6000_arg_size (mode
, type
);
9402 int gregno
= cum
->sysv_gregno
;
9404 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
9405 (r7,r8) or (r9,r10). As does any other 2 word item such
9406 as complex int due to a historical mistake. */
9408 gregno
+= (1 - gregno
) & 1;
9410 /* Multi-reg args are not split between registers and stack. */
9411 if (gregno
+ n_words
- 1 > GP_ARG_MAX_REG
)
9413 /* Long long and SPE vectors are aligned on the stack.
9414 So are other 2 word items such as complex int due to
9415 a historical mistake. */
9417 cum
->words
+= cum
->words
& 1;
9418 cum
->words
+= n_words
;
9421 /* Note: continuing to accumulate gregno past when we've started
9422 spilling to the stack indicates the fact that we've started
9423 spilling to the stack to expand_builtin_saveregs. */
9424 cum
->sysv_gregno
= gregno
+ n_words
;
9427 if (TARGET_DEBUG_ARG
)
9429 fprintf (stderr
, "function_adv: words = %2d, fregno = %2d, ",
9430 cum
->words
, cum
->fregno
);
9431 fprintf (stderr
, "gregno = %2d, nargs = %4d, proto = %d, ",
9432 cum
->sysv_gregno
, cum
->nargs_prototype
, cum
->prototype
);
9433 fprintf (stderr
, "mode = %4s, named = %d\n",
9434 GET_MODE_NAME (mode
), named
);
9439 int n_words
= rs6000_arg_size (mode
, type
);
9440 int start_words
= cum
->words
;
9441 int align_words
= rs6000_parm_start (mode
, type
, start_words
);
9443 cum
->words
= align_words
+ n_words
;
9445 if (SCALAR_FLOAT_MODE_P (elt_mode
)
9446 && TARGET_HARD_FLOAT
&& TARGET_FPRS
)
9448 /* _Decimal128 must be passed in an even/odd float register pair.
9449 This assumes that the register number is odd when fregno is
9451 if (elt_mode
== TDmode
&& (cum
->fregno
% 2) == 1)
9453 cum
->fregno
+= n_elts
* ((GET_MODE_SIZE (elt_mode
) + 7) >> 3);
9456 if (TARGET_DEBUG_ARG
)
9458 fprintf (stderr
, "function_adv: words = %2d, fregno = %2d, ",
9459 cum
->words
, cum
->fregno
);
9460 fprintf (stderr
, "nargs = %4d, proto = %d, mode = %4s, ",
9461 cum
->nargs_prototype
, cum
->prototype
, GET_MODE_NAME (mode
));
9462 fprintf (stderr
, "named = %d, align = %d, depth = %d\n",
9463 named
, align_words
- start_words
, depth
);
9469 rs6000_function_arg_advance (cumulative_args_t cum
, enum machine_mode mode
,
9470 const_tree type
, bool named
)
9472 rs6000_function_arg_advance_1 (get_cumulative_args (cum
), mode
, type
, named
,
9477 spe_build_register_parallel (enum machine_mode mode
, int gregno
)
9484 r1
= gen_rtx_REG (DImode
, gregno
);
9485 r1
= gen_rtx_EXPR_LIST (VOIDmode
, r1
, const0_rtx
);
9486 return gen_rtx_PARALLEL (mode
, gen_rtvec (1, r1
));
9490 r1
= gen_rtx_REG (DImode
, gregno
);
9491 r1
= gen_rtx_EXPR_LIST (VOIDmode
, r1
, const0_rtx
);
9492 r3
= gen_rtx_REG (DImode
, gregno
+ 2);
9493 r3
= gen_rtx_EXPR_LIST (VOIDmode
, r3
, GEN_INT (8));
9494 return gen_rtx_PARALLEL (mode
, gen_rtvec (2, r1
, r3
));
9497 r1
= gen_rtx_REG (DImode
, gregno
);
9498 r1
= gen_rtx_EXPR_LIST (VOIDmode
, r1
, const0_rtx
);
9499 r3
= gen_rtx_REG (DImode
, gregno
+ 2);
9500 r3
= gen_rtx_EXPR_LIST (VOIDmode
, r3
, GEN_INT (8));
9501 r5
= gen_rtx_REG (DImode
, gregno
+ 4);
9502 r5
= gen_rtx_EXPR_LIST (VOIDmode
, r5
, GEN_INT (16));
9503 r7
= gen_rtx_REG (DImode
, gregno
+ 6);
9504 r7
= gen_rtx_EXPR_LIST (VOIDmode
, r7
, GEN_INT (24));
9505 return gen_rtx_PARALLEL (mode
, gen_rtvec (4, r1
, r3
, r5
, r7
));
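/* Example (illustrative only): a DCmode (complex double, 16-byte)
   argument starting at r5 is described by the PARALLEL above as two
   DImode pieces: (reg:DI 5), i.e. the r5/r6 pair, at byte offset 0 and
   (reg:DI 7), the r7/r8 pair, at byte offset 8.  */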
9512 /* Determine where to put a SIMD argument on the SPE. */
9514 rs6000_spe_function_arg (const CUMULATIVE_ARGS
*cum
, enum machine_mode mode
,
9517 int gregno
= cum
->sysv_gregno
;
9519 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
9520 are passed and returned in a pair of GPRs for ABI compatibility. */
9521 if (TARGET_E500_DOUBLE
&& (mode
== DFmode
|| mode
== TFmode
9522 || mode
== DCmode
|| mode
== TCmode
))
9524 int n_words
= rs6000_arg_size (mode
, type
);
9526 /* Doubles go in an odd/even register pair (r5/r6, etc). */
9528 gregno
+= (1 - gregno
) & 1;
9530 /* Multi-reg args are not split between registers and stack. */
9531 if (gregno
+ n_words
- 1 > GP_ARG_MAX_REG
)
9534 return spe_build_register_parallel (mode
, gregno
);
9538 int n_words
= rs6000_arg_size (mode
, type
);
9540 /* SPE vectors are put in odd registers. */
9541 if (n_words
== 2 && (gregno
& 1) == 0)
9544 if (gregno
+ n_words
- 1 <= GP_ARG_MAX_REG
)
9547 enum machine_mode m
= SImode
;
9549 r1
= gen_rtx_REG (m
, gregno
);
9550 r1
= gen_rtx_EXPR_LIST (m
, r1
, const0_rtx
);
9551 r2
= gen_rtx_REG (m
, gregno
+ 1);
9552 r2
= gen_rtx_EXPR_LIST (m
, r2
, GEN_INT (4));
9553 return gen_rtx_PARALLEL (mode
, gen_rtvec (2, r1
, r2
));
9560 if (gregno
<= GP_ARG_MAX_REG
)
9561 return gen_rtx_REG (mode
, gregno
);
9567 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
9568 structure between cum->intoffset and bitpos to integer registers. */
9571 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS
*cum
,
9572 HOST_WIDE_INT bitpos
, rtx rvec
[], int *k
)
9574 enum machine_mode mode
;
9576 unsigned int startbit
, endbit
;
9577 int this_regno
, intregs
, intoffset
;
9580 if (cum
->intoffset
== -1)
9583 intoffset
= cum
->intoffset
;
9584 cum
->intoffset
= -1;
9586 /* If this is the trailing part of a word, try to only load that
9587 much into the register. Otherwise load the whole register. Note
9588 that in the latter case we may pick up unwanted bits. It's not a
9589 problem at the moment but may wish to revisit. */
9591 if (intoffset
% BITS_PER_WORD
!= 0)
9593 mode
= mode_for_size (BITS_PER_WORD
- intoffset
% BITS_PER_WORD
,
9595 if (mode
== BLKmode
)
9597 /* We couldn't find an appropriate mode, which happens,
9598 e.g., in packed structs when there are 3 bytes to load.
9599 Back intoffset back to the beginning of the word in this
9601 intoffset
= intoffset
& -BITS_PER_WORD
;
9608 startbit
= intoffset
& -BITS_PER_WORD
;
9609 endbit
= (bitpos
+ BITS_PER_WORD
- 1) & -BITS_PER_WORD
;
9610 intregs
= (endbit
- startbit
) / BITS_PER_WORD
;
9611 this_regno
= cum
->words
+ intoffset
/ BITS_PER_WORD
;
9613 if (intregs
> 0 && intregs
> GP_ARG_NUM_REG
- this_regno
)
9616 intregs
= MIN (intregs
, GP_ARG_NUM_REG
- this_regno
);
9620 intoffset
/= BITS_PER_UNIT
;
9623 regno
= GP_ARG_MIN_REG
+ this_regno
;
9624 reg
= gen_rtx_REG (mode
, regno
);
9626 gen_rtx_EXPR_LIST (VOIDmode
, reg
, GEN_INT (intoffset
));
9629 intoffset
= (intoffset
| (UNITS_PER_WORD
-1)) + 1;
9633 while (intregs
> 0);
9636 /* Recursive workhorse for the following. */
9639 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS
*cum
, const_tree type
,
9640 HOST_WIDE_INT startbitpos
, rtx rvec
[],
9645 for (f
= TYPE_FIELDS (type
); f
; f
= DECL_CHAIN (f
))
9646 if (TREE_CODE (f
) == FIELD_DECL
)
9648 HOST_WIDE_INT bitpos
= startbitpos
;
9649 tree ftype
= TREE_TYPE (f
);
9650 enum machine_mode mode
;
9651 if (ftype
== error_mark_node
)
9653 mode
= TYPE_MODE (ftype
);
9655 if (DECL_SIZE (f
) != 0
9656 && tree_fits_uhwi_p (bit_position (f
)))
9657 bitpos
+= int_bit_position (f
);
9659 /* ??? FIXME: else assume zero offset. */
9661 if (TREE_CODE (ftype
) == RECORD_TYPE
)
9662 rs6000_darwin64_record_arg_recurse (cum
, ftype
, bitpos
, rvec
, k
);
9663 else if (cum
->named
&& USE_FP_FOR_ARG_P (cum
, mode
))
9665 unsigned n_fpreg
= (GET_MODE_SIZE (mode
) + 7) >> 3;
9669 case SCmode
: mode
= SFmode
; break;
9670 case DCmode
: mode
= DFmode
; break;
9671 case TCmode
: mode
= TFmode
; break;
9675 rs6000_darwin64_record_arg_flush (cum
, bitpos
, rvec
, k
);
9676 if (cum
->fregno
+ n_fpreg
> FP_ARG_MAX_REG
+ 1)
9678 gcc_assert (cum
->fregno
== FP_ARG_MAX_REG
9679 && (mode
== TFmode
|| mode
== TDmode
));
9680 /* Long double or _Decimal128 split over regs and memory. */
9681 mode
= DECIMAL_FLOAT_MODE_P (mode
) ? DDmode
: DFmode
;
9685 = gen_rtx_EXPR_LIST (VOIDmode
,
9686 gen_rtx_REG (mode
, cum
->fregno
++),
9687 GEN_INT (bitpos
/ BITS_PER_UNIT
));
9688 if (mode
== TFmode
|| mode
== TDmode
)
9691 else if (cum
->named
&& USE_ALTIVEC_FOR_ARG_P (cum
, mode
, 1))
9693 rs6000_darwin64_record_arg_flush (cum
, bitpos
, rvec
, k
);
9695 = gen_rtx_EXPR_LIST (VOIDmode
,
9696 gen_rtx_REG (mode
, cum
->vregno
++),
9697 GEN_INT (bitpos
/ BITS_PER_UNIT
));
9699 else if (cum
->intoffset
== -1)
9700 cum
->intoffset
= bitpos
;
9704 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
9705 the register(s) to be used for each field and subfield of a struct
9706 being passed by value, along with the offset of where the
9707 register's value may be found in the block. FP fields go in FP
9708 register, vector fields go in vector registers, and everything
9709 else goes in int registers, packed as in memory.
9711 This code is also used for function return values. RETVAL indicates
9712 whether this is the case.
9714 Much of this is taken from the SPARC V9 port, which has a similar
9715 calling convention. */
9718 rs6000_darwin64_record_arg (CUMULATIVE_ARGS
*orig_cum
, const_tree type
,
9719 bool named
, bool retval
)
9721 rtx rvec
[FIRST_PSEUDO_REGISTER
];
9722 int k
= 1, kbase
= 1;
9723 HOST_WIDE_INT typesize
= int_size_in_bytes (type
);
9724 /* This is a copy; modifications are not visible to our caller. */
9725 CUMULATIVE_ARGS copy_cum
= *orig_cum
;
9726 CUMULATIVE_ARGS
*cum
= ©_cum
;
9728 /* Pad to 16 byte boundary if needed. */
9729 if (!retval
&& TYPE_ALIGN (type
) >= 2 * BITS_PER_WORD
9730 && (cum
->words
% 2) != 0)
9737 /* Put entries into rvec[] for individual FP and vector fields, and
9738 for the chunks of memory that go in int regs. Note we start at
9739 element 1; 0 is reserved for an indication of using memory, and
9740 may or may not be filled in below. */
9741 rs6000_darwin64_record_arg_recurse (cum
, type
, /* startbit pos= */ 0, rvec
, &k
);
9742 rs6000_darwin64_record_arg_flush (cum
, typesize
* BITS_PER_UNIT
, rvec
, &k
);
9744 /* If any part of the struct went on the stack put all of it there.
9745 This hack is because the generic code for
9746 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
9747 parts of the struct are not at the beginning. */
9751 return NULL_RTX
; /* doesn't go in registers at all */
9753 rvec
[0] = gen_rtx_EXPR_LIST (VOIDmode
, NULL_RTX
, const0_rtx
);
9755 if (k
> 1 || cum
->use_stack
)
9756 return gen_rtx_PARALLEL (BLKmode
, gen_rtvec_v (k
- kbase
, &rvec
[kbase
]));
9761 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
9764 rs6000_mixed_function_arg (enum machine_mode mode
, const_tree type
,
9769 rtx rvec
[GP_ARG_NUM_REG
+ 1];
9771 if (align_words
>= GP_ARG_NUM_REG
)
9774 n_units
= rs6000_arg_size (mode
, type
);
9776 /* Optimize the simple case where the arg fits in one gpr, except in
9777 the case of BLKmode due to assign_parms assuming that registers are
9778 BITS_PER_WORD wide. */
9780 || (n_units
== 1 && mode
!= BLKmode
))
9781 return gen_rtx_REG (mode
, GP_ARG_MIN_REG
+ align_words
);
9784 if (align_words
+ n_units
> GP_ARG_NUM_REG
)
9785 /* Not all of the arg fits in gprs. Say that it goes in memory too,
9786 using a magic NULL_RTX component.
9787 This is not strictly correct. Only some of the arg belongs in
9788 memory, not all of it. However, the normal scheme using
9789 function_arg_partial_nregs can result in unusual subregs, eg.
9790 (subreg:SI (reg:DF) 4), which are not handled well. The code to
9791 store the whole arg to memory is often more efficient than code
9792 to store pieces, and we know that space is available in the right
9793 place for the whole arg. */
9794 rvec
[k
++] = gen_rtx_EXPR_LIST (VOIDmode
, NULL_RTX
, const0_rtx
);
9799 rtx r
= gen_rtx_REG (SImode
, GP_ARG_MIN_REG
+ align_words
);
9800 rtx off
= GEN_INT (i
++ * 4);
9801 rvec
[k
++] = gen_rtx_EXPR_LIST (VOIDmode
, r
, off
);
9803 while (++align_words
< GP_ARG_NUM_REG
&& --n_units
!= 0);
9805 return gen_rtx_PARALLEL (mode
, gen_rtvec_v (k
, rvec
));
9808 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
9809 but must also be copied into the parameter save area starting at
9810 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
9811 to the GPRs and/or memory. Return the number of elements used. */
9814 rs6000_psave_function_arg (enum machine_mode mode
, const_tree type
,
9815 int align_words
, rtx
*rvec
)
9819 if (align_words
< GP_ARG_NUM_REG
)
9821 int n_words
= rs6000_arg_size (mode
, type
);
9823 if (align_words
+ n_words
> GP_ARG_NUM_REG
9825 || (TARGET_32BIT
&& TARGET_POWERPC64
))
9827 /* If this is partially on the stack, then we only
9828 include the portion actually in registers here. */
9829 enum machine_mode rmode
= TARGET_32BIT
? SImode
: DImode
;
9832 if (align_words
+ n_words
> GP_ARG_NUM_REG
)
9834 /* Not all of the arg fits in gprs. Say that it goes in memory
9835 too, using a magic NULL_RTX component. Also see comment in
9836 rs6000_mixed_function_arg for why the normal
9837 function_arg_partial_nregs scheme doesn't work in this case. */
9838 rvec
[k
++] = gen_rtx_EXPR_LIST (VOIDmode
, NULL_RTX
, const0_rtx
);
9843 rtx r
= gen_rtx_REG (rmode
, GP_ARG_MIN_REG
+ align_words
);
9844 rtx off
= GEN_INT (i
++ * GET_MODE_SIZE (rmode
));
9845 rvec
[k
++] = gen_rtx_EXPR_LIST (VOIDmode
, r
, off
);
9847 while (++align_words
< GP_ARG_NUM_REG
&& --n_words
!= 0);
9851 /* The whole arg fits in gprs. */
9852 rtx r
= gen_rtx_REG (mode
, GP_ARG_MIN_REG
+ align_words
);
9853 rvec
[k
++] = gen_rtx_EXPR_LIST (VOIDmode
, r
, const0_rtx
);
9858 /* It's entirely in memory. */
9859 rvec
[k
++] = gen_rtx_EXPR_LIST (VOIDmode
, NULL_RTX
, const0_rtx
);
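/* Example (illustrative only): a 24-byte argument whose parameter-area
   copy starts at word 6 on a 64-bit target gets (reg:DI 9) and
   (reg:DI 10) for its first two doublewords plus a leading
   (expr_list (nil) (const_int 0)) element -- the "magic" NULL_RTX
   component that tells the middle end the remainder lives in memory.  */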
/* RVEC is a vector of K components of an argument of mode MODE.
   Construct the final function_arg return value from it.  */

static rtx
rs6000_finish_function_arg (enum machine_mode mode, rtx *rvec, int k)
{
  gcc_assert (k >= 1);

  /* Avoid returning a PARALLEL in the trivial cases.  */
  if (k == 1)
    {
      if (XEXP (rvec[0], 0) == NULL_RTX)
	return NULL_RTX;

      if (GET_MODE (XEXP (rvec[0], 0)) == mode)
	return XEXP (rvec[0], 0);
    }

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}
9886 /* Determine where to put an argument to a function.
9887 Value is zero to push the argument on the stack,
9888 or a hard register in which to store the argument.
9890 MODE is the argument's machine mode.
9891 TYPE is the data type of the argument (as a tree).
9892 This is null for libcalls where that information may
9894 CUM is a variable of type CUMULATIVE_ARGS which gives info about
9895 the preceding args and about the function being called. It is
9896 not modified in this routine.
9897 NAMED is nonzero if this argument is a named parameter
9898 (otherwise it is an extra parameter matching an ellipsis).
9900 On RS/6000 the first eight words of non-FP are normally in registers
9901 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
9902 Under V.4, the first 8 FP args are in registers.
9904 If this is floating-point and no prototype is specified, we use
9905 both an FP and integer register (or possibly FP reg and stack). Library
9906 functions (when CALL_LIBCALL is set) always have the proper types for args,
9907 so we can pass the FP value just in one register. emit_library_function
9908 doesn't support PARALLEL anyway.
9910 Note that for args passed by reference, function_arg will be called
9911 with MODE and TYPE set to that of the pointer to the arg, not the arg
9915 rs6000_function_arg (cumulative_args_t cum_v
, enum machine_mode mode
,
9916 const_tree type
, bool named
)
9918 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
9919 enum rs6000_abi abi
= DEFAULT_ABI
;
9920 enum machine_mode elt_mode
;
9923 /* Return a marker to indicate whether CR1 needs to set or clear the
9924 bit that V.4 uses to say fp args were passed in registers.
9925 Assume that we don't need the marker for software floating point,
9926 or compiler generated library calls. */
9927 if (mode
== VOIDmode
)
9930 && (cum
->call_cookie
& CALL_LIBCALL
) == 0
9932 || (cum
->nargs_prototype
< 0
9933 && (cum
->prototype
|| TARGET_NO_PROTOTYPE
))))
9935 /* For the SPE, we need to crxor CR6 always. */
9937 return GEN_INT (cum
->call_cookie
| CALL_V4_SET_FP_ARGS
);
9938 else if (TARGET_HARD_FLOAT
&& TARGET_FPRS
)
9939 return GEN_INT (cum
->call_cookie
9940 | ((cum
->fregno
== FP_ARG_MIN_REG
)
9941 ? CALL_V4_SET_FP_ARGS
9942 : CALL_V4_CLEAR_FP_ARGS
));
9945 return GEN_INT (cum
->call_cookie
& ~CALL_LIBCALL
);
9948 rs6000_discover_homogeneous_aggregate (mode
, type
, &elt_mode
, &n_elts
);
9950 if (TARGET_MACHO
&& rs6000_darwin64_struct_check_p (mode
, type
))
9952 rtx rslt
= rs6000_darwin64_record_arg (cum
, type
, named
, /*retval= */false);
9953 if (rslt
!= NULL_RTX
)
9955 /* Else fall through to usual handling. */
9958 if (USE_ALTIVEC_FOR_ARG_P (cum
, elt_mode
, named
))
9960 rtx rvec
[GP_ARG_NUM_REG
+ AGGR_ARG_NUM_REG
+ 1];
9964 /* Do we also need to pass this argument in the parameter
9966 if (TARGET_64BIT
&& ! cum
->prototype
)
9968 int align_words
= (cum
->words
+ 1) & ~1;
9969 k
= rs6000_psave_function_arg (mode
, type
, align_words
, rvec
);
9972 /* Describe where this argument goes in the vector registers. */
9973 for (i
= 0; i
< n_elts
&& cum
->vregno
+ i
<= ALTIVEC_ARG_MAX_REG
; i
++)
9975 r
= gen_rtx_REG (elt_mode
, cum
->vregno
+ i
);
9976 off
= GEN_INT (i
* GET_MODE_SIZE (elt_mode
));
9977 rvec
[k
++] = gen_rtx_EXPR_LIST (VOIDmode
, r
, off
);
9980 return rs6000_finish_function_arg (mode
, rvec
, k
);
9982 else if (TARGET_ALTIVEC_ABI
	   && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
	       || (type && TREE_CODE (type) == VECTOR_TYPE
		   && int_size_in_bytes (type) == 16)))
    {
      if (named || abi == ABI_V4)
	return NULL_RTX;
      else
	{
	  /* Vector parameters to varargs functions under AIX or Darwin
	     get passed in memory and possibly also in GPRs.  */
	  int align, align_words, n_words;
	  enum machine_mode part_mode;

	  /* Vector parameters must be 16-byte aligned.  In 32-bit
	     mode this means we need to take into account the offset
	     to the parameter save area.  In 64-bit mode, they just
	     have to start on an even word, since the parameter save
	     area is 16-byte aligned.  */
	  if (TARGET_32BIT)
	    align = -(rs6000_parm_offset () + cum->words) & 3;
	  else
	    align = cum->words & 1;
	  align_words = cum->words + align;

	  /* Out of registers?  Memory, then.  */
	  if (align_words >= GP_ARG_NUM_REG)
	    return NULL_RTX;

	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type, align_words);

	  /* The vector value goes in GPRs.  Only the part of the
	     value in GPRs is reported here.  */
	  part_mode = mode;
	  n_words = rs6000_arg_size (mode, type);
	  if (align_words + n_words > GP_ARG_NUM_REG)
	    /* Fortunately, there are only two possibilities, the value
	       is either wholly in GPRs or half in GPRs and half not.  */
	    part_mode = DImode;

	  return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
	}
    }
  else if (TARGET_SPE_ABI && TARGET_SPE
	   && (SPE_VECTOR_MODE (mode)
	       || (TARGET_E500_DOUBLE && (mode == DFmode
					  || mode == DCmode
					  || mode == TFmode
					  || mode == TCmode))))
    return rs6000_spe_function_arg (cum, mode, type);

  else if (abi == ABI_V4)
    {
      if (TARGET_HARD_FLOAT && TARGET_FPRS
	  && ((TARGET_SINGLE_FLOAT && mode == SFmode)
	      || (TARGET_DOUBLE_FLOAT && mode == DFmode)
	      || (mode == TFmode && !TARGET_IEEEQUAD)
	      || mode == SDmode || mode == DDmode || mode == TDmode))
	{
	  /* _Decimal128 must use an even/odd register pair.  This assumes
	     that the register number is odd when fregno is odd.  */
	  if (mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;

	  if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
	      <= FP_ARG_V4_MAX_REG)
	    return gen_rtx_REG (mode, cum->fregno);
	  else
	    return NULL_RTX;
	}
      else
	{
	  int n_words = rs6000_arg_size (mode, type);
	  int gregno = cum->sysv_gregno;

	  /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
	     (r7,r8) or (r9,r10).  As does any other 2 word item such
	     as complex int due to a historical mistake.  */
	  if (n_words == 2)
	    gregno += (1 - gregno) & 1;

	  /* Multi-reg args are not split between registers and stack.  */
	  if (gregno + n_words - 1 > GP_ARG_MAX_REG)
	    return NULL_RTX;

	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type,
					      gregno - GP_ARG_MIN_REG);

	  return gen_rtx_REG (mode, gregno);
	}
    }
  else
    {
      int align_words = rs6000_parm_start (mode, type, cum->words);

      /* _Decimal128 must be passed in an even/odd float register pair.
	 This assumes that the register number is odd when fregno is odd.  */
      if (elt_mode == TDmode && (cum->fregno % 2) == 1)
	cum->fregno++;

      if (USE_FP_FOR_ARG_P (cum, elt_mode))
	{
	  rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
	  rtx r, off;
	  int i, k = 0;
	  unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;

	  /* Do we also need to pass this argument in the parameter
	     save area?  */
	  if (type && (cum->nargs_prototype <= 0
		       || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
			   && TARGET_XL_COMPAT
			   && align_words >= GP_ARG_NUM_REG)))
	    k = rs6000_psave_function_arg (mode, type, align_words, rvec);

	  /* Describe where this argument goes in the fprs.  */
	  for (i = 0; i < n_elts
		      && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
	    {
	      /* Check if the argument is split over registers and memory.
		 This can only ever happen for long double or _Decimal128;
		 complex types are handled via split_complex_arg.  */
	      enum machine_mode fmode = elt_mode;
	      if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
		{
		  gcc_assert (fmode == TFmode || fmode == TDmode);
		  fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
		}

	      r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
	      off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
	      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
	    }

	  return rs6000_finish_function_arg (mode, rvec, k);
	}
      else if (align_words < GP_ARG_NUM_REG)
	{
	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type, align_words);

	  return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
	}
      else
	return NULL_RTX;
    }
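
/* Worked example of the vector-argument alignment arithmetic above
   (illustrative only; this helper is hypothetical and is not used by the
   compiler proper).  In 32-bit mode a vector must start on a 16-byte
   boundary relative to the parameter save area, so with
   rs6000_parm_offset () == 2 and cum->words == 5 we get
   align = -(2 + 5) & 3 == 1 and the vector occupies words 6..9.  */
static inline int
illustrative_vector_align_words (int parm_offset, int words)
{
  int align = -(parm_offset + words) & 3;   /* padding up to a 16-byte boundary */
  return words + align;                     /* first word used by the vector */
}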
/* For an arg passed partly in registers and partly in memory, this is
   the number of bytes passed in registers.  For args passed entirely in
   registers or entirely in memory, zero.  When an arg is described by a
   PARALLEL, perhaps using more than one register type, this function
   returns the number of bytes used by the first element of the PARALLEL.  */

static int
rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
			  tree type, bool named)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  bool passed_in_gprs = true;
  int ret = 0;
  int align_words;
  int n_elts;
  enum machine_mode elt_mode;

  rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);

  if (DEFAULT_ABI == ABI_V4)
    return 0;

  if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
    {
      /* If we are passing this arg in the fixed parameter save area
	 (gprs or memory) as well as VRs, we do not use the partial
	 bytes mechanism; instead, rs6000_function_arg will return a
	 PARALLEL including a memory element as necessary.  */
      if (TARGET_64BIT && ! cum->prototype)
	return 0;

      /* Otherwise, we pass in VRs only.  Check for partial copies.  */
      passed_in_gprs = false;
      if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
	ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
    }

  /* In this complicated case we just disable the partial_nregs code.  */
  if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
    return 0;

  align_words = rs6000_parm_start (mode, type, cum->words);

  if (USE_FP_FOR_ARG_P (cum, elt_mode))
    {
      unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;

      /* If we are passing this arg in the fixed parameter save area
	 (gprs or memory) as well as FPRs, we do not use the partial
	 bytes mechanism; instead, rs6000_function_arg will return a
	 PARALLEL including a memory element as necessary.  */
      if (type
	  && (cum->nargs_prototype <= 0
	      || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
		  && TARGET_XL_COMPAT
		  && align_words >= GP_ARG_NUM_REG)))
	return 0;

      /* Otherwise, we pass in FPRs only.  Check for partial copies.  */
      passed_in_gprs = false;
      if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
	ret = ((FP_ARG_MAX_REG + 1 - cum->fregno)
	       * MIN (8, GET_MODE_SIZE (elt_mode)));
    }

  if (passed_in_gprs
      && align_words < GP_ARG_NUM_REG
      && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
    ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);

  if (ret != 0 && TARGET_DEBUG_ARG)
    fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);

  return ret;
}
/* A C expression that indicates when an argument must be passed by
   reference.  If nonzero for an argument, a copy of that argument is
   made in memory and a pointer to the argument is passed instead of
   the argument itself.  The pointer is passed in whatever way is
   appropriate for passing a pointer to that type.

   Under V.4, aggregates and long double are passed by reference.

   As an extension to all 32-bit ABIs, AltiVec vectors are passed by
   reference unless the AltiVec vector extension ABI is in force.

   As an extension to all ABIs, variable sized types are passed by
   reference.  */

static bool
rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
			  enum machine_mode mode, const_tree type,
			  bool named ATTRIBUTE_UNUSED)
{
  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
      return 1;
    }

  if (!type)
    return 0;

  if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
      return 1;
    }

  if (int_size_in_bytes (type) < 0)
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
      return 1;
    }

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
      return 1;
    }

  /* Pass synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_pass_big_vectors = false;
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
      if (!warned_for_pass_big_vectors)
	{
	  warning (0, "GCC vector passed by reference: "
		   "non-standard ABI extension with no compatibility guarantee");
	  warned_for_pass_big_vectors = true;
	}
      return 1;
    }

  return 0;
}
/* Process parameter of type TYPE after ARGS_SO_FAR parameters were
   already processed.  Return true if the parameter must be passed
   (fully or partially) on the stack.  */

static bool
rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
{
  enum machine_mode mode;
  int unsignedp;
  rtx entry_parm;

  /* Catch errors.  */
  if (type == NULL || type == error_mark_node)
    return true;

  /* Handle types with no storage requirement.  */
  if (TYPE_MODE (type) == VOIDmode)
    return false;

  /* Handle complex types.  */
  if (TREE_CODE (type) == COMPLEX_TYPE)
    return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
	    || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));

  /* Handle transparent aggregates.  */
  if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
      && TYPE_TRANSPARENT_AGGR (type))
    type = TREE_TYPE (first_field (type));

  /* See if this arg was passed by invisible reference.  */
  if (pass_by_reference (get_cumulative_args (args_so_far),
			 TYPE_MODE (type), type, true))
    type = build_pointer_type (type);

  /* Find mode as it is passed by the ABI.  */
  unsignedp = TYPE_UNSIGNED (type);
  mode = promote_mode (type, TYPE_MODE (type), &unsignedp);

  /* If we must pass in stack, we need a stack.  */
  if (rs6000_must_pass_in_stack (mode, type))
    return true;

  /* If there is no incoming register, we need a stack.  */
  entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
  if (entry_parm == NULL)
    return true;

  /* Likewise if we need to pass both in registers and on the stack.  */
  if (GET_CODE (entry_parm) == PARALLEL
      && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
    return true;

  /* Also true if we're partially in registers and partially not.  */
  if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
    return true;

  /* Update info on where next arg arrives in registers.  */
  rs6000_function_arg_advance (args_so_far, mode, type, true);
  return false;
}
/* Return true if FUN has no prototype, has a variable argument
   list, or passes any parameter in memory.  */

static bool
rs6000_function_parms_need_stack (tree fun)
{
  function_args_iterator args_iter;
  tree arg_type;
  CUMULATIVE_ARGS args_so_far_v;
  cumulative_args_t args_so_far;

  if (!fun)
    /* Must be a libcall, all of which only use reg parms.  */
    return false;
  if (!TYPE_P (fun))
    fun = TREE_TYPE (fun);

  /* Varargs functions need the parameter save area.  */
  if (!prototype_p (fun) || stdarg_p (fun))
    return true;

  INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fun, NULL_RTX);
  args_so_far = pack_cumulative_args (&args_so_far_v);

  if (aggregate_value_p (TREE_TYPE (fun), fun))
    {
      tree type = build_pointer_type (TREE_TYPE (fun));
      rs6000_parm_needs_stack (args_so_far, type);
    }

  FOREACH_FUNCTION_ARGS (fun, arg_type, args_iter)
    if (rs6000_parm_needs_stack (args_so_far, arg_type))
      return true;

  return false;
}
/* Return the size of the REG_PARM_STACK_SPACE area for FUN.  This is
   usually a constant depending on the ABI.  However, in the ELFv2 ABI
   the register parameter area is optional when calling a function that
   has a prototype in scope, has no variable argument list, and passes
   all parameters in registers.  */

static int
rs6000_reg_parm_stack_space (tree fun)
{
  int reg_parm_stack_space;

  switch (DEFAULT_ABI)
    {
    default:
      reg_parm_stack_space = 0;
      break;

    case ABI_AIX:
    case ABI_DARWIN:
      reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
      break;

    case ABI_ELFv2:
      /* ??? Recomputing this every time is a bit expensive.  Is there
	 a place to cache this information?  */
      if (rs6000_function_parms_need_stack (fun))
	reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
      else
	reg_parm_stack_space = 0;
      break;
    }

  return reg_parm_stack_space;
}
static void
rs6000_move_block_from_reg (int regno, rtx x, int nregs)
{
  int i;
  enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;

  if (nregs == 0)
    return;

  for (i = 0; i < nregs; i++)
    {
      rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
      if (reload_completed)
	{
	  if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
	    tem = NULL_RTX;
	  else
	    tem = simplify_gen_subreg (reg_mode, x, BLKmode,
				       i * GET_MODE_SIZE (reg_mode));
	}
      else
	tem = replace_equiv_address (tem, XEXP (tem, 0));

      gcc_assert (tem);

      emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
    }
}
/* Perform any actions needed for a function that is receiving a
   variable number of arguments.

   MODE and TYPE are the mode and type of the current parameter.

   PRETEND_SIZE is a variable that should be set to the amount of stack
   that must be pushed by the prolog to pretend that our caller pushed
   it.

   Normally, this macro will push all remaining incoming registers on the
   stack and set PRETEND_SIZE to the length of the registers pushed.  */

static void
setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
			tree type, int *pretend_size ATTRIBUTE_UNUSED,
			int no_rtl)
{
  CUMULATIVE_ARGS next_cum;
  int reg_size = TARGET_32BIT ? 4 : 8;
  rtx save_area = NULL_RTX, mem;
  int first_reg_offset;
  alias_set_type set;

  /* Skip the last named argument.  */
  next_cum = *get_cumulative_args (cum);
  rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);

  if (DEFAULT_ABI == ABI_V4)
    {
      first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;

      if (! no_rtl)
	{
	  int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
	  HOST_WIDE_INT offset = 0;

	  /* Try to optimize the size of the varargs save area.
	     The ABI requires that ap.reg_save_area is doubleword
	     aligned, but we don't need to allocate space for all
	     the bytes, only those to which we actually will save
	     anything.  */
	  if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
	    gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
	  if (TARGET_HARD_FLOAT && TARGET_FPRS
	      && next_cum.fregno <= FP_ARG_V4_MAX_REG
	      && cfun->va_list_fpr_size)
	    {
	      if (gpr_reg_num)
		fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
			   * UNITS_PER_FP_WORD;
	      if (cfun->va_list_fpr_size
		  < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
		fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
	      else
		fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
			    * UNITS_PER_FP_WORD;
	    }
	  if (gpr_reg_num)
	    {
	      offset = -((first_reg_offset * reg_size) & ~7);
	      if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
		{
		  gpr_reg_num = cfun->va_list_gpr_size;
		  if (reg_size == 4 && (first_reg_offset & 1))
		    gpr_reg_num++;
		}
	      gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
	    }
	  else if (fpr_size)
	    offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
		       * UNITS_PER_FP_WORD
		     - (int) (GP_ARG_NUM_REG * reg_size);

	  if (gpr_size + fpr_size)
	    {
	      rtx reg_save_area
		= assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
	      gcc_assert (GET_CODE (reg_save_area) == MEM);
	      reg_save_area = XEXP (reg_save_area, 0);
	      if (GET_CODE (reg_save_area) == PLUS)
		{
		  gcc_assert (XEXP (reg_save_area, 0)
			      == virtual_stack_vars_rtx);
		  gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
		  offset += INTVAL (XEXP (reg_save_area, 1));
		}
	      else
		gcc_assert (reg_save_area == virtual_stack_vars_rtx);
	    }

	  cfun->machine->varargs_save_offset = offset;
	  save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
	}
    }
  else
    {
      first_reg_offset = next_cum.words;
      save_area = virtual_incoming_args_rtx;

      if (targetm.calls.must_pass_in_stack (mode, type))
	first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
    }

  set = get_varargs_alias_set ();
  if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
      && cfun->va_list_gpr_size)
    {
      int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;

      if (va_list_gpr_counter_field)
	/* V4 va_list_gpr_size counts number of registers needed.  */
	n_gpr = cfun->va_list_gpr_size;
      else
	/* char * va_list instead counts number of bytes needed.  */
	n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;

      if (nregs > n_gpr)
	nregs = n_gpr;

      mem = gen_rtx_MEM (BLKmode,
			 plus_constant (Pmode, save_area,
					first_reg_offset * reg_size));
      MEM_NOTRAP_P (mem) = 1;
      set_mem_alias_set (mem, set);
      set_mem_align (mem, BITS_PER_WORD);

      rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
				  nregs);
    }

  /* Save FP registers if needed.  */
  if (DEFAULT_ABI == ABI_V4
      && TARGET_HARD_FLOAT && TARGET_FPRS
      && ! no_rtl
      && next_cum.fregno <= FP_ARG_V4_MAX_REG
      && cfun->va_list_fpr_size)
    {
      int fregno = next_cum.fregno, nregs;
      rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
      rtx lab = gen_label_rtx ();
      int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
					       * UNITS_PER_FP_WORD);

      emit_jump_insn
	(gen_rtx_SET (VOIDmode,
		      pc_rtx,
		      gen_rtx_IF_THEN_ELSE (VOIDmode,
					    gen_rtx_NE (VOIDmode, cr1,
							const0_rtx),
					    gen_rtx_LABEL_REF (VOIDmode, lab),
					    pc_rtx)));

      for (nregs = 0;
	   fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
	   fregno++, off += UNITS_PER_FP_WORD, nregs++)
	{
	  mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			     ? DFmode : SFmode,
			     plus_constant (Pmode, save_area, off));
	  MEM_NOTRAP_P (mem) = 1;
	  set_mem_alias_set (mem, set);
	  set_mem_align (mem, GET_MODE_ALIGNMENT (
			   (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			   ? DFmode : SFmode));
	  emit_move_insn (mem, gen_rtx_REG (
			   (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			   ? DFmode : SFmode, fregno));
	}

      emit_label (lab);
    }
}
/* Create the va_list data type.  */

static tree
rs6000_build_builtin_va_list (void)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;

  /* For AIX, prefer 'char *' because that's what the system
     header files like.  */
  if (DEFAULT_ABI != ABI_V4)
    return build_pointer_type (char_type_node);

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
			  get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
		      unsigned_char_type_node);
  f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
		      unsigned_char_type_node);
  /* Give the two bytes of padding a name, so that -Wpadded won't warn on
     every user file.  */
  f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("reserved"), short_unsigned_type_node);
  f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("overflow_arg_area"),
		      ptr_type_node);
  f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("reg_save_area"),
		      ptr_type_node);

  va_list_gpr_counter_field = f_gpr;
  va_list_fpr_counter_field = f_fpr;

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_res) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TYPE_STUB_DECL (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  DECL_CHAIN (f_gpr) = f_fpr;
  DECL_CHAIN (f_fpr) = f_res;
  DECL_CHAIN (f_res) = f_ovf;
  DECL_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}
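
/* For reference, the V4 va_list record built above corresponds to the
   layout sketched below (ordinary C, illustrative only; the tag name is
   hypothetical).  AIX and the 64-bit ABIs instead use a plain 'char *'.

     typedef struct __va_list_tag_sketch
     {
       unsigned char gpr;          // next GP argument register to use
       unsigned char fpr;          // next FP argument register to use
       unsigned short reserved;    // named padding, keeps -Wpadded quiet
       void *overflow_arg_area;    // next stack-passed argument
       void *reg_save_area;        // block saved by setup_incoming_varargs
     } __va_list_tag_sketch[1];
*/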
/* Implement va_start.  */

static void
rs6000_va_start (tree valist, rtx nextarg)
{
  HOST_WIDE_INT words, n_gpr, n_fpr;
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;

  /* Only SVR4 needs something special.  */
  if (DEFAULT_ABI != ABI_V4)
    {
      std_expand_builtin_va_start (valist, nextarg);
      return;
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_simple_mem_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
		f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
		f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
		f_sav, NULL_TREE);

  /* Count number of gp and fp argument registers used.  */
  words = crtl->args.info.words;
  n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
	       GP_ARG_NUM_REG);
  n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
	       FP_ARG_NUM_REG);

  if (TARGET_DEBUG_ARG)
    fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
	     HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
	     words, n_gpr, n_fpr);

  if (cfun->va_list_gpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
		  build_int_cst (NULL_TREE, n_gpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  if (cfun->va_list_fpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
		  build_int_cst (NULL_TREE, n_fpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (call_ABI_of_interest (cfun->decl))
    rs6000_passes_float = true;
#endif

  /* Find the overflow area.  */
  t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
  if (words != 0)
    t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
  t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  /* If there were no va_arg invocations, don't set up the register
     save area.  */
  if (!cfun->va_list_gpr_size
      && !cfun->va_list_fpr_size
      && n_gpr < GP_ARG_NUM_REG
      && n_fpr < FP_ARG_V4_MAX_REG)
    return;

  /* Find the register save area.  */
  t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
  if (cfun->machine->varargs_save_offset)
    t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
  t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
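
/* Example of the effect (illustrative only): for

     int f (int a, int b, ...)

   compiled for the V4 ABI, the two named arguments use r3 and r4, so
   va_start above stores gpr = 2 and fpr = 0 into the va_list, points
   overflow_arg_area at the incoming stack arguments (words is typically 0
   here, since no named argument spilled to the stack), and points
   reg_save_area at the block saved by setup_incoming_varargs.  */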
/* Implement va_arg.  */

static tree
rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
			gimple_seq *post_p)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, reg, t, u;
  int size, rsize, n_reg, sav_ofs, sav_scale;
  tree lab_false, lab_over, addr;
  int align;
  tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
  int regalign = 0;
  gimple stmt;

  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
    {
      t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
      return build_va_arg_indirect_ref (t);
    }

  /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
     earlier version of gcc, with the property that it always applied alignment
     adjustments to the va-args (even for zero-sized types).  The cheapest way
     to deal with this is to replicate the effect of the part of
     std_gimplify_va_arg_expr that carries out the align adjust, for the case
     of zero-sized types.

     We don't need to check for pass-by-reference because of the test above.
     We can return a simplified answer, since we know there's no offset to add.  */

  if (((TARGET_MACHO
	&& rs6000_darwin64_abi)
       || DEFAULT_ABI == ABI_ELFv2
       || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
      && integer_zerop (TYPE_SIZE (type)))
    {
      unsigned HOST_WIDE_INT align, boundary;
      tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
      align = PARM_BOUNDARY / BITS_PER_UNIT;
      boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
      if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
	boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
      boundary /= BITS_PER_UNIT;
      if (boundary > align)
	{
	  /* This updates arg ptr by the amount that would be necessary
	     to align the zero-sized (but not zero-alignment) item.  */
	  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		      fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
	  gimplify_and_add (t, pre_p);

	  t = fold_convert (sizetype, valist_tmp);
	  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		      fold_convert (TREE_TYPE (valist),
				    fold_build2 (BIT_AND_EXPR, sizetype, t,
						 size_int (-boundary))));
	  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
	  gimplify_and_add (t, pre_p);
	}
      /* Since it is zero-sized there's no increment for the item itself.  */
      valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
      return build_va_arg_indirect_ref (valist_tmp);
    }

  if (DEFAULT_ABI != ABI_V4)
    {
      if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
	{
	  tree elem_type = TREE_TYPE (type);
	  enum machine_mode elem_mode = TYPE_MODE (elem_type);
	  int elem_size = GET_MODE_SIZE (elem_mode);

	  if (elem_size < UNITS_PER_WORD)
	    {
	      tree real_part, imag_part;
	      gimple_seq post = NULL;

	      real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
						  &post);
	      /* Copy the value into a temporary, lest the formal temporary
		 be reused out from under us.  */
	      real_part = get_initialized_tmp_var (real_part, pre_p, &post);
	      gimple_seq_add_seq (pre_p, post);

	      imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
						  post_p);

	      return build2 (COMPLEX_EXPR, type, real_part, imag_part);
	    }
	}

      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = DECL_CHAIN (f_gpr);
  f_res = DECL_CHAIN (f_fpr);
  f_ovf = DECL_CHAIN (f_res);
  f_sav = DECL_CHAIN (f_ovf);

  valist = build_va_arg_indirect_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
		f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
		f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
		f_sav, NULL_TREE);

  size = int_size_in_bytes (type);
  rsize = (size + 3) / 4;
  align = 1;

  if (TARGET_HARD_FLOAT && TARGET_FPRS
      && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
	  || (TARGET_DOUBLE_FLOAT
	      && (TYPE_MODE (type) == DFmode
		  || TYPE_MODE (type) == TFmode
		  || TYPE_MODE (type) == SDmode
		  || TYPE_MODE (type) == DDmode
		  || TYPE_MODE (type) == TDmode))))
    {
      /* FP args go in FP registers, if present.  */
      reg = fpr;
      n_reg = (size + 7) / 8;
      sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
      sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
      if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
	align = 8;
    }
  else
    {
      /* Otherwise into GP registers.  */
      reg = gpr;
      n_reg = rsize;
      sav_ofs = 0;
      sav_scale = 4;
      if (n_reg == 2)
	align = 8;
    }

  /* Pull the value out of the saved registers....  */

  lab_over = NULL;
  addr = create_tmp_var (ptr_type_node, "addr");

  /* AltiVec vectors never go in registers when -mabi=altivec.  */
  if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
    align = 16;
  else
    {
      lab_false = create_artificial_label (input_location);
      lab_over = create_artificial_label (input_location);

      /* Long long and SPE vectors are aligned in the registers.
	 As are any other 2 gpr item such as complex int due to a
	 historical mistake.  */
      u = reg;
      if (n_reg == 2 && reg == gpr)
	{
	  regalign = 1;
	  u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		      build_int_cst (TREE_TYPE (reg), n_reg - 1));
	  u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
		      unshare_expr (reg), u);
	}
      /* _Decimal128 is passed in even/odd fpr pairs; the stored
	 reg number is 0 for f1, so we want to make it odd.  */
      else if (reg == fpr && TYPE_MODE (type) == TDmode)
	{
	  t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		      build_int_cst (TREE_TYPE (reg), 1));
	  u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
	}

      t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
      t = build2 (GE_EXPR, boolean_type_node, u, t);
      u = build1 (GOTO_EXPR, void_type_node, lab_false);
      t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
      gimplify_and_add (t, pre_p);

      t = sav;
      if (sav_ofs)
	t = fold_build_pointer_plus_hwi (sav, sav_ofs);

      u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		  build_int_cst (TREE_TYPE (reg), n_reg));
      u = fold_convert (sizetype, u);
      u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
      t = fold_build_pointer_plus (t, u);

      /* _Decimal32 varargs are located in the second word of the 64-bit
	 FP register for 32-bit binaries.  */
      if (!TARGET_POWERPC64
	  && TARGET_HARD_FLOAT && TARGET_FPRS
	  && TYPE_MODE (type) == SDmode)
	t = fold_build_pointer_plus_hwi (t, size);

      gimplify_assign (addr, t, pre_p);

      gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));

      stmt = gimple_build_label (lab_false);
      gimple_seq_add_stmt (pre_p, stmt);

      if ((n_reg == 2 && !regalign) || n_reg > 2)
	{
	  /* Ensure that we don't find any more args in regs.
	     Alignment has taken care of for special cases.  */
	  gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
	}
    }

  /* ... otherwise out of the overflow area.  */

  /* Care for on-stack alignment if needed.  */
  t = ovf;
  if (align != 1)
    {
      t = fold_build_pointer_plus_hwi (t, align - 1);
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
		  build_int_cst (TREE_TYPE (t), -align));
    }
  gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);

  gimplify_assign (unshare_expr (addr), t, pre_p);

  t = fold_build_pointer_plus_hwi (t, size);
  gimplify_assign (unshare_expr (ovf), t, pre_p);

  if (lab_over)
    {
      stmt = gimple_build_label (lab_over);
      gimple_seq_add_stmt (pre_p, stmt);
    }

  if (STRICT_ALIGNMENT
      && (TYPE_ALIGN (type)
	  > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
    {
      /* The value (of type complex double, for example) may not be
	 aligned in memory in the saved registers, so copy via a
	 temporary.  (This is the same code as used for SPARC.)  */
      tree tmp = create_tmp_var (type, "va_arg_tmp");
      tree dest_addr = build_fold_addr_expr (tmp);

      tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
				   3, dest_addr, addr, size_int (rsize * 4));

      gimplify_and_add (copy, pre_p);
      addr = dest_addr;
    }

  addr = fold_convert (ptrtype, addr);
  return build_va_arg_indirect_ref (addr);
}
static void
def_builtin (const char *name, tree type, enum rs6000_builtins code)
{
  tree t;
  unsigned classify = rs6000_builtin_info[(int)code].attr;
  const char *attr_string = "";

  gcc_assert (name != NULL);
  gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));

  if (rs6000_builtin_decls[(int)code])
    fatal_error ("internal error: builtin function %s already processed", name);

  rs6000_builtin_decls[(int)code] = t =
    add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);

  /* Set any special attributes.  */
  if ((classify & RS6000_BTC_CONST) != 0)
    {
      /* const function, function only depends on the inputs.  */
      TREE_READONLY (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", const";
    }
  else if ((classify & RS6000_BTC_PURE) != 0)
    {
      /* pure function, function can read global memory, but does not set any
	 external state.  */
      DECL_PURE_P (t) = 1;
      TREE_NOTHROW (t) = 1;
      attr_string = ", pure";
    }
  else if ((classify & RS6000_BTC_FP) != 0)
    {
      /* Function is a math function.  If rounding mode is on, then treat the
	 function as not reading global memory, but it can have arbitrary side
	 effects.  If it is off, then assume the function is a const function.
	 This mimics the ATTR_MATHFN_FPROUNDING attribute in
	 builtin-attribute.def that is used for the math functions.  */
      TREE_NOTHROW (t) = 1;
      if (flag_rounding_math)
	{
	  DECL_PURE_P (t) = 1;
	  DECL_IS_NOVOPS (t) = 1;
	  attr_string = ", fp, pure";
	}
      else
	{
	  TREE_READONLY (t) = 1;
	  attr_string = ", fp, const";
	}
    }
  else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
    gcc_unreachable ();

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
	     (int)code, name, attr_string);
}
/* Simple ternary operations: VECd = foo (VECa, VECb, VECc).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_3arg[] =
{
#include "rs6000-builtin.def"
};

/* DST operations: void foo (void *, const int, const char).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_dst[] =
{
#include "rs6000-builtin.def"
};

/* Simple binary operations: VECc = foo (VECa, VECb).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_2arg[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

/* AltiVec predicates.  */

static const struct builtin_description bdesc_altivec_preds[] =
{
#include "rs6000-builtin.def"
};

/* SPE predicates.  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_spe_predicates[] =
{
#include "rs6000-builtin.def"
};

/* SPE evsel predicates.  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_spe_evsel[] =
{
#include "rs6000-builtin.def"
};

/* PAIRED predicates.  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_paired_preds[] =
{
#include "rs6000-builtin.def"
};

/* ABS* operations.  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_abs[] =
{
#include "rs6000-builtin.def"
};

/* Simple unary operations: VECb = foo (unsigned literal) or VECb =
   foo (VECa).  */

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_1arg[] =
{
#include "rs6000-builtin.def"
};

/* HTM builtins.  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { MASK, ICODE, NAME, ENUM },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)

static const struct builtin_description bdesc_htm[] =
{
#include "rs6000-builtin.def"
};

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X
/* Return true if a builtin function is overloaded.  */
bool
rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
{
  return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
}

/* Expand an expression EXP that calls a builtin without arguments.  */
static rtx
rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
{
  rtx pat;
  enum machine_mode tmode = insn_data[icode].operand[0].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  pat = GEN_FCN (icode) (target);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vspltisb
      || icode == CODE_FOR_altivec_vspltish
      || icode == CODE_FOR_altivec_vspltisw
      || icode == CODE_FOR_spe_evsplatfi
      || icode == CODE_FOR_spe_evsplati)
    {
      /* Only allow 5-bit *signed* literals.  */
      if (GET_CODE (op0) != CONST_INT
	  || INTVAL (op0) > 15
	  || INTVAL (op0) < -16)
	{
	  error ("argument 1 must be a 5-bit signed literal");
	  return const0_rtx;
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch1, scratch2;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  scratch1 = gen_reg_rtx (mode0);
  scratch2 = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vcfux
      || icode == CODE_FOR_altivec_vcfsx
      || icode == CODE_FOR_altivec_vctsxs
      || icode == CODE_FOR_altivec_vctuxs
      || icode == CODE_FOR_altivec_vspltb
      || icode == CODE_FOR_altivec_vsplth
      || icode == CODE_FOR_altivec_vspltw
      || icode == CODE_FOR_spe_evaddiw
      || icode == CODE_FOR_spe_evldd
      || icode == CODE_FOR_spe_evldh
      || icode == CODE_FOR_spe_evldw
      || icode == CODE_FOR_spe_evlhhesplat
      || icode == CODE_FOR_spe_evlhhossplat
      || icode == CODE_FOR_spe_evlhhousplat
      || icode == CODE_FOR_spe_evlwhe
      || icode == CODE_FOR_spe_evlwhos
      || icode == CODE_FOR_spe_evlwhou
      || icode == CODE_FOR_spe_evlwhsplat
      || icode == CODE_FOR_spe_evlwwsplat
      || icode == CODE_FOR_spe_evrlwi
      || icode == CODE_FOR_spe_evslwi
      || icode == CODE_FOR_spe_evsrwis
      || icode == CODE_FOR_spe_evsubifw
      || icode == CODE_FOR_spe_evsrwiu)
    {
      /* Only allow 5-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~0x1f)
	{
	  error ("argument 2 must be a 5-bit unsigned literal");
	  return const0_rtx;
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch;
  tree cr6_form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = SImode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int cr6_form_int;

  if (TREE_CODE (cr6_form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_altivec_predicate must be a constant");
      return const0_rtx;
    }
  else
    cr6_form_int = TREE_INT_CST_LOW (cr6_form);

  gcc_assert (mode0 == mode1);

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  /* The vec_any* and vec_all* predicates use the same opcodes for two
     different operations, but the bits in CR6 will be different
     depending on what information we want.  So we have to play tricks
     with CR6 to get the right bits out.

     If you think this is disgusting, look at the specs for the
     AltiVec predicates.  */

  switch (cr6_form_int)
    {
    case 0:
      emit_insn (gen_cr6_test_for_zero (target));
      break;
    case 1:
      emit_insn (gen_cr6_test_for_zero_reverse (target));
      break;
    case 2:
      emit_insn (gen_cr6_test_for_lt (target));
      break;
    case 3:
      emit_insn (gen_cr6_test_for_lt_reverse (target));
      break;
    default:
      error ("argument 1 of __builtin_altivec_predicate is out of range");
      break;
    }

  return target;
}
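
/* Usage sketch for the CR6 trick above (illustrative only): vec_all_eq and
   vec_any_ne both expand to the same vector-compare instruction; the
   builtin's first argument merely selects which CR6 test to emit, roughly

     __builtin_altivec_vcmpequw_p (2, a, b)   // "all elements equal"?
     __builtin_altivec_vcmpequw_p (3, a, b)   // "any element not equal"?

   with the numeric forms corresponding to the __CR6_* macros in altivec.h;
   the switch above then emits the matching cr6_test_for_* pattern.  */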
static rtx
paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = Pmode;
  enum machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  if (op0 == const0_rtx)
    addr = gen_rtx_MEM (tmode, op1);
  else
    {
      op0 = copy_to_mode_reg (mode0, op0);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
    }

  pat = GEN_FCN (icode) (target, addr);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = Pmode;
  enum machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  if (op0 == const0_rtx)
    addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
  else
    {
      op0 = copy_to_mode_reg (mode0, op0);
      addr = gen_rtx_MEM (blk ? BLKmode : tmode,
			  gen_rtx_PLUS (Pmode, op0, op1));
    }

  pat = GEN_FCN (icode) (target, addr);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
spe_expand_stv_builtin (enum insn_code icode, tree exp)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  enum machine_mode mode0 = insn_data[icode].operand[0].mode;
  enum machine_mode mode1 = insn_data[icode].operand[1].mode;
  enum machine_mode mode2 = insn_data[icode].operand[2].mode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
    op0 = copy_to_mode_reg (mode2, op0);
  if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
    op1 = copy_to_mode_reg (mode0, op1);
  if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
    op2 = copy_to_mode_reg (mode1, op2);

  pat = GEN_FCN (icode) (op1, op2, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
static rtx
paired_expand_stv_builtin (enum insn_code icode, tree exp)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode1 = Pmode;
  enum machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
    op0 = copy_to_mode_reg (tmode, op0);

  op2 = copy_to_mode_reg (mode2, op2);

  if (op1 == const0_rtx)
    addr = gen_rtx_MEM (tmode, op2);
  else
    {
      op1 = copy_to_mode_reg (mode1, op1);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
    }

  pat = GEN_FCN (icode) (addr, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
static rtx
altivec_expand_stv_builtin (enum insn_code icode, tree exp)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode smode = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = Pmode;
  enum machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
    op0 = copy_to_mode_reg (smode, op0);

  op2 = copy_to_mode_reg (mode2, op2);

  if (op1 == const0_rtx)
    addr = gen_rtx_MEM (tmode, op2);
  else
    {
      op1 = copy_to_mode_reg (mode1, op1);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
    }

  pat = GEN_FCN (icode) (addr, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
/* Return the appropriate SPR number associated with the given builtin.  */
static inline HOST_WIDE_INT
htm_spr_num (enum rs6000_builtins code)
{
  if (code == HTM_BUILTIN_GET_TFHAR
      || code == HTM_BUILTIN_SET_TFHAR)
    return TFHAR_SPR;
  else if (code == HTM_BUILTIN_GET_TFIAR
	   || code == HTM_BUILTIN_SET_TFIAR)
    return TFIAR_SPR;
  else if (code == HTM_BUILTIN_GET_TEXASR
	   || code == HTM_BUILTIN_SET_TEXASR)
    return TEXASR_SPR;
  gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
	      || code == HTM_BUILTIN_SET_TEXASRU);
  return TEXASRU_SPR;
}

/* Return the appropriate SPR regno associated with the given builtin.  */
static inline HOST_WIDE_INT
htm_spr_regno (enum rs6000_builtins code)
{
  if (code == HTM_BUILTIN_GET_TFHAR
      || code == HTM_BUILTIN_SET_TFHAR)
    return TFHAR_REGNO;
  else if (code == HTM_BUILTIN_GET_TFIAR
	   || code == HTM_BUILTIN_SET_TFIAR)
    return TFIAR_REGNO;
  gcc_assert (code == HTM_BUILTIN_GET_TEXASR
	      || code == HTM_BUILTIN_SET_TEXASR
	      || code == HTM_BUILTIN_GET_TEXASRU
	      || code == HTM_BUILTIN_SET_TEXASRU);
  return TEXASR_REGNO;
}
/* Return the correct ICODE value depending on whether we are
   setting or reading the HTM SPRs.  */
static inline enum insn_code
rs6000_htm_spr_icode (bool nonvoid)
{
  if (nonvoid)
    return (TARGET_64BIT) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
  else
    return (TARGET_64BIT) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
}
/* Expand the HTM builtin in EXP and store the result in TARGET.
   Store true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
htm_expand_builtin (tree exp, rtx target, bool * expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
  enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;

  *expandedp = false;

  /* Expand the HTM builtins.  */
  d = bdesc_htm;
  for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
    if (d->code == fcode)
      {
	rtx op[MAX_HTM_OPERANDS], pat;
	int nopnds = 0;
	tree arg;
	call_expr_arg_iterator iter;
	unsigned attr = rs6000_builtin_info[fcode].attr;
	enum insn_code icode = d->icode;

	if (attr & RS6000_BTC_SPR)
	  icode = rs6000_htm_spr_icode (nonvoid);

	if (nonvoid)
	  {
	    enum machine_mode tmode = insn_data[icode].operand[0].mode;
	    if (!target
		|| GET_MODE (target) != tmode
		|| !(*insn_data[icode].operand[0].predicate) (target, tmode))
	      target = gen_reg_rtx (tmode);
	    op[nopnds++] = target;
	  }

	FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
	  {
	    const struct insn_operand_data *insn_op;

	    if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
	      return NULL_RTX;

	    insn_op = &insn_data[icode].operand[nopnds];

	    op[nopnds] = expand_normal (arg);

	    if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
	      {
		if (!strcmp (insn_op->constraint, "n"))
		  {
		    int arg_num = (nonvoid) ? nopnds : nopnds + 1;
		    if (!CONST_INT_P (op[nopnds]))
		      error ("argument %d must be an unsigned literal", arg_num);
		    else
		      error ("argument %d is an unsigned literal that is "
			     "out of range", arg_num);
		    return const0_rtx;
		  }
		op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
	      }

	    nopnds++;
	  }

	/* Handle the builtins for extended mnemonics.  These accept
	   no arguments, but map to builtins that take arguments.  */
	switch (fcode)
	  {
	  case HTM_BUILTIN_TENDALL:  /* Alias for: tend. 1  */
	  case HTM_BUILTIN_TRESUME:  /* Alias for: tsr. 1  */
	    op[nopnds++] = GEN_INT (1);
#ifdef ENABLE_CHECKING
	    attr |= RS6000_BTC_UNARY;
#endif
	    break;
	  case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0  */
	    op[nopnds++] = GEN_INT (0);
#ifdef ENABLE_CHECKING
	    attr |= RS6000_BTC_UNARY;
#endif
	    break;
	  default:
	    break;
	  }

	/* If this builtin accesses SPRs, then pass in the appropriate
	   SPR number and SPR regno as the last two operands.  */
	if (attr & RS6000_BTC_SPR)
	  {
	    op[nopnds++] = gen_rtx_CONST_INT (Pmode, htm_spr_num (fcode));
	    op[nopnds++] = gen_rtx_REG (Pmode, htm_spr_regno (fcode));
	  }

#ifdef ENABLE_CHECKING
	int expected_nopnds = 0;
	if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
	  expected_nopnds = 1;
	else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
	  expected_nopnds = 2;
	else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
	  expected_nopnds = 3;
	if (!(attr & RS6000_BTC_VOID))
	  expected_nopnds += 1;
	if (attr & RS6000_BTC_SPR)
	  expected_nopnds += 2;

	gcc_assert (nopnds == expected_nopnds && nopnds <= MAX_HTM_OPERANDS);
#endif

	switch (nopnds)
	  {
	  case 1:
	    pat = GEN_FCN (icode) (op[0]);
	    break;
	  case 2:
	    pat = GEN_FCN (icode) (op[0], op[1]);
	    break;
	  case 3:
	    pat = GEN_FCN (icode) (op[0], op[1], op[2]);
	    break;
	  case 4:
	    pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
	    break;
	  default:
	    gcc_unreachable ();
	  }

	if (!pat)
	  return NULL_RTX;
	emit_insn (pat);

	*expandedp = true;
	if (nonvoid)
	  return target;
	return const0_rtx;
      }

  return NULL_RTX;
}
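/* Illustrative sketch of the extended-mnemonic aliases handled above
   (assuming the user-level names __builtin_tendall, __builtin_tresume,
   __builtin_tsuspend, __builtin_tend and __builtin_tsr):

       __builtin_tendall ();    behaves like  __builtin_tend (1)
       __builtin_tresume ();    behaves like  __builtin_tsr (1)
       __builtin_tsuspend ();   behaves like  __builtin_tsr (0)

   The zero-argument forms take no operands at the source level; the
   expander appends the constant operand itself.  */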
static rtx
rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  /* Check and prepare argument depending on the instruction code.

     Note that a switch statement instead of the sequence of tests
     would be incorrect as many of the CODE_FOR values could be
     CODE_FOR_nothing and that would yield multiple alternatives
     with identical values.  We'd never reach here at runtime in
     this case.  */
  if (icode == CODE_FOR_altivec_vsldoi_v4sf
      || icode == CODE_FOR_altivec_vsldoi_v4si
      || icode == CODE_FOR_altivec_vsldoi_v8hi
      || icode == CODE_FOR_altivec_vsldoi_v16qi)
    {
      /* Only allow 4-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0xf)
	{
	  error ("argument 3 must be a 4-bit unsigned literal");
	  return const0_rtx;
	}
    }
  else if (icode == CODE_FOR_vsx_xxpermdi_v2df
	   || icode == CODE_FOR_vsx_xxpermdi_v2di
	   || icode == CODE_FOR_vsx_xxsldwi_v16qi
	   || icode == CODE_FOR_vsx_xxsldwi_v8hi
	   || icode == CODE_FOR_vsx_xxsldwi_v4si
	   || icode == CODE_FOR_vsx_xxsldwi_v4sf
	   || icode == CODE_FOR_vsx_xxsldwi_v2di
	   || icode == CODE_FOR_vsx_xxsldwi_v2df)
    {
      /* Only allow 2-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x3)
	{
	  error ("argument 3 must be a 2-bit unsigned literal");
	  return const0_rtx;
	}
    }
  else if (icode == CODE_FOR_vsx_set_v2df
	   || icode == CODE_FOR_vsx_set_v2di)
    {
      /* Only allow 1-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0x1)
	{
	  error ("argument 3 must be a 1-bit unsigned literal");
	  return const0_rtx;
	}
    }
  else if (icode == CODE_FOR_crypto_vshasigmaw
	   || icode == CODE_FOR_crypto_vshasigmad)
    {
      /* Check whether the 2nd and 3rd arguments are integer constants and in
	 range and prepare arguments.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || !IN_RANGE (TREE_INT_CST_LOW (arg1), 0, 1))
	{
	  error ("argument 2 must be 0 or 1");
	  return const0_rtx;
	}

      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || !IN_RANGE (TREE_INT_CST_LOW (arg2), 0, 15))
	{
	  error ("argument 3 must be in the range 0..15");
	  return const0_rtx;
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);
  if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
    op2 = copy_to_mode_reg (mode2, op2);

  if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
    pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
  else
    pat = GEN_FCN (icode) (target, op0, op1, op2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
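/* Illustrative sketch of the literal-range checks above (assuming the
   AltiVec vec_sld intrinsic maps onto the vsldoi patterns tested there):

       vector int r = vec_sld (a, b, 3);    accepted: 3 fits in 4 bits
       vector int r = vec_sld (a, b, n);    rejected unless n is a 4-bit
                                            unsigned literal

   The VSX xxpermdi/xxsldwi and vsx_set cases follow the same scheme with
   2-bit and 1-bit literals respectively.  */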
12194 /* Expand the lvx builtins. */
12196 altivec_expand_ld_builtin (tree exp
, rtx target
, bool *expandedp
)
12198 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
12199 unsigned int fcode
= DECL_FUNCTION_CODE (fndecl
);
12201 enum machine_mode tmode
, mode0
;
12203 enum insn_code icode
;
12207 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi
:
12208 icode
= CODE_FOR_vector_altivec_load_v16qi
;
12210 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi
:
12211 icode
= CODE_FOR_vector_altivec_load_v8hi
;
12213 case ALTIVEC_BUILTIN_LD_INTERNAL_4si
:
12214 icode
= CODE_FOR_vector_altivec_load_v4si
;
12216 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf
:
12217 icode
= CODE_FOR_vector_altivec_load_v4sf
;
12219 case ALTIVEC_BUILTIN_LD_INTERNAL_2df
:
12220 icode
= CODE_FOR_vector_altivec_load_v2df
;
12222 case ALTIVEC_BUILTIN_LD_INTERNAL_2di
:
12223 icode
= CODE_FOR_vector_altivec_load_v2di
;
12226 *expandedp
= false;
12232 arg0
= CALL_EXPR_ARG (exp
, 0);
12233 op0
= expand_normal (arg0
);
12234 tmode
= insn_data
[icode
].operand
[0].mode
;
12235 mode0
= insn_data
[icode
].operand
[1].mode
;
12238 || GET_MODE (target
) != tmode
12239 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
12240 target
= gen_reg_rtx (tmode
);
12242 if (! (*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
12243 op0
= gen_rtx_MEM (mode0
, copy_to_mode_reg (Pmode
, op0
));
12245 pat
= GEN_FCN (icode
) (target
, op0
);
12252 /* Expand the stvx builtins. */
12254 altivec_expand_st_builtin (tree exp
, rtx target ATTRIBUTE_UNUSED
,
12257 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
12258 unsigned int fcode
= DECL_FUNCTION_CODE (fndecl
);
12260 enum machine_mode mode0
, mode1
;
12262 enum insn_code icode
;
12266 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi
:
12267 icode
= CODE_FOR_vector_altivec_store_v16qi
;
12269 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi
:
12270 icode
= CODE_FOR_vector_altivec_store_v8hi
;
12272 case ALTIVEC_BUILTIN_ST_INTERNAL_4si
:
12273 icode
= CODE_FOR_vector_altivec_store_v4si
;
12275 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf
:
12276 icode
= CODE_FOR_vector_altivec_store_v4sf
;
12278 case ALTIVEC_BUILTIN_ST_INTERNAL_2df
:
12279 icode
= CODE_FOR_vector_altivec_store_v2df
;
12281 case ALTIVEC_BUILTIN_ST_INTERNAL_2di
:
12282 icode
= CODE_FOR_vector_altivec_store_v2di
;
12285 *expandedp
= false;
12289 arg0
= CALL_EXPR_ARG (exp
, 0);
12290 arg1
= CALL_EXPR_ARG (exp
, 1);
12291 op0
= expand_normal (arg0
);
12292 op1
= expand_normal (arg1
);
12293 mode0
= insn_data
[icode
].operand
[0].mode
;
12294 mode1
= insn_data
[icode
].operand
[1].mode
;
12296 if (! (*insn_data
[icode
].operand
[0].predicate
) (op0
, mode0
))
12297 op0
= gen_rtx_MEM (mode0
, copy_to_mode_reg (Pmode
, op0
));
12298 if (! (*insn_data
[icode
].operand
[1].predicate
) (op1
, mode1
))
12299 op1
= copy_to_mode_reg (mode1
, op1
);
12301 pat
= GEN_FCN (icode
) (op0
, op1
);
12309 /* Expand the dst builtins. */
12311 altivec_expand_dst_builtin (tree exp
, rtx target ATTRIBUTE_UNUSED
,
12314 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
12315 enum rs6000_builtins fcode
= (enum rs6000_builtins
) DECL_FUNCTION_CODE (fndecl
);
12316 tree arg0
, arg1
, arg2
;
12317 enum machine_mode mode0
, mode1
;
12318 rtx pat
, op0
, op1
, op2
;
12319 const struct builtin_description
*d
;
12322 *expandedp
= false;
12324 /* Handle DST variants. */
12326 for (i
= 0; i
< ARRAY_SIZE (bdesc_dst
); i
++, d
++)
12327 if (d
->code
== fcode
)
12329 arg0
= CALL_EXPR_ARG (exp
, 0);
12330 arg1
= CALL_EXPR_ARG (exp
, 1);
12331 arg2
= CALL_EXPR_ARG (exp
, 2);
12332 op0
= expand_normal (arg0
);
12333 op1
= expand_normal (arg1
);
12334 op2
= expand_normal (arg2
);
12335 mode0
= insn_data
[d
->icode
].operand
[0].mode
;
12336 mode1
= insn_data
[d
->icode
].operand
[1].mode
;
12338 /* Invalid arguments, bail out before generating bad rtl. */
12339 if (arg0
== error_mark_node
12340 || arg1
== error_mark_node
12341 || arg2
== error_mark_node
)
12346 if (TREE_CODE (arg2
) != INTEGER_CST
12347 || TREE_INT_CST_LOW (arg2
) & ~0x3)
12349 error ("argument to %qs must be a 2-bit unsigned literal", d
->name
);
12353 if (! (*insn_data
[d
->icode
].operand
[0].predicate
) (op0
, mode0
))
12354 op0
= copy_to_mode_reg (Pmode
, op0
);
12355 if (! (*insn_data
[d
->icode
].operand
[1].predicate
) (op1
, mode1
))
12356 op1
= copy_to_mode_reg (mode1
, op1
);
12358 pat
= GEN_FCN (d
->icode
) (op0
, op1
, op2
);
/* Expand vec_init builtin.  */
static rtx
altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
{
  enum machine_mode tmode = TYPE_MODE (type);
  enum machine_mode inner_mode = GET_MODE_INNER (tmode);
  int i, n_elt = GET_MODE_NUNITS (tmode);
  rtvec v = rtvec_alloc (n_elt);

  gcc_assert (VECTOR_MODE_P (tmode));
  gcc_assert (n_elt == call_expr_nargs (exp));

  for (i = 0; i < n_elt; ++i)
    {
      rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
      RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
    }

  if (!target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
  return target;
}
/* Return the integer constant in ARG.  Constrain it to be in the range
   of the subparts of VEC_TYPE; issue an error if not.  */

static int
get_element_number (tree vec_type, tree arg)
{
  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

  if (!tree_fits_uhwi_p (arg)
      || (elt = tree_to_uhwi (arg), elt > max))
    {
      error ("selector must be an integer constant in the range 0..%wi", max);
      return 0;
    }

  return elt;
}
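/* Illustrative sketch (assuming the AltiVec vec_extract intrinsic reaches
   the vec_ext expander below): for a vector int value V there are four
   subparts, so

       int a = vec_extract (v, 3);    selector in range 0..3
       int b = vec_extract (v, 9);    diagnosed: "selector must be an
                                      integer constant in the range 0..3"
 */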
/* Expand vec_set builtin.  */
static rtx
altivec_expand_vec_set_builtin (tree exp)
{
  enum machine_mode tmode, mode1;
  tree arg0, arg1, arg2;
  int elt;
  rtx op0, op1;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  arg2 = CALL_EXPR_ARG (exp, 2);

  tmode = TYPE_MODE (TREE_TYPE (arg0));
  mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  gcc_assert (VECTOR_MODE_P (tmode));

  op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
  op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
  elt = get_element_number (TREE_TYPE (arg0), arg2);

  if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
    op1 = convert_modes (mode1, GET_MODE (op1), op1, true);

  op0 = force_reg (tmode, op0);
  op1 = force_reg (mode1, op1);

  rs6000_expand_vector_set (op0, op1, elt);

  return op0;
}
/* Expand vec_ext builtin.  */
static rtx
altivec_expand_vec_ext_builtin (tree exp, rtx target)
{
  enum machine_mode tmode, mode0;
  tree arg0, arg1;
  int elt;
  rtx op0;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);

  op0 = expand_normal (arg0);
  elt = get_element_number (TREE_TYPE (arg0), arg1);

  tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  mode0 = TYPE_MODE (TREE_TYPE (arg0));
  gcc_assert (VECTOR_MODE_P (mode0));

  op0 = force_reg (mode0, op0);

  if (optimize || !target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_extract (target, op0, elt);

  return target;
}
12472 /* Expand the builtin in EXP and store the result in TARGET. Store
12473 true in *EXPANDEDP if we found a builtin to expand. */
12475 altivec_expand_builtin (tree exp
, rtx target
, bool *expandedp
)
12477 const struct builtin_description
*d
;
12479 enum insn_code icode
;
12480 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
12483 enum machine_mode tmode
, mode0
;
12484 enum rs6000_builtins fcode
12485 = (enum rs6000_builtins
) DECL_FUNCTION_CODE (fndecl
);
12487 if (rs6000_overloaded_builtin_p (fcode
))
12490 error ("unresolved overload for Altivec builtin %qF", fndecl
);
12492 /* Given it is invalid, just generate a normal call. */
12493 return expand_call (exp
, target
, false);
12496 target
= altivec_expand_ld_builtin (exp
, target
, expandedp
);
12500 target
= altivec_expand_st_builtin (exp
, target
, expandedp
);
12504 target
= altivec_expand_dst_builtin (exp
, target
, expandedp
);
12512 case ALTIVEC_BUILTIN_STVX
:
12513 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si
, exp
);
12514 case ALTIVEC_BUILTIN_STVEBX
:
12515 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx
, exp
);
12516 case ALTIVEC_BUILTIN_STVEHX
:
12517 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx
, exp
);
12518 case ALTIVEC_BUILTIN_STVEWX
:
12519 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx
, exp
);
12520 case ALTIVEC_BUILTIN_STVXL
:
12521 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl
, exp
);
12523 case ALTIVEC_BUILTIN_STVLX
:
12524 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx
, exp
);
12525 case ALTIVEC_BUILTIN_STVLXL
:
12526 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl
, exp
);
12527 case ALTIVEC_BUILTIN_STVRX
:
12528 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx
, exp
);
12529 case ALTIVEC_BUILTIN_STVRXL
:
12530 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl
, exp
);
12532 case VSX_BUILTIN_STXVD2X_V2DF
:
12533 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df
, exp
);
12534 case VSX_BUILTIN_STXVD2X_V2DI
:
12535 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di
, exp
);
12536 case VSX_BUILTIN_STXVW4X_V4SF
:
12537 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf
, exp
);
12538 case VSX_BUILTIN_STXVW4X_V4SI
:
12539 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si
, exp
);
12540 case VSX_BUILTIN_STXVW4X_V8HI
:
12541 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi
, exp
);
12542 case VSX_BUILTIN_STXVW4X_V16QI
:
12543 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi
, exp
);
12545 case ALTIVEC_BUILTIN_MFVSCR
:
12546 icode
= CODE_FOR_altivec_mfvscr
;
12547 tmode
= insn_data
[icode
].operand
[0].mode
;
12550 || GET_MODE (target
) != tmode
12551 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
12552 target
= gen_reg_rtx (tmode
);
12554 pat
= GEN_FCN (icode
) (target
);
12560 case ALTIVEC_BUILTIN_MTVSCR
:
12561 icode
= CODE_FOR_altivec_mtvscr
;
12562 arg0
= CALL_EXPR_ARG (exp
, 0);
12563 op0
= expand_normal (arg0
);
12564 mode0
= insn_data
[icode
].operand
[0].mode
;
12566 /* If we got invalid arguments bail out before generating bad rtl. */
12567 if (arg0
== error_mark_node
)
12570 if (! (*insn_data
[icode
].operand
[0].predicate
) (op0
, mode0
))
12571 op0
= copy_to_mode_reg (mode0
, op0
);
12573 pat
= GEN_FCN (icode
) (op0
);
12578 case ALTIVEC_BUILTIN_DSSALL
:
12579 emit_insn (gen_altivec_dssall ());
12582 case ALTIVEC_BUILTIN_DSS
:
12583 icode
= CODE_FOR_altivec_dss
;
12584 arg0
= CALL_EXPR_ARG (exp
, 0);
12586 op0
= expand_normal (arg0
);
12587 mode0
= insn_data
[icode
].operand
[0].mode
;
12589 /* If we got invalid arguments bail out before generating bad rtl. */
12590 if (arg0
== error_mark_node
)
12593 if (TREE_CODE (arg0
) != INTEGER_CST
12594 || TREE_INT_CST_LOW (arg0
) & ~0x3)
12596 error ("argument to dss must be a 2-bit unsigned literal");
12600 if (! (*insn_data
[icode
].operand
[0].predicate
) (op0
, mode0
))
12601 op0
= copy_to_mode_reg (mode0
, op0
);
12603 emit_insn (gen_altivec_dss (op0
));
12606 case ALTIVEC_BUILTIN_VEC_INIT_V4SI
:
12607 case ALTIVEC_BUILTIN_VEC_INIT_V8HI
:
12608 case ALTIVEC_BUILTIN_VEC_INIT_V16QI
:
12609 case ALTIVEC_BUILTIN_VEC_INIT_V4SF
:
12610 case VSX_BUILTIN_VEC_INIT_V2DF
:
12611 case VSX_BUILTIN_VEC_INIT_V2DI
:
12612 return altivec_expand_vec_init_builtin (TREE_TYPE (exp
), exp
, target
);
12614 case ALTIVEC_BUILTIN_VEC_SET_V4SI
:
12615 case ALTIVEC_BUILTIN_VEC_SET_V8HI
:
12616 case ALTIVEC_BUILTIN_VEC_SET_V16QI
:
12617 case ALTIVEC_BUILTIN_VEC_SET_V4SF
:
12618 case VSX_BUILTIN_VEC_SET_V2DF
:
12619 case VSX_BUILTIN_VEC_SET_V2DI
:
12620 return altivec_expand_vec_set_builtin (exp
);
12622 case ALTIVEC_BUILTIN_VEC_EXT_V4SI
:
12623 case ALTIVEC_BUILTIN_VEC_EXT_V8HI
:
12624 case ALTIVEC_BUILTIN_VEC_EXT_V16QI
:
12625 case ALTIVEC_BUILTIN_VEC_EXT_V4SF
:
12626 case VSX_BUILTIN_VEC_EXT_V2DF
:
12627 case VSX_BUILTIN_VEC_EXT_V2DI
:
12628 return altivec_expand_vec_ext_builtin (exp
, target
);
12632 /* Fall through. */
12635 /* Expand abs* operations. */
12637 for (i
= 0; i
< ARRAY_SIZE (bdesc_abs
); i
++, d
++)
12638 if (d
->code
== fcode
)
12639 return altivec_expand_abs_builtin (d
->icode
, exp
, target
);
12641 /* Expand the AltiVec predicates. */
12642 d
= bdesc_altivec_preds
;
12643 for (i
= 0; i
< ARRAY_SIZE (bdesc_altivec_preds
); i
++, d
++)
12644 if (d
->code
== fcode
)
12645 return altivec_expand_predicate_builtin (d
->icode
, exp
, target
);
12647 /* LV* are funky. We initialized them differently. */
12650 case ALTIVEC_BUILTIN_LVSL
:
12651 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl
,
12652 exp
, target
, false);
12653 case ALTIVEC_BUILTIN_LVSR
:
12654 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr
,
12655 exp
, target
, false);
12656 case ALTIVEC_BUILTIN_LVEBX
:
12657 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx
,
12658 exp
, target
, false);
12659 case ALTIVEC_BUILTIN_LVEHX
:
12660 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx
,
12661 exp
, target
, false);
12662 case ALTIVEC_BUILTIN_LVEWX
:
12663 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx
,
12664 exp
, target
, false);
12665 case ALTIVEC_BUILTIN_LVXL
:
12666 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl
,
12667 exp
, target
, false);
12668 case ALTIVEC_BUILTIN_LVX
:
12669 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si
,
12670 exp
, target
, false);
12671 case ALTIVEC_BUILTIN_LVLX
:
12672 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx
,
12673 exp
, target
, true);
12674 case ALTIVEC_BUILTIN_LVLXL
:
12675 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl
,
12676 exp
, target
, true);
12677 case ALTIVEC_BUILTIN_LVRX
:
12678 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx
,
12679 exp
, target
, true);
12680 case ALTIVEC_BUILTIN_LVRXL
:
12681 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl
,
12682 exp
, target
, true);
12683 case VSX_BUILTIN_LXVD2X_V2DF
:
12684 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df
,
12685 exp
, target
, false);
12686 case VSX_BUILTIN_LXVD2X_V2DI
:
12687 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di
,
12688 exp
, target
, false);
12689 case VSX_BUILTIN_LXVW4X_V4SF
:
12690 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf
,
12691 exp
, target
, false);
12692 case VSX_BUILTIN_LXVW4X_V4SI
:
12693 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si
,
12694 exp
, target
, false);
12695 case VSX_BUILTIN_LXVW4X_V8HI
:
12696 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi
,
12697 exp
, target
, false);
12698 case VSX_BUILTIN_LXVW4X_V16QI
:
12699 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi
,
12700 exp
, target
, false);
12704 /* Fall through. */
12707 *expandedp
= false;
12711 /* Expand the builtin in EXP and store the result in TARGET. Store
12712 true in *EXPANDEDP if we found a builtin to expand. */
12714 paired_expand_builtin (tree exp
, rtx target
, bool * expandedp
)
12716 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
12717 enum rs6000_builtins fcode
= (enum rs6000_builtins
) DECL_FUNCTION_CODE (fndecl
);
12718 const struct builtin_description
*d
;
12725 case PAIRED_BUILTIN_STX
:
12726 return paired_expand_stv_builtin (CODE_FOR_paired_stx
, exp
);
12727 case PAIRED_BUILTIN_LX
:
12728 return paired_expand_lv_builtin (CODE_FOR_paired_lx
, exp
, target
);
12731 /* Fall through. */
12734 /* Expand the paired predicates. */
12735 d
= bdesc_paired_preds
;
12736 for (i
= 0; i
< ARRAY_SIZE (bdesc_paired_preds
); i
++, d
++)
12737 if (d
->code
== fcode
)
12738 return paired_expand_predicate_builtin (d
->icode
, exp
, target
);
12740 *expandedp
= false;
/* Binops that need to be initialized manually, but can be expanded
   automagically by rs6000_expand_binop_builtin.  */
static const struct builtin_description bdesc_2arg_spe[] =
{
  { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
  { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
  { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
};
12772 /* Expand the builtin in EXP and store the result in TARGET. Store
12773 true in *EXPANDEDP if we found a builtin to expand.
12775 This expands the SPE builtins that are not simple unary and binary
12778 spe_expand_builtin (tree exp
, rtx target
, bool *expandedp
)
12780 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
12782 enum rs6000_builtins fcode
= (enum rs6000_builtins
) DECL_FUNCTION_CODE (fndecl
);
12783 enum insn_code icode
;
12784 enum machine_mode tmode
, mode0
;
12786 const struct builtin_description
*d
;
12791 /* Syntax check for a 5-bit unsigned immediate. */
12794 case SPE_BUILTIN_EVSTDD
:
12795 case SPE_BUILTIN_EVSTDH
:
12796 case SPE_BUILTIN_EVSTDW
:
12797 case SPE_BUILTIN_EVSTWHE
:
12798 case SPE_BUILTIN_EVSTWHO
:
12799 case SPE_BUILTIN_EVSTWWE
:
12800 case SPE_BUILTIN_EVSTWWO
:
12801 arg1
= CALL_EXPR_ARG (exp
, 2);
12802 if (TREE_CODE (arg1
) != INTEGER_CST
12803 || TREE_INT_CST_LOW (arg1
) & ~0x1f)
12805 error ("argument 2 must be a 5-bit unsigned literal");
12813 /* The evsplat*i instructions are not quite generic. */
12816 case SPE_BUILTIN_EVSPLATFI
:
12817 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi
,
12819 case SPE_BUILTIN_EVSPLATI
:
12820 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati
,
12826 d
= bdesc_2arg_spe
;
12827 for (i
= 0; i
< ARRAY_SIZE (bdesc_2arg_spe
); ++i
, ++d
)
12828 if (d
->code
== fcode
)
12829 return rs6000_expand_binop_builtin (d
->icode
, exp
, target
);
12831 d
= bdesc_spe_predicates
;
12832 for (i
= 0; i
< ARRAY_SIZE (bdesc_spe_predicates
); ++i
, ++d
)
12833 if (d
->code
== fcode
)
12834 return spe_expand_predicate_builtin (d
->icode
, exp
, target
);
12836 d
= bdesc_spe_evsel
;
12837 for (i
= 0; i
< ARRAY_SIZE (bdesc_spe_evsel
); ++i
, ++d
)
12838 if (d
->code
== fcode
)
12839 return spe_expand_evsel_builtin (d
->icode
, exp
, target
);
12843 case SPE_BUILTIN_EVSTDDX
:
12844 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx
, exp
);
12845 case SPE_BUILTIN_EVSTDHX
:
12846 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx
, exp
);
12847 case SPE_BUILTIN_EVSTDWX
:
12848 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx
, exp
);
12849 case SPE_BUILTIN_EVSTWHEX
:
12850 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex
, exp
);
12851 case SPE_BUILTIN_EVSTWHOX
:
12852 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox
, exp
);
12853 case SPE_BUILTIN_EVSTWWEX
:
12854 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex
, exp
);
12855 case SPE_BUILTIN_EVSTWWOX
:
12856 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox
, exp
);
12857 case SPE_BUILTIN_EVSTDD
:
12858 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd
, exp
);
12859 case SPE_BUILTIN_EVSTDH
:
12860 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh
, exp
);
12861 case SPE_BUILTIN_EVSTDW
:
12862 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw
, exp
);
12863 case SPE_BUILTIN_EVSTWHE
:
12864 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe
, exp
);
12865 case SPE_BUILTIN_EVSTWHO
:
12866 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho
, exp
);
12867 case SPE_BUILTIN_EVSTWWE
:
12868 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe
, exp
);
12869 case SPE_BUILTIN_EVSTWWO
:
12870 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo
, exp
);
12871 case SPE_BUILTIN_MFSPEFSCR
:
12872 icode
= CODE_FOR_spe_mfspefscr
;
12873 tmode
= insn_data
[icode
].operand
[0].mode
;
12876 || GET_MODE (target
) != tmode
12877 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
12878 target
= gen_reg_rtx (tmode
);
12880 pat
= GEN_FCN (icode
) (target
);
12885 case SPE_BUILTIN_MTSPEFSCR
:
12886 icode
= CODE_FOR_spe_mtspefscr
;
12887 arg0
= CALL_EXPR_ARG (exp
, 0);
12888 op0
= expand_normal (arg0
);
12889 mode0
= insn_data
[icode
].operand
[0].mode
;
12891 if (arg0
== error_mark_node
)
12894 if (! (*insn_data
[icode
].operand
[0].predicate
) (op0
, mode0
))
12895 op0
= copy_to_mode_reg (mode0
, op0
);
12897 pat
= GEN_FCN (icode
) (op0
);
12905 *expandedp
= false;
12910 paired_expand_predicate_builtin (enum insn_code icode
, tree exp
, rtx target
)
12912 rtx pat
, scratch
, tmp
;
12913 tree form
= CALL_EXPR_ARG (exp
, 0);
12914 tree arg0
= CALL_EXPR_ARG (exp
, 1);
12915 tree arg1
= CALL_EXPR_ARG (exp
, 2);
12916 rtx op0
= expand_normal (arg0
);
12917 rtx op1
= expand_normal (arg1
);
12918 enum machine_mode mode0
= insn_data
[icode
].operand
[1].mode
;
12919 enum machine_mode mode1
= insn_data
[icode
].operand
[2].mode
;
12921 enum rtx_code code
;
12923 if (TREE_CODE (form
) != INTEGER_CST
)
12925 error ("argument 1 of __builtin_paired_predicate must be a constant");
12929 form_int
= TREE_INT_CST_LOW (form
);
12931 gcc_assert (mode0
== mode1
);
12933 if (arg0
== error_mark_node
|| arg1
== error_mark_node
)
12937 || GET_MODE (target
) != SImode
12938 || !(*insn_data
[icode
].operand
[0].predicate
) (target
, SImode
))
12939 target
= gen_reg_rtx (SImode
);
12940 if (!(*insn_data
[icode
].operand
[1].predicate
) (op0
, mode0
))
12941 op0
= copy_to_mode_reg (mode0
, op0
);
12942 if (!(*insn_data
[icode
].operand
[2].predicate
) (op1
, mode1
))
12943 op1
= copy_to_mode_reg (mode1
, op1
);
12945 scratch
= gen_reg_rtx (CCFPmode
);
12947 pat
= GEN_FCN (icode
) (scratch
, op0
, op1
);
12969 emit_insn (gen_move_from_CR_ov_bit (target
, scratch
));
12972 error ("argument 1 of __builtin_paired_predicate is out of range");
12976 tmp
= gen_rtx_fmt_ee (code
, SImode
, scratch
, const0_rtx
);
12977 emit_move_insn (target
, tmp
);
static rtx
spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_spe_predicate must be a constant");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  /* There are 4 variants for each predicate: _any_, _all_, _upper_,
     _lower_.  We use one compare, but look in different bits of the
     CR for each variant.

     There are 2 elements in each SPE simd type (upper/lower).  The CR
     bits are set as follows:

     BIT0  | BIT 1  | BIT 2   | BIT 3
     U     |   L    | (U | L) | (U & L)

     So, for an "all" relationship, BIT 3 would be set.
     For an "any" relationship, BIT 2 would be set.  Etc.

     Following traditional nomenclature, these bits map to:

     BIT0  | BIT 1  | BIT 2   | BIT 3
     LT    | GT     | EQ      | OV

     Later, we will generate rtl to look in the LT/EQ/EQ/OV bits.
  */

  switch (form_int)
    {
      /* All variant.  OV bit.  */
    case 0:
      /* We need to get to the OV bit, which is the ORDERED bit.  We
	 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
	 that's ugly and will make validate_condition_mode die.
	 So let's just use another pattern.  */
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
      /* Any variant.  EQ bit.  */
    case 1:
      code = EQ;
      break;
      /* Upper variant.  LT bit.  */
    case 2:
      code = LT;
      break;
      /* Lower variant.  GT bit.  */
    case 3:
      code = GT;
      break;
    default:
      error ("argument 1 of __builtin_spe_predicate is out of range");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);

  return target;
}
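/* Worked example of the table above: if the compare is true for the upper
   element only, then U = 1 and L = 0, so BIT 2 (U | L) is set while BIT 3
   (U & L) is clear -- the "any" form succeeds and the "all" form fails.
   The switch above reads those outcomes from the EQ and OV positions of
   the CR field.  */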
/* The evsel builtins look like this:

     e = __builtin_spe_evsel_OP (a, b, c, d);

   and work like this:

     e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
     e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
*/

static rtx
spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  tree arg3 = CALL_EXPR_ARG (exp, 3);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx op3 = expand_normal (arg3);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node
      || arg2 == error_mark_node || arg3 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != mode0
      || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
    target = gen_reg_rtx (mode0);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode0, op1);
  if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
    op2 = copy_to_mode_reg (mode0, op2);
  if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
    op3 = copy_to_mode_reg (mode0, op3);

  /* Generate the compare.  */
  scratch = gen_reg_rtx (CCmode);
  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  if (mode0 == V2SImode)
    emit_insn (gen_spe_evsel (target, op2, op3, scratch));
  else
    emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));

  return target;
}
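/* Worked example of the semantics above, with OP standing for a signed
   greater-than compare (the concrete builtin names come from
   bdesc_spe_evsel): if a = {5, 1}, b = {3, 2}, c = {10, 20} and
   d = {30, 40}, then e = __builtin_spe_evsel_OP (a, b, c, d) gives
   e[upper] = c[upper] = 10 (since 5 > 3 holds) and
   e[lower] = d[lower] = 40 (since 1 > 2 does not).  */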
/* Raise an error message for a builtin function that is called without the
   appropriate target options being set.  */

static void
rs6000_invalid_builtin (enum rs6000_builtins fncode)
{
  size_t uns_fncode = (size_t)fncode;
  const char *name = rs6000_builtin_info[uns_fncode].name;
  HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;

  gcc_assert (name != NULL);
  if ((fnmask & RS6000_BTM_CELL) != 0)
    error ("Builtin function %s is only valid for the cell processor", name);
  else if ((fnmask & RS6000_BTM_VSX) != 0)
    error ("Builtin function %s requires the -mvsx option", name);
  else if ((fnmask & RS6000_BTM_HTM) != 0)
    error ("Builtin function %s requires the -mhtm option", name);
  else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
    error ("Builtin function %s requires the -maltivec option", name);
  else if ((fnmask & RS6000_BTM_PAIRED) != 0)
    error ("Builtin function %s requires the -mpaired option", name);
  else if ((fnmask & RS6000_BTM_SPE) != 0)
    error ("Builtin function %s requires the -mspe option", name);
  else
    error ("Builtin function %s is not supported with the current options",
	   name);
}
13167 /* Expand an expression EXP that calls a built-in function,
13168 with result going to TARGET if that's convenient
13169 (and in mode MODE if that's convenient).
13170 SUBTARGET may be used as the target for computing one of EXP's operands.
13171 IGNORE is nonzero if the value is to be ignored. */
13174 rs6000_expand_builtin (tree exp
, rtx target
, rtx subtarget ATTRIBUTE_UNUSED
,
13175 enum machine_mode mode ATTRIBUTE_UNUSED
,
13176 int ignore ATTRIBUTE_UNUSED
)
13178 tree fndecl
= TREE_OPERAND (CALL_EXPR_FN (exp
), 0);
13179 enum rs6000_builtins fcode
13180 = (enum rs6000_builtins
)DECL_FUNCTION_CODE (fndecl
);
13181 size_t uns_fcode
= (size_t)fcode
;
13182 const struct builtin_description
*d
;
13186 HOST_WIDE_INT mask
= rs6000_builtin_info
[uns_fcode
].mask
;
13187 bool func_valid_p
= ((rs6000_builtin_mask
& mask
) == mask
);
13189 if (TARGET_DEBUG_BUILTIN
)
13191 enum insn_code icode
= rs6000_builtin_info
[uns_fcode
].icode
;
13192 const char *name1
= rs6000_builtin_info
[uns_fcode
].name
;
13193 const char *name2
= ((icode
!= CODE_FOR_nothing
)
13194 ? get_insn_name ((int)icode
)
13198 switch (rs6000_builtin_info
[uns_fcode
].attr
& RS6000_BTC_TYPE_MASK
)
13200 default: name3
= "unknown"; break;
13201 case RS6000_BTC_SPECIAL
: name3
= "special"; break;
13202 case RS6000_BTC_UNARY
: name3
= "unary"; break;
13203 case RS6000_BTC_BINARY
: name3
= "binary"; break;
13204 case RS6000_BTC_TERNARY
: name3
= "ternary"; break;
13205 case RS6000_BTC_PREDICATE
: name3
= "predicate"; break;
13206 case RS6000_BTC_ABS
: name3
= "abs"; break;
13207 case RS6000_BTC_EVSEL
: name3
= "evsel"; break;
13208 case RS6000_BTC_DST
: name3
= "dst"; break;
13213 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
13214 (name1
) ? name1
: "---", fcode
,
13215 (name2
) ? name2
: "---", (int)icode
,
13217 func_valid_p
? "" : ", not valid");
13222 rs6000_invalid_builtin (fcode
);
13224 /* Given it is invalid, just generate a normal call. */
13225 return expand_call (exp
, target
, ignore
);
13230 case RS6000_BUILTIN_RECIP
:
13231 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3
, exp
, target
);
13233 case RS6000_BUILTIN_RECIPF
:
13234 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3
, exp
, target
);
13236 case RS6000_BUILTIN_RSQRTF
:
13237 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2
, exp
, target
);
13239 case RS6000_BUILTIN_RSQRT
:
13240 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2
, exp
, target
);
13242 case POWER7_BUILTIN_BPERMD
:
13243 return rs6000_expand_binop_builtin (((TARGET_64BIT
)
13244 ? CODE_FOR_bpermd_di
13245 : CODE_FOR_bpermd_si
), exp
, target
);
13247 case RS6000_BUILTIN_GET_TB
:
13248 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase
,
13251 case RS6000_BUILTIN_MFTB
:
13252 return rs6000_expand_zeroop_builtin (((TARGET_64BIT
)
13253 ? CODE_FOR_rs6000_mftb_di
13254 : CODE_FOR_rs6000_mftb_si
),
13257 case ALTIVEC_BUILTIN_MASK_FOR_LOAD
:
13258 case ALTIVEC_BUILTIN_MASK_FOR_STORE
:
13260 int icode
= (BYTES_BIG_ENDIAN
? (int) CODE_FOR_altivec_lvsr
13261 : (int) CODE_FOR_altivec_lvsl
);
13262 enum machine_mode tmode
= insn_data
[icode
].operand
[0].mode
;
13263 enum machine_mode mode
= insn_data
[icode
].operand
[1].mode
;
13267 gcc_assert (TARGET_ALTIVEC
);
13269 arg
= CALL_EXPR_ARG (exp
, 0);
13270 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg
)));
13271 op
= expand_expr (arg
, NULL_RTX
, Pmode
, EXPAND_NORMAL
);
13272 addr
= memory_address (mode
, op
);
13273 if (fcode
== ALTIVEC_BUILTIN_MASK_FOR_STORE
)
13277 /* For the load case need to negate the address. */
13278 op
= gen_reg_rtx (GET_MODE (addr
));
13279 emit_insn (gen_rtx_SET (VOIDmode
, op
,
13280 gen_rtx_NEG (GET_MODE (addr
), addr
)));
13282 op
= gen_rtx_MEM (mode
, op
);
13285 || GET_MODE (target
) != tmode
13286 || ! (*insn_data
[icode
].operand
[0].predicate
) (target
, tmode
))
13287 target
= gen_reg_rtx (tmode
);
13289 /*pat = gen_altivec_lvsr (target, op);*/
13290 pat
= GEN_FCN (icode
) (target
, op
);
13298 case ALTIVEC_BUILTIN_VCFUX
:
13299 case ALTIVEC_BUILTIN_VCFSX
:
13300 case ALTIVEC_BUILTIN_VCTUXS
:
13301 case ALTIVEC_BUILTIN_VCTSXS
:
13302 /* FIXME: There's got to be a nicer way to handle this case than
13303 constructing a new CALL_EXPR. */
13304 if (call_expr_nargs (exp
) == 1)
13306 exp
= build_call_nary (TREE_TYPE (exp
), CALL_EXPR_FN (exp
),
13307 2, CALL_EXPR_ARG (exp
, 0), integer_zero_node
);
13315 if (TARGET_ALTIVEC
)
13317 ret
= altivec_expand_builtin (exp
, target
, &success
);
13324 ret
= spe_expand_builtin (exp
, target
, &success
);
13329 if (TARGET_PAIRED_FLOAT
)
13331 ret
= paired_expand_builtin (exp
, target
, &success
);
13338 ret
= htm_expand_builtin (exp
, target
, &success
);
13344 gcc_assert (TARGET_ALTIVEC
|| TARGET_VSX
|| TARGET_SPE
|| TARGET_PAIRED_FLOAT
);
13346 /* Handle simple unary operations. */
13348 for (i
= 0; i
< ARRAY_SIZE (bdesc_1arg
); i
++, d
++)
13349 if (d
->code
== fcode
)
13350 return rs6000_expand_unop_builtin (d
->icode
, exp
, target
);
13352 /* Handle simple binary operations. */
13354 for (i
= 0; i
< ARRAY_SIZE (bdesc_2arg
); i
++, d
++)
13355 if (d
->code
== fcode
)
13356 return rs6000_expand_binop_builtin (d
->icode
, exp
, target
);
13358 /* Handle simple ternary operations. */
13360 for (i
= 0; i
< ARRAY_SIZE (bdesc_3arg
); i
++, d
++)
13361 if (d
->code
== fcode
)
13362 return rs6000_expand_ternop_builtin (d
->icode
, exp
, target
);
13364 gcc_unreachable ();
13368 rs6000_init_builtins (void)
13372 enum machine_mode mode
;
13374 if (TARGET_DEBUG_BUILTIN
)
13375 fprintf (stderr
, "rs6000_init_builtins%s%s%s%s\n",
13376 (TARGET_PAIRED_FLOAT
) ? ", paired" : "",
13377 (TARGET_SPE
) ? ", spe" : "",
13378 (TARGET_ALTIVEC
) ? ", altivec" : "",
13379 (TARGET_VSX
) ? ", vsx" : "");
13381 V2SI_type_node
= build_vector_type (intSI_type_node
, 2);
13382 V2SF_type_node
= build_vector_type (float_type_node
, 2);
13383 V2DI_type_node
= build_vector_type (intDI_type_node
, 2);
13384 V2DF_type_node
= build_vector_type (double_type_node
, 2);
13385 V4HI_type_node
= build_vector_type (intHI_type_node
, 4);
13386 V4SI_type_node
= build_vector_type (intSI_type_node
, 4);
13387 V4SF_type_node
= build_vector_type (float_type_node
, 4);
13388 V8HI_type_node
= build_vector_type (intHI_type_node
, 8);
13389 V16QI_type_node
= build_vector_type (intQI_type_node
, 16);
13391 unsigned_V16QI_type_node
= build_vector_type (unsigned_intQI_type_node
, 16);
13392 unsigned_V8HI_type_node
= build_vector_type (unsigned_intHI_type_node
, 8);
13393 unsigned_V4SI_type_node
= build_vector_type (unsigned_intSI_type_node
, 4);
13394 unsigned_V2DI_type_node
= build_vector_type (unsigned_intDI_type_node
, 2);
13396 opaque_V2SF_type_node
= build_opaque_vector_type (float_type_node
, 2);
13397 opaque_V2SI_type_node
= build_opaque_vector_type (intSI_type_node
, 2);
13398 opaque_p_V2SI_type_node
= build_pointer_type (opaque_V2SI_type_node
);
13399 opaque_V4SI_type_node
= build_opaque_vector_type (intSI_type_node
, 4);
13401 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
13402 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
13403 'vector unsigned short'. */
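/* Illustrative sketch of why the distinct copies matter (assuming the usual
   C++ overloading of the AltiVec vec_* intrinsics): overloads such as

       vector bool int     foo (vector bool int);
       vector unsigned int foo (vector unsigned int);

   can only be told apart if 'vector bool int' and 'vector unsigned int'
   are different types, even though both are built from 32-bit unsigned
   elements.  */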
13405 bool_char_type_node
= build_distinct_type_copy (unsigned_intQI_type_node
);
13406 bool_short_type_node
= build_distinct_type_copy (unsigned_intHI_type_node
);
13407 bool_int_type_node
= build_distinct_type_copy (unsigned_intSI_type_node
);
13408 bool_long_type_node
= build_distinct_type_copy (unsigned_intDI_type_node
);
13409 pixel_type_node
= build_distinct_type_copy (unsigned_intHI_type_node
);
13411 long_integer_type_internal_node
= long_integer_type_node
;
13412 long_unsigned_type_internal_node
= long_unsigned_type_node
;
13413 long_long_integer_type_internal_node
= long_long_integer_type_node
;
13414 long_long_unsigned_type_internal_node
= long_long_unsigned_type_node
;
13415 intQI_type_internal_node
= intQI_type_node
;
13416 uintQI_type_internal_node
= unsigned_intQI_type_node
;
13417 intHI_type_internal_node
= intHI_type_node
;
13418 uintHI_type_internal_node
= unsigned_intHI_type_node
;
13419 intSI_type_internal_node
= intSI_type_node
;
13420 uintSI_type_internal_node
= unsigned_intSI_type_node
;
13421 intDI_type_internal_node
= intDI_type_node
;
13422 uintDI_type_internal_node
= unsigned_intDI_type_node
;
13423 float_type_internal_node
= float_type_node
;
13424 double_type_internal_node
= double_type_node
;
13425 void_type_internal_node
= void_type_node
;
13427 /* Initialize the modes for builtin_function_type, mapping a machine mode to
13429 builtin_mode_to_type
[QImode
][0] = integer_type_node
;
13430 builtin_mode_to_type
[HImode
][0] = integer_type_node
;
13431 builtin_mode_to_type
[SImode
][0] = intSI_type_node
;
13432 builtin_mode_to_type
[SImode
][1] = unsigned_intSI_type_node
;
13433 builtin_mode_to_type
[DImode
][0] = intDI_type_node
;
13434 builtin_mode_to_type
[DImode
][1] = unsigned_intDI_type_node
;
13435 builtin_mode_to_type
[SFmode
][0] = float_type_node
;
13436 builtin_mode_to_type
[DFmode
][0] = double_type_node
;
13437 builtin_mode_to_type
[V2SImode
][0] = V2SI_type_node
;
13438 builtin_mode_to_type
[V2SFmode
][0] = V2SF_type_node
;
13439 builtin_mode_to_type
[V2DImode
][0] = V2DI_type_node
;
13440 builtin_mode_to_type
[V2DImode
][1] = unsigned_V2DI_type_node
;
13441 builtin_mode_to_type
[V2DFmode
][0] = V2DF_type_node
;
13442 builtin_mode_to_type
[V4HImode
][0] = V4HI_type_node
;
13443 builtin_mode_to_type
[V4SImode
][0] = V4SI_type_node
;
13444 builtin_mode_to_type
[V4SImode
][1] = unsigned_V4SI_type_node
;
13445 builtin_mode_to_type
[V4SFmode
][0] = V4SF_type_node
;
13446 builtin_mode_to_type
[V8HImode
][0] = V8HI_type_node
;
13447 builtin_mode_to_type
[V8HImode
][1] = unsigned_V8HI_type_node
;
13448 builtin_mode_to_type
[V16QImode
][0] = V16QI_type_node
;
13449 builtin_mode_to_type
[V16QImode
][1] = unsigned_V16QI_type_node
;
13451 tdecl
= add_builtin_type ("__bool char", bool_char_type_node
);
13452 TYPE_NAME (bool_char_type_node
) = tdecl
;
13454 tdecl
= add_builtin_type ("__bool short", bool_short_type_node
);
13455 TYPE_NAME (bool_short_type_node
) = tdecl
;
13457 tdecl
= add_builtin_type ("__bool int", bool_int_type_node
);
13458 TYPE_NAME (bool_int_type_node
) = tdecl
;
13460 tdecl
= add_builtin_type ("__pixel", pixel_type_node
);
13461 TYPE_NAME (pixel_type_node
) = tdecl
;
13463 bool_V16QI_type_node
= build_vector_type (bool_char_type_node
, 16);
13464 bool_V8HI_type_node
= build_vector_type (bool_short_type_node
, 8);
13465 bool_V4SI_type_node
= build_vector_type (bool_int_type_node
, 4);
13466 bool_V2DI_type_node
= build_vector_type (bool_long_type_node
, 2);
13467 pixel_V8HI_type_node
= build_vector_type (pixel_type_node
, 8);
13469 tdecl
= add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node
);
13470 TYPE_NAME (unsigned_V16QI_type_node
) = tdecl
;
13472 tdecl
= add_builtin_type ("__vector signed char", V16QI_type_node
);
13473 TYPE_NAME (V16QI_type_node
) = tdecl
;
13475 tdecl
= add_builtin_type ("__vector __bool char", bool_V16QI_type_node
);
13476 TYPE_NAME ( bool_V16QI_type_node
) = tdecl
;
13478 tdecl
= add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node
);
13479 TYPE_NAME (unsigned_V8HI_type_node
) = tdecl
;
13481 tdecl
= add_builtin_type ("__vector signed short", V8HI_type_node
);
13482 TYPE_NAME (V8HI_type_node
) = tdecl
;
13484 tdecl
= add_builtin_type ("__vector __bool short", bool_V8HI_type_node
);
13485 TYPE_NAME (bool_V8HI_type_node
) = tdecl
;
13487 tdecl
= add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node
);
13488 TYPE_NAME (unsigned_V4SI_type_node
) = tdecl
;
13490 tdecl
= add_builtin_type ("__vector signed int", V4SI_type_node
);
13491 TYPE_NAME (V4SI_type_node
) = tdecl
;
13493 tdecl
= add_builtin_type ("__vector __bool int", bool_V4SI_type_node
);
13494 TYPE_NAME (bool_V4SI_type_node
) = tdecl
;
13496 tdecl
= add_builtin_type ("__vector float", V4SF_type_node
);
13497 TYPE_NAME (V4SF_type_node
) = tdecl
;
13499 tdecl
= add_builtin_type ("__vector __pixel", pixel_V8HI_type_node
);
13500 TYPE_NAME (pixel_V8HI_type_node
) = tdecl
;
13502 tdecl
= add_builtin_type ("__vector double", V2DF_type_node
);
13503 TYPE_NAME (V2DF_type_node
) = tdecl
;
13505 tdecl
= add_builtin_type ("__vector long", V2DI_type_node
);
13506 TYPE_NAME (V2DI_type_node
) = tdecl
;
13508 tdecl
= add_builtin_type ("__vector unsigned long", unsigned_V2DI_type_node
);
13509 TYPE_NAME (unsigned_V2DI_type_node
) = tdecl
;
13511 tdecl
= add_builtin_type ("__vector __bool long", bool_V2DI_type_node
);
13512 TYPE_NAME (bool_V2DI_type_node
) = tdecl
;
13514 /* Paired and SPE builtins are only available if you build a compiler with
13515 the appropriate options, so only create those builtins with the
13516 appropriate compiler option. Create Altivec and VSX builtins on machines
13517 with at least the general purpose extensions (970 and newer) to allow the
13518 use of the target attribute. */
13519 if (TARGET_PAIRED_FLOAT
)
13520 paired_init_builtins ();
13522 spe_init_builtins ();
13523 if (TARGET_EXTRA_BUILTINS
)
13524 altivec_init_builtins ();
13526 htm_init_builtins ();
13528 if (TARGET_EXTRA_BUILTINS
|| TARGET_SPE
|| TARGET_PAIRED_FLOAT
)
13529 rs6000_common_init_builtins ();
13531 ftype
= builtin_function_type (DFmode
, DFmode
, DFmode
, VOIDmode
,
13532 RS6000_BUILTIN_RECIP
, "__builtin_recipdiv");
13533 def_builtin ("__builtin_recipdiv", ftype
, RS6000_BUILTIN_RECIP
);
13535 ftype
= builtin_function_type (SFmode
, SFmode
, SFmode
, VOIDmode
,
13536 RS6000_BUILTIN_RECIPF
, "__builtin_recipdivf");
13537 def_builtin ("__builtin_recipdivf", ftype
, RS6000_BUILTIN_RECIPF
);
13539 ftype
= builtin_function_type (DFmode
, DFmode
, VOIDmode
, VOIDmode
,
13540 RS6000_BUILTIN_RSQRT
, "__builtin_rsqrt");
13541 def_builtin ("__builtin_rsqrt", ftype
, RS6000_BUILTIN_RSQRT
);
13543 ftype
= builtin_function_type (SFmode
, SFmode
, VOIDmode
, VOIDmode
,
13544 RS6000_BUILTIN_RSQRTF
, "__builtin_rsqrtf");
13545 def_builtin ("__builtin_rsqrtf", ftype
, RS6000_BUILTIN_RSQRTF
);
13547 mode
= (TARGET_64BIT
) ? DImode
: SImode
;
13548 ftype
= builtin_function_type (mode
, mode
, mode
, VOIDmode
,
13549 POWER7_BUILTIN_BPERMD
, "__builtin_bpermd");
13550 def_builtin ("__builtin_bpermd", ftype
, POWER7_BUILTIN_BPERMD
);
13552 ftype
= build_function_type_list (unsigned_intDI_type_node
,
13554 def_builtin ("__builtin_ppc_get_timebase", ftype
, RS6000_BUILTIN_GET_TB
);
13557 ftype
= build_function_type_list (unsigned_intDI_type_node
,
13560 ftype
= build_function_type_list (unsigned_intSI_type_node
,
13562 def_builtin ("__builtin_ppc_mftb", ftype
, RS6000_BUILTIN_MFTB
);
13565 /* AIX libm provides clog as __clog. */
13566 if ((tdecl
= builtin_decl_explicit (BUILT_IN_CLOG
)) != NULL_TREE
)
13567 set_user_assembler_name (tdecl
, "__clog");
13570 #ifdef SUBTARGET_INIT_BUILTINS
13571 SUBTARGET_INIT_BUILTINS
;
/* Returns the rs6000 builtin decl for CODE.  */

static tree
rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT fnmask;

  if (code >= RS6000_BUILTIN_COUNT)
    return error_mark_node;

  fnmask = rs6000_builtin_info[code].mask;
  if ((fnmask & rs6000_builtin_mask) != fnmask)
    {
      rs6000_invalid_builtin ((enum rs6000_builtins)code);
      return error_mark_node;
    }

  return rs6000_builtin_decls[code];
}
13596 spe_init_builtins (void)
13598 tree puint_type_node
= build_pointer_type (unsigned_type_node
);
13599 tree pushort_type_node
= build_pointer_type (short_unsigned_type_node
);
13600 const struct builtin_description
*d
;
13603 tree v2si_ftype_4_v2si
13604 = build_function_type_list (opaque_V2SI_type_node
,
13605 opaque_V2SI_type_node
,
13606 opaque_V2SI_type_node
,
13607 opaque_V2SI_type_node
,
13608 opaque_V2SI_type_node
,
13611 tree v2sf_ftype_4_v2sf
13612 = build_function_type_list (opaque_V2SF_type_node
,
13613 opaque_V2SF_type_node
,
13614 opaque_V2SF_type_node
,
13615 opaque_V2SF_type_node
,
13616 opaque_V2SF_type_node
,
13619 tree int_ftype_int_v2si_v2si
13620 = build_function_type_list (integer_type_node
,
13622 opaque_V2SI_type_node
,
13623 opaque_V2SI_type_node
,
13626 tree int_ftype_int_v2sf_v2sf
13627 = build_function_type_list (integer_type_node
,
13629 opaque_V2SF_type_node
,
13630 opaque_V2SF_type_node
,
13633 tree void_ftype_v2si_puint_int
13634 = build_function_type_list (void_type_node
,
13635 opaque_V2SI_type_node
,
13640 tree void_ftype_v2si_puint_char
13641 = build_function_type_list (void_type_node
,
13642 opaque_V2SI_type_node
,
13647 tree void_ftype_v2si_pv2si_int
13648 = build_function_type_list (void_type_node
,
13649 opaque_V2SI_type_node
,
13650 opaque_p_V2SI_type_node
,
13654 tree void_ftype_v2si_pv2si_char
13655 = build_function_type_list (void_type_node
,
13656 opaque_V2SI_type_node
,
13657 opaque_p_V2SI_type_node
,
13661 tree void_ftype_int
13662 = build_function_type_list (void_type_node
, integer_type_node
, NULL_TREE
);
13664 tree int_ftype_void
13665 = build_function_type_list (integer_type_node
, NULL_TREE
);
13667 tree v2si_ftype_pv2si_int
13668 = build_function_type_list (opaque_V2SI_type_node
,
13669 opaque_p_V2SI_type_node
,
13673 tree v2si_ftype_puint_int
13674 = build_function_type_list (opaque_V2SI_type_node
,
13679 tree v2si_ftype_pushort_int
13680 = build_function_type_list (opaque_V2SI_type_node
,
13685 tree v2si_ftype_signed_char
13686 = build_function_type_list (opaque_V2SI_type_node
,
13687 signed_char_type_node
,
13690 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node
);
  /* Initialize irregular SPE builtins.  */
  def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
  def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
  def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
  def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
  def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
  def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
  def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
  def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
  def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
  def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
  def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
  def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
  def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
  def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
  def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
  def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
  def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
  def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);

  def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
  def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
  def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
  def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
  def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
  def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
  def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
  def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
  def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
  def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
  def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
  def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
  def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
  def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
  def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
  def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
  def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
  def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
  def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
  def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
  def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
  def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
  /* Predicates.  */
  d = bdesc_spe_predicates;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
    {
      tree type;

      switch (insn_data[d->icode].operand[1].mode)
        {
        case V2SImode:
          type = int_ftype_int_v2si_v2si;
          break;
        case V2SFmode:
          type = int_ftype_int_v2sf_v2sf;
          break;
        default:
          gcc_unreachable ();
        }

      def_builtin (d->name, type, d->code);
    }
  /* Evsel predicates.  */
  d = bdesc_spe_evsel;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
    {
      tree type;

      switch (insn_data[d->icode].operand[1].mode)
        {
        case V2SImode:
          type = v2si_ftype_4_v2si;
          break;
        case V2SFmode:
          type = v2sf_ftype_4_v2sf;
          break;
        default:
          gcc_unreachable ();
        }

      def_builtin (d->name, type, d->code);
    }
}
static void
paired_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;

  tree int_ftype_int_v2sf_v2sf
    = build_function_type_list (integer_type_node,
                                integer_type_node,
                                V2SF_type_node,
                                V2SF_type_node,
                                NULL_TREE);
  tree pcfloat_type_node =
    build_pointer_type (build_qualified_type
                        (float_type_node, TYPE_QUAL_CONST));

  tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
                                                            long_integer_type_node,
                                                            pcfloat_type_node,
                                                            NULL_TREE);
  tree void_ftype_v2sf_long_pcfloat =
    build_function_type_list (void_type_node,
                              V2SF_type_node,
                              long_integer_type_node,
                              pcfloat_type_node,
                              NULL_TREE);

  def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
               PAIRED_BUILTIN_LX);

  def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
               PAIRED_BUILTIN_STX);

  /* Predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
    {
      tree type;

      if (TARGET_DEBUG_BUILTIN)
        fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
                 (int)i, get_insn_name (d->icode), (int)d->icode,
                 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));

      switch (insn_data[d->icode].operand[1].mode)
        {
        case V2SFmode:
          type = int_ftype_int_v2sf_v2sf;
          break;
        default:
          gcc_unreachable ();
        }

      def_builtin (d->name, type, d->code);
    }
}
static void
altivec_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;
  tree ftype;
  tree decl;

  tree pvoid_type_node = build_pointer_type (void_type_node);

  tree pcvoid_type_node
    = build_pointer_type (build_qualified_type (void_type_node,
                                                TYPE_QUAL_CONST));

  tree int_ftype_opaque
    = build_function_type_list (integer_type_node,
                                opaque_V4SI_type_node, NULL_TREE);
  tree opaque_ftype_opaque
    = build_function_type_list (integer_type_node, NULL_TREE);
  tree opaque_ftype_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
                                opaque_V4SI_type_node, integer_type_node, NULL_TREE);
  tree opaque_ftype_opaque_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
                                opaque_V4SI_type_node, opaque_V4SI_type_node,
                                integer_type_node, NULL_TREE);
  tree int_ftype_int_opaque_opaque
    = build_function_type_list (integer_type_node,
                                integer_type_node, opaque_V4SI_type_node,
                                opaque_V4SI_type_node, NULL_TREE);
  tree int_ftype_int_v4si_v4si
    = build_function_type_list (integer_type_node,
                                integer_type_node, V4SI_type_node,
                                V4SI_type_node, NULL_TREE);
  tree int_ftype_int_v2di_v2di
    = build_function_type_list (integer_type_node,
                                integer_type_node, V2DI_type_node,
                                V2DI_type_node, NULL_TREE);
  tree void_ftype_v4si
    = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_void
    = build_function_type_list (V8HI_type_node, NULL_TREE);
  tree void_ftype_void
    = build_function_type_list (void_type_node, NULL_TREE);
  tree void_ftype_int
    = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);

  tree opaque_ftype_long_pcvoid
    = build_function_type_list (opaque_V4SI_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);
  tree v16qi_ftype_long_pcvoid
    = build_function_type_list (V16QI_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);
  tree v8hi_ftype_long_pcvoid
    = build_function_type_list (V8HI_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);
  tree v4si_ftype_long_pcvoid
    = build_function_type_list (V4SI_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);
  tree v4sf_ftype_long_pcvoid
    = build_function_type_list (V4SF_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);
  tree v2df_ftype_long_pcvoid
    = build_function_type_list (V2DF_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);
  tree v2di_ftype_long_pcvoid
    = build_function_type_list (V2DI_type_node,
                                long_integer_type_node, pcvoid_type_node,
                                NULL_TREE);

  tree void_ftype_opaque_long_pvoid
    = build_function_type_list (void_type_node,
                                opaque_V4SI_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);
  tree void_ftype_v4si_long_pvoid
    = build_function_type_list (void_type_node,
                                V4SI_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);
  tree void_ftype_v16qi_long_pvoid
    = build_function_type_list (void_type_node,
                                V16QI_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);
  tree void_ftype_v8hi_long_pvoid
    = build_function_type_list (void_type_node,
                                V8HI_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);
  tree void_ftype_v4sf_long_pvoid
    = build_function_type_list (void_type_node,
                                V4SF_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);
  tree void_ftype_v2df_long_pvoid
    = build_function_type_list (void_type_node,
                                V2DF_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);
  tree void_ftype_v2di_long_pvoid
    = build_function_type_list (void_type_node,
                                V2DI_type_node, long_integer_type_node,
                                pvoid_type_node, NULL_TREE);
  tree int_ftype_int_v8hi_v8hi
    = build_function_type_list (integer_type_node,
                                integer_type_node, V8HI_type_node,
                                V8HI_type_node, NULL_TREE);
  tree int_ftype_int_v16qi_v16qi
    = build_function_type_list (integer_type_node,
                                integer_type_node, V16QI_type_node,
                                V16QI_type_node, NULL_TREE);
  tree int_ftype_int_v4sf_v4sf
    = build_function_type_list (integer_type_node,
                                integer_type_node, V4SF_type_node,
                                V4SF_type_node, NULL_TREE);
  tree int_ftype_int_v2df_v2df
    = build_function_type_list (integer_type_node,
                                integer_type_node, V2DF_type_node,
                                V2DF_type_node, NULL_TREE);
  tree v2di_ftype_v2di
    = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
  tree v4si_ftype_v4si
    = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi
    = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi
    = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf
    = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree v2df_ftype_v2df
    = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
  tree void_ftype_pcvoid_int_int
    = build_function_type_list (void_type_node,
                                pcvoid_type_node, integer_type_node,
                                integer_type_node, NULL_TREE);
  def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
  def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
  def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
  def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
  def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
  def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
  def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
  def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
  def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
  def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
  def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
  def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
  def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
  def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
  def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
  def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
  def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
  def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
  def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
  def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
  def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
  def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
  def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
  def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
  def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
  def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
  def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
  def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
  def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);

  def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
               VSX_BUILTIN_LXVD2X_V2DF);
  def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
               VSX_BUILTIN_LXVD2X_V2DI);
  def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
               VSX_BUILTIN_LXVW4X_V4SF);
  def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
               VSX_BUILTIN_LXVW4X_V4SI);
  def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
               VSX_BUILTIN_LXVW4X_V8HI);
  def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
               VSX_BUILTIN_LXVW4X_V16QI);
  def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
               VSX_BUILTIN_STXVD2X_V2DF);
  def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
               VSX_BUILTIN_STXVD2X_V2DI);
  def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
               VSX_BUILTIN_STXVW4X_V4SF);
  def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
               VSX_BUILTIN_STXVW4X_V4SI);
  def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
               VSX_BUILTIN_STXVW4X_V8HI);
  def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
               VSX_BUILTIN_STXVW4X_V16QI);
  def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
               VSX_BUILTIN_VEC_LD);
  def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
               VSX_BUILTIN_VEC_ST);

  def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
  def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
  def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);

  def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
  def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
  def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
  def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
  def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
  def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
  def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
  def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
  def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
  def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
  def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
  def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);

  /* Cell builtins.  */
  def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
  def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
  def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
  def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);

  def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
  def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
  def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
  def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);

  def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
  def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
  def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
  def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);

  def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
  def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
  def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
  def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
  /* Add the DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
  /* Initialize the predicates.  */
  d = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
    {
      enum machine_mode mode1;
      tree type;

      if (rs6000_overloaded_builtin_p (d->code))
        mode1 = VOIDmode;
      else
        mode1 = insn_data[d->icode].operand[1].mode;

      switch (mode1)
        {
        case VOIDmode:
          type = int_ftype_int_opaque_opaque;
          break;
        case V2DImode:
          type = int_ftype_int_v2di_v2di;
          break;
        case V4SImode:
          type = int_ftype_int_v4si_v4si;
          break;
        case V8HImode:
          type = int_ftype_int_v8hi_v8hi;
          break;
        case V16QImode:
          type = int_ftype_int_v16qi_v16qi;
          break;
        case V4SFmode:
          type = int_ftype_int_v4sf_v4sf;
          break;
        case V2DFmode:
          type = int_ftype_int_v2df_v2df;
          break;
        default:
          gcc_unreachable ();
        }

      def_builtin (d->name, type, d->code);
    }
  /* Initialize the abs* operators.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    {
      enum machine_mode mode0;
      tree type;

      mode0 = insn_data[d->icode].operand[0].mode;

      switch (mode0)
        {
        case V2DImode:
          type = v2di_ftype_v2di;
          break;
        case V4SImode:
          type = v4si_ftype_v4si;
          break;
        case V8HImode:
          type = v8hi_ftype_v8hi;
          break;
        case V16QImode:
          type = v16qi_ftype_v16qi;
          break;
        case V4SFmode:
          type = v4sf_ftype_v4sf;
          break;
        case V2DFmode:
          type = v2df_ftype_v2df;
          break;
        default:
          gcc_unreachable ();
        }

      def_builtin (d->name, type, d->code);
    }
  /* Initialize target builtin that implements
     targetm.vectorize.builtin_mask_for_load.  */

  decl = add_builtin_function ("__builtin_altivec_mask_for_load",
                               v16qi_ftype_long_pcvoid,
                               ALTIVEC_BUILTIN_MASK_FOR_LOAD,
                               BUILT_IN_MD, NULL, NULL_TREE);
  TREE_READONLY (decl) = 1;
  /* Record the decl. Will be used by rs6000_builtin_mask_for_load.  */
  altivec_builtin_mask_for_load = decl;
  /* Access to the vec_init patterns.  */
  ftype = build_function_type_list (V4SI_type_node, integer_type_node,
                                    integer_type_node, integer_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);

  ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
                                    short_integer_type_node,
                                    short_integer_type_node,
                                    short_integer_type_node,
                                    short_integer_type_node,
                                    short_integer_type_node,
                                    short_integer_type_node,
                                    short_integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);

  ftype = build_function_type_list (V16QI_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, char_type_node,
                                    char_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v16qi", ftype,
               ALTIVEC_BUILTIN_VEC_INIT_V16QI);

  ftype = build_function_type_list (V4SF_type_node, float_type_node,
                                    float_type_node, float_type_node,
                                    float_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
  /* VSX builtins.  */
  ftype = build_function_type_list (V2DF_type_node, double_type_node,
                                    double_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);

  ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
                                    intDI_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
  /* Access to the vec_set patterns.  */
  ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
                                    intSI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);

  ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
                                    intHI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);

  ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
                                    intQI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);

  ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
                                    float_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);

  ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
                                    double_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);

  ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
                                    intDI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
  /* Access to the vec_extract patterns.  */
  ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);

  ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);

  ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);

  ftype = build_function_type_list (float_type_node, V4SF_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);

  ftype = build_function_type_list (double_type_node, V2DF_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);

  ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
                                    integer_type_node, NULL_TREE);
  def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
}
static void
htm_init_builtins (void)
{
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
  const struct builtin_description *d;
  size_t i;

  d = bdesc_htm;
  for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
    {
      tree op[MAX_HTM_OPERANDS], type;
      HOST_WIDE_INT mask = d->mask;
      unsigned attr = rs6000_builtin_info[d->code].attr;
      bool void_func = (attr & RS6000_BTC_VOID);
      int attr_args = (attr & RS6000_BTC_TYPE_MASK);
      int nopnds = 0;
      tree argtype = (attr & RS6000_BTC_SPR) ? long_unsigned_type_node
                                             : unsigned_type_node;

      if ((mask & builtin_mask) != mask)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
          continue;
        }

      if (d->name == 0)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
                     (long unsigned) i);
          continue;
        }

      op[nopnds++] = (void_func) ? void_type_node : argtype;

      if (attr_args == RS6000_BTC_UNARY)
        op[nopnds++] = argtype;
      else if (attr_args == RS6000_BTC_BINARY)
        {
          op[nopnds++] = argtype;
          op[nopnds++] = argtype;
        }
      else if (attr_args == RS6000_BTC_TERNARY)
        {
          op[nopnds++] = argtype;
          op[nopnds++] = argtype;
          op[nopnds++] = argtype;
        }

      switch (nopnds)
        {
        case 1:
          type = build_function_type_list (op[0], NULL_TREE);
          break;
        case 2:
          type = build_function_type_list (op[0], op[1], NULL_TREE);
          break;
        case 3:
          type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
          break;
        case 4:
          type = build_function_type_list (op[0], op[1], op[2], op[3],
                                           NULL_TREE);
          break;
        default:
          gcc_unreachable ();
        }

      def_builtin (d->name, type, d->code);
    }
}
/* Hash function for builtin functions with up to 3 arguments and a return
   type.  */
static hashval_t
builtin_hash_function (const void *hash_entry)
{
  unsigned ret = 0;
  int i;
  const struct builtin_hash_struct *bh =
    (const struct builtin_hash_struct *) hash_entry;

  for (i = 0; i < 4; i++)
    {
      ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
      ret = (ret * 2) + bh->uns_p[i];
    }

  return ret;
}
/* Compare builtin hash entries H1 and H2 for equivalence.  */
static int
builtin_hash_eq (const void *h1, const void *h2)
{
  const struct builtin_hash_struct *p1 = (const struct builtin_hash_struct *) h1;
  const struct builtin_hash_struct *p2 = (const struct builtin_hash_struct *) h2;

  return ((p1->mode[0] == p2->mode[0])
          && (p1->mode[1] == p2->mode[1])
          && (p1->mode[2] == p2->mode[2])
          && (p1->mode[3] == p2->mode[3])
          && (p1->uns_p[0] == p2->uns_p[0])
          && (p1->uns_p[1] == p2->uns_p[1])
          && (p1->uns_p[2] == p2->uns_p[2])
          && (p1->uns_p[3] == p2->uns_p[3]));
}
/* Map types for builtin functions with an explicit return type and up to 3
   arguments.  Functions with fewer than 3 arguments use VOIDmode as the type
   of the argument.  */
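/* Illustrative note (not from the original source): a binary vector builtin
   whose insn pattern uses V4SI operands would be passed
   (V4SImode, V4SImode, V4SImode, VOIDmode) here and normally receive the
   signed type v4si_ftype_v4si_v4si, unless it is listed in the switch below
   as needing unsigned arguments or results.  */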
static tree
builtin_function_type (enum machine_mode mode_ret, enum machine_mode mode_arg0,
                       enum machine_mode mode_arg1, enum machine_mode mode_arg2,
                       enum rs6000_builtins builtin, const char *name)
{
  struct builtin_hash_struct h;
  struct builtin_hash_struct *h2;
  void **found;
  int num_args = 3;
  int i;
  tree ret_type = NULL_TREE;
  tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };

  /* Create builtin_hash_table.  */
  if (builtin_hash_table == NULL)
    builtin_hash_table = htab_create_ggc (1500, builtin_hash_function,
                                          builtin_hash_eq, NULL);

  h.type = NULL_TREE;
  h.mode[0] = mode_ret;
  h.mode[1] = mode_arg0;
  h.mode[2] = mode_arg1;
  h.mode[3] = mode_arg2;
  h.uns_p[0] = 0;
  h.uns_p[1] = 0;
  h.uns_p[2] = 0;
  h.uns_p[3] = 0;

  /* If the builtin is a type that produces unsigned results or takes unsigned
     arguments, and it is returned as a decl for the vectorizer (such as
     widening multiplies, permute), make sure the arguments and return value
     are type correct.  */
  switch (builtin)
    {
      /* unsigned 1 argument functions.  */
    case CRYPTO_BUILTIN_VSBOX:
    case P8V_BUILTIN_VGBBD:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      break;

      /* unsigned 2 argument functions.  */
    case ALTIVEC_BUILTIN_VMULEUB_UNS:
    case ALTIVEC_BUILTIN_VMULEUH_UNS:
    case ALTIVEC_BUILTIN_VMULOUB_UNS:
    case ALTIVEC_BUILTIN_VMULOUH_UNS:
    case CRYPTO_BUILTIN_VCIPHER:
    case CRYPTO_BUILTIN_VCIPHERLAST:
    case CRYPTO_BUILTIN_VNCIPHER:
    case CRYPTO_BUILTIN_VNCIPHERLAST:
    case CRYPTO_BUILTIN_VPMSUMB:
    case CRYPTO_BUILTIN_VPMSUMH:
    case CRYPTO_BUILTIN_VPMSUMW:
    case CRYPTO_BUILTIN_VPMSUMD:
    case CRYPTO_BUILTIN_VPMSUM:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      break;

      /* unsigned 3 argument functions.  */
    case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
    case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
    case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
    case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
    case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
    case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
    case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
    case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
    case VSX_BUILTIN_VPERM_16QI_UNS:
    case VSX_BUILTIN_VPERM_8HI_UNS:
    case VSX_BUILTIN_VPERM_4SI_UNS:
    case VSX_BUILTIN_VPERM_2DI_UNS:
    case VSX_BUILTIN_XXSEL_16QI_UNS:
    case VSX_BUILTIN_XXSEL_8HI_UNS:
    case VSX_BUILTIN_XXSEL_4SI_UNS:
    case VSX_BUILTIN_XXSEL_2DI_UNS:
    case CRYPTO_BUILTIN_VPERMXOR:
    case CRYPTO_BUILTIN_VPERMXOR_V2DI:
    case CRYPTO_BUILTIN_VPERMXOR_V4SI:
    case CRYPTO_BUILTIN_VPERMXOR_V8HI:
    case CRYPTO_BUILTIN_VPERMXOR_V16QI:
    case CRYPTO_BUILTIN_VSHASIGMAW:
    case CRYPTO_BUILTIN_VSHASIGMAD:
    case CRYPTO_BUILTIN_VSHASIGMA:
      h.uns_p[0] = 1;
      h.uns_p[1] = 1;
      h.uns_p[2] = 1;
      h.uns_p[3] = 1;
      break;

      /* signed permute functions with unsigned char mask.  */
    case ALTIVEC_BUILTIN_VPERM_16QI:
    case ALTIVEC_BUILTIN_VPERM_8HI:
    case ALTIVEC_BUILTIN_VPERM_4SI:
    case ALTIVEC_BUILTIN_VPERM_4SF:
    case ALTIVEC_BUILTIN_VPERM_2DI:
    case ALTIVEC_BUILTIN_VPERM_2DF:
    case VSX_BUILTIN_VPERM_16QI:
    case VSX_BUILTIN_VPERM_8HI:
    case VSX_BUILTIN_VPERM_4SI:
    case VSX_BUILTIN_VPERM_4SF:
    case VSX_BUILTIN_VPERM_2DI:
    case VSX_BUILTIN_VPERM_2DF:
      h.uns_p[3] = 1;
      break;

      /* unsigned args, signed return.  */
    case VSX_BUILTIN_XVCVUXDDP_UNS:
    case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
      h.uns_p[1] = 1;
      break;

      /* signed args, unsigned return.  */
    case VSX_BUILTIN_XVCVDPUXDS_UNS:
    case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
      h.uns_p[0] = 1;
      break;

    default:
      break;
    }

  /* Figure out how many args are present.  */
  while (num_args > 0 && h.mode[num_args] == VOIDmode)
    num_args--;

  if (num_args == 0)
    fatal_error ("internal error: builtin function %s had no type", name);

  ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
  if (!ret_type && h.uns_p[0])
    ret_type = builtin_mode_to_type[h.mode[0]][0];

  if (!ret_type)
    fatal_error ("internal error: builtin function %s had an unexpected "
                 "return type %s", name, GET_MODE_NAME (h.mode[0]));

  for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
    arg_type[i] = NULL_TREE;

  for (i = 0; i < num_args; i++)
    {
      int m = (int) h.mode[i+1];
      int uns_p = h.uns_p[i+1];

      arg_type[i] = builtin_mode_to_type[m][uns_p];
      if (!arg_type[i] && uns_p)
        arg_type[i] = builtin_mode_to_type[m][0];

      if (!arg_type[i])
        fatal_error ("internal error: builtin function %s, argument %d "
                     "had unexpected argument type %s", name, i,
                     GET_MODE_NAME (m));
    }

  found = htab_find_slot (builtin_hash_table, &h, INSERT);
  if (*found == NULL)
    {
      h2 = ggc_alloc_builtin_hash_struct ();
      *h2 = h;
      *found = (void *)h2;

      h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
                                           arg_type[2], NULL_TREE);
    }

  return ((struct builtin_hash_struct *)(*found))->type;
}
static void
rs6000_common_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;

  tree opaque_ftype_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque = NULL_TREE;
  tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
  tree v2si_ftype_qi = NULL_TREE;
  tree v2si_ftype_v2si_qi = NULL_TREE;
  tree v2si_ftype_int_qi = NULL_TREE;
  HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;

  if (!TARGET_PAIRED_FLOAT)
    {
      builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
      builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
    }

  /* Paired and SPE builtins are only available if you build a compiler with
     the appropriate options, so only create those builtins with the
     appropriate compiler option.  Create Altivec and VSX builtins on machines
     with at least the general purpose extensions (970 and newer) to allow the
     use of the target attribute.  */

  if (TARGET_EXTRA_BUILTINS)
    builtin_mask |= RS6000_BTM_COMMON;

  /* Add the ternary operators.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    {
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
          continue;
        }

      if (rs6000_overloaded_builtin_p (d->code))
        {
          if (! (type = opaque_ftype_opaque_opaque_opaque))
            type = opaque_ftype_opaque_opaque_opaque
              = build_function_type_list (opaque_V4SI_type_node,
                                          opaque_V4SI_type_node,
                                          opaque_V4SI_type_node,
                                          opaque_V4SI_type_node,
                                          NULL_TREE);
        }
      else
        {
          enum insn_code icode = d->icode;
          if (d->name == 0)
            {
              if (TARGET_DEBUG_BUILTIN)
                fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
                         (long unsigned)i);
              continue;
            }

          if (icode == CODE_FOR_nothing)
            {
              if (TARGET_DEBUG_BUILTIN)
                fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
                         d->name);
              continue;
            }

          type = builtin_function_type (insn_data[icode].operand[0].mode,
                                        insn_data[icode].operand[1].mode,
                                        insn_data[icode].operand[2].mode,
                                        insn_data[icode].operand[3].mode,
                                        d->code, d->name);
        }

      def_builtin (d->name, type, d->code);
    }

  /* Add the binary operators.  */
  d = bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    {
      enum machine_mode mode0, mode1, mode2;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
          continue;
        }

      if (rs6000_overloaded_builtin_p (d->code))
        {
          if (! (type = opaque_ftype_opaque_opaque))
            type = opaque_ftype_opaque_opaque
              = build_function_type_list (opaque_V4SI_type_node,
                                          opaque_V4SI_type_node,
                                          opaque_V4SI_type_node,
                                          NULL_TREE);
        }
      else
        {
          enum insn_code icode = d->icode;
          if (d->name == 0)
            {
              if (TARGET_DEBUG_BUILTIN)
                fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
                         (long unsigned)i);
              continue;
            }

          if (icode == CODE_FOR_nothing)
            {
              if (TARGET_DEBUG_BUILTIN)
                fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
                         d->name);
              continue;
            }

          mode0 = insn_data[icode].operand[0].mode;
          mode1 = insn_data[icode].operand[1].mode;
          mode2 = insn_data[icode].operand[2].mode;

          if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
            {
              if (! (type = v2si_ftype_v2si_qi))
                type = v2si_ftype_v2si_qi
                  = build_function_type_list (opaque_V2SI_type_node,
                                              opaque_V2SI_type_node,
                                              char_type_node,
                                              NULL_TREE);
            }

          else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
                   && mode2 == QImode)
            {
              if (! (type = v2si_ftype_int_qi))
                type = v2si_ftype_int_qi
                  = build_function_type_list (opaque_V2SI_type_node,
                                              integer_type_node,
                                              char_type_node,
                                              NULL_TREE);
            }

          else
            type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
                                          d->code, d->name);
        }

      def_builtin (d->name, type, d->code);
    }

  /* Add the simple unary operators.  */
  d = bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    {
      enum machine_mode mode0, mode1;
      tree type;
      HOST_WIDE_INT mask = d->mask;

      if ((mask & builtin_mask) != mask)
        {
          if (TARGET_DEBUG_BUILTIN)
            fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
          continue;
        }

      if (rs6000_overloaded_builtin_p (d->code))
        {
          if (! (type = opaque_ftype_opaque))
            type = opaque_ftype_opaque
              = build_function_type_list (opaque_V4SI_type_node,
                                          opaque_V4SI_type_node,
                                          NULL_TREE);
        }
      else
        {
          enum insn_code icode = d->icode;
          if (d->name == 0)
            {
              if (TARGET_DEBUG_BUILTIN)
                fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
                         (long unsigned)i);
              continue;
            }

          if (icode == CODE_FOR_nothing)
            {
              if (TARGET_DEBUG_BUILTIN)
                fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
                         d->name);
              continue;
            }

          mode0 = insn_data[icode].operand[0].mode;
          mode1 = insn_data[icode].operand[1].mode;

          if (mode0 == V2SImode && mode1 == QImode)
            {
              if (! (type = v2si_ftype_qi))
                type = v2si_ftype_qi
                  = build_function_type_list (opaque_V2SI_type_node,
                                              char_type_node,
                                              NULL_TREE);
            }

          else
            type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
                                          d->code, d->name);
        }

      def_builtin (d->name, type, d->code);
    }
}
static void
rs6000_init_libfuncs (void)
{
  if (!TARGET_IEEEQUAD)
      /* AIX/Darwin/64-bit Linux quad floating point routines.  */
    if (!TARGET_XL_COMPAT)
      {
        set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
        set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
        set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
        set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");

        if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
          {
            set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
            set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
            set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
            set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
            set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
            set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
            set_optab_libfunc (le_optab, TFmode, "__gcc_qle");

            set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
            set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
            set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
            set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
            set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
            set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
            set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
            set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
          }

        if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
          set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
      }
    else
      {
        set_optab_libfunc (add_optab, TFmode, "_xlqadd");
        set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
        set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
        set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
      }
  else
    {
      /* 32-bit SVR4 quad floating point routines.  */

      set_optab_libfunc (add_optab, TFmode, "_q_add");
      set_optab_libfunc (sub_optab, TFmode, "_q_sub");
      set_optab_libfunc (neg_optab, TFmode, "_q_neg");
      set_optab_libfunc (smul_optab, TFmode, "_q_mul");
      set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
      if (TARGET_PPC_GPOPT)
        set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");

      set_optab_libfunc (eq_optab, TFmode, "_q_feq");
      set_optab_libfunc (ne_optab, TFmode, "_q_fne");
      set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
      set_optab_libfunc (ge_optab, TFmode, "_q_fge");
      set_optab_libfunc (lt_optab, TFmode, "_q_flt");
      set_optab_libfunc (le_optab, TFmode, "_q_fle");

      set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
      set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
      set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
      set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
      set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
      set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
      set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
      set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
    }
}
/* Expand a block clear operation, and return 1 if successful.  Return 0
   if we should let the compiler generate normal code.

   operands[0] is the destination
   operands[1] is the length
   operands[3] is the alignment */

int
expand_block_clear (rtx operands[])
{
  rtx orig_dest = operands[0];
  rtx bytes_rtx = operands[1];
  rtx align_rtx = operands[3];
  bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
  HOST_WIDE_INT align;
  HOST_WIDE_INT bytes;
  int offset;
  int clear_bytes;
  int clear_step;

  /* If this is not a fixed size move, just call memcpy */
  if (! constp)
    return 0;

  /* This must be a fixed size alignment  */
  gcc_assert (GET_CODE (align_rtx) == CONST_INT);
  align = INTVAL (align_rtx) * BITS_PER_UNIT;

  /* Anything to clear? */
  bytes = INTVAL (bytes_rtx);
  if (bytes <= 0)
    return 1;

  /* Use the builtin memset after a point, to avoid huge code bloat.
     When optimize_size, avoid any significant code bloat; calling
     memset is about 4 instructions, so allow for one instruction to
     load zero and three to do clearing.  */
  if (TARGET_ALTIVEC && align >= 128)
    clear_step = 16;
  else if (TARGET_POWERPC64 && align >= 32)
    clear_step = 8;
  else if (TARGET_SPE && align >= 64)
    clear_step = 8;
  else
    clear_step = 4;

  if (optimize_size && bytes > 3 * clear_step)
    return 0;
  if (! optimize_size && bytes > 8 * clear_step)
    return 0;
  for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
    {
      enum machine_mode mode = BLKmode;
      rtx dest;

      if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
        {
          clear_bytes = 16;
          mode = V4SImode;
        }
      else if (bytes >= 8 && TARGET_SPE && align >= 64)
        {
          clear_bytes = 8;
          mode = V2SImode;
        }
      else if (bytes >= 8 && TARGET_POWERPC64
               /* 64-bit loads and stores require word-aligned
                  displacements.  */
               && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
        {
          clear_bytes = 8;
          mode = DImode;
        }
      else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
        {                       /* move 4 bytes */
          clear_bytes = 4;
          mode = SImode;
        }
      else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
        {                       /* move 2 bytes */
          clear_bytes = 2;
          mode = HImode;
        }
      else /* move 1 byte at a time */
        {
          clear_bytes = 1;
          mode = QImode;
        }

      dest = adjust_address (orig_dest, mode, offset);

      emit_move_insn (dest, CONST0_RTX (mode));
    }

  return 1;
}
/* Expand a block move operation, and return 1 if successful.  Return 0
   if we should let the compiler generate normal code.

   operands[0] is the destination
   operands[1] is the source
   operands[2] is the length
   operands[3] is the alignment */

#define MAX_MOVE_REG 4

int
expand_block_move (rtx operands[])
{
  rtx orig_dest = operands[0];
  rtx orig_src = operands[1];
  rtx bytes_rtx = operands[2];
  rtx align_rtx = operands[3];
  int constp = (GET_CODE (bytes_rtx) == CONST_INT);
  int align;
  int bytes;
  int offset;
  int move_bytes;
  rtx stores[MAX_MOVE_REG];
  int num_reg = 0;

  /* If this is not a fixed size move, just call memcpy */
  if (! constp)
    return 0;

  /* This must be a fixed size alignment */
  gcc_assert (GET_CODE (align_rtx) == CONST_INT);
  align = INTVAL (align_rtx) * BITS_PER_UNIT;

  /* Anything to move? */
  bytes = INTVAL (bytes_rtx);
  if (bytes <= 0)
    return 1;

  if (bytes > rs6000_block_move_inline_limit)
    return 0;

  for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
    {
      union {
        rtx (*movmemsi) (rtx, rtx, rtx, rtx);
        rtx (*mov) (rtx, rtx);
      } gen_func;
      enum machine_mode mode = BLKmode;
      rtx src, dest;

      /* Altivec first, since it will be faster than a string move
         when it applies, and usually not significantly larger.  */
      if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
        {
          move_bytes = 16;
          mode = V4SImode;
          gen_func.mov = gen_movv4si;
        }
      else if (TARGET_SPE && bytes >= 8 && align >= 64)
        {
          move_bytes = 8;
          mode = V2SImode;
          gen_func.mov = gen_movv2si;
        }
      else if (TARGET_STRING
               && bytes > 24    /* move up to 32 bytes at a time */
               && ! fixed_regs[5]
               && ! fixed_regs[6]
               && ! fixed_regs[7]
               && ! fixed_regs[8]
               && ! fixed_regs[9]
               && ! fixed_regs[10]
               && ! fixed_regs[11]
               && ! fixed_regs[12])
        {
          move_bytes = (bytes > 32) ? 32 : bytes;
          gen_func.movmemsi = gen_movmemsi_8reg;
        }
      else if (TARGET_STRING
               && bytes > 16    /* move up to 24 bytes at a time */
               && ! fixed_regs[5]
               && ! fixed_regs[6]
               && ! fixed_regs[7]
               && ! fixed_regs[8]
               && ! fixed_regs[9]
               && ! fixed_regs[10])
        {
          move_bytes = (bytes > 24) ? 24 : bytes;
          gen_func.movmemsi = gen_movmemsi_6reg;
        }
      else if (TARGET_STRING
               && bytes > 8     /* move up to 16 bytes at a time */
               && ! fixed_regs[5]
               && ! fixed_regs[6]
               && ! fixed_regs[7]
               && ! fixed_regs[8])
        {
          move_bytes = (bytes > 16) ? 16 : bytes;
          gen_func.movmemsi = gen_movmemsi_4reg;
        }
      else if (bytes >= 8 && TARGET_POWERPC64
               /* 64-bit loads and stores require word-aligned
                  displacements.  */
               && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
        {
          move_bytes = 8;
          mode = DImode;
          gen_func.mov = gen_movdi;
        }
      else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
        {                       /* move up to 8 bytes at a time */
          move_bytes = (bytes > 8) ? 8 : bytes;
          gen_func.movmemsi = gen_movmemsi_2reg;
        }
      else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
        {                       /* move 4 bytes */
          move_bytes = 4;
          mode = SImode;
          gen_func.mov = gen_movsi;
        }
      else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
        {                       /* move 2 bytes */
          move_bytes = 2;
          mode = HImode;
          gen_func.mov = gen_movhi;
        }
      else if (TARGET_STRING && bytes > 1)
        {                       /* move up to 4 bytes at a time */
          move_bytes = (bytes > 4) ? 4 : bytes;
          gen_func.movmemsi = gen_movmemsi_1reg;
        }
      else /* move 1 byte at a time */
        {
          move_bytes = 1;
          mode = QImode;
          gen_func.mov = gen_movqi;
        }

      src = adjust_address (orig_src, mode, offset);
      dest = adjust_address (orig_dest, mode, offset);

      if (mode != BLKmode)
        {
          rtx tmp_reg = gen_reg_rtx (mode);

          emit_insn ((*gen_func.mov) (tmp_reg, src));
          stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
        }

      if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
        {
          int i;
          for (i = 0; i < num_reg; i++)
            emit_insn (stores[i]);
          num_reg = 0;
        }

      if (mode == BLKmode)
        {
          /* Move the address into scratch registers.  The movmemsi
             patterns require zero offset.  */
          if (!REG_P (XEXP (src, 0)))
            {
              rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
              src = replace_equiv_address (src, src_reg);
            }
          set_mem_size (src, move_bytes);

          if (!REG_P (XEXP (dest, 0)))
            {
              rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
              dest = replace_equiv_address (dest, dest_reg);
            }
          set_mem_size (dest, move_bytes);

          emit_insn ((*gen_func.movmemsi) (dest, src,
                                           GEN_INT (move_bytes & 31),
                                           align_rtx));
        }
    }

  return 1;
}
/* Return a string to perform a load_multiple operation.
   operands[0] is the vector.
   operands[1] is the source address.
   operands[2] is the first destination register.  */

const char *
rs6000_output_load_multiple (rtx operands[3])
{
  /* We have to handle the case where the pseudo used to contain the address
     is assigned to one of the output registers.  */
  int i, j;
  int words = XVECLEN (operands[0], 0);
  rtx xop[10];

  if (XVECLEN (operands[0], 0) == 1)
    return "lwz %2,0(%1)";

  for (i = 0; i < words; i++)
    if (refers_to_regno_p (REGNO (operands[2]) + i,
                           REGNO (operands[2]) + i + 1, operands[1], 0))
      {
        if (i == words-1)
          {
            xop[0] = GEN_INT (4 * (words-1));
            xop[1] = operands[1];
            xop[2] = operands[2];
            output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
            return "";
          }
        else if (i == 0)
          {
            xop[0] = GEN_INT (4 * (words-1));
            xop[1] = operands[1];
            xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
            output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
            return "";
          }
        else
          {
            for (j = 0; j < words; j++)
              if (j != i)
                {
                  xop[0] = GEN_INT (j * 4);
                  xop[1] = operands[1];
                  xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
                  output_asm_insn ("lwz %2,%0(%1)", xop);
                }
            xop[0] = GEN_INT (i * 4);
            xop[1] = operands[1];
            output_asm_insn ("lwz %1,%0(%1)", xop);
            return "";
          }
      }

  return "lswi %2,%1,%N0";
}
/* A validation routine: say whether CODE, a condition code, and MODE
   match.  The other alternatives either don't make sense or should
   never be generated.  */

void
validate_condition_mode (enum rtx_code code, enum machine_mode mode)
{
  gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
               || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
              && GET_MODE_CLASS (mode) == MODE_CC);

  /* These don't make sense.  */
  gcc_assert ((code != GT && code != LT && code != GE && code != LE)
              || mode != CCUNSmode);

  gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
              || mode == CCUNSmode);

  gcc_assert (mode == CCFPmode
              || (code != ORDERED && code != UNORDERED
                  && code != UNEQ && code != LTGT
                  && code != UNGT && code != UNLT
                  && code != UNGE && code != UNLE));

  /* These should never be generated except for
     flag_finite_math_only.  */
  gcc_assert (mode != CCFPmode
              || flag_finite_math_only
              || (code != LE && code != GE
                  && code != UNEQ && code != LTGT
                  && code != UNGT && code != UNLT));

  /* These are invalid; the information is not there.  */
  gcc_assert (mode != CCEQmode || code == EQ || code == NE);
}
/* Return 1 if ANDOP is a mask that has no bits on that are not in the
   mask required to convert the result of a rotate insn into a shift
   left insn of SHIFTOP bits.  Both are known to be SImode CONST_INT.  */
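/* Illustrative example (not from the original source): with SHIFTOP == 8,
   shift_mask below becomes 0xffffff00 in the low 32 bits, so ANDOP ==
   0x0000ff00 is accepted while ANDOP == 0x0000ff01 is rejected because it
   has a bit below the shift amount.  */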
int
includes_lshift_p (rtx shiftop, rtx andop)
{
  unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;

  shift_mask <<= INTVAL (shiftop);

  return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
}
15245 includes_rshift_p (rtx shiftop
, rtx andop
)
15247 unsigned HOST_WIDE_INT shift_mask
= ~(unsigned HOST_WIDE_INT
) 0;
15249 shift_mask
>>= INTVAL (shiftop
);
15251 return (INTVAL (andop
) & 0xffffffff & ~shift_mask
) == 0;
/* Return 1 if ANDOP is a mask suitable for use with an rldic insn
   to perform a left shift.  It must have exactly SHIFTOP least
   significant 0's, then one or more 1's, then zero or more 0's.  */
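/* Illustrative example (not from the original source): with SHIFTOP == 4,
   a mask such as 0x...0ff0 qualifies (exactly four low 0's, a contiguous
   block of 1's, then 0's), while 0x...0f0f does not, since it has 1's in
   the low SHIFTOP bits and its 1's are not contiguous.  */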
int
includes_rldic_lshift_p (rtx shiftop, rtx andop)
{
  if (GET_CODE (andop) == CONST_INT)
    {
      HOST_WIDE_INT c, lsb, shift_mask;

      c = INTVAL (andop);
      if (c == 0 || c == ~0)
        return 0;

      shift_mask = ~0;
      shift_mask <<= INTVAL (shiftop);

      /* Find the least significant one bit.  */
      lsb = c & -c;

      /* It must coincide with the LSB of the shift mask.  */
      if (-lsb != shift_mask)
        return 0;

      /* Invert to look for the next transition (if any).  */
      c = ~c;

      /* Remove the low group of ones (originally low group of zeros).  */
      c &= -lsb;

      /* Again find the lsb, and check we have all 1's above.  */
      lsb = c & -c;
      return c == -lsb;
    }
  else
    return 0;
}
/* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
   to perform a left shift.  It must have SHIFTOP or more least
   significant 0's, with the remainder of the word 1's.  */
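/* Illustrative example (not from the original source): with SHIFTOP == 4,
   0xffff...fff0 qualifies (at least four low 0's with solid 1's above),
   while 0x...0ff0 does not, because the word is not all 1's above the low
   zeros.  */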
int
includes_rldicr_lshift_p (rtx shiftop, rtx andop)
{
  if (GET_CODE (andop) == CONST_INT)
    {
      HOST_WIDE_INT c, lsb, shift_mask;

      shift_mask = ~0;
      shift_mask <<= INTVAL (shiftop);
      c = INTVAL (andop);

      /* Find the least significant one bit.  */
      lsb = c & -c;

      /* It must be covered by the shift mask.
         This test also rejects c == 0.  */
      if ((lsb & shift_mask) == 0)
        return 0;

      /* Check we have all 1's above the transition, and reject all 1's.  */
      return c == -lsb && lsb != 1;
    }
  else
    return 0;
}
/* Return 1 if operands will generate valid arguments to rlwimi
   instruction for insert with right shift in 64-bit mode.  The mask may
   not start on the first bit or stop on the last bit because wrap-around
   effects of instruction do not correspond to semantics of RTL insn.  */
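/* Illustrative example (not from the original source): STARTOP == 40,
   SIZEOP == 16, SHIFTOP == 8 satisfies every test below (40 is in (32,64),
   16 > 1, 40 + 16 < 64, 8 > 0, 16 + 8 < 32, and 64 - 8 >= 16), so such an
   insert can use rlwimi.  */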
int
insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
{
  if (INTVAL (startop) > 32
      && INTVAL (startop) < 64
      && INTVAL (sizeop) > 1
      && INTVAL (sizeop) + INTVAL (startop) < 64
      && INTVAL (shiftop) > 0
      && INTVAL (sizeop) + INTVAL (shiftop) < 32
      && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
    return 1;

  return 0;
}
/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
   for lfq and stfq insns iff the registers are hard registers.   */

int
registers_ok_for_quad_peep (rtx reg1, rtx reg2)
{
  /* We might have been passed a SUBREG.  */
  if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
    return 0;

  /* We might have been passed non floating point registers.  */
  if (!FP_REGNO_P (REGNO (reg1))
      || !FP_REGNO_P (REGNO (reg2)))
    return 0;

  return (REGNO (reg1) == REGNO (reg2) - 1);
}
/* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
   addr1 and addr2 must be in consecutive memory locations
   (addr2 == addr1 + 8).  */

int
mems_ok_for_quad_peep (rtx mem1, rtx mem2)
{
  rtx addr1, addr2;
  unsigned int reg1, reg2;
  int offset1, offset2;

  /* The mems cannot be volatile.  */
  if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
    return 0;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  /* Extract an offset (if used) from the first addr.  */
  if (GET_CODE (addr1) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr1, 0)) != REG)
        return 0;
      else
        {
          reg1 = REGNO (XEXP (addr1, 0));
          /* The offset must be constant!  */
          if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
            return 0;
          offset1 = INTVAL (XEXP (addr1, 1));
        }
    }
  else if (GET_CODE (addr1) != REG)
    return 0;
  else
    {
      reg1 = REGNO (addr1);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset1 = 0;
    }

  /* And now for the second addr.  */
  if (GET_CODE (addr2) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr2, 0)) != REG)
        return 0;
      else
        {
          reg2 = REGNO (XEXP (addr2, 0));
          /* The offset must be constant. */
          if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
            return 0;
          offset2 = INTVAL (XEXP (addr2, 1));
        }
    }
  else if (GET_CODE (addr2) != REG)
    return 0;
  else
    {
      reg2 = REGNO (addr2);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset2 = 0;
    }

  /* Both of these must have the same base register.  */
  if (reg1 != reg2)
    return 0;

  /* The offset for the second addr must be 8 more than the first addr.  */
  if (offset2 != offset1 + 8)
    return 0;

  /* All the tests passed.  addr1 and addr2 are valid for lfq or stfq
     instructions.  */
  return 1;
}
rtx
rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
{
  static bool eliminated = false;
  rtx ret;

  if (mode != SDmode || TARGET_NO_SDMODE_STACK)
    ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
  else
    {
      rtx mem = cfun->machine->sdmode_stack_slot;
      gcc_assert (mem != NULL_RTX);

      if (!eliminated)
        {
          mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
          cfun->machine->sdmode_stack_slot = mem;
          eliminated = true;
        }
      ret = mem;
    }

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
               GET_MODE_NAME (mode));
      if (!ret)
        fprintf (stderr, "\tNULL_RTX\n");
      else
        debug_rtx (ret);
    }

  return ret;
}
/* Return the mode to be used for memory when a secondary memory
   location is needed.  For SDmode values we need to use DDmode, in
   all other cases we can use the same mode.  */
enum machine_mode
rs6000_secondary_memory_needed_mode (enum machine_mode mode)
{
  if (mode == SDmode)
    return DDmode;
  return mode;
}
static tree
rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  /* Don't walk into types.  */
  if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  switch (TREE_CODE (*tp))
    {
    case VAR_DECL:
    case PARM_DECL:
    case FIELD_DECL:
    case RESULT_DECL:
    case SSA_NAME:
    case REAL_CST:
    case MEM_REF:
    case VIEW_CONVERT_EXPR:
      if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
        return *tp;
      break;
    default:
      break;
    }

  return NULL_TREE;
}
/* Classify a register type.  Because the FMRGOW/FMRGEW instructions only work
   on traditional floating point registers, and the VMRGOW/VMRGEW instructions
   only work on the traditional altivec registers, note if an altivec register
   was chosen.  */

static enum rs6000_reg_type
register_to_reg_type (rtx reg, bool *is_altivec)
{
  HOST_WIDE_INT regno;
  enum reg_class rclass;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (!REG_P (reg))
    return NO_REG_TYPE;

  regno = REGNO (reg);
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (!lra_in_progress && !reload_in_progress && !reload_completed)
        return PSEUDO_REG_TYPE;

      regno = true_regnum (reg);
      if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
        return PSEUDO_REG_TYPE;
    }

  gcc_assert (regno >= 0);

  if (is_altivec && ALTIVEC_REGNO_P (regno))
    *is_altivec = true;

  rclass = rs6000_regno_regclass[regno];
  return reg_class_to_reg_type[(int)rclass];
}
/* Helper function for rs6000_secondary_reload to return true if a move to a
   different register class is really a simple move.  */

static bool
rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
                                     enum rs6000_reg_type from_type,
                                     enum machine_mode mode)
{
  int size;

  /* Add support for various direct moves available.  In this function, we only
     look at cases where we don't need any extra registers, and one or more
     simple move insns are issued.  At present, 32-bit integers are not allowed
     in FPR/VSX registers.  Single precision binary floating is not a simple
     move because we need to convert to the single precision memory layout.
     The 4-byte SDmode can be moved.  */
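  /* Illustrative note (not from the original source): under the tests below,
     a DImode (8-byte) move between a GPR and a VSX register counts as a
     simple move on a 64-bit direct-move target, while an SFmode move never
     does, for the reason given above.  */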
  size = GET_MODE_SIZE (mode);
  if (TARGET_DIRECT_MOVE
      && ((mode == SDmode) || (TARGET_POWERPC64 && size == 8))
      && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
          || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
    return true;

  else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
           && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
               || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
    return true;

  else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
           && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
               || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
    return true;

  return false;
}
/* Power8 helper function for rs6000_secondary_reload, handle all of the
   special direct moves that involve allocating an extra register, return the
   insn code of the helper function if there is such a function or
   CODE_FOR_nothing if not.  */

static bool
rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
                                     enum rs6000_reg_type from_type,
                                     enum machine_mode mode,
                                     secondary_reload_info *sri,
                                     bool altivec_p)
{
  bool ret = false;
  enum insn_code icode = CODE_FOR_nothing;
  int cost = 0;
  int size = GET_MODE_SIZE (mode);

  if (TARGET_POWERPC64)
    {
      if (size == 16)
        {
          /* Handle moving 128-bit values from GPRs to VSX point registers on
             power8 when running in 64-bit mode using XXPERMDI to glue the two
             64-bit values back together.  */
          if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
            {
              cost = 3;                 /* 2 mtvsrd's, 1 xxpermdi.  */
              icode = reg_addr[mode].reload_vsx_gpr;
            }

          /* Handle moving 128-bit values from VSX point registers to GPRs on
             power8 when running in 64-bit mode using XXPERMDI to get access to
             the bottom 64-bit value.  */
          else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
            {
              cost = 3;                 /* 2 mfvsrd's, 1 xxpermdi.  */
              icode = reg_addr[mode].reload_gpr_vsx;
            }
        }

      else if (mode == SFmode)
        {
          if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
            {
              cost = 3;                 /* xscvdpspn, mfvsrd, and.  */
              icode = reg_addr[mode].reload_gpr_vsx;
            }

          else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
            {
              cost = 2;                 /* mtvsrz, xscvspdpn.  */
              icode = reg_addr[mode].reload_vsx_gpr;
            }
        }
    }

  if (TARGET_POWERPC64 && size == 16)
    {
      /* Handle moving 128-bit values from GPRs to VSX point registers on
         power8 when running in 64-bit mode using XXPERMDI to glue the two
         64-bit values back together.  */
      if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
        {
          cost = 3;                     /* 2 mtvsrd's, 1 xxpermdi.  */
          icode = reg_addr[mode].reload_vsx_gpr;
        }

      /* Handle moving 128-bit values from VSX point registers to GPRs on
         power8 when running in 64-bit mode using XXPERMDI to get access to
         the bottom 64-bit value.  */
      else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
        {
          cost = 3;                     /* 2 mfvsrd's, 1 xxpermdi.  */
          icode = reg_addr[mode].reload_gpr_vsx;
        }
    }

  else if (!TARGET_POWERPC64 && size == 8)
    {
      /* Handle moving 64-bit values from GPRs to floating point registers on
         power8 when running in 32-bit mode using FMRGOW to glue the two 32-bit
         values back together.  Altivec register classes must be handled
         specially since a different instruction is used, and the secondary
         reload support requires a single instruction class in the scratch
         register constraint.  However, right now TFmode is not allowed in
         Altivec registers, so the pattern will never match.  */
      if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
        {
          cost = 3;                     /* 2 mtvsrwz's, 1 fmrgow.  */
          icode = reg_addr[mode].reload_fpr_gpr;
        }
    }

  if (icode != CODE_FOR_nothing)
    {
      ret = true;
      if (sri)
        {
          sri->icode = icode;
          sri->extra_cost = cost;
        }
    }

  return ret;
}
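/* Illustrative note (not part of the original source): for a 128-bit move
   from a pair of GPRs into a VSX register on a 64-bit power8 target, the
   helper above fills in sri->icode with the reload_vsx_gpr pattern for the
   mode and sri->extra_cost with 3, matching the "2 mtvsrd's, 1 xxpermdi"
   sequence described in the comments.  */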
/* Return whether a move between two register classes can be done either
   directly (simple move) or via a pattern that uses a single extra temporary
   (using power8's direct move in this case).  */

static bool
rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
                              enum rs6000_reg_type from_type,
                              enum machine_mode mode,
                              secondary_reload_info *sri,
                              bool altivec_p)
{
  /* Fall back to load/store reloads if either type is not a register.  */
  if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
    return false;

  /* If we haven't allocated registers yet, assume the move can be done for the
     standard register types.  */
  if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
      || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
      || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
    return true;

  /* Moves to the same set of registers are a simple move for non-specialized
     registers.  */
  if (to_type == from_type && IS_STD_REG_TYPE (to_type))
    return true;

  /* Check whether a simple move can be done directly.  */
  if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
    {
      if (sri)
        {
          sri->icode = CODE_FOR_nothing;
          sri->extra_cost = 0;
        }
      return true;
    }

  /* Now check if we can do it in a few steps.  */
  return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
                                              altivec_p);
}
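/* Illustrative note (not part of the original source): before register
   allocation both operands are typically PSEUDO_REG_TYPE, so the early checks
   above optimistically report that the move is possible; only once hard
   registers are known do the simple-move and direct-move helpers decide
   whether a scratch register and extra cost are really needed.  */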
15739 /* Inform reload about cases where moving X with a mode MODE to a register in
15740 RCLASS requires an extra scratch or immediate register. Return the class
15741 needed for the immediate register.
15743 For VSX and Altivec, we may need a register to convert sp+offset into
15746 For misaligned 64-bit gpr loads and stores we need a register to
15747 convert an offset address to indirect. */
15750 rs6000_secondary_reload (bool in_p
,
15752 reg_class_t rclass_i
,
15753 enum machine_mode mode
,
15754 secondary_reload_info
*sri
)
15756 enum reg_class rclass
= (enum reg_class
) rclass_i
;
15757 reg_class_t ret
= ALL_REGS
;
15758 enum insn_code icode
;
15759 bool default_p
= false;
15761 sri
->icode
= CODE_FOR_nothing
;
15763 ? reg_addr
[mode
].reload_load
15764 : reg_addr
[mode
].reload_store
);
15766 if (REG_P (x
) || register_operand (x
, mode
))
15768 enum rs6000_reg_type to_type
= reg_class_to_reg_type
[(int)rclass
];
15769 bool altivec_p
= (rclass
== ALTIVEC_REGS
);
15770 enum rs6000_reg_type from_type
= register_to_reg_type (x
, &altivec_p
);
15774 enum rs6000_reg_type exchange
= to_type
;
15775 to_type
= from_type
;
15776 from_type
= exchange
;
15779 /* Can we do a direct move of some sort? */
15780 if (rs6000_secondary_reload_move (to_type
, from_type
, mode
, sri
,
15783 icode
= (enum insn_code
)sri
->icode
;
15789 /* Handle vector moves with reload helper functions. */
15790 if (ret
== ALL_REGS
&& icode
!= CODE_FOR_nothing
)
15793 sri
->icode
= CODE_FOR_nothing
;
15794 sri
->extra_cost
= 0;
15796 if (GET_CODE (x
) == MEM
)
15798 rtx addr
= XEXP (x
, 0);
15800 /* Loads to and stores from gprs can do reg+offset, and wouldn't need
15801 an extra register in that case, but it would need an extra
15802 register if the addressing is reg+reg or (reg+reg)&(-16). Special
15803 case load/store quad. */
15804 if (rclass
== GENERAL_REGS
|| rclass
== BASE_REGS
)
15806 if (TARGET_POWERPC64
&& TARGET_QUAD_MEMORY
15807 && GET_MODE_SIZE (mode
) == 16
15808 && quad_memory_operand (x
, mode
))
15810 sri
->icode
= icode
;
15811 sri
->extra_cost
= 2;
15814 else if (!legitimate_indirect_address_p (addr
, false)
15815 && !rs6000_legitimate_offset_address_p (PTImode
, addr
,
15818 sri
->icode
= icode
;
15819 /* account for splitting the loads, and converting the
15820 address from reg+reg to reg. */
15821 sri
->extra_cost
= (((TARGET_64BIT
) ? 3 : 5)
15822 + ((GET_CODE (addr
) == AND
) ? 1 : 0));
15825 /* Allow scalar loads to/from the traditional floating point
15826 registers, even if VSX memory is set. */
15827 else if ((rclass
== FLOAT_REGS
|| rclass
== NO_REGS
)
15828 && (GET_MODE_SIZE (mode
) == 4 || GET_MODE_SIZE (mode
) == 8)
15829 && (legitimate_indirect_address_p (addr
, false)
15830 || legitimate_indirect_address_p (addr
, false)
15831 || rs6000_legitimate_offset_address_p (mode
, addr
,
15835 /* Loads to and stores from vector registers can only do reg+reg
15836 addressing. Altivec registers can also do (reg+reg)&(-16). Allow
15837 scalar modes loading up the traditional floating point registers
15838 to use offset addresses. */
15839 else if (rclass
== VSX_REGS
|| rclass
== ALTIVEC_REGS
15840 || rclass
== FLOAT_REGS
|| rclass
== NO_REGS
)
15842 if (!VECTOR_MEM_ALTIVEC_P (mode
)
15843 && GET_CODE (addr
) == AND
15844 && GET_CODE (XEXP (addr
, 1)) == CONST_INT
15845 && INTVAL (XEXP (addr
, 1)) == -16
15846 && (legitimate_indirect_address_p (XEXP (addr
, 0), false)
15847 || legitimate_indexed_address_p (XEXP (addr
, 0), false)))
15849 sri
->icode
= icode
;
15850 sri
->extra_cost
= ((GET_CODE (XEXP (addr
, 0)) == PLUS
)
15853 else if (!legitimate_indirect_address_p (addr
, false)
15854 && (rclass
== NO_REGS
15855 || !legitimate_indexed_address_p (addr
, false)))
15857 sri
->icode
= icode
;
15858 sri
->extra_cost
= 1;
15861 icode
= CODE_FOR_nothing
;
15863 /* Any other loads, including to pseudo registers which haven't been
15864 assigned to a register yet, default to require a scratch
15868 sri
->icode
= icode
;
15869 sri
->extra_cost
= 2;
15872 else if (REG_P (x
))
15874 int regno
= true_regnum (x
);
15876 icode
= CODE_FOR_nothing
;
15877 if (regno
< 0 || regno
>= FIRST_PSEUDO_REGISTER
)
15881 enum reg_class xclass
= REGNO_REG_CLASS (regno
);
15882 enum rs6000_reg_type rtype1
= reg_class_to_reg_type
[(int)rclass
];
15883 enum rs6000_reg_type rtype2
= reg_class_to_reg_type
[(int)xclass
];
15885 /* If memory is needed, use default_secondary_reload to create the
15887 if (rtype1
!= rtype2
|| !IS_STD_REG_TYPE (rtype1
))
15896 else if (TARGET_POWERPC64
15897 && reg_class_to_reg_type
[(int)rclass
] == GPR_REG_TYPE
15899 && GET_MODE_SIZE (GET_MODE (x
)) >= UNITS_PER_WORD
)
15901 rtx addr
= XEXP (x
, 0);
15902 rtx off
= address_offset (addr
);
15904 if (off
!= NULL_RTX
)
15906 unsigned int extra
= GET_MODE_SIZE (GET_MODE (x
)) - UNITS_PER_WORD
;
15907 unsigned HOST_WIDE_INT offset
= INTVAL (off
);
15909 /* We need a secondary reload when our legitimate_address_p
15910 says the address is good (as otherwise the entire address
15911 will be reloaded), and the offset is not a multiple of
15912 four or we have an address wrap. Address wrap will only
15913 occur for LO_SUMs since legitimate_offset_address_p
15914 rejects addresses for 16-byte mems that will wrap. */
15915 if (GET_CODE (addr
) == LO_SUM
15916 ? (1 /* legitimate_address_p allows any offset for lo_sum */
15917 && ((offset
& 3) != 0
15918 || ((offset
& 0xffff) ^ 0x8000) >= 0x10000 - extra
))
15919 : (offset
+ 0x8000 < 0x10000 - extra
/* legitimate_address_p */
15920 && (offset
& 3) != 0))
15923 sri
->icode
= CODE_FOR_reload_di_load
;
15925 sri
->icode
= CODE_FOR_reload_di_store
;
15926 sri
->extra_cost
= 2;
15935 else if (!TARGET_POWERPC64
15936 && reg_class_to_reg_type
[(int)rclass
] == GPR_REG_TYPE
15938 && GET_MODE_SIZE (GET_MODE (x
)) > UNITS_PER_WORD
)
15940 rtx addr
= XEXP (x
, 0);
15941 rtx off
= address_offset (addr
);
15943 if (off
!= NULL_RTX
)
15945 unsigned int extra
= GET_MODE_SIZE (GET_MODE (x
)) - UNITS_PER_WORD
;
15946 unsigned HOST_WIDE_INT offset
= INTVAL (off
);
15948 /* We need a secondary reload when our legitimate_address_p
15949 says the address is good (as otherwise the entire address
15950 will be reloaded), and we have a wrap.
15952 legitimate_lo_sum_address_p allows LO_SUM addresses to
15953 have any offset so test for wrap in the low 16 bits.
15955 legitimate_offset_address_p checks for the range
15956 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
15957 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
15958 [0x7ff4,0x7fff] respectively, so test for the
15959 intersection of these ranges, [0x7ffc,0x7fff] and
15960 [0x7ff4,0x7ff7] respectively.
15962 Note that the address we see here may have been
15963 manipulated by legitimize_reload_address. */
15964 if (GET_CODE (addr
) == LO_SUM
15965 ? ((offset
& 0xffff) ^ 0x8000) >= 0x10000 - extra
15966 : offset
- (0x8000 - extra
) < UNITS_PER_WORD
)
15969 sri
->icode
= CODE_FOR_reload_si_load
;
15971 sri
->icode
= CODE_FOR_reload_si_store
;
15972 sri
->extra_cost
= 2;
15985 ret
= default_secondary_reload (in_p
, x
, rclass
, mode
, sri
);
15987 gcc_assert (ret
!= ALL_REGS
);
15989 if (TARGET_DEBUG_ADDR
)
15992 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
15994 reg_class_names
[ret
],
15995 in_p
? "true" : "false",
15996 reg_class_names
[rclass
],
15997 GET_MODE_NAME (mode
));
16000 fprintf (stderr
, ", default secondary reload");
16002 if (sri
->icode
!= CODE_FOR_nothing
)
16003 fprintf (stderr
, ", reload func = %s, extra cost = %d\n",
16004 insn_data
[sri
->icode
].name
, sri
->extra_cost
);
16006 fprintf (stderr
, "\n");
/* Better tracing for rs6000_secondary_reload_inner.  */

static void
rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
                               bool store_p)
{
  rtx set, clobber;

  gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);

  fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
           store_p ? "store" : "load");

  if (store_p)
    set = gen_rtx_SET (VOIDmode, mem, reg);
  else
    set = gen_rtx_SET (VOIDmode, reg, mem);

  clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
  debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
}

static void
rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
                              bool store_p)
{
  rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
  gcc_unreachable ();
}
16044 /* Fixup reload addresses for Altivec or VSX loads/stores to change SP+offset
16045 to SP+reg addressing. */
16048 rs6000_secondary_reload_inner (rtx reg
, rtx mem
, rtx scratch
, bool store_p
)
16050 int regno
= true_regnum (reg
);
16051 enum machine_mode mode
= GET_MODE (reg
);
16052 enum reg_class rclass
;
16054 rtx and_op2
= NULL_RTX
;
16057 rtx scratch_or_premodify
= scratch
;
16061 if (TARGET_DEBUG_ADDR
)
16062 rs6000_secondary_reload_trace (__LINE__
, reg
, mem
, scratch
, store_p
);
16064 if (regno
< 0 || regno
>= FIRST_PSEUDO_REGISTER
)
16065 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
16067 if (GET_CODE (mem
) != MEM
)
16068 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
16070 rclass
= REGNO_REG_CLASS (regno
);
16071 addr
= XEXP (mem
, 0);
16075 /* GPRs can handle reg + small constant, all other addresses need to use
16076 the scratch register. */
16079 if (GET_CODE (addr
) == AND
)
16081 and_op2
= XEXP (addr
, 1);
16082 addr
= XEXP (addr
, 0);
16085 if (GET_CODE (addr
) == PRE_MODIFY
)
16087 scratch_or_premodify
= XEXP (addr
, 0);
16088 if (!REG_P (scratch_or_premodify
))
16089 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
16091 if (GET_CODE (XEXP (addr
, 1)) != PLUS
)
16092 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
16094 addr
= XEXP (addr
, 1);
16097 if (GET_CODE (addr
) == PLUS
16098 && (and_op2
!= NULL_RTX
16099 || !rs6000_legitimate_offset_address_p (PTImode
, addr
,
16102 addr_op1
= XEXP (addr
, 0);
16103 addr_op2
= XEXP (addr
, 1);
16104 if (!legitimate_indirect_address_p (addr_op1
, false))
16105 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
16107 if (!REG_P (addr_op2
)
16108 && (GET_CODE (addr_op2
) != CONST_INT
16109 || !satisfies_constraint_I (addr_op2
)))
16111 if (TARGET_DEBUG_ADDR
)
16114 "\nMove plus addr to register %s, mode = %s: ",
16115 rs6000_reg_names
[REGNO (scratch
)],
16116 GET_MODE_NAME (mode
));
16117 debug_rtx (addr_op2
);
16119 rs6000_emit_move (scratch
, addr_op2
, Pmode
);
16120 addr_op2
= scratch
;
16123 emit_insn (gen_rtx_SET (VOIDmode
,
16124 scratch_or_premodify
,
16125 gen_rtx_PLUS (Pmode
,
16129 addr
= scratch_or_premodify
;
16130 scratch_or_premodify
= scratch
;
16132 else if (!legitimate_indirect_address_p (addr
, false)
16133 && !rs6000_legitimate_offset_address_p (PTImode
, addr
,
16136 if (TARGET_DEBUG_ADDR
)
16138 fprintf (stderr
, "\nMove addr to register %s, mode = %s: ",
16139 rs6000_reg_names
[REGNO (scratch_or_premodify
)],
16140 GET_MODE_NAME (mode
));
16143 rs6000_emit_move (scratch_or_premodify
, addr
, Pmode
);
16144 addr
= scratch_or_premodify
;
16145 scratch_or_premodify
= scratch
;
16149 /* Float registers can do offset+reg addressing for scalar types. */
16151 if (legitimate_indirect_address_p (addr
, false) /* reg */
16152 || legitimate_indexed_address_p (addr
, false) /* reg+reg */
16153 || ((GET_MODE_SIZE (mode
) == 4 || GET_MODE_SIZE (mode
) == 8)
16154 && and_op2
== NULL_RTX
16155 && scratch_or_premodify
== scratch
16156 && rs6000_legitimate_offset_address_p (mode
, addr
, false, false)))
16159 /* If this isn't a legacy floating point load/store, fall through to the
16162 /* VSX/Altivec registers can only handle reg+reg addressing. Move other
16163 addresses into a scratch register. */
16167 /* With float regs, we need to handle the AND ourselves, since we can't
16168 use the Altivec instruction with an implicit AND -16. Allow scalar
16169 loads to float registers to use reg+offset even if VSX. */
16170 if (GET_CODE (addr
) == AND
16171 && (rclass
!= ALTIVEC_REGS
|| GET_MODE_SIZE (mode
) != 16
16172 || GET_CODE (XEXP (addr
, 1)) != CONST_INT
16173 || INTVAL (XEXP (addr
, 1)) != -16
16174 || !VECTOR_MEM_ALTIVEC_P (mode
)))
16176 and_op2
= XEXP (addr
, 1);
16177 addr
= XEXP (addr
, 0);
16180 /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
16181 as the address later. */
16182 if (GET_CODE (addr
) == PRE_MODIFY
16183 && ((ALTIVEC_OR_VSX_VECTOR_MODE (mode
)
16184 && (rclass
!= FLOAT_REGS
16185 || (GET_MODE_SIZE (mode
) != 4 && GET_MODE_SIZE (mode
) != 8)))
16186 || and_op2
!= NULL_RTX
16187 || !legitimate_indexed_address_p (XEXP (addr
, 1), false)))
16189 scratch_or_premodify
= XEXP (addr
, 0);
16190 if (!legitimate_indirect_address_p (scratch_or_premodify
, false))
16191 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
16193 if (GET_CODE (XEXP (addr
, 1)) != PLUS
)
16194 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
16196 addr
= XEXP (addr
, 1);
16199 if (legitimate_indirect_address_p (addr
, false) /* reg */
16200 || legitimate_indexed_address_p (addr
, false) /* reg+reg */
16201 || (GET_CODE (addr
) == AND
/* Altivec memory */
16202 && rclass
== ALTIVEC_REGS
16203 && GET_CODE (XEXP (addr
, 1)) == CONST_INT
16204 && INTVAL (XEXP (addr
, 1)) == -16
16205 && (legitimate_indirect_address_p (XEXP (addr
, 0), false)
16206 || legitimate_indexed_address_p (XEXP (addr
, 0), false))))
16209 else if (GET_CODE (addr
) == PLUS
)
16211 addr_op1
= XEXP (addr
, 0);
16212 addr_op2
= XEXP (addr
, 1);
16213 if (!REG_P (addr_op1
))
16214 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
16216 if (TARGET_DEBUG_ADDR
)
16218 fprintf (stderr
, "\nMove plus addr to register %s, mode = %s: ",
16219 rs6000_reg_names
[REGNO (scratch
)], GET_MODE_NAME (mode
));
16220 debug_rtx (addr_op2
);
16222 rs6000_emit_move (scratch
, addr_op2
, Pmode
);
16223 emit_insn (gen_rtx_SET (VOIDmode
,
16224 scratch_or_premodify
,
16225 gen_rtx_PLUS (Pmode
,
16228 addr
= scratch_or_premodify
;
16229 scratch_or_premodify
= scratch
;
16232 else if (GET_CODE (addr
) == SYMBOL_REF
|| GET_CODE (addr
) == CONST
16233 || GET_CODE (addr
) == CONST_INT
|| GET_CODE (addr
) == LO_SUM
16236 if (TARGET_DEBUG_ADDR
)
16238 fprintf (stderr
, "\nMove addr to register %s, mode = %s: ",
16239 rs6000_reg_names
[REGNO (scratch_or_premodify
)],
16240 GET_MODE_NAME (mode
));
16244 rs6000_emit_move (scratch_or_premodify
, addr
, Pmode
);
16245 addr
= scratch_or_premodify
;
16246 scratch_or_premodify
= scratch
;
16250 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
16255 rs6000_secondary_reload_fail (__LINE__
, reg
, mem
, scratch
, store_p
);
16258 /* If the original address involved a pre-modify that we couldn't use the VSX
16259 memory instruction with update, and we haven't taken care of already,
16260 store the address in the pre-modify register and use that as the
16262 if (scratch_or_premodify
!= scratch
&& scratch_or_premodify
!= addr
)
16264 emit_insn (gen_rtx_SET (VOIDmode
, scratch_or_premodify
, addr
));
16265 addr
= scratch_or_premodify
;
16268 /* If the original address involved an AND -16 and we couldn't use an ALTIVEC
16269 memory instruction, recreate the AND now, including the clobber which is
16270 generated by the general ANDSI3/ANDDI3 patterns for the
16271 andi. instruction. */
16272 if (and_op2
!= NULL_RTX
)
16274 if (! legitimate_indirect_address_p (addr
, false))
16276 emit_insn (gen_rtx_SET (VOIDmode
, scratch
, addr
));
16280 if (TARGET_DEBUG_ADDR
)
16282 fprintf (stderr
, "\nAnd addr to register %s, mode = %s: ",
16283 rs6000_reg_names
[REGNO (scratch
)], GET_MODE_NAME (mode
));
16284 debug_rtx (and_op2
);
16287 and_rtx
= gen_rtx_SET (VOIDmode
,
16289 gen_rtx_AND (Pmode
,
16293 cc_clobber
= gen_rtx_CLOBBER (CCmode
, gen_rtx_SCRATCH (CCmode
));
16294 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
16295 gen_rtvec (2, and_rtx
, cc_clobber
)));
16299 /* Adjust the address if it changed. */
16300 if (addr
!= XEXP (mem
, 0))
16302 mem
= replace_equiv_address_nv (mem
, addr
);
16303 if (TARGET_DEBUG_ADDR
)
16304 fprintf (stderr
, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
16307 /* Now create the move. */
16309 emit_insn (gen_rtx_SET (VOIDmode
, mem
, reg
));
16311 emit_insn (gen_rtx_SET (VOIDmode
, reg
, mem
));
/* Convert reloads involving 64-bit gprs and misaligned offset
   addressing, or multiple 32-bit gprs and offsets that are too large,
   to use indirect addressing.  */

void
rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
{
  int regno = true_regnum (reg);
  enum reg_class rclass;
  rtx addr;
  rtx scratch_or_premodify = scratch;

  if (TARGET_DEBUG_ADDR)
    {
      fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
               store_p ? "store" : "load");
      fprintf (stderr, "reg:\n");
      debug_rtx (reg);
      fprintf (stderr, "mem:\n");
      debug_rtx (mem);
      fprintf (stderr, "scratch:\n");
      debug_rtx (scratch);
    }

  gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
  gcc_assert (GET_CODE (mem) == MEM);
  rclass = REGNO_REG_CLASS (regno);
  gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
  addr = XEXP (mem, 0);

  if (GET_CODE (addr) == PRE_MODIFY)
    {
      scratch_or_premodify = XEXP (addr, 0);
      gcc_assert (REG_P (scratch_or_premodify));
      addr = XEXP (addr, 1);
    }
  gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);

  rs6000_emit_move (scratch_or_premodify, addr, Pmode);

  mem = replace_equiv_address_nv (mem, scratch_or_premodify);

  /* Now create the move.  */
  if (store_p)
    emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
  else
    emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
}
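/* Illustrative note (not part of the original source): a typical case handled
   here is a 64-bit gpr load such as "ld rD,N(rA)" whose offset N is not a
   multiple of 4.  The reg+offset address is first copied into the scratch (or
   PRE_MODIFY base) register and the memory reference is rewritten to be
   register indirect, as the function comment above describes.  */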
/* Allocate a 64-bit stack slot to be used for copying SDmode values through if
   this function has any SDmode references.  If we are on a power7 or later, we
   don't need the 64-bit stack slot since the LFIWZX and STFIWX instructions
   can load/store the value.  */

static void
rs6000_alloc_sdmode_stack_slot (void)
{
  tree t;
  basic_block bb;
  gimple_stmt_iterator gsi;

  gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
  /* We use a different approach for dealing with the secondary
     memory in LRA.  */
  if (ira_use_lra_p)
    return;

  if (TARGET_NO_SDMODE_STACK)
    return;

  FOR_EACH_BB (bb)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
        tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
        if (ret)
          {
            rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
            cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
                                                                  SDmode, 0);
            return;
          }
      }

  /* Check for any SDmode parameters of the function.  */
  for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
    {
      if (TREE_TYPE (t) == error_mark_node)
        continue;

      if (TYPE_MODE (TREE_TYPE (t)) == SDmode
          || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
        {
          rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
          cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
                                                                SDmode, 0);
          return;
        }
    }
}

static void
rs6000_instantiate_decls (void)
{
  if (cfun->machine->sdmode_stack_slot != NULL_RTX)
    instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
}
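/* Illustrative note (not part of the original source): SDmode (_Decimal32)
   values live in the floating point registers, but without the power7
   LFIWZX/STFIWX instructions they can only be moved between GPRs and FPRs by
   bouncing through memory; the DDmode-sized slot allocated above provides a
   properly aligned 64-bit home for that transfer.  */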
/* Given an rtx X being reloaded into a reg required to be
   in class CLASS, return the class of reg to actually use.
   In general this is just CLASS; but on some machines
   in some cases it is preferable to use a more restrictive class.

   On the RS/6000, we have to return NO_REGS when we want to reload a
   floating-point CONST_DOUBLE to force it to be copied to memory.

   We also don't want to reload integer values into floating-point
   registers if we can at all help it.  In fact, this can
   cause reload to die, if it tries to generate a reload of CTR
   into a FP register and discovers it doesn't have the memory location
   required.

   ??? Would it be a good idea to have reload do the converse, that is
   try to reload floating modes into FP registers if possible?
 */

static enum reg_class
rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum machine_mode mode = GET_MODE (x);

  if (TARGET_VSX && x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
    return rclass;

  if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
      && (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
      && easy_vector_constant (x, mode))
    return ALTIVEC_REGS;

  if (CONSTANT_P (x) && reg_classes_intersect_p (rclass, FLOAT_REGS))
    return NO_REGS;

  if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
    return GENERAL_REGS;

  /* For VSX, prefer the traditional registers for 64-bit values because we can
     use the non-VSX loads.  Prefer the Altivec registers if Altivec is
     handling the vector operations (i.e. V16QI, V8HI, and V4SI), or if we
     prefer Altivec loads.  */
  if (rclass == VSX_REGS)
    {
      if (GET_MODE_SIZE (mode) <= 8)
        return FLOAT_REGS;

      if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode))
        return ALTIVEC_REGS;

      return rclass;
    }

  return rclass;
}

/* Debug version of rs6000_preferred_reload_class.  */
static enum reg_class
rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
{
  enum reg_class ret = rs6000_preferred_reload_class (x, rclass);

  fprintf (stderr,
           "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
           "mode = %s, x:\n",
           reg_class_names[ret], reg_class_names[rclass],
           GET_MODE_NAME (GET_MODE (x)));
  debug_rtx (x);

  return ret;
}
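/* Illustrative note (not part of the original source): reloading a vector
   zero into a VSX class is left in that class (it can be generated with a
   register-to-register xxlxor), while reloading a floating-point CONST_DOUBLE
   into FLOAT_REGS returns NO_REGS so that the constant is forced to memory
   first, as described in the comment before the function.  */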
/* If we are copying between FP or AltiVec registers and anything else, we need
   a memory location.  The exception is when we are targeting ppc64 and the
   move to/from fpr to gpr instructions are available.  Also, under VSX, you
   can copy vector registers from the FP register set to the Altivec register
   set and vice versa.  */

static bool
rs6000_secondary_memory_needed (enum reg_class from_class,
                                enum reg_class to_class,
                                enum machine_mode mode)
{
  enum rs6000_reg_type from_type, to_type;
  bool altivec_p = ((from_class == ALTIVEC_REGS)
                    || (to_class == ALTIVEC_REGS));

  /* If a simple/direct move is available, we don't need secondary memory  */
  from_type = reg_class_to_reg_type[(int)from_class];
  to_type = reg_class_to_reg_type[(int)to_class];

  if (rs6000_secondary_reload_move (to_type, from_type, mode,
                                    (secondary_reload_info *)0, altivec_p))
    return false;

  /* If we have a floating point or vector register class, we need to use
     memory to transfer the data.  */
  if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
    return true;

  return false;
}

/* Debug version of rs6000_secondary_memory_needed.  */
static bool
rs6000_debug_secondary_memory_needed (enum reg_class from_class,
                                      enum reg_class to_class,
                                      enum machine_mode mode)
{
  bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);

  fprintf (stderr,
           "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
           "to_class = %s, mode = %s\n",
           ret ? "true" : "false",
           reg_class_names[from_class],
           reg_class_names[to_class],
           GET_MODE_NAME (mode));

  return ret;
}
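/* Illustrative note (not part of the original source): without the power8
   direct-move (or -mmfpgpr) instructions, copying a DFmode value between a
   GPR and an FPR has no register-to-register path, so this hook reports that
   the copy must be bounced through a stack slot.  */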
16546 /* Return the register class of a scratch register needed to copy IN into
16547 or out of a register in RCLASS in MODE. If it can be done directly,
16548 NO_REGS is returned. */
16550 static enum reg_class
16551 rs6000_secondary_reload_class (enum reg_class rclass
, enum machine_mode mode
,
16556 if (TARGET_ELF
|| (DEFAULT_ABI
== ABI_DARWIN
16558 && MACHOPIC_INDIRECT
16562 /* We cannot copy a symbolic operand directly into anything
16563 other than BASE_REGS for TARGET_ELF. So indicate that a
16564 register from BASE_REGS is needed as an intermediate
16567 On Darwin, pic addresses require a load from memory, which
16568 needs a base register. */
16569 if (rclass
!= BASE_REGS
16570 && (GET_CODE (in
) == SYMBOL_REF
16571 || GET_CODE (in
) == HIGH
16572 || GET_CODE (in
) == LABEL_REF
16573 || GET_CODE (in
) == CONST
))
16577 if (GET_CODE (in
) == REG
)
16579 regno
= REGNO (in
);
16580 if (regno
>= FIRST_PSEUDO_REGISTER
)
16582 regno
= true_regnum (in
);
16583 if (regno
>= FIRST_PSEUDO_REGISTER
)
16587 else if (GET_CODE (in
) == SUBREG
)
16589 regno
= true_regnum (in
);
16590 if (regno
>= FIRST_PSEUDO_REGISTER
)
16596 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
16598 if (rclass
== GENERAL_REGS
|| rclass
== BASE_REGS
16599 || (regno
>= 0 && INT_REGNO_P (regno
)))
16602 /* Constants, memory, and FP registers can go into FP registers. */
16603 if ((regno
== -1 || FP_REGNO_P (regno
))
16604 && (rclass
== FLOAT_REGS
|| rclass
== NON_SPECIAL_REGS
))
16605 return (mode
!= SDmode
|| lra_in_progress
) ? NO_REGS
: GENERAL_REGS
;
16607 /* Memory, and FP/altivec registers can go into fp/altivec registers under
16608 VSX. However, for scalar variables, use the traditional floating point
16609 registers so that we can use offset+register addressing. */
16611 && (regno
== -1 || VSX_REGNO_P (regno
))
16612 && VSX_REG_CLASS_P (rclass
))
16614 if (GET_MODE_SIZE (mode
) < 16)
16620 /* Memory, and AltiVec registers can go into AltiVec registers. */
16621 if ((regno
== -1 || ALTIVEC_REGNO_P (regno
))
16622 && rclass
== ALTIVEC_REGS
)
16625 /* We can copy among the CR registers. */
16626 if ((rclass
== CR_REGS
|| rclass
== CR0_REGS
)
16627 && regno
>= 0 && CR_REGNO_P (regno
))
16630 /* Otherwise, we need GENERAL_REGS. */
16631 return GENERAL_REGS
;
16634 /* Debug version of rs6000_secondary_reload_class. */
16635 static enum reg_class
16636 rs6000_debug_secondary_reload_class (enum reg_class rclass
,
16637 enum machine_mode mode
, rtx in
)
16639 enum reg_class ret
= rs6000_secondary_reload_class (rclass
, mode
, in
);
16641 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
16642 "mode = %s, input rtx:\n",
16643 reg_class_names
[ret
], reg_class_names
[rclass
],
16644 GET_MODE_NAME (mode
));
/* Return nonzero if for CLASS a mode change from FROM to TO is invalid.  */

static bool
rs6000_cannot_change_mode_class (enum machine_mode from,
                                 enum machine_mode to,
                                 enum reg_class rclass)
{
  unsigned from_size = GET_MODE_SIZE (from);
  unsigned to_size = GET_MODE_SIZE (to);

  if (from_size != to_size)
    {
      enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;

      if (reg_classes_intersect_p (xclass, rclass))
        {
          unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
          unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];

          /* Don't allow 64-bit types to overlap with 128-bit types that take a
             single register under VSX because the scalar part of the register
             is in the upper 64-bits, and not the lower 64-bits.  Types like
             TFmode/TDmode that take 2 scalar register can overlap.  128-bit
             IEEE floating point can't overlap, and neither can small
             values.  */

          if (TARGET_IEEEQUAD && (to == TFmode || from == TFmode))
            return true;

          if (from_size < 8 || to_size < 8)
            return true;

          if (from_size == 8 && (8 * to_nregs) != to_size)
            return true;

          if (to_size == 8 && (8 * from_nregs) != from_size)
            return true;

          return false;
        }
      else
        return false;
    }

  if (TARGET_E500_DOUBLE
      && ((((to) == DFmode) + ((from) == DFmode)) == 1
          || (((to) == TFmode) + ((from) == TFmode)) == 1
          || (((to) == DDmode) + ((from) == DDmode)) == 1
          || (((to) == TDmode) + ((from) == TDmode)) == 1
          || (((to) == DImode) + ((from) == DImode)) == 1))
    return true;

  /* Since the VSX register set includes traditional floating point registers
     and altivec registers, just check for the size being different instead of
     trying to check whether the modes are vector modes.  Otherwise it won't
     allow say DF and DI to change classes.  For types like TFmode and TDmode
     that take 2 64-bit registers, rather than a single 128-bit register, don't
     allow subregs of those types to other 128 bit types.  */
  if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
    {
      unsigned num_regs = (from_size + 15) / 16;
      if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
          || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
        return true;

      return (from_size != 8 && from_size != 16);
    }

  if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
      && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
    return true;

  if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
      && reg_classes_intersect_p (GENERAL_REGS, rclass))
    return true;

  return false;
}

/* Debug version of rs6000_cannot_change_mode_class.  */
static bool
rs6000_debug_cannot_change_mode_class (enum machine_mode from,
                                       enum machine_mode to,
                                       enum reg_class rclass)
{
  bool ret = rs6000_cannot_change_mode_class (from, to, rclass);

  fprintf (stderr,
           "rs6000_cannot_change_mode_class, return %s, from = %s, "
           "to = %s, rclass = %s\n",
           ret ? "true" : "false",
           GET_MODE_NAME (from), GET_MODE_NAME (to),
           reg_class_names[rclass]);

  return ret;
}
16747 /* Return a string to do a move operation of 128 bits of data. */
16750 rs6000_output_move_128bit (rtx operands
[])
16752 rtx dest
= operands
[0];
16753 rtx src
= operands
[1];
16754 enum machine_mode mode
= GET_MODE (dest
);
16757 bool dest_gpr_p
, dest_fp_p
, dest_vmx_p
, dest_vsx_p
;
16758 bool src_gpr_p
, src_fp_p
, src_vmx_p
, src_vsx_p
;
16762 dest_regno
= REGNO (dest
);
16763 dest_gpr_p
= INT_REGNO_P (dest_regno
);
16764 dest_fp_p
= FP_REGNO_P (dest_regno
);
16765 dest_vmx_p
= ALTIVEC_REGNO_P (dest_regno
);
16766 dest_vsx_p
= dest_fp_p
| dest_vmx_p
;
16771 dest_gpr_p
= dest_fp_p
= dest_vmx_p
= dest_vsx_p
= false;
16776 src_regno
= REGNO (src
);
16777 src_gpr_p
= INT_REGNO_P (src_regno
);
16778 src_fp_p
= FP_REGNO_P (src_regno
);
16779 src_vmx_p
= ALTIVEC_REGNO_P (src_regno
);
16780 src_vsx_p
= src_fp_p
| src_vmx_p
;
16785 src_gpr_p
= src_fp_p
= src_vmx_p
= src_vsx_p
= false;
16788 /* Register moves. */
16789 if (dest_regno
>= 0 && src_regno
>= 0)
16796 else if (TARGET_VSX
&& TARGET_DIRECT_MOVE
&& src_vsx_p
)
16800 else if (TARGET_VSX
&& dest_vsx_p
)
16803 return "xxlor %x0,%x1,%x1";
16805 else if (TARGET_DIRECT_MOVE
&& src_gpr_p
)
16809 else if (TARGET_ALTIVEC
&& dest_vmx_p
&& src_vmx_p
)
16810 return "vor %0,%1,%1";
16812 else if (dest_fp_p
&& src_fp_p
)
16817 else if (dest_regno
>= 0 && MEM_P (src
))
16821 if (TARGET_QUAD_MEMORY
&& quad_load_store_p (dest
, src
))
16827 else if (TARGET_ALTIVEC
&& dest_vmx_p
16828 && altivec_indexed_or_indirect_operand (src
, mode
))
16829 return "lvx %0,%y1";
16831 else if (TARGET_VSX
&& dest_vsx_p
)
16833 if (mode
== V16QImode
|| mode
== V8HImode
|| mode
== V4SImode
)
16834 return "lxvw4x %x0,%y1";
16836 return "lxvd2x %x0,%y1";
16839 else if (TARGET_ALTIVEC
&& dest_vmx_p
)
16840 return "lvx %0,%y1";
16842 else if (dest_fp_p
)
16847 else if (src_regno
>= 0 && MEM_P (dest
))
16851 if (TARGET_QUAD_MEMORY
&& quad_load_store_p (dest
, src
))
16852 return "stq %1,%0";
16857 else if (TARGET_ALTIVEC
&& src_vmx_p
16858 && altivec_indexed_or_indirect_operand (src
, mode
))
16859 return "stvx %1,%y0";
16861 else if (TARGET_VSX
&& src_vsx_p
)
16863 if (mode
== V16QImode
|| mode
== V8HImode
|| mode
== V4SImode
)
16864 return "stxvw4x %x1,%y0";
16866 return "stxvd2x %x1,%y0";
16869 else if (TARGET_ALTIVEC
&& src_vmx_p
)
16870 return "stvx %1,%y0";
16877 else if (dest_regno
>= 0
16878 && (GET_CODE (src
) == CONST_INT
16879 || GET_CODE (src
) == CONST_DOUBLE
16880 || GET_CODE (src
) == CONST_VECTOR
))
16885 else if (TARGET_VSX
&& dest_vsx_p
&& zero_constant (src
, mode
))
16886 return "xxlxor %x0,%x0,%x0";
16888 else if (TARGET_ALTIVEC
&& dest_vmx_p
)
16889 return output_vec_const_move (operands
);
16892 if (TARGET_DEBUG_ADDR
)
16894 fprintf (stderr
, "\n===== Bad 128 bit move:\n");
16895 debug_rtx (gen_rtx_SET (VOIDmode
, dest
, src
));
16898 gcc_unreachable ();
/* Given a comparison operation, return the bit number in CCR to test.  We
   know this is a valid comparison.

   SCC_P is 1 if this is for an scc.  That means that %D will have been
   used instead of %C, so the bits will be in different places.

   Return -1 if OP isn't a valid comparison for some reason.  */

int
ccr_bit (rtx op, int scc_p)
{
  enum rtx_code code = GET_CODE (op);
  enum machine_mode cc_mode;
  int cc_regnum;
  int base_bit;
  rtx reg;

  if (!COMPARISON_P (op))
    return -1;

  reg = XEXP (op, 0);

  gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));

  cc_mode = GET_MODE (reg);
  cc_regnum = REGNO (reg);
  base_bit = 4 * (cc_regnum - CR0_REGNO);

  validate_condition_mode (code, cc_mode);

  /* When generating a sCOND operation, only positive conditions are
     allowed.  */
  gcc_assert (!scc_p
              || code == EQ || code == GT || code == LT || code == UNORDERED
              || code == GTU || code == LTU);

  switch (code)
    {
    case NE:
      return scc_p ? base_bit + 3 : base_bit + 2;
    case EQ:
      return base_bit + 2;
    case GT:  case GTU:  case UNLE:
      return base_bit + 1;
    case LT:  case LTU:  case UNGE:
      return base_bit;
    case ORDERED:  case UNORDERED:
      return base_bit + 3;

    case GE:  case GEU:
      /* If scc, we will have done a cror to put the bit in the
         unordered position.  So test that bit.  For integer, this is ! LT
         unless this is an scc insn.  */
      return scc_p ? base_bit + 3 : base_bit;

    case LE:  case LEU:
      return scc_p ? base_bit + 3 : base_bit + 1;

    default:
      gcc_unreachable ();
    }
}
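/* Illustrative note (not part of the original source): for a GT comparison on
   CR field 7, base_bit is 4 * (cc_regnum - CR0_REGNO) = 28 and the function
   returns bit 29.  With scc_p set, a GE test instead returns base_bit + 3,
   because the cror emitted for the scc sequence has moved the result into the
   "unordered" bit position, as the comment above explains.  */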
/* Return the GOT register.  */

rtx
rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
{
  /* The second flow pass currently (June 1999) can't update
     regs_ever_live without disturbing other parts of the compiler, so
     update it here to make the prolog/epilogue code happy.  */
  if (!can_create_pseudo_p ()
      && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
    df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);

  crtl->uses_pic_offset_table = 1;

  return pic_offset_table_rtx;
}

static rs6000_stack_t stack_info;

/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
rs6000_init_machine_status (void)
{
  stack_info.reload_completed = 0;
  return ggc_alloc_cleared_machine_function ();
}
#define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)

int
extract_MB (rtx op)
{
  int i;
  unsigned long val = INTVAL (op);

  /* If the high bit is zero, the value is the first 1 bit we find
     from the left.  */
  if ((val & 0x80000000) == 0)
    {
      gcc_assert (val & 0xffffffff);

      i = 1;
      while (((val <<= 1) & 0x80000000) == 0)
        ++i;
      return i;
    }

  /* If the high bit is set and the low bit is not, or the mask is all
     1's, the value is zero.  */
  if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
    return 0;

  /* Otherwise we have a wrap-around mask.  Look for the first 0 bit
     from the right.  */
  i = 31;
  while (((val >>= 1) & 1) != 0)
    --i;

  return i;
}

int
extract_ME (rtx op)
{
  int i;
  unsigned long val = INTVAL (op);

  /* If the low bit is zero, the value is the first 1 bit we find from
     the right.  */
  if ((val & 1) == 0)
    {
      gcc_assert (val & 0xffffffff);

      i = 30;
      while (((val >>= 1) & 1) == 0)
        --i;

      return i;
    }

  /* If the low bit is set and the high bit is not, or the mask is all
     1's, the value is 31.  */
  if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
    return 31;

  /* Otherwise we have a wrap-around mask.  Look for the first 0 bit
     from the left.  */
  i = 0;
  while (((val <<= 1) & 0x80000000) != 0)
    ++i;

  return i;
}
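/* Illustrative note (not part of the original source): for the SImode mask
   0x0000fff0, extract_MB should return 16 and extract_ME should return 27,
   i.e. the IBM bit numbers (bit 0 = most significant) of the first and last
   1 bits, which is what an rlwinm-style mask operand expects.  */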
17062 /* Locate some local-dynamic symbol still in use by this function
17063 so that we can print its name in some tls_ld pattern. */
17065 static const char *
17066 rs6000_get_some_local_dynamic_name (void)
17070 if (cfun
->machine
->some_ld_name
)
17071 return cfun
->machine
->some_ld_name
;
17073 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
17075 && for_each_rtx (&PATTERN (insn
),
17076 rs6000_get_some_local_dynamic_name_1
, 0))
17077 return cfun
->machine
->some_ld_name
;
17079 gcc_unreachable ();
17082 /* Helper function for rs6000_get_some_local_dynamic_name. */
17085 rs6000_get_some_local_dynamic_name_1 (rtx
*px
, void *data ATTRIBUTE_UNUSED
)
17089 if (GET_CODE (x
) == SYMBOL_REF
)
17091 const char *str
= XSTR (x
, 0);
17092 if (SYMBOL_REF_TLS_MODEL (x
) == TLS_MODEL_LOCAL_DYNAMIC
)
17094 cfun
->machine
->some_ld_name
= str
;
17102 /* Write out a function code label. */
17105 rs6000_output_function_entry (FILE *file
, const char *fname
)
17107 if (fname
[0] != '.')
17109 switch (DEFAULT_ABI
)
17112 gcc_unreachable ();
17118 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file
, "L.");
17128 RS6000_OUTPUT_BASENAME (file
, fname
);
17131 /* Print an operand. Recognize special options, documented below. */
17134 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
17135 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
17137 #define SMALL_DATA_RELOC "sda21"
17138 #define SMALL_DATA_REG 0
17142 print_operand (FILE *file
, rtx x
, int code
)
17145 unsigned HOST_WIDE_INT uval
;
17149 /* %a is output_address. */
17152 /* If constant, low-order 16 bits of constant, unsigned.
17153 Otherwise, write normally. */
17155 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 0xffff);
17157 print_operand (file
, x
, 0);
17161 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
17162 for 64-bit mask direction. */
17163 putc (((INTVAL (x
) & 1) == 0 ? 'r' : 'l'), file
);
17166 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
17170 /* Like 'J' but get to the GT bit only. */
17171 gcc_assert (REG_P (x
));
17173 /* Bit 1 is GT bit. */
17174 i
= 4 * (REGNO (x
) - CR0_REGNO
) + 1;
17176 /* Add one for shift count in rlinm for scc. */
17177 fprintf (file
, "%d", i
+ 1);
17181 /* X is a CR register. Print the number of the EQ bit of the CR */
17182 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
17183 output_operand_lossage ("invalid %%E value");
17185 fprintf (file
, "%d", 4 * (REGNO (x
) - CR0_REGNO
) + 2);
17189 /* X is a CR register. Print the shift count needed to move it
17190 to the high-order four bits. */
17191 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
17192 output_operand_lossage ("invalid %%f value");
17194 fprintf (file
, "%d", 4 * (REGNO (x
) - CR0_REGNO
));
17198 /* Similar, but print the count for the rotate in the opposite
17200 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
17201 output_operand_lossage ("invalid %%F value");
17203 fprintf (file
, "%d", 32 - 4 * (REGNO (x
) - CR0_REGNO
));
17207 /* X is a constant integer. If it is negative, print "m",
17208 otherwise print "z". This is to make an aze or ame insn. */
17209 if (GET_CODE (x
) != CONST_INT
)
17210 output_operand_lossage ("invalid %%G value");
17211 else if (INTVAL (x
) >= 0)
17218 /* If constant, output low-order five bits. Otherwise, write
17221 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 31);
17223 print_operand (file
, x
, 0);
17227 /* If constant, output low-order six bits. Otherwise, write
17230 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, INTVAL (x
) & 63);
17232 print_operand (file
, x
, 0);
17236 /* Print `i' if this is a constant, else nothing. */
17242 /* Write the bit number in CCR for jump. */
17243 i
= ccr_bit (x
, 0);
17245 output_operand_lossage ("invalid %%j code");
17247 fprintf (file
, "%d", i
);
17251 /* Similar, but add one for shift count in rlinm for scc and pass
17252 scc flag to `ccr_bit'. */
17253 i
= ccr_bit (x
, 1);
17255 output_operand_lossage ("invalid %%J code");
17257 /* If we want bit 31, write a shift count of zero, not 32. */
17258 fprintf (file
, "%d", i
== 31 ? 0 : i
+ 1);
17262 /* X must be a constant. Write the 1's complement of the
17265 output_operand_lossage ("invalid %%k value");
17267 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, ~ INTVAL (x
));
17271 /* X must be a symbolic constant on ELF. Write an
17272 expression suitable for an 'addi' that adds in the low 16
17273 bits of the MEM. */
17274 if (GET_CODE (x
) == CONST
)
17276 if (GET_CODE (XEXP (x
, 0)) != PLUS
17277 || (GET_CODE (XEXP (XEXP (x
, 0), 0)) != SYMBOL_REF
17278 && GET_CODE (XEXP (XEXP (x
, 0), 0)) != LABEL_REF
)
17279 || GET_CODE (XEXP (XEXP (x
, 0), 1)) != CONST_INT
)
17280 output_operand_lossage ("invalid %%K value");
17282 print_operand_address (file
, x
);
17283 fputs ("@l", file
);
17286 /* %l is output_asm_label. */
17289 /* Write second word of DImode or DFmode reference. Works on register
17290 or non-indexed memory only. */
17292 fputs (reg_names
[REGNO (x
) + 1], file
);
17293 else if (MEM_P (x
))
17295 /* Handle possible auto-increment. Since it is pre-increment and
17296 we have already done it, we can just use an offset of word. */
17297 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
17298 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
17299 output_address (plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0),
17301 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
17302 output_address (plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0),
17305 output_address (XEXP (adjust_address_nv (x
, SImode
,
17309 if (small_data_operand (x
, GET_MODE (x
)))
17310 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
17311 reg_names
[SMALL_DATA_REG
]);
17316 /* MB value for a mask operand. */
17317 if (! mask_operand (x
, SImode
))
17318 output_operand_lossage ("invalid %%m value");
17320 fprintf (file
, "%d", extract_MB (x
));
17324 /* ME value for a mask operand. */
17325 if (! mask_operand (x
, SImode
))
17326 output_operand_lossage ("invalid %%M value");
17328 fprintf (file
, "%d", extract_ME (x
));
17331 /* %n outputs the negative of its operand. */
17334 /* Write the number of elements in the vector times 4. */
17335 if (GET_CODE (x
) != PARALLEL
)
17336 output_operand_lossage ("invalid %%N value");
17338 fprintf (file
, "%d", XVECLEN (x
, 0) * 4);
17342 /* Similar, but subtract 1 first. */
17343 if (GET_CODE (x
) != PARALLEL
)
17344 output_operand_lossage ("invalid %%O value");
17346 fprintf (file
, "%d", (XVECLEN (x
, 0) - 1) * 4);
17350 /* X is a CONST_INT that is a power of two. Output the logarithm. */
17353 || (i
= exact_log2 (INTVAL (x
))) < 0)
17354 output_operand_lossage ("invalid %%p value");
17356 fprintf (file
, "%d", i
);
17360 /* The operand must be an indirect memory reference. The result
17361 is the register name. */
17362 if (GET_CODE (x
) != MEM
|| GET_CODE (XEXP (x
, 0)) != REG
17363 || REGNO (XEXP (x
, 0)) >= 32)
17364 output_operand_lossage ("invalid %%P value");
17366 fputs (reg_names
[REGNO (XEXP (x
, 0))], file
);
17370 /* This outputs the logical code corresponding to a boolean
17371 expression. The expression may have one or both operands
17372 negated (if one, only the first one). For condition register
17373 logical operations, it will also treat the negated
17374 CR codes as NOTs, but not handle NOTs of them. */
17376 const char *const *t
= 0;
17378 enum rtx_code code
= GET_CODE (x
);
17379 static const char * const tbl
[3][3] = {
17380 { "and", "andc", "nor" },
17381 { "or", "orc", "nand" },
17382 { "xor", "eqv", "xor" } };
17386 else if (code
== IOR
)
17388 else if (code
== XOR
)
17391 output_operand_lossage ("invalid %%q value");
17393 if (GET_CODE (XEXP (x
, 0)) != NOT
)
17397 if (GET_CODE (XEXP (x
, 1)) == NOT
)
17408 if (! TARGET_MFCRF
)
17414 /* X is a CR register. Print the mask for `mtcrf'. */
17415 if (GET_CODE (x
) != REG
|| ! CR_REGNO_P (REGNO (x
)))
17416 output_operand_lossage ("invalid %%R value");
17418 fprintf (file
, "%d", 128 >> (REGNO (x
) - CR0_REGNO
));
17422 /* Low 5 bits of 32 - value */
17424 output_operand_lossage ("invalid %%s value");
17426 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
, (32 - INTVAL (x
)) & 31);
17430 /* PowerPC64 mask position. All 0's is excluded.
17431 CONST_INT 32-bit mask is considered sign-extended so any
17432 transition must occur within the CONST_INT, not on the boundary. */
17433 if (! mask64_operand (x
, DImode
))
17434 output_operand_lossage ("invalid %%S value");
17438 if (uval
& 1) /* Clear Left */
17440 #if HOST_BITS_PER_WIDE_INT > 64
17441 uval
&= ((unsigned HOST_WIDE_INT
) 1 << 64) - 1;
17445 else /* Clear Right */
17448 #if HOST_BITS_PER_WIDE_INT > 64
17449 uval
&= ((unsigned HOST_WIDE_INT
) 1 << 64) - 1;
17455 gcc_assert (i
>= 0);
17456 fprintf (file
, "%d", i
);
17460 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
17461 gcc_assert (REG_P (x
) && GET_MODE (x
) == CCmode
);
17463 /* Bit 3 is OV bit. */
17464 i
= 4 * (REGNO (x
) - CR0_REGNO
) + 3;
17466 /* If we want bit 31, write a shift count of zero, not 32. */
17467 fprintf (file
, "%d", i
== 31 ? 0 : i
+ 1);
17471 /* Print the symbolic name of a branch target register. */
17472 if (GET_CODE (x
) != REG
|| (REGNO (x
) != LR_REGNO
17473 && REGNO (x
) != CTR_REGNO
))
17474 output_operand_lossage ("invalid %%T value");
17475 else if (REGNO (x
) == LR_REGNO
)
17476 fputs ("lr", file
);
17478 fputs ("ctr", file
);
17482 /* High-order 16 bits of constant for use in unsigned operand. */
17484 output_operand_lossage ("invalid %%u value");
17486 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
17487 (INTVAL (x
) >> 16) & 0xffff);
17491 /* High-order 16 bits of constant for use in signed operand. */
17493 output_operand_lossage ("invalid %%v value");
17495 fprintf (file
, HOST_WIDE_INT_PRINT_HEX
,
17496 (INTVAL (x
) >> 16) & 0xffff);
17500 /* Print `u' if this has an auto-increment or auto-decrement. */
17502 && (GET_CODE (XEXP (x
, 0)) == PRE_INC
17503 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
17504 || GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
))
17509 /* Print the trap code for this operand. */
17510 switch (GET_CODE (x
))
17513 fputs ("eq", file
); /* 4 */
17516 fputs ("ne", file
); /* 24 */
17519 fputs ("lt", file
); /* 16 */
17522 fputs ("le", file
); /* 20 */
17525 fputs ("gt", file
); /* 8 */
17528 fputs ("ge", file
); /* 12 */
17531 fputs ("llt", file
); /* 2 */
17534 fputs ("lle", file
); /* 6 */
17537 fputs ("lgt", file
); /* 1 */
17540 fputs ("lge", file
); /* 5 */
17543 gcc_unreachable ();
17548 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
17551 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
,
17552 ((INTVAL (x
) & 0xffff) ^ 0x8000) - 0x8000);
17554 print_operand (file
, x
, 0);
17558 /* MB value for a PowerPC64 rldic operand. */
17559 i
= clz_hwi (INTVAL (x
));
17561 fprintf (file
, "%d", i
);
17565 /* X is a FPR or Altivec register used in a VSX context. */
17566 if (GET_CODE (x
) != REG
|| !VSX_REGNO_P (REGNO (x
)))
17567 output_operand_lossage ("invalid %%x value");
17570 int reg
= REGNO (x
);
17571 int vsx_reg
= (FP_REGNO_P (reg
)
17573 : reg
- FIRST_ALTIVEC_REGNO
+ 32);
17575 #ifdef TARGET_REGNAMES
17576 if (TARGET_REGNAMES
)
17577 fprintf (file
, "%%vs%d", vsx_reg
);
17580 fprintf (file
, "%d", vsx_reg
);
17586 && (legitimate_indexed_address_p (XEXP (x
, 0), 0)
17587 || (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
17588 && legitimate_indexed_address_p (XEXP (XEXP (x
, 0), 1), 0))))
17593 /* Like 'L', for third word of TImode/PTImode */
17595 fputs (reg_names
[REGNO (x
) + 2], file
);
17596 else if (MEM_P (x
))
17598 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
17599 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
17600 output_address (plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0), 8));
17601 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
17602 output_address (plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0), 8));
17604 output_address (XEXP (adjust_address_nv (x
, SImode
, 8), 0));
17605 if (small_data_operand (x
, GET_MODE (x
)))
17606 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
17607 reg_names
[SMALL_DATA_REG
]);
17612 /* X is a SYMBOL_REF. Write out the name preceded by a
17613 period and without any trailing data in brackets. Used for function
17614 names. If we are configured for System V (or the embedded ABI) on
17615 the PowerPC, do not emit the period, since those systems do not use
17616 TOCs and the like. */
17617 gcc_assert (GET_CODE (x
) == SYMBOL_REF
);
17619 /* For macho, check to see if we need a stub. */
17622 const char *name
= XSTR (x
, 0);
17624 if (darwin_emit_branch_islands
17625 && MACHOPIC_INDIRECT
17626 && machopic_classify_symbol (x
) == MACHOPIC_UNDEFINED_FUNCTION
)
17627 name
= machopic_indirection_name (x
, /*stub_p=*/true);
17629 assemble_name (file
, name
);
17631 else if (!DOT_SYMBOLS
)
17632 assemble_name (file
, XSTR (x
, 0));
17634 rs6000_output_function_entry (file
, XSTR (x
, 0));
17638 /* Like 'L', for last word of TImode/PTImode. */
17640 fputs (reg_names
[REGNO (x
) + 3], file
);
17641 else if (MEM_P (x
))
17643 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
17644 || GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
17645 output_address (plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0), 12));
17646 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
17647 output_address (plus_constant (Pmode
, XEXP (XEXP (x
, 0), 0), 12));
17649 output_address (XEXP (adjust_address_nv (x
, SImode
, 12), 0));
17650 if (small_data_operand (x
, GET_MODE (x
)))
17651 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
17652 reg_names
[SMALL_DATA_REG
]);
17656 /* Print AltiVec or SPE memory operand. */
17661 gcc_assert (MEM_P (x
));
17665 /* Ugly hack because %y is overloaded. */
17666 if ((TARGET_SPE
|| TARGET_E500_DOUBLE
)
17667 && (GET_MODE_SIZE (GET_MODE (x
)) == 8
17668 || GET_MODE (x
) == TFmode
17669 || GET_MODE (x
) == TImode
17670 || GET_MODE (x
) == PTImode
))
17672 /* Handle [reg]. */
17675 fprintf (file
, "0(%s)", reg_names
[REGNO (tmp
)]);
17678 /* Handle [reg+UIMM]. */
17679 else if (GET_CODE (tmp
) == PLUS
&&
17680 GET_CODE (XEXP (tmp
, 1)) == CONST_INT
)
17684 gcc_assert (REG_P (XEXP (tmp
, 0)));
17686 x
= INTVAL (XEXP (tmp
, 1));
17687 fprintf (file
, "%d(%s)", x
, reg_names
[REGNO (XEXP (tmp
, 0))]);
17691 /* Fall through. Must be [reg+reg]. */
17693 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x
))
17694 && GET_CODE (tmp
) == AND
17695 && GET_CODE (XEXP (tmp
, 1)) == CONST_INT
17696 && INTVAL (XEXP (tmp
, 1)) == -16)
17697 tmp
= XEXP (tmp
, 0);
17698 else if (VECTOR_MEM_VSX_P (GET_MODE (x
))
17699 && GET_CODE (tmp
) == PRE_MODIFY
)
17700 tmp
= XEXP (tmp
, 1);
17702 fprintf (file
, "0,%s", reg_names
[REGNO (tmp
)]);
17705 if (!GET_CODE (tmp
) == PLUS
17706 || !REG_P (XEXP (tmp
, 0))
17707 || !REG_P (XEXP (tmp
, 1)))
17709 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
17713 if (REGNO (XEXP (tmp
, 0)) == 0)
17714 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (tmp
, 1)) ],
17715 reg_names
[ REGNO (XEXP (tmp
, 0)) ]);
17717 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (tmp
, 0)) ],
17718 reg_names
[ REGNO (XEXP (tmp
, 1)) ]);
17725 fprintf (file
, "%s", reg_names
[REGNO (x
)]);
17726 else if (MEM_P (x
))
17728 /* We need to handle PRE_INC and PRE_DEC here, since we need to
17729 know the width from the mode. */
17730 if (GET_CODE (XEXP (x
, 0)) == PRE_INC
)
17731 fprintf (file
, "%d(%s)", GET_MODE_SIZE (GET_MODE (x
)),
17732 reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))]);
17733 else if (GET_CODE (XEXP (x
, 0)) == PRE_DEC
)
17734 fprintf (file
, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x
)),
17735 reg_names
[REGNO (XEXP (XEXP (x
, 0), 0))]);
17736 else if (GET_CODE (XEXP (x
, 0)) == PRE_MODIFY
)
17737 output_address (XEXP (XEXP (x
, 0), 1));
17739 output_address (XEXP (x
, 0));
17743 if (toc_relative_expr_p (x
, false))
17744 /* This hack along with a corresponding hack in
17745 rs6000_output_addr_const_extra arranges to output addends
17746 where the assembler expects to find them. eg.
17747 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
17748 without this hack would be output as "x@toc+4". We
17750 output_addr_const (file
, CONST_CAST_RTX (tocrel_base
));
17752 output_addr_const (file
, x
);
17757 assemble_name (file
, rs6000_get_some_local_dynamic_name ());
17761 output_operand_lossage ("invalid %%xn code");
17765 /* Print the address of an operand. */
17768 print_operand_address (FILE *file
, rtx x
)
17771 fprintf (file
, "0(%s)", reg_names
[ REGNO (x
) ]);
17772 else if (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == CONST
17773 || GET_CODE (x
) == LABEL_REF
)
17775 output_addr_const (file
, x
);
17776 if (small_data_operand (x
, GET_MODE (x
)))
17777 fprintf (file
, "@%s(%s)", SMALL_DATA_RELOC
,
17778 reg_names
[SMALL_DATA_REG
]);
17780 gcc_assert (!TARGET_TOC
);
17782 else if (GET_CODE (x
) == PLUS
&& REG_P (XEXP (x
, 0))
17783 && REG_P (XEXP (x
, 1)))
17785 if (REGNO (XEXP (x
, 0)) == 0)
17786 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (x
, 1)) ],
17787 reg_names
[ REGNO (XEXP (x
, 0)) ]);
17789 fprintf (file
, "%s,%s", reg_names
[ REGNO (XEXP (x
, 0)) ],
17790 reg_names
[ REGNO (XEXP (x
, 1)) ]);
17792 else if (GET_CODE (x
) == PLUS
&& REG_P (XEXP (x
, 0))
17793 && GET_CODE (XEXP (x
, 1)) == CONST_INT
)
17794 fprintf (file
, HOST_WIDE_INT_PRINT_DEC
"(%s)",
17795 INTVAL (XEXP (x
, 1)), reg_names
[ REGNO (XEXP (x
, 0)) ]);
17797 else if (GET_CODE (x
) == LO_SUM
&& REG_P (XEXP (x
, 0))
17798 && CONSTANT_P (XEXP (x
, 1)))
17800 fprintf (file
, "lo16(");
17801 output_addr_const (file
, XEXP (x
, 1));
17802 fprintf (file
, ")(%s)", reg_names
[ REGNO (XEXP (x
, 0)) ]);
17806 else if (GET_CODE (x
) == LO_SUM
&& REG_P (XEXP (x
, 0))
17807 && CONSTANT_P (XEXP (x
, 1)))
17809 output_addr_const (file
, XEXP (x
, 1));
17810 fprintf (file
, "@l(%s)", reg_names
[ REGNO (XEXP (x
, 0)) ]);
17813 else if (toc_relative_expr_p (x
, false))
17815 /* This hack along with a corresponding hack in
17816 rs6000_output_addr_const_extra arranges to output addends
17817 where the assembler expects to find them. eg.
17819 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
17820 without this hack would be output as "x@toc+8@l(9)". We
17821 want "x+8@toc@l(9)". */
17822 output_addr_const (file
, CONST_CAST_RTX (tocrel_base
));
17823 if (GET_CODE (x
) == LO_SUM
)
17824 fprintf (file
, "@l(%s)", reg_names
[REGNO (XEXP (x
, 0))]);
17826 fprintf (file
, "(%s)", reg_names
[REGNO (XVECEXP (tocrel_base
, 0, 1))]);
17829 gcc_unreachable ();
17832 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
17835 rs6000_output_addr_const_extra (FILE *file
, rtx x
)
17837 if (GET_CODE (x
) == UNSPEC
)
17838 switch (XINT (x
, 1))
17840 case UNSPEC_TOCREL
:
17841 gcc_checking_assert (GET_CODE (XVECEXP (x
, 0, 0)) == SYMBOL_REF
17842 && REG_P (XVECEXP (x
, 0, 1))
17843 && REGNO (XVECEXP (x
, 0, 1)) == TOC_REGISTER
);
17844 output_addr_const (file
, XVECEXP (x
, 0, 0));
17845 if (x
== tocrel_base
&& tocrel_offset
!= const0_rtx
)
17847 if (INTVAL (tocrel_offset
) >= 0)
17848 fprintf (file
, "+");
17849 output_addr_const (file
, CONST_CAST_RTX (tocrel_offset
));
17851 if (!TARGET_AIX
|| (TARGET_ELF
&& TARGET_MINIMAL_TOC
))
17854 assemble_name (file
, toc_label_name
);
17856 else if (TARGET_ELF
)
17857 fputs ("@toc", file
);
17861 case UNSPEC_MACHOPIC_OFFSET
:
17862 output_addr_const (file
, XVECEXP (x
, 0, 0));
17864 machopic_output_function_base_name (file
);
17871 /* Target hook for assembling integer objects. The PowerPC version has
17872 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
17873 is defined. It also needs to handle DI-mode objects on 64-bit
17877 rs6000_assemble_integer (rtx x
, unsigned int size
, int aligned_p
)
17879 #ifdef RELOCATABLE_NEEDS_FIXUP
17880 /* Special handling for SI values. */
17881 if (RELOCATABLE_NEEDS_FIXUP
&& size
== 4 && aligned_p
)
17883 static int recurse
= 0;
17885 /* For -mrelocatable, we mark all addresses that need to be fixed up in
17886 the .fixup section. Since the TOC section is already relocated, we
17887 don't need to mark it here. We used to skip the text section, but it
17888 should never be valid for relocated addresses to be placed in the text
17890 if (TARGET_RELOCATABLE
17891 && in_section
!= toc_section
17893 && GET_CODE (x
) != CONST_INT
17894 && GET_CODE (x
) != CONST_DOUBLE
17900 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCP", fixuplabelno
);
17902 ASM_OUTPUT_LABEL (asm_out_file
, buf
);
17903 fprintf (asm_out_file
, "\t.long\t(");
17904 output_addr_const (asm_out_file
, x
);
17905 fprintf (asm_out_file
, ")@fixup\n");
17906 fprintf (asm_out_file
, "\t.section\t\".fixup\",\"aw\"\n");
17907 ASM_OUTPUT_ALIGN (asm_out_file
, 2);
17908 fprintf (asm_out_file
, "\t.long\t");
17909 assemble_name (asm_out_file
, buf
);
17910 fprintf (asm_out_file
, "\n\t.previous\n");
17914 /* Remove initial .'s to turn a -mcall-aixdesc function
17915 address into the address of the descriptor, not the function
17917 else if (GET_CODE (x
) == SYMBOL_REF
17918 && XSTR (x
, 0)[0] == '.'
17919 && DEFAULT_ABI
== ABI_AIX
)
17921 const char *name
= XSTR (x
, 0);
17922 while (*name
== '.')
17925 fprintf (asm_out_file
, "\t.long\t%s\n", name
);
17929 #endif /* RELOCATABLE_NEEDS_FIXUP */
17930 return default_assemble_integer (x
, size
, aligned_p
);
17933 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
17934 /* Emit an assembler directive to set symbol visibility for DECL to
17935 VISIBILITY_TYPE. */
17938 rs6000_assemble_visibility (tree decl
, int vis
)
17943 /* Functions need to have their entry point symbol visibility set as
17944 well as their descriptor symbol visibility. */
17945 if (DEFAULT_ABI
== ABI_AIX
17947 && TREE_CODE (decl
) == FUNCTION_DECL
)
17949 static const char * const visibility_types
[] = {
17950 NULL
, "internal", "hidden", "protected"
17953 const char *name
, *type
;
17955 name
= ((* targetm
.strip_name_encoding
)
17956 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl
))));
17957 type
= visibility_types
[vis
];
17959 fprintf (asm_out_file
, "\t.%s\t%s\n", type
, name
);
17960 fprintf (asm_out_file
, "\t.%s\t.%s\n", type
, name
);
17963 default_assemble_visibility (decl
, vis
);
enum rtx_code
rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
{
  /* Reversal of FP compares takes care -- an ordered compare
     becomes an unordered compare and vice versa.  */
  if (mode == CCFPmode
      && (!flag_finite_math_only
	  || code == UNLT || code == UNLE || code == UNGT || code == UNGE
	  || code == UNEQ || code == LTGT))
    return reverse_condition_maybe_unordered (code);
  else
    return reverse_condition (code);
}
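/* For example, reversing GE in CCFPmode without -ffinite-math-only yields
   UNLT (so NaN operands still take the reversed branch), whereas an
   integer-mode GE is simply reversed to LT.  */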
/* Generate a compare for CODE.  Return a brand-new rtx that
   represents the result of the compare.  */

static rtx
rs6000_generate_compare (rtx cmp, enum machine_mode mode)
{
  enum machine_mode comp_mode;
  rtx compare_result;
  enum rtx_code code = GET_CODE (cmp);
  rtx op0 = XEXP (cmp, 0);
  rtx op1 = XEXP (cmp, 1);
  if (FLOAT_MODE_P (mode))
    comp_mode = CCFPmode;
  else if (code == GTU || code == LTU
	   || code == GEU || code == LEU)
    comp_mode = CCUNSmode;
  else if ((code == EQ || code == NE)
	   && unsigned_reg_p (op0)
	   && (unsigned_reg_p (op1)
	       || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
    /* These are unsigned values, perhaps there will be a later
       ordering compare that can be shared with this one.  */
    comp_mode = CCUNSmode;
  else
    comp_mode = CCmode;
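  /* E.g. an SImode GTU comparison selects CCUNSmode here, so the compare
     pattern ultimately emits the unsigned cmplw/cmpld form rather than
     the signed cmpw/cmpd form.  */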
  /* If we have an unsigned compare, make sure we don't have a signed value
     as an immediate.  */
  if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
      && INTVAL (op1) < 0)
    {
      op0 = copy_rtx_if_shared (op0);
      op1 = force_reg (GET_MODE (op0), op1);
      cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
    }

  /* First, the compare.  */
  compare_result = gen_reg_rtx (comp_mode);
18021 /* E500 FP compare instructions on the GPRs. Yuck! */
18022 if ((!TARGET_FPRS
&& TARGET_HARD_FLOAT
)
18023 && FLOAT_MODE_P (mode
))
18025 rtx cmp
, or_result
, compare_result2
;
18026 enum machine_mode op_mode
= GET_MODE (op0
);
18029 if (op_mode
== VOIDmode
)
18030 op_mode
= GET_MODE (op1
);
18032 /* First reverse the condition codes that aren't directly supported. */
18040 code
= reverse_condition_maybe_unordered (code
);
18053 gcc_unreachable ();
18056 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
18057 This explains the following mess. */
18065 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18066 ? gen_tstsfeq_gpr (compare_result
, op0
, op1
)
18067 : gen_cmpsfeq_gpr (compare_result
, op0
, op1
);
18071 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18072 ? gen_tstdfeq_gpr (compare_result
, op0
, op1
)
18073 : gen_cmpdfeq_gpr (compare_result
, op0
, op1
);
18077 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18078 ? gen_tsttfeq_gpr (compare_result
, op0
, op1
)
18079 : gen_cmptfeq_gpr (compare_result
, op0
, op1
);
18083 gcc_unreachable ();
18092 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18093 ? gen_tstsfgt_gpr (compare_result
, op0
, op1
)
18094 : gen_cmpsfgt_gpr (compare_result
, op0
, op1
);
18098 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18099 ? gen_tstdfgt_gpr (compare_result
, op0
, op1
)
18100 : gen_cmpdfgt_gpr (compare_result
, op0
, op1
);
18104 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18105 ? gen_tsttfgt_gpr (compare_result
, op0
, op1
)
18106 : gen_cmptfgt_gpr (compare_result
, op0
, op1
);
18110 gcc_unreachable ();
18119 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18120 ? gen_tstsflt_gpr (compare_result
, op0
, op1
)
18121 : gen_cmpsflt_gpr (compare_result
, op0
, op1
);
18125 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18126 ? gen_tstdflt_gpr (compare_result
, op0
, op1
)
18127 : gen_cmpdflt_gpr (compare_result
, op0
, op1
);
18131 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18132 ? gen_tsttflt_gpr (compare_result
, op0
, op1
)
18133 : gen_cmptflt_gpr (compare_result
, op0
, op1
);
18137 gcc_unreachable ();
18142 gcc_unreachable ();
18145 /* Synthesize LE and GE from LT/GT || EQ. */
18146 if (code
== LE
|| code
== GE
)
18150 compare_result2
= gen_reg_rtx (CCFPmode
);
18156 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18157 ? gen_tstsfeq_gpr (compare_result2
, op0
, op1
)
18158 : gen_cmpsfeq_gpr (compare_result2
, op0
, op1
);
18162 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18163 ? gen_tstdfeq_gpr (compare_result2
, op0
, op1
)
18164 : gen_cmpdfeq_gpr (compare_result2
, op0
, op1
);
18168 cmp
= (flag_finite_math_only
&& !flag_trapping_math
)
18169 ? gen_tsttfeq_gpr (compare_result2
, op0
, op1
)
18170 : gen_cmptfeq_gpr (compare_result2
, op0
, op1
);
18174 gcc_unreachable ();
18179 /* OR them together. */
18180 or_result
= gen_reg_rtx (CCFPmode
);
18181 cmp
= gen_e500_cr_ior_compare (or_result
, compare_result
,
18183 compare_result
= or_result
;
18186 code
= reverse_p
? NE
: EQ
;
18192 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
18193 CLOBBERs to match cmptf_internal2 pattern. */
18194 if (comp_mode
== CCFPmode
&& TARGET_XL_COMPAT
18195 && GET_MODE (op0
) == TFmode
18196 && !TARGET_IEEEQUAD
18197 && TARGET_HARD_FLOAT
&& TARGET_FPRS
&& TARGET_LONG_DOUBLE_128
)
18198 emit_insn (gen_rtx_PARALLEL (VOIDmode
,
18200 gen_rtx_SET (VOIDmode
,
18202 gen_rtx_COMPARE (comp_mode
, op0
, op1
)),
18203 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
18204 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
18205 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
18206 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
18207 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
18208 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
18209 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
18210 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (DFmode
)),
18211 gen_rtx_CLOBBER (VOIDmode
, gen_rtx_SCRATCH (Pmode
)))));
18212 else if (GET_CODE (op1
) == UNSPEC
18213 && XINT (op1
, 1) == UNSPEC_SP_TEST
)
18215 rtx op1b
= XVECEXP (op1
, 0, 0);
18216 comp_mode
= CCEQmode
;
18217 compare_result
= gen_reg_rtx (CCEQmode
);
18219 emit_insn (gen_stack_protect_testdi (compare_result
, op0
, op1b
));
18221 emit_insn (gen_stack_protect_testsi (compare_result
, op0
, op1b
));
18224 emit_insn (gen_rtx_SET (VOIDmode
, compare_result
,
18225 gen_rtx_COMPARE (comp_mode
, op0
, op1
)));
18228 /* Some kinds of FP comparisons need an OR operation;
18229 under flag_finite_math_only we don't bother. */
18230 if (FLOAT_MODE_P (mode
)
18231 && !flag_finite_math_only
18232 && !(TARGET_HARD_FLOAT
&& !TARGET_FPRS
)
18233 && (code
== LE
|| code
== GE
18234 || code
== UNEQ
|| code
== LTGT
18235 || code
== UNGT
|| code
== UNLT
))
18237 enum rtx_code or1
, or2
;
18238 rtx or1_rtx
, or2_rtx
, compare2_rtx
;
18239 rtx or_result
= gen_reg_rtx (CCEQmode
);
18243 case LE
: or1
= LT
; or2
= EQ
; break;
18244 case GE
: or1
= GT
; or2
= EQ
; break;
18245 case UNEQ
: or1
= UNORDERED
; or2
= EQ
; break;
18246 case LTGT
: or1
= LT
; or2
= GT
; break;
18247 case UNGT
: or1
= UNORDERED
; or2
= GT
; break;
18248 case UNLT
: or1
= UNORDERED
; or2
= LT
; break;
18249 default: gcc_unreachable ();
18251 validate_condition_mode (or1
, comp_mode
);
18252 validate_condition_mode (or2
, comp_mode
);
18253 or1_rtx
= gen_rtx_fmt_ee (or1
, SImode
, compare_result
, const0_rtx
);
18254 or2_rtx
= gen_rtx_fmt_ee (or2
, SImode
, compare_result
, const0_rtx
);
18255 compare2_rtx
= gen_rtx_COMPARE (CCEQmode
,
18256 gen_rtx_IOR (SImode
, or1_rtx
, or2_rtx
),
18258 emit_insn (gen_rtx_SET (VOIDmode
, or_result
, compare2_rtx
));
18260 compare_result
= or_result
;
18264 validate_condition_mode (code
, GET_MODE (compare_result
));
18266 return gen_rtx_fmt_ee (code
, VOIDmode
, compare_result
, const0_rtx
);
18270 /* Emit the RTL for an sISEL pattern. */
18273 rs6000_emit_sISEL (enum machine_mode mode ATTRIBUTE_UNUSED
, rtx operands
[])
18275 rs6000_emit_int_cmove (operands
[0], operands
[1], const1_rtx
, const0_rtx
);
18279 rs6000_emit_sCOND (enum machine_mode mode
, rtx operands
[])
18282 enum machine_mode op_mode
;
18283 enum rtx_code cond_code
;
18284 rtx result
= operands
[0];
18286 if (TARGET_ISEL
&& (mode
== SImode
|| mode
== DImode
))
18288 rs6000_emit_sISEL (mode
, operands
);
18292 condition_rtx
= rs6000_generate_compare (operands
[1], mode
);
18293 cond_code
= GET_CODE (condition_rtx
);
18295 if (FLOAT_MODE_P (mode
)
18296 && !TARGET_FPRS
&& TARGET_HARD_FLOAT
)
18300 PUT_MODE (condition_rtx
, SImode
);
18301 t
= XEXP (condition_rtx
, 0);
18303 gcc_assert (cond_code
== NE
|| cond_code
== EQ
);
18305 if (cond_code
== NE
)
18306 emit_insn (gen_e500_flip_gt_bit (t
, t
));
18308 emit_insn (gen_move_from_CR_gt_bit (result
, t
));
18312 if (cond_code
== NE
18313 || cond_code
== GE
|| cond_code
== LE
18314 || cond_code
== GEU
|| cond_code
== LEU
18315 || cond_code
== ORDERED
|| cond_code
== UNGE
|| cond_code
== UNLE
)
18317 rtx not_result
= gen_reg_rtx (CCEQmode
);
18318 rtx not_op
, rev_cond_rtx
;
18319 enum machine_mode cc_mode
;
18321 cc_mode
= GET_MODE (XEXP (condition_rtx
, 0));
18323 rev_cond_rtx
= gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode
, cond_code
),
18324 SImode
, XEXP (condition_rtx
, 0), const0_rtx
);
18325 not_op
= gen_rtx_COMPARE (CCEQmode
, rev_cond_rtx
, const0_rtx
);
18326 emit_insn (gen_rtx_SET (VOIDmode
, not_result
, not_op
));
18327 condition_rtx
= gen_rtx_EQ (VOIDmode
, not_result
, const0_rtx
);
18330 op_mode
= GET_MODE (XEXP (operands
[1], 0));
18331 if (op_mode
== VOIDmode
)
18332 op_mode
= GET_MODE (XEXP (operands
[1], 1));
18334 if (TARGET_POWERPC64
&& (op_mode
== DImode
|| FLOAT_MODE_P (mode
)))
18336 PUT_MODE (condition_rtx
, DImode
);
18337 convert_move (result
, condition_rtx
, 0);
18341 PUT_MODE (condition_rtx
, SImode
);
18342 emit_insn (gen_rtx_SET (VOIDmode
, result
, condition_rtx
));
/* Emit a branch of kind CODE to location LOC.  */

void
rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
{
  rtx condition_rtx, loc_ref;

  condition_rtx = rs6000_generate_compare (operands[0], mode);
  loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
						     loc_ref, pc_rtx)));
}
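/* The jump emitted above has the canonical conditional-branch RTL shape

     (set (pc) (if_then_else COND (label_ref LOC) (pc)))

   which the branch patterns in rs6000.md then match.  */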
/* Return the string to output a conditional branch to LABEL, which is
   the operand template of the label, or NULL if the branch is really a
   conditional return.

   OP is the conditional expression.  XEXP (OP, 0) is assumed to be a
   condition code register and its mode specifies what kind of
   comparison we made.

   REVERSED is nonzero if we should reverse the sense of the comparison.

   INSN is the insn.  */
char *
output_cbranch (rtx op, const char *label, int reversed, rtx insn)
{
  static char string[64];
  enum rtx_code code = GET_CODE (op);
  rtx cc_reg = XEXP (op, 0);
  enum machine_mode mode = GET_MODE (cc_reg);
  int cc_regno = REGNO (cc_reg) - CR0_REGNO;
  int need_longbranch = label != NULL && get_attr_length (insn) == 8;
  int really_reversed = reversed ^ need_longbranch;
  char *s = string;
  const char *ccode;
  const char *pred;
  rtx note;
18387 validate_condition_mode (code
, mode
);
18389 /* Work out which way this really branches. We could use
18390 reverse_condition_maybe_unordered here always but this
18391 makes the resulting assembler clearer. */
18392 if (really_reversed
)
18394 /* Reversal of FP compares takes care -- an ordered compare
18395 becomes an unordered compare and vice versa. */
18396 if (mode
== CCFPmode
)
18397 code
= reverse_condition_maybe_unordered (code
);
18399 code
= reverse_condition (code
);
18402 if ((!TARGET_FPRS
&& TARGET_HARD_FLOAT
) && mode
== CCFPmode
)
18404 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
18409 /* Opposite of GT. */
18418 gcc_unreachable ();
18424 /* Not all of these are actually distinct opcodes, but
18425 we distinguish them for clarity of the resulting assembler. */
18426 case NE
: case LTGT
:
18427 ccode
= "ne"; break;
18428 case EQ
: case UNEQ
:
18429 ccode
= "eq"; break;
18431 ccode
= "ge"; break;
18432 case GT
: case GTU
: case UNGT
:
18433 ccode
= "gt"; break;
18435 ccode
= "le"; break;
18436 case LT
: case LTU
: case UNLT
:
18437 ccode
= "lt"; break;
18438 case UNORDERED
: ccode
= "un"; break;
18439 case ORDERED
: ccode
= "nu"; break;
18440 case UNGE
: ccode
= "nl"; break;
18441 case UNLE
: ccode
= "ng"; break;
18443 gcc_unreachable ();
18446 /* Maybe we have a guess as to how likely the branch is. */
18448 note
= find_reg_note (insn
, REG_BR_PROB
, NULL_RTX
);
18449 if (note
!= NULL_RTX
)
18451 /* PROB is the difference from 50%. */
18452 int prob
= XINT (note
, 0) - REG_BR_PROB_BASE
/ 2;
18454 /* Only hint for highly probable/improbable branches on newer
18455 cpus as static prediction overrides processor dynamic
18456 prediction. For older cpus we may as well always hint, but
18457 assume not taken for branches that are very close to 50% as a
18458 mispredicted taken branch is more expensive than a
18459 mispredicted not-taken branch. */
18460 if (rs6000_always_hint
18461 || (abs (prob
) > REG_BR_PROB_BASE
/ 100 * 48
18462 && br_prob_note_reliable_p (note
)))
18464 if (abs (prob
) > REG_BR_PROB_BASE
/ 20
18465 && ((prob
> 0) ^ need_longbranch
))
18473 s
+= sprintf (s
, "b%slr%s ", ccode
, pred
);
18475 s
+= sprintf (s
, "b%s%s ", ccode
, pred
);
18477 /* We need to escape any '%' characters in the reg_names string.
18478 Assume they'd only be the first character.... */
18479 if (reg_names
[cc_regno
+ CR0_REGNO
][0] == '%')
18481 s
+= sprintf (s
, "%s", reg_names
[cc_regno
+ CR0_REGNO
]);
18485 /* If the branch distance was too far, we may have to use an
18486 unconditional branch to go the distance. */
18487 if (need_longbranch
)
18488 s
+= sprintf (s
, ",$+8\n\tb %s", label
);
18490 s
+= sprintf (s
, ",%s", label
);
/* Return the string to flip the GT bit on a CR.  */
char *
output_e500_flip_gt_bit (rtx dst, rtx src)
{
  static char string[64];
  int a, b;

  gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
	      && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));

  /* GT bit.  */
  a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
  b = 4 * (REGNO (src) - CR0_REGNO) + 1;

  sprintf (string, "crnot %d,%d", a, b);
  return string;
}
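/* Worked example: flipping the GT bit of CR1 into CR0 gives
   a = 4*0 + 1 = 1 and b = 4*1 + 1 = 5, i.e. the template "crnot 1,5".  */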
18514 /* Return insn for VSX or Altivec comparisons. */
18517 rs6000_emit_vector_compare_inner (enum rtx_code code
, rtx op0
, rtx op1
)
18520 enum machine_mode mode
= GET_MODE (op0
);
18528 if (GET_MODE_CLASS (mode
) == MODE_VECTOR_INT
)
18538 mask
= gen_reg_rtx (mode
);
18539 emit_insn (gen_rtx_SET (VOIDmode
,
18541 gen_rtx_fmt_ee (code
, mode
, op0
, op1
)));
18548 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
18549 DMODE is expected destination mode. This is a recursive function. */
18552 rs6000_emit_vector_compare (enum rtx_code rcode
,
18554 enum machine_mode dmode
)
18557 bool swap_operands
= false;
18558 bool try_again
= false;
18560 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode
));
18561 gcc_assert (GET_MODE (op0
) == GET_MODE (op1
));
18563 /* See if the comparison works as is. */
18564 mask
= rs6000_emit_vector_compare_inner (rcode
, op0
, op1
);
18572 swap_operands
= true;
18577 swap_operands
= true;
18585 /* Invert condition and try again.
18586 e.g., A != B becomes ~(A==B). */
18588 enum rtx_code rev_code
;
18589 enum insn_code nor_code
;
18592 rev_code
= reverse_condition_maybe_unordered (rcode
);
18593 if (rev_code
== UNKNOWN
)
18596 nor_code
= optab_handler (one_cmpl_optab
, dmode
);
18597 if (nor_code
== CODE_FOR_nothing
)
18600 mask2
= rs6000_emit_vector_compare (rev_code
, op0
, op1
, dmode
);
18604 mask
= gen_reg_rtx (dmode
);
18605 emit_insn (GEN_FCN (nor_code
) (mask
, mask2
));
18613 /* Try GT/GTU/LT/LTU OR EQ */
18616 enum insn_code ior_code
;
18617 enum rtx_code new_code
;
18638 gcc_unreachable ();
18641 ior_code
= optab_handler (ior_optab
, dmode
);
18642 if (ior_code
== CODE_FOR_nothing
)
18645 c_rtx
= rs6000_emit_vector_compare (new_code
, op0
, op1
, dmode
);
18649 eq_rtx
= rs6000_emit_vector_compare (EQ
, op0
, op1
, dmode
);
18653 mask
= gen_reg_rtx (dmode
);
18654 emit_insn (GEN_FCN (ior_code
) (mask
, c_rtx
, eq_rtx
));
18672 mask
= rs6000_emit_vector_compare_inner (rcode
, op0
, op1
);
18677 /* You only get two chances. */
18681 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
18682 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
18683 operands for the relation operation COND. */
18686 rs6000_emit_vector_cond_expr (rtx dest
, rtx op_true
, rtx op_false
,
18687 rtx cond
, rtx cc_op0
, rtx cc_op1
)
18689 enum machine_mode dest_mode
= GET_MODE (dest
);
18690 enum machine_mode mask_mode
= GET_MODE (cc_op0
);
18691 enum rtx_code rcode
= GET_CODE (cond
);
18692 enum machine_mode cc_mode
= CCmode
;
18696 bool invert_move
= false;
18698 if (VECTOR_UNIT_NONE_P (dest_mode
))
18701 gcc_assert (GET_MODE_SIZE (dest_mode
) == GET_MODE_SIZE (mask_mode
)
18702 && GET_MODE_NUNITS (dest_mode
) == GET_MODE_NUNITS (mask_mode
));
18706 /* Swap operands if we can, and fall back to doing the operation as
18707 specified, and doing a NOR to invert the test. */
18713 /* Invert condition and try again.
18714 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
18715 invert_move
= true;
18716 rcode
= reverse_condition_maybe_unordered (rcode
);
18717 if (rcode
== UNKNOWN
)
18721 /* Mark unsigned tests with CCUNSmode. */
18726 cc_mode
= CCUNSmode
;
18733 /* Get the vector mask for the given relational operations. */
18734 mask
= rs6000_emit_vector_compare (rcode
, cc_op0
, cc_op1
, mask_mode
);
18742 op_true
= op_false
;
18746 cond2
= gen_rtx_fmt_ee (NE
, cc_mode
, gen_lowpart (dest_mode
, mask
),
18747 CONST0_RTX (dest_mode
));
18748 emit_insn (gen_rtx_SET (VOIDmode
,
18750 gen_rtx_IF_THEN_ELSE (dest_mode
,
18757 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
18758 operands of the last comparison is nonzero/true, FALSE_COND if it
18759 is zero/false. Return 0 if the hardware has no such operation. */
18762 rs6000_emit_cmove (rtx dest
, rtx op
, rtx true_cond
, rtx false_cond
)
18764 enum rtx_code code
= GET_CODE (op
);
18765 rtx op0
= XEXP (op
, 0);
18766 rtx op1
= XEXP (op
, 1);
18767 REAL_VALUE_TYPE c1
;
18768 enum machine_mode compare_mode
= GET_MODE (op0
);
18769 enum machine_mode result_mode
= GET_MODE (dest
);
18771 bool is_against_zero
;
18773 /* These modes should always match. */
18774 if (GET_MODE (op1
) != compare_mode
18775 /* In the isel case however, we can use a compare immediate, so
18776 op1 may be a small constant. */
18777 && (!TARGET_ISEL
|| !short_cint_operand (op1
, VOIDmode
)))
18779 if (GET_MODE (true_cond
) != result_mode
)
18781 if (GET_MODE (false_cond
) != result_mode
)
18784 /* Don't allow using floating point comparisons for integer results for
18786 if (FLOAT_MODE_P (compare_mode
) && !FLOAT_MODE_P (result_mode
))
18789 /* First, work out if the hardware can do this at all, or
18790 if it's too slow.... */
18791 if (!FLOAT_MODE_P (compare_mode
))
18794 return rs6000_emit_int_cmove (dest
, op
, true_cond
, false_cond
);
18797 else if (TARGET_HARD_FLOAT
&& !TARGET_FPRS
18798 && SCALAR_FLOAT_MODE_P (compare_mode
))
18801 is_against_zero
= op1
== CONST0_RTX (compare_mode
);
18803 /* A floating-point subtract might overflow, underflow, or produce
18804 an inexact result, thus changing the floating-point flags, so it
18805 can't be generated if we care about that. It's safe if one side
18806 of the construct is zero, since then no subtract will be
18808 if (SCALAR_FLOAT_MODE_P (compare_mode
)
18809 && flag_trapping_math
&& ! is_against_zero
)
18812 /* Eliminate half of the comparisons by switching operands, this
18813 makes the remaining code simpler. */
18814 if (code
== UNLT
|| code
== UNGT
|| code
== UNORDERED
|| code
== NE
18815 || code
== LTGT
|| code
== LT
|| code
== UNLE
)
18817 code
= reverse_condition_maybe_unordered (code
);
18819 true_cond
= false_cond
;
18823 /* UNEQ and LTGT take four instructions for a comparison with zero,
18824 it'll probably be faster to use a branch here too. */
18825 if (code
== UNEQ
&& HONOR_NANS (compare_mode
))
18828 if (GET_CODE (op1
) == CONST_DOUBLE
)
18829 REAL_VALUE_FROM_CONST_DOUBLE (c1
, op1
);
18831 /* We're going to try to implement comparisons by performing
18832 a subtract, then comparing against zero. Unfortunately,
18833 Inf - Inf is NaN which is not zero, and so if we don't
18834 know that the operand is finite and the comparison
18835 would treat EQ different to UNORDERED, we can't do it. */
18836 if (HONOR_INFINITIES (compare_mode
)
18837 && code
!= GT
&& code
!= UNGE
18838 && (GET_CODE (op1
) != CONST_DOUBLE
|| real_isinf (&c1
))
18839 /* Constructs of the form (a OP b ? a : b) are safe. */
18840 && ((! rtx_equal_p (op0
, false_cond
) && ! rtx_equal_p (op1
, false_cond
))
18841 || (! rtx_equal_p (op0
, true_cond
)
18842 && ! rtx_equal_p (op1
, true_cond
))))
18845 /* At this point we know we can use fsel. */
18847 /* Reduce the comparison to a comparison against zero. */
18848 if (! is_against_zero
)
18850 temp
= gen_reg_rtx (compare_mode
);
18851 emit_insn (gen_rtx_SET (VOIDmode
, temp
,
18852 gen_rtx_MINUS (compare_mode
, op0
, op1
)));
18854 op1
= CONST0_RTX (compare_mode
);
18857 /* If we don't care about NaNs we can reduce some of the comparisons
18858 down to faster ones. */
18859 if (! HONOR_NANS (compare_mode
))
18865 true_cond
= false_cond
;
18878 /* Now, reduce everything down to a GE. */
18885 temp
= gen_reg_rtx (compare_mode
);
18886 emit_insn (gen_rtx_SET (VOIDmode
, temp
, gen_rtx_NEG (compare_mode
, op0
)));
18891 temp
= gen_reg_rtx (compare_mode
);
18892 emit_insn (gen_rtx_SET (VOIDmode
, temp
, gen_rtx_ABS (compare_mode
, op0
)));
18897 temp
= gen_reg_rtx (compare_mode
);
18898 emit_insn (gen_rtx_SET (VOIDmode
, temp
,
18899 gen_rtx_NEG (compare_mode
,
18900 gen_rtx_ABS (compare_mode
, op0
))));
18905 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
18906 temp
= gen_reg_rtx (result_mode
);
18907 emit_insn (gen_rtx_SET (VOIDmode
, temp
,
18908 gen_rtx_IF_THEN_ELSE (result_mode
,
18909 gen_rtx_GE (VOIDmode
,
18911 true_cond
, false_cond
)));
18912 false_cond
= true_cond
;
18915 temp
= gen_reg_rtx (compare_mode
);
18916 emit_insn (gen_rtx_SET (VOIDmode
, temp
, gen_rtx_NEG (compare_mode
, op0
)));
18921 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
18922 temp
= gen_reg_rtx (result_mode
);
18923 emit_insn (gen_rtx_SET (VOIDmode
, temp
,
18924 gen_rtx_IF_THEN_ELSE (result_mode
,
18925 gen_rtx_GE (VOIDmode
,
18927 true_cond
, false_cond
)));
18928 true_cond
= false_cond
;
18931 temp
= gen_reg_rtx (compare_mode
);
18932 emit_insn (gen_rtx_SET (VOIDmode
, temp
, gen_rtx_NEG (compare_mode
, op0
)));
18937 gcc_unreachable ();
18940 emit_insn (gen_rtx_SET (VOIDmode
, dest
,
18941 gen_rtx_IF_THEN_ELSE (result_mode
,
18942 gen_rtx_GE (VOIDmode
,
18944 true_cond
, false_cond
)));
18948 /* Same as above, but for ints (isel). */
18951 rs6000_emit_int_cmove (rtx dest
, rtx op
, rtx true_cond
, rtx false_cond
)
18953 rtx condition_rtx
, cr
;
18954 enum machine_mode mode
= GET_MODE (dest
);
18955 enum rtx_code cond_code
;
18956 rtx (*isel_func
) (rtx
, rtx
, rtx
, rtx
, rtx
);
18959 if (mode
!= SImode
&& (!TARGET_POWERPC64
|| mode
!= DImode
))
18962 /* We still have to do the compare, because isel doesn't do a
18963 compare, it just looks at the CRx bits set by a previous compare
18965 condition_rtx
= rs6000_generate_compare (op
, mode
);
18966 cond_code
= GET_CODE (condition_rtx
);
18967 cr
= XEXP (condition_rtx
, 0);
18968 signedp
= GET_MODE (cr
) == CCmode
;
18970 isel_func
= (mode
== SImode
18971 ? (signedp
? gen_isel_signed_si
: gen_isel_unsigned_si
)
18972 : (signedp
? gen_isel_signed_di
: gen_isel_unsigned_di
));
18976 case LT
: case GT
: case LTU
: case GTU
: case EQ
:
18977 /* isel handles these directly. */
18981 /* We need to swap the sense of the comparison. */
18984 true_cond
= false_cond
;
18986 PUT_CODE (condition_rtx
, reverse_condition (cond_code
));
18991 false_cond
= force_reg (mode
, false_cond
);
18992 if (true_cond
!= const0_rtx
)
18993 true_cond
= force_reg (mode
, true_cond
);
18995 emit_insn (isel_func (dest
, condition_rtx
, true_cond
, false_cond
, cr
));
19001 output_isel (rtx
*operands
)
19003 enum rtx_code code
;
19005 code
= GET_CODE (operands
[1]);
19007 if (code
== GE
|| code
== GEU
|| code
== LE
|| code
== LEU
|| code
== NE
)
19009 gcc_assert (GET_CODE (operands
[2]) == REG
19010 && GET_CODE (operands
[3]) == REG
);
19011 PUT_CODE (operands
[1], reverse_condition (code
));
19012 return "isel %0,%3,%2,%j1";
19015 return "isel %0,%2,%3,%j1";
19019 rs6000_emit_minmax (rtx dest
, enum rtx_code code
, rtx op0
, rtx op1
)
19021 enum machine_mode mode
= GET_MODE (op0
);
19025 /* VSX/altivec have direct min/max insns. */
19026 if ((code
== SMAX
|| code
== SMIN
)
19027 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode
)
19028 || (mode
== SFmode
&& VECTOR_UNIT_VSX_P (DFmode
))))
19030 emit_insn (gen_rtx_SET (VOIDmode
,
19032 gen_rtx_fmt_ee (code
, mode
, op0
, op1
)));
19036 if (code
== SMAX
|| code
== SMIN
)
19041 if (code
== SMAX
|| code
== UMAX
)
19042 target
= emit_conditional_move (dest
, c
, op0
, op1
, mode
,
19043 op0
, op1
, mode
, 0);
19045 target
= emit_conditional_move (dest
, c
, op0
, op1
, mode
,
19046 op1
, op0
, mode
, 0);
19047 gcc_assert (target
);
19048 if (target
!= dest
)
19049 emit_move_insn (dest
, target
);
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
  rtx x;

  x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
  add_int_reg_note (x, REG_BR_PROB, very_unlikely);
}
/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  For QI/HImode, possibly use a pattern that includes
   the zero_extend operation.  */

static void
emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
{
  rtx (*fn) (rtx, rtx) = NULL;

  switch (mode)
    {
    case QImode:
      fn = gen_load_lockedqi;
      break;
    case HImode:
      fn = gen_load_lockedhi;
      break;
    case SImode:
      if (GET_MODE (mem) == QImode)
	fn = gen_load_lockedqi_si;
      else if (GET_MODE (mem) == HImode)
	fn = gen_load_lockedhi_si;
      else
	fn = gen_load_lockedsi;
      break;
    case DImode:
      fn = gen_load_lockeddi;
      break;
    case TImode:
      fn = gen_load_lockedti;
      break;
    default:
      gcc_unreachable ();
    }
  emit_insn (fn (reg, mem));
}
/* A subroutine of the atomic operation splitters.  Emit a store-conditional
   instruction in MODE.  */

static void
emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
{
  rtx (*fn) (rtx, rtx, rtx) = NULL;

  switch (mode)
    {
    case QImode:
      fn = gen_store_conditionalqi;
      break;
    case HImode:
      fn = gen_store_conditionalhi;
      break;
    case SImode:
      fn = gen_store_conditionalsi;
      break;
    case DImode:
      fn = gen_store_conditionaldi;
      break;
    case TImode:
      fn = gen_store_conditionalti;
      break;
    default:
      gcc_unreachable ();
    }

  /* Emit sync before stwcx. to address PPC405 Erratum.  */
  if (PPC405_ERRATUM77)
    emit_insn (gen_hwsync ());

  emit_insn (fn (res, mem, val));
}
/* Expand barriers before and after a load_locked/store_cond sequence.  */

static rtx
rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
{
  rtx addr = XEXP (mem, 0);
  int strict_p = (reload_in_progress || reload_completed);

  if (!legitimate_indirect_address_p (addr, strict_p)
      && !legitimate_indexed_address_p (addr, strict_p))
    {
      addr = force_reg (Pmode, addr);
      mem = replace_equiv_address_nv (mem, addr);
    }

  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_ACQUIRE:
      break;
    case MEMMODEL_RELEASE:
    case MEMMODEL_ACQ_REL:
      emit_insn (gen_lwsync ());
      break;
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_hwsync ());
      break;
    default:
      gcc_unreachable ();
    }
  return mem;
}
static void
rs6000_post_atomic_barrier (enum memmodel model)
{
  switch (model)
    {
    case MEMMODEL_RELAXED:
    case MEMMODEL_CONSUME:
    case MEMMODEL_RELEASE:
      break;
    case MEMMODEL_ACQUIRE:
    case MEMMODEL_ACQ_REL:
    case MEMMODEL_SEQ_CST:
      emit_insn (gen_isync ());
      break;
    default:
      gcc_unreachable ();
    }
}
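/* Taken together, the two barrier helpers give the usual mapping: a seq_cst
   atomic operation is bracketed as "hwsync; larx/stcx. loop; isync", an
   acquire operation needs only the trailing isync, and a release operation
   only the leading lwsync.  */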
/* A subroutine of the various atomic expanders.  For sub-word operations,
   we must adjust things to operate on SImode.  Given the original MEM,
   return a new aligned memory.  Also build and return the quantities by
   which to shift and mask.  */

static rtx
rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
{
  rtx addr, align, shift, mask, mem;
  HOST_WIDE_INT shift_mask;
  enum machine_mode mode = GET_MODE (orig_mem);

  /* For smaller modes, we have to implement this via SImode.  */
  shift_mask = (mode == QImode ? 0x18 : 0x10);

  addr = XEXP (orig_mem, 0);
  addr = force_reg (GET_MODE (addr), addr);

  /* Aligned memory containing subword.  Generate a new memory.  We
     do not want any of the existing MEM_ATTR data, as we're now
     accessing memory outside the original object.  */
  align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
			       NULL_RTX, 1, OPTAB_LIB_WIDEN);
  mem = gen_rtx_MEM (SImode, align);
  MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
  if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
    set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);

  /* Shift amount for subword relative to aligned word.  */
  shift = gen_reg_rtx (SImode);
  addr = gen_lowpart (SImode, addr);
  emit_insn (gen_rlwinm (shift, addr, GEN_INT (3), GEN_INT (shift_mask)));
  if (WORDS_BIG_ENDIAN)
    shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
				 shift, 1, OPTAB_LIB_WIDEN);
  *pshift = shift;

  /* Mask for insertion.  */
  mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
			      shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
  *pmask = mask;

  return mem;
}
/* A subroutine of the various atomic expanders.  For sub-word operands,
   combine OLDVAL and NEWVAL via MASK.  Returns a new pseudo.  */

static rtx
rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
{
  rtx x;

  x = gen_reg_rtx (SImode);
  emit_insn (gen_rtx_SET (VOIDmode, x,
			  gen_rtx_AND (SImode,
				       gen_rtx_NOT (SImode, mask),
				       oldval)));

  x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);

  return x;
}
/* A subroutine of the various atomic expanders.  For sub-word operands,
   extract WIDE to NARROW via SHIFT.  */

static void
rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
{
  wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
			      wide, 1, OPTAB_LIB_WIDEN);
  emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
}
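/* Worked example (big-endian, QImode): for a byte at address A with
   (A & 3) == 1, rs6000_adjust_atomic_subword computes
   shift = 0x18 - 8 = 16 and mask = 0xff << 16, and this helper then
   recovers the byte from the SImode result with a logical shift right
   by 16 followed by a narrowing move.  */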
19267 /* Expand an atomic compare and swap operation. */
19270 rs6000_expand_atomic_compare_and_swap (rtx operands
[])
19272 rtx boolval
, retval
, mem
, oldval
, newval
, cond
;
19273 rtx label1
, label2
, x
, mask
, shift
;
19274 enum machine_mode mode
, orig_mode
;
19275 enum memmodel mod_s
, mod_f
;
19278 boolval
= operands
[0];
19279 retval
= operands
[1];
19281 oldval
= operands
[3];
19282 newval
= operands
[4];
19283 is_weak
= (INTVAL (operands
[5]) != 0);
19284 mod_s
= (enum memmodel
) INTVAL (operands
[6]);
19285 mod_f
= (enum memmodel
) INTVAL (operands
[7]);
19286 orig_mode
= mode
= GET_MODE (mem
);
19288 mask
= shift
= NULL_RTX
;
19289 if (mode
== QImode
|| mode
== HImode
)
19291 /* Before power8, we didn't have access to lbarx/lharx, so generate a
19292 lwarx and shift/mask operations. With power8, we need to do the
19293 comparison in SImode, but the store is still done in QI/HImode. */
19294 oldval
= convert_modes (SImode
, mode
, oldval
, 1);
19296 if (!TARGET_SYNC_HI_QI
)
19298 mem
= rs6000_adjust_atomic_subword (mem
, &shift
, &mask
);
19300 /* Shift and mask OLDVAL into position with the word. */
19301 oldval
= expand_simple_binop (SImode
, ASHIFT
, oldval
, shift
,
19302 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
19304 /* Shift and mask NEWVAL into position within the word. */
19305 newval
= convert_modes (SImode
, mode
, newval
, 1);
19306 newval
= expand_simple_binop (SImode
, ASHIFT
, newval
, shift
,
19307 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
19310 /* Prepare to adjust the return value. */
19311 retval
= gen_reg_rtx (SImode
);
19314 else if (reg_overlap_mentioned_p (retval
, oldval
))
19315 oldval
= copy_to_reg (oldval
);
19317 mem
= rs6000_pre_atomic_barrier (mem
, mod_s
);
19322 label1
= gen_rtx_LABEL_REF (VOIDmode
, gen_label_rtx ());
19323 emit_label (XEXP (label1
, 0));
19325 label2
= gen_rtx_LABEL_REF (VOIDmode
, gen_label_rtx ());
19327 emit_load_locked (mode
, retval
, mem
);
19332 x
= expand_simple_binop (SImode
, AND
, retval
, mask
,
19333 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
19336 cond
= gen_reg_rtx (CCmode
);
19337 /* If we have TImode, synthesize a comparison. */
19338 if (mode
!= TImode
)
19339 x
= gen_rtx_COMPARE (CCmode
, x
, oldval
);
19342 rtx xor1_result
= gen_reg_rtx (DImode
);
19343 rtx xor2_result
= gen_reg_rtx (DImode
);
19344 rtx or_result
= gen_reg_rtx (DImode
);
19345 rtx new_word0
= simplify_gen_subreg (DImode
, x
, TImode
, 0);
19346 rtx new_word1
= simplify_gen_subreg (DImode
, x
, TImode
, 8);
19347 rtx old_word0
= simplify_gen_subreg (DImode
, oldval
, TImode
, 0);
19348 rtx old_word1
= simplify_gen_subreg (DImode
, oldval
, TImode
, 8);
19350 emit_insn (gen_xordi3 (xor1_result
, new_word0
, old_word0
));
19351 emit_insn (gen_xordi3 (xor2_result
, new_word1
, old_word1
));
19352 emit_insn (gen_iordi3 (or_result
, xor1_result
, xor2_result
));
19353 x
= gen_rtx_COMPARE (CCmode
, or_result
, const0_rtx
);
19356 emit_insn (gen_rtx_SET (VOIDmode
, cond
, x
));
19358 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
19359 emit_unlikely_jump (x
, label2
);
19363 x
= rs6000_mask_atomic_subword (retval
, newval
, mask
);
19365 emit_store_conditional (orig_mode
, cond
, mem
, x
);
19369 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
19370 emit_unlikely_jump (x
, label1
);
19373 if (mod_f
!= MEMMODEL_RELAXED
)
19374 emit_label (XEXP (label2
, 0));
19376 rs6000_post_atomic_barrier (mod_s
);
19378 if (mod_f
== MEMMODEL_RELAXED
)
19379 emit_label (XEXP (label2
, 0));
19382 rs6000_finish_atomic_subword (operands
[1], retval
, shift
);
19383 else if (mode
!= GET_MODE (operands
[1]))
19384 convert_move (operands
[1], retval
, 1);
19386 /* In all cases, CR0 contains EQ on success, and NE on failure. */
19387 x
= gen_rtx_EQ (SImode
, cond
, const0_rtx
);
19388 emit_insn (gen_rtx_SET (VOIDmode
, boolval
, x
));
19391 /* Expand an atomic exchange operation. */
19394 rs6000_expand_atomic_exchange (rtx operands
[])
19396 rtx retval
, mem
, val
, cond
;
19397 enum machine_mode mode
;
19398 enum memmodel model
;
19399 rtx label
, x
, mask
, shift
;
19401 retval
= operands
[0];
19404 model
= (enum memmodel
) INTVAL (operands
[3]);
19405 mode
= GET_MODE (mem
);
19407 mask
= shift
= NULL_RTX
;
19408 if (!TARGET_SYNC_HI_QI
&& (mode
== QImode
|| mode
== HImode
))
19410 mem
= rs6000_adjust_atomic_subword (mem
, &shift
, &mask
);
19412 /* Shift and mask VAL into position with the word. */
19413 val
= convert_modes (SImode
, mode
, val
, 1);
19414 val
= expand_simple_binop (SImode
, ASHIFT
, val
, shift
,
19415 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
19417 /* Prepare to adjust the return value. */
19418 retval
= gen_reg_rtx (SImode
);
19422 mem
= rs6000_pre_atomic_barrier (mem
, model
);
19424 label
= gen_rtx_LABEL_REF (VOIDmode
, gen_label_rtx ());
19425 emit_label (XEXP (label
, 0));
19427 emit_load_locked (mode
, retval
, mem
);
19431 x
= rs6000_mask_atomic_subword (retval
, val
, mask
);
19433 cond
= gen_reg_rtx (CCmode
);
19434 emit_store_conditional (mode
, cond
, mem
, x
);
19436 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
19437 emit_unlikely_jump (x
, label
);
19439 rs6000_post_atomic_barrier (model
);
19442 rs6000_finish_atomic_subword (operands
[0], retval
, shift
);
19445 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
19446 to perform. MEM is the memory on which to operate. VAL is the second
19447 operand of the binary operator. BEFORE and AFTER are optional locations to
19448 return the value of MEM either before of after the operation. MODEL_RTX
19449 is a CONST_INT containing the memory model to use. */
19452 rs6000_expand_atomic_op (enum rtx_code code
, rtx mem
, rtx val
,
19453 rtx orig_before
, rtx orig_after
, rtx model_rtx
)
19455 enum memmodel model
= (enum memmodel
) INTVAL (model_rtx
);
19456 enum machine_mode mode
= GET_MODE (mem
);
19457 enum machine_mode store_mode
= mode
;
19458 rtx label
, x
, cond
, mask
, shift
;
19459 rtx before
= orig_before
, after
= orig_after
;
19461 mask
= shift
= NULL_RTX
;
19462 /* On power8, we want to use SImode for the operation. On previous systems,
19463 use the operation in a subword and shift/mask to get the proper byte or
19465 if (mode
== QImode
|| mode
== HImode
)
19467 if (TARGET_SYNC_HI_QI
)
19469 val
= convert_modes (SImode
, mode
, val
, 1);
19471 /* Prepare to adjust the return value. */
19472 before
= gen_reg_rtx (SImode
);
19474 after
= gen_reg_rtx (SImode
);
19479 mem
= rs6000_adjust_atomic_subword (mem
, &shift
, &mask
);
19481 /* Shift and mask VAL into position with the word. */
19482 val
= convert_modes (SImode
, mode
, val
, 1);
19483 val
= expand_simple_binop (SImode
, ASHIFT
, val
, shift
,
19484 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
19490 /* We've already zero-extended VAL. That is sufficient to
19491 make certain that it does not affect other bits. */
19496 /* If we make certain that all of the other bits in VAL are
19497 set, that will be sufficient to not affect other bits. */
19498 x
= gen_rtx_NOT (SImode
, mask
);
19499 x
= gen_rtx_IOR (SImode
, x
, val
);
19500 emit_insn (gen_rtx_SET (VOIDmode
, val
, x
));
19507 /* These will all affect bits outside the field and need
19508 adjustment via MASK within the loop. */
19512 gcc_unreachable ();
19515 /* Prepare to adjust the return value. */
19516 before
= gen_reg_rtx (SImode
);
19518 after
= gen_reg_rtx (SImode
);
19519 store_mode
= mode
= SImode
;
19523 mem
= rs6000_pre_atomic_barrier (mem
, model
);
19525 label
= gen_label_rtx ();
19526 emit_label (label
);
19527 label
= gen_rtx_LABEL_REF (VOIDmode
, label
);
19529 if (before
== NULL_RTX
)
19530 before
= gen_reg_rtx (mode
);
19532 emit_load_locked (mode
, before
, mem
);
19536 x
= expand_simple_binop (mode
, AND
, before
, val
,
19537 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
19538 after
= expand_simple_unop (mode
, NOT
, x
, after
, 1);
19542 after
= expand_simple_binop (mode
, code
, before
, val
,
19543 after
, 1, OPTAB_LIB_WIDEN
);
19549 x
= expand_simple_binop (SImode
, AND
, after
, mask
,
19550 NULL_RTX
, 1, OPTAB_LIB_WIDEN
);
19551 x
= rs6000_mask_atomic_subword (before
, x
, mask
);
19553 else if (store_mode
!= mode
)
19554 x
= convert_modes (store_mode
, mode
, x
, 1);
19556 cond
= gen_reg_rtx (CCmode
);
19557 emit_store_conditional (store_mode
, cond
, mem
, x
);
19559 x
= gen_rtx_NE (VOIDmode
, cond
, const0_rtx
);
19560 emit_unlikely_jump (x
, label
);
19562 rs6000_post_atomic_barrier (model
);
19566 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
19567 then do the calcuations in a SImode register. */
19569 rs6000_finish_atomic_subword (orig_before
, before
, shift
);
19571 rs6000_finish_atomic_subword (orig_after
, after
, shift
);
19573 else if (store_mode
!= mode
)
19575 /* QImode/HImode on machines with lbarx/lharx where we do the native
19576 operation and then do the calcuations in a SImode register. */
19578 convert_move (orig_before
, before
, 1);
19580 convert_move (orig_after
, after
, 1);
19582 else if (orig_after
&& after
!= orig_after
)
19583 emit_move_insn (orig_after
, after
);
19586 /* Emit instructions to move SRC to DST. Called by splitters for
19587 multi-register moves. It will emit at most one instruction for
19588 each register that is accessed; that is, it won't emit li/lis pairs
19589 (or equivalent for 64-bit code). One of SRC or DST must be a hard
19593 rs6000_split_multireg_move (rtx dst
, rtx src
)
19595 /* The register number of the first register being moved. */
19597 /* The mode that is to be moved. */
19598 enum machine_mode mode
;
19599 /* The mode that the move is being done in, and its size. */
19600 enum machine_mode reg_mode
;
19602 /* The number of registers that will be moved. */
19605 reg
= REG_P (dst
) ? REGNO (dst
) : REGNO (src
);
19606 mode
= GET_MODE (dst
);
19607 nregs
= hard_regno_nregs
[reg
][mode
];
19608 if (FP_REGNO_P (reg
))
19609 reg_mode
= DECIMAL_FLOAT_MODE_P (mode
) ? DDmode
:
19610 ((TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
) ? DFmode
: SFmode
);
19611 else if (ALTIVEC_REGNO_P (reg
))
19612 reg_mode
= V16QImode
;
19613 else if (TARGET_E500_DOUBLE
&& mode
== TFmode
)
19616 reg_mode
= word_mode
;
19617 reg_mode_size
= GET_MODE_SIZE (reg_mode
);
19619 gcc_assert (reg_mode_size
* nregs
== GET_MODE_SIZE (mode
));
19621 if (REG_P (src
) && REG_P (dst
) && (REGNO (src
) < REGNO (dst
)))
19623 /* Move register range backwards, if we might have destructive
19626 for (i
= nregs
- 1; i
>= 0; i
--)
19627 emit_insn (gen_rtx_SET (VOIDmode
,
19628 simplify_gen_subreg (reg_mode
, dst
, mode
,
19629 i
* reg_mode_size
),
19630 simplify_gen_subreg (reg_mode
, src
, mode
,
19631 i
* reg_mode_size
)));
19637 bool used_update
= false;
19638 rtx restore_basereg
= NULL_RTX
;
19640 if (MEM_P (src
) && INT_REGNO_P (reg
))
19644 if (GET_CODE (XEXP (src
, 0)) == PRE_INC
19645 || GET_CODE (XEXP (src
, 0)) == PRE_DEC
)
19648 breg
= XEXP (XEXP (src
, 0), 0);
19649 delta_rtx
= (GET_CODE (XEXP (src
, 0)) == PRE_INC
19650 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src
)))
19651 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src
))));
19652 emit_insn (gen_add3_insn (breg
, breg
, delta_rtx
));
19653 src
= replace_equiv_address (src
, breg
);
19655 else if (! rs6000_offsettable_memref_p (src
, reg_mode
))
19657 if (GET_CODE (XEXP (src
, 0)) == PRE_MODIFY
)
19659 rtx basereg
= XEXP (XEXP (src
, 0), 0);
19662 rtx ndst
= simplify_gen_subreg (reg_mode
, dst
, mode
, 0);
19663 emit_insn (gen_rtx_SET (VOIDmode
, ndst
,
19664 gen_rtx_MEM (reg_mode
, XEXP (src
, 0))));
19665 used_update
= true;
19668 emit_insn (gen_rtx_SET (VOIDmode
, basereg
,
19669 XEXP (XEXP (src
, 0), 1)));
19670 src
= replace_equiv_address (src
, basereg
);
19674 rtx basereg
= gen_rtx_REG (Pmode
, reg
);
19675 emit_insn (gen_rtx_SET (VOIDmode
, basereg
, XEXP (src
, 0)));
19676 src
= replace_equiv_address (src
, basereg
);
19680 breg
= XEXP (src
, 0);
19681 if (GET_CODE (breg
) == PLUS
|| GET_CODE (breg
) == LO_SUM
)
19682 breg
= XEXP (breg
, 0);
19684 /* If the base register we are using to address memory is
19685 also a destination reg, then change that register last. */
19687 && REGNO (breg
) >= REGNO (dst
)
19688 && REGNO (breg
) < REGNO (dst
) + nregs
)
19689 j
= REGNO (breg
) - REGNO (dst
);
19691 else if (MEM_P (dst
) && INT_REGNO_P (reg
))
19695 if (GET_CODE (XEXP (dst
, 0)) == PRE_INC
19696 || GET_CODE (XEXP (dst
, 0)) == PRE_DEC
)
19699 breg
= XEXP (XEXP (dst
, 0), 0);
19700 delta_rtx
= (GET_CODE (XEXP (dst
, 0)) == PRE_INC
19701 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst
)))
19702 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst
))));
19704 /* We have to update the breg before doing the store.
19705 Use store with update, if available. */
19709 rtx nsrc
= simplify_gen_subreg (reg_mode
, src
, mode
, 0);
19710 emit_insn (TARGET_32BIT
19711 ? (TARGET_POWERPC64
19712 ? gen_movdi_si_update (breg
, breg
, delta_rtx
, nsrc
)
19713 : gen_movsi_update (breg
, breg
, delta_rtx
, nsrc
))
19714 : gen_movdi_di_update (breg
, breg
, delta_rtx
, nsrc
));
19715 used_update
= true;
19718 emit_insn (gen_add3_insn (breg
, breg
, delta_rtx
));
19719 dst
= replace_equiv_address (dst
, breg
);
19721 else if (!rs6000_offsettable_memref_p (dst
, reg_mode
)
19722 && GET_CODE (XEXP (dst
, 0)) != LO_SUM
)
19724 if (GET_CODE (XEXP (dst
, 0)) == PRE_MODIFY
)
19726 rtx basereg
= XEXP (XEXP (dst
, 0), 0);
19729 rtx nsrc
= simplify_gen_subreg (reg_mode
, src
, mode
, 0);
19730 emit_insn (gen_rtx_SET (VOIDmode
,
19731 gen_rtx_MEM (reg_mode
, XEXP (dst
, 0)), nsrc
));
19732 used_update
= true;
19735 emit_insn (gen_rtx_SET (VOIDmode
, basereg
,
19736 XEXP (XEXP (dst
, 0), 1)));
19737 dst
= replace_equiv_address (dst
, basereg
);
19741 rtx basereg
= XEXP (XEXP (dst
, 0), 0);
19742 rtx offsetreg
= XEXP (XEXP (dst
, 0), 1);
19743 gcc_assert (GET_CODE (XEXP (dst
, 0)) == PLUS
19745 && REG_P (offsetreg
)
19746 && REGNO (basereg
) != REGNO (offsetreg
));
19747 if (REGNO (basereg
) == 0)
19749 rtx tmp
= offsetreg
;
19750 offsetreg
= basereg
;
19753 emit_insn (gen_add3_insn (basereg
, basereg
, offsetreg
));
19754 restore_basereg
= gen_sub3_insn (basereg
, basereg
, offsetreg
);
19755 dst
= replace_equiv_address (dst
, basereg
);
19758 else if (GET_CODE (XEXP (dst
, 0)) != LO_SUM
)
19759 gcc_assert (rs6000_offsettable_memref_p (dst
, reg_mode
));
19762 for (i
= 0; i
< nregs
; i
++)
19764 /* Calculate index to next subword. */
19769 /* If compiler already emitted move of first word by
19770 store with update, no need to do anything. */
19771 if (j
== 0 && used_update
)
19774 emit_insn (gen_rtx_SET (VOIDmode
,
19775 simplify_gen_subreg (reg_mode
, dst
, mode
,
19776 j
* reg_mode_size
),
19777 simplify_gen_subreg (reg_mode
, src
, mode
,
19778 j
* reg_mode_size
)));
19780 if (restore_basereg
!= NULL_RTX
)
19781 emit_insn (restore_basereg
);
/* This page contains routines that are used to determine what the
   function prologue and epilogue code will do and write them out.  */

static bool
save_reg_p (int r)
{
  return !call_used_regs[r] && df_regs_ever_live_p (r);
}
/* Return the first fixed-point register that is required to be
   saved.  32 if none.  */

int
first_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 13; first_reg <= 31; first_reg++)
    if (save_reg_p (first_reg))
      break;

  if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
      && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
	  || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
	  || (TARGET_TOC && TARGET_MINIMAL_TOC))
      && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
    first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;

#if TARGET_MACHO
  if (flag_pic
      && crtl->uses_pic_offset_table
      && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
    return RS6000_PIC_OFFSET_TABLE_REGNUM;
#endif

  return first_reg;
}
/* Similar, for FP regs.  */

int
first_fp_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
    if (save_reg_p (first_reg))
      break;

  return first_reg;
}
/* Similar, for AltiVec regs.  */

static int
first_altivec_reg_to_save (void)
{
  int i;

  /* Stack frame remains as is unless we are in AltiVec ABI.  */
  if (! TARGET_ALTIVEC_ABI)
    return LAST_ALTIVEC_REGNO + 1;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    return FIRST_ALTIVEC_REGNO + 20;

  /* Find lowest numbered live register.  */
  for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
    if (save_reg_p (i))
      break;

  return i;
}
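/* Only v20..v31 are callee-saved under the AltiVec ABI, which is why the
   scan above starts at FIRST_ALTIVEC_REGNO + 20.  */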
/* Return a 32-bit mask of the AltiVec registers we need to set in
   VRSAVE.  Bit n of the return value is 1 if Vn is live.  The MSB in
   the 32-bit word is 0.  */

static unsigned int
compute_vrsave_mask (void)
{
  unsigned int i, mask = 0;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     call-saved altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    mask |= 0xFFF;

  /* First, find out if we use _any_ altivec registers.  */
  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (df_regs_ever_live_p (i))
      mask |= ALTIVEC_REG_BIT (i);

  if (mask == 0)
    return mask;

  /* Next, remove the argument registers from the set.  These must
     be in the VRSAVE mask set by the caller, so we don't need to add
     them in again.  More importantly, the mask we compute here is
     used to generate CLOBBERs in the set_vrsave insn, and we do not
     wish the argument registers to die.  */
  for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
    mask &= ~ALTIVEC_REG_BIT (i);

  /* Similarly, remove the return value from the set.  */
  {
    bool yes = false;
    diddle_return_value (is_altivec_return_reg, &yes);
    if (yes)
      mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
  }

  return mask;
}
19909 /* For a very restricted set of circumstances, we can cut down the
19910 size of prologues/epilogues by calling our own save/restore-the-world
19914 compute_save_world_info (rs6000_stack_t
*info_ptr
)
19916 info_ptr
->world_save_p
= 1;
19917 info_ptr
->world_save_p
19918 = (WORLD_SAVE_P (info_ptr
)
19919 && DEFAULT_ABI
== ABI_DARWIN
19920 && !cfun
->has_nonlocal_label
19921 && info_ptr
->first_fp_reg_save
== FIRST_SAVED_FP_REGNO
19922 && info_ptr
->first_gp_reg_save
== FIRST_SAVED_GP_REGNO
19923 && info_ptr
->first_altivec_reg_save
== FIRST_SAVED_ALTIVEC_REGNO
19924 && info_ptr
->cr_save_p
);
19926 /* This will not work in conjunction with sibcalls. Make sure there
19927 are none. (This check is expensive, but seldom executed.) */
19928 if (WORLD_SAVE_P (info_ptr
))
19931 for (insn
= get_last_insn_anywhere (); insn
; insn
= PREV_INSN (insn
))
19932 if (CALL_P (insn
) && SIBLING_CALL_P (insn
))
19934 info_ptr
->world_save_p
= 0;
19939 if (WORLD_SAVE_P (info_ptr
))
19941 /* Even if we're not touching VRsave, make sure there's room on the
19942 stack for it, if it looks like we're calling SAVE_WORLD, which
19943 will attempt to save it. */
19944 info_ptr
->vrsave_size
= 4;
19946 /* If we are going to save the world, we need to save the link register too. */
19947 info_ptr
->lr_save_p
= 1;
19949 /* "Save" the VRsave register too if we're saving the world. */
19950 if (info_ptr
->vrsave_mask
== 0)
19951 info_ptr
->vrsave_mask
= compute_vrsave_mask ();
19953 /* Because the Darwin register save/restore routines only handle
19954 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
19956 gcc_assert (info_ptr
->first_fp_reg_save
>= FIRST_SAVED_FP_REGNO
19957 && (info_ptr
->first_altivec_reg_save
19958 >= FIRST_SAVED_ALTIVEC_REGNO
));
static void
is_altivec_return_reg (rtx reg, void *xyes)
{
  bool *yes = (bool *) xyes;
  if (REGNO (reg) == ALTIVEC_ARG_RETURN)
    *yes = true;
}
/* Look for user-defined global regs in the range FIRST to LAST-1.
   We should not restore these, and so cannot use lmw or out-of-line
   restore functions if there are any.  We also can't save them
   (well, emit frame notes for them), because frame unwinding during
   exception handling will restore saved registers.  */

static bool
global_regs_p (unsigned first, unsigned last)
{
  while (first < last)
    if (global_regs[first++])
      return true;
  return false;
}
/* Determine the strategy for saving/restoring registers.  */
enum {
  SAVRES_MULTIPLE = 0x1,
  SAVE_INLINE_FPRS = 0x2,
  SAVE_INLINE_GPRS = 0x4,
  REST_INLINE_FPRS = 0x8,
  REST_INLINE_GPRS = 0x10,
  SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
  SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
  REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
  SAVE_INLINE_VRS = 0x100,
  REST_INLINE_VRS = 0x200
};
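/* These are independent bit flags, so for example
   (strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS
   tests for "FPRs saved inline but restored out of line", as done below.  */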
20004 rs6000_savres_strategy (rs6000_stack_t
*info
,
20005 bool using_static_chain_p
)
20010 if (TARGET_MULTIPLE
20011 && !TARGET_POWERPC64
20012 && !(TARGET_SPE_ABI
&& info
->spe_64bit_regs_used
)
20013 && info
->first_gp_reg_save
< 31
20014 && !global_regs_p (info
->first_gp_reg_save
, 32))
20015 strategy
|= SAVRES_MULTIPLE
;
20017 if (crtl
->calls_eh_return
20018 || cfun
->machine
->ra_need_lr
)
20019 strategy
|= (SAVE_INLINE_FPRS
| REST_INLINE_FPRS
20020 | SAVE_INLINE_GPRS
| REST_INLINE_GPRS
20021 | SAVE_INLINE_VRS
| REST_INLINE_VRS
);
20023 if (info
->first_fp_reg_save
== 64
20024 /* The out-of-line FP routines use double-precision stores;
20025 we can't use those routines if we don't have such stores. */
20026 || (TARGET_HARD_FLOAT
&& !TARGET_DOUBLE_FLOAT
)
20027 || global_regs_p (info
->first_fp_reg_save
, 64))
20028 strategy
|= SAVE_INLINE_FPRS
| REST_INLINE_FPRS
;
20030 if (info
->first_gp_reg_save
== 32
20031 || (!(strategy
& SAVRES_MULTIPLE
)
20032 && global_regs_p (info
->first_gp_reg_save
, 32)))
20033 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
20035 if (info
->first_altivec_reg_save
== LAST_ALTIVEC_REGNO
+ 1
20036 || global_regs_p (info
->first_altivec_reg_save
, LAST_ALTIVEC_REGNO
+ 1))
20037 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
20039 /* Define cutoff for using out-of-line functions to save registers. */
20040 if (DEFAULT_ABI
== ABI_V4
|| TARGET_ELF
)
20042 if (!optimize_size
)
20044 strategy
|= SAVE_INLINE_FPRS
| REST_INLINE_FPRS
;
20045 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
20046 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
20050 /* Prefer out-of-line restore if it will exit. */
20051 if (info
->first_fp_reg_save
> 61)
20052 strategy
|= SAVE_INLINE_FPRS
;
20053 if (info
->first_gp_reg_save
> 29)
20055 if (info
->first_fp_reg_save
== 64)
20056 strategy
|= SAVE_INLINE_GPRS
;
20058 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
20060 if (info
->first_altivec_reg_save
== LAST_ALTIVEC_REGNO
)
20061 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
20064 else if (DEFAULT_ABI
== ABI_DARWIN
)
20066 if (info
->first_fp_reg_save
> 60)
20067 strategy
|= SAVE_INLINE_FPRS
| REST_INLINE_FPRS
;
20068 if (info
->first_gp_reg_save
> 29)
20069 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
20070 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
20074 gcc_checking_assert (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
);
20075 if (info
->first_fp_reg_save
> 61)
20076 strategy
|= SAVE_INLINE_FPRS
| REST_INLINE_FPRS
;
20077 strategy
|= SAVE_INLINE_GPRS
| REST_INLINE_GPRS
;
20078 strategy
|= SAVE_INLINE_VRS
| REST_INLINE_VRS
;
  /* Don't bother to try to save things out-of-line if r11 is occupied
     by the static chain.  It would require too much fiddling and the
     static chain is rarely used anyway.  FPRs are saved w.r.t the stack
     pointer on Darwin, and AIX uses r1 or r12.  */
  if (using_static_chain_p
      && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
    strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
                 | SAVE_INLINE_GPRS
                 | SAVE_INLINE_VRS | REST_INLINE_VRS);

  /* We can only use the out-of-line routines to restore if we've
     saved all the registers from first_fp_reg_save in the prologue.
     Otherwise, we risk loading garbage.  */
  if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
    {
      int i;

      for (i = info->first_fp_reg_save; i < 64; i++)
        if (!save_reg_p (i))
          {
            strategy |= REST_INLINE_FPRS;
            break;
          }
    }

  /* If we are going to use store multiple, then don't even bother
     with the out-of-line routines, since the store-multiple
     instruction will always be smaller.  */
  if ((strategy & SAVRES_MULTIPLE))
    strategy |= SAVE_INLINE_GPRS;

  /* info->lr_save_p isn't yet set if the only reason lr needs to be
     saved is an out-of-line save or restore.  Set up the value for
     the next test (excluding out-of-line gpr restore).  */
  lr_save_p = (info->lr_save_p
               || !(strategy & SAVE_INLINE_GPRS)
               || !(strategy & SAVE_INLINE_FPRS)
               || !(strategy & SAVE_INLINE_VRS)
               || !(strategy & REST_INLINE_FPRS)
               || !(strategy & REST_INLINE_VRS));

  /* The situation is more complicated with load multiple.  We'd
     prefer to use the out-of-line routines for restores, since the
     "exit" out-of-line routines can handle the restore of LR and the
     frame teardown.  However, it doesn't make sense to use the
     out-of-line routine if that is the only reason we'd need to save
     LR, and we can't use the "exit" out-of-line gpr restore if we
     have saved some fprs; in those cases it is advantageous to use
     load multiple when available.  */
  if ((strategy & SAVRES_MULTIPLE)
      && (!lr_save_p
          || info->first_fp_reg_save != 64))
    strategy |= REST_INLINE_GPRS;

  /* Saving CR interferes with the exit routines used on the SPE, so
     just punt here.  */
  if (TARGET_SPE_ABI
      && info->spe_64bit_regs_used
      && info->cr_save_p)
    strategy |= REST_INLINE_GPRS;

  /* We can only use load multiple or the out-of-line routines to
     restore if we've used store multiple or out-of-line routines
     in the prologue, i.e. if we've saved all the registers from
     first_gp_reg_save.  Otherwise, we risk loading garbage.  */
  if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
      == SAVE_INLINE_GPRS)
    {
      int i;

      for (i = info->first_gp_reg_save; i < 32; i++)
        if (!save_reg_p (i))
          {
            strategy |= REST_INLINE_GPRS;
            break;
          }
    }

  if (TARGET_ELF && TARGET_64BIT)
    {
      if (!(strategy & SAVE_INLINE_FPRS))
        strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
      else if (!(strategy & SAVE_INLINE_GPRS)
               && info->first_fp_reg_save == 64)
        strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
    }
  else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
    strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;

  if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
    strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;

  return strategy;
}
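/* Illustrative example of how the bits above combine (not part of the
   original logic): a 64-bit ELF function built without -Os that saves
   f28-f31 and r28-r31 gets SAVE_INLINE_* and REST_INLINE_* for FPRs,
   GPRs and VRs from the !optimize_size cutoff, so its prologue and
   epilogue emit individual stores/loads rather than calling the
   out-of-line _savegpr0_*/_restgpr0_* routines.  */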
20176 /* Calculate the stack information for the current function. This is
20177 complicated by having two separate calling sequences, the AIX calling
20178 sequence and the V.4 calling sequence.
20180 AIX (and Darwin/Mac OS X) stack frames look like:
20182 SP----> +---------------------------------------+
20183 | back chain to caller | 0 0
20184 +---------------------------------------+
20185 | saved CR | 4 8 (8-11)
20186 +---------------------------------------+
20188 +---------------------------------------+
20189 | reserved for compilers | 12 24
20190 +---------------------------------------+
20191 | reserved for binders | 16 32
20192 +---------------------------------------+
20193 | saved TOC pointer | 20 40
20194 +---------------------------------------+
20195 | Parameter save area (P) | 24 48
20196 +---------------------------------------+
20197 | Alloca space (A) | 24+P etc.
20198 +---------------------------------------+
20199 | Local variable space (L) | 24+P+A
20200 +---------------------------------------+
20201 | Float/int conversion temporary (X) | 24+P+A+L
20202 +---------------------------------------+
20203 | Save area for AltiVec registers (W) | 24+P+A+L+X
20204 +---------------------------------------+
20205 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
20206 +---------------------------------------+
20207 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
20208 +---------------------------------------+
 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
 +---------------------------------------+
 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
20212 +---------------------------------------+
20213 old SP->| back chain to caller's caller |
20214 +---------------------------------------+
 The required alignment for AIX configurations is two words (i.e., 8
 or 16 bytes).
20219 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
20221 SP----> +---------------------------------------+
20222 | Back chain to caller | 0
20223 +---------------------------------------+
20224 | Save area for CR | 8
20225 +---------------------------------------+
20227 +---------------------------------------+
20228 | Saved TOC pointer | 24
20229 +---------------------------------------+
20230 | Parameter save area (P) | 32
20231 +---------------------------------------+
20232 | Alloca space (A) | 32+P
20233 +---------------------------------------+
20234 | Local variable space (L) | 32+P+A
20235 +---------------------------------------+
20236 | Save area for AltiVec registers (W) | 32+P+A+L
20237 +---------------------------------------+
20238 | AltiVec alignment padding (Y) | 32+P+A+L+W
20239 +---------------------------------------+
20240 | Save area for GP registers (G) | 32+P+A+L+W+Y
20241 +---------------------------------------+
20242 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
20243 +---------------------------------------+
20244 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
20245 +---------------------------------------+
20248 V.4 stack frames look like:
20250 SP----> +---------------------------------------+
20251 | back chain to caller | 0
20252 +---------------------------------------+
20253 | caller's saved LR | 4
20254 +---------------------------------------+
20255 | Parameter save area (P) | 8
20256 +---------------------------------------+
20257 | Alloca space (A) | 8+P
20258 +---------------------------------------+
20259 | Varargs save area (V) | 8+P+A
20260 +---------------------------------------+
20261 | Local variable space (L) | 8+P+A+V
20262 +---------------------------------------+
20263 | Float/int conversion temporary (X) | 8+P+A+V+L
20264 +---------------------------------------+
20265 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
20266 +---------------------------------------+
20267 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
20268 +---------------------------------------+
20269 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
20270 +---------------------------------------+
20271 | SPE: area for 64-bit GP registers |
20272 +---------------------------------------+
20273 | SPE alignment padding |
20274 +---------------------------------------+
20275 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
20276 +---------------------------------------+
20277 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
20278 +---------------------------------------+
20279 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
20280 +---------------------------------------+
20281 old SP->| back chain to caller's caller |
20282 +---------------------------------------+
20284 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
20285 given. (But note below and in sysv4.h that we require only 8 and
20286 may round up the size of our stack frame anyways. The historical
20287 reason is early versions of powerpc-linux which didn't properly
20288 align the stack at program startup. A happy side-effect is that
20289 -mno-eabi libraries can be used with -meabi programs.)
20291 The EABI configuration defaults to the V.4 layout. However,
20292 the stack alignment requirements may differ. If -mno-eabi is not
20293 given, the required stack alignment is 8 bytes; if -mno-eabi is
 given, the required alignment is 16 bytes.  (But see V.4 comment
 above.)  */

#ifndef ABI_STACK_BOUNDARY
#define ABI_STACK_BOUNDARY STACK_BOUNDARY
#endif
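/* Illustrative arithmetic, assuming a 16-byte ABI_STACK_BOUNDARY:
   a frame with 8 bytes of fixed overhead, 20 bytes of locals and
   8 bytes of outgoing parameters (36 bytes total) is rounded up to
   48 bytes.  */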
static rs6000_stack_t *
rs6000_stack_info (void)
{
  rs6000_stack_t *info_ptr = &stack_info;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int ehrd_size;
  int ehcr_size;
  int save_align;
  int first_gp;
  HOST_WIDE_INT non_fixed_size;
  bool using_static_chain_p;

  if (reload_completed && info_ptr->reload_completed)
    return info_ptr;

  memset (info_ptr, 0, sizeof (*info_ptr));
  info_ptr->reload_completed = reload_completed;
20321 /* Cache value so we don't rescan instruction chain over and over. */
20322 if (cfun
->machine
->insn_chain_scanned_p
== 0)
20323 cfun
->machine
->insn_chain_scanned_p
20324 = spe_func_has_64bit_regs_p () + 1;
20325 info_ptr
->spe_64bit_regs_used
= cfun
->machine
->insn_chain_scanned_p
- 1;
20328 /* Select which calling sequence. */
20329 info_ptr
->abi
= DEFAULT_ABI
;
20331 /* Calculate which registers need to be saved & save area size. */
20332 info_ptr
->first_gp_reg_save
= first_reg_to_save ();
20333 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
20334 even if it currently looks like we won't. Reload may need it to
20335 get at a constant; if so, it will have already created a constant
20336 pool entry for it. */
20337 if (((TARGET_TOC
&& TARGET_MINIMAL_TOC
)
20338 || (flag_pic
== 1 && DEFAULT_ABI
== ABI_V4
)
20339 || (flag_pic
&& DEFAULT_ABI
== ABI_DARWIN
))
20340 && crtl
->uses_const_pool
20341 && info_ptr
->first_gp_reg_save
> RS6000_PIC_OFFSET_TABLE_REGNUM
)
20342 first_gp
= RS6000_PIC_OFFSET_TABLE_REGNUM
;
20344 first_gp
= info_ptr
->first_gp_reg_save
;
20346 info_ptr
->gp_size
= reg_size
* (32 - first_gp
);
20348 /* For the SPE, we have an additional upper 32-bits on each GPR.
20349 Ideally we should save the entire 64-bits only when the upper
20350 half is used in SIMD instructions. Since we only record
20351 registers live (not the size they are used in), this proves
20352 difficult because we'd have to traverse the instruction chain at
20353 the right time, taking reload into account. This is a real pain,
20354 so we opt to save the GPRs in 64-bits always if but one register
20355 gets used in 64-bits. Otherwise, all the registers in the frame
20356 get saved in 32-bits.
20358 So... since when we save all GPRs (except the SP) in 64-bits, the
20359 traditional GP save area will be empty. */
20360 if (TARGET_SPE_ABI
&& info_ptr
->spe_64bit_regs_used
!= 0)
20361 info_ptr
->gp_size
= 0;
20363 info_ptr
->first_fp_reg_save
= first_fp_reg_to_save ();
20364 info_ptr
->fp_size
= 8 * (64 - info_ptr
->first_fp_reg_save
);
20366 info_ptr
->first_altivec_reg_save
= first_altivec_reg_to_save ();
20367 info_ptr
->altivec_size
= 16 * (LAST_ALTIVEC_REGNO
+ 1
20368 - info_ptr
->first_altivec_reg_save
);
20370 /* Does this function call anything? */
20371 info_ptr
->calls_p
= (! crtl
->is_leaf
20372 || cfun
->machine
->ra_needs_full_frame
);
20374 /* Determine if we need to save the condition code registers. */
20375 if (df_regs_ever_live_p (CR2_REGNO
)
20376 || df_regs_ever_live_p (CR3_REGNO
)
20377 || df_regs_ever_live_p (CR4_REGNO
))
20379 info_ptr
->cr_save_p
= 1;
20380 if (DEFAULT_ABI
== ABI_V4
)
20381 info_ptr
->cr_size
= reg_size
;
20384 /* If the current function calls __builtin_eh_return, then we need
20385 to allocate stack space for registers that will hold data for
20386 the exception handler. */
20387 if (crtl
->calls_eh_return
)
20390 for (i
= 0; EH_RETURN_DATA_REGNO (i
) != INVALID_REGNUM
; ++i
)
20393 /* SPE saves EH registers in 64-bits. */
20394 ehrd_size
= i
* (TARGET_SPE_ABI
20395 && info_ptr
->spe_64bit_regs_used
!= 0
20396 ? UNITS_PER_SPE_WORD
: UNITS_PER_WORD
);
20401 /* In the ELFv2 ABI, we also need to allocate space for separate
20402 CR field save areas if the function calls __builtin_eh_return. */
20403 if (DEFAULT_ABI
== ABI_ELFv2
&& crtl
->calls_eh_return
)
20405 /* This hard-codes that we have three call-saved CR fields. */
20406 ehcr_size
= 3 * reg_size
;
20407 /* We do *not* use the regular CR save mechanism. */
20408 info_ptr
->cr_save_p
= 0;
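  /* Illustrative note: with reg_size == 8 this reserves 24 bytes, one
     doubleword for each of the three call-saved CR fields (CR2, CR3
     and CR4).  */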
20413 /* Determine various sizes. */
20414 info_ptr
->reg_size
= reg_size
;
20415 info_ptr
->fixed_size
= RS6000_SAVE_AREA
;
20416 info_ptr
->vars_size
= RS6000_ALIGN (get_frame_size (), 8);
20417 info_ptr
->parm_size
= RS6000_ALIGN (crtl
->outgoing_args_size
,
20418 TARGET_ALTIVEC
? 16 : 8);
20419 if (FRAME_GROWS_DOWNWARD
)
20420 info_ptr
->vars_size
20421 += RS6000_ALIGN (info_ptr
->fixed_size
+ info_ptr
->vars_size
20422 + info_ptr
->parm_size
,
20423 ABI_STACK_BOUNDARY
/ BITS_PER_UNIT
)
20424 - (info_ptr
->fixed_size
+ info_ptr
->vars_size
20425 + info_ptr
->parm_size
);
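  /* Illustrative arithmetic for the adjustment above, assuming a
     16-byte ABI_STACK_BOUNDARY: fixed_size 48, vars_size 20 and
     parm_size 64 sum to 132, which rounds up to 144, so vars_size
     absorbs the extra 12 bytes of padding.  */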
20427 if (TARGET_SPE_ABI
&& info_ptr
->spe_64bit_regs_used
!= 0)
20428 info_ptr
->spe_gp_size
= 8 * (32 - first_gp
);
20430 info_ptr
->spe_gp_size
= 0;
20432 if (TARGET_ALTIVEC_ABI
)
20433 info_ptr
->vrsave_mask
= compute_vrsave_mask ();
20435 info_ptr
->vrsave_mask
= 0;
20437 if (TARGET_ALTIVEC_VRSAVE
&& info_ptr
->vrsave_mask
)
20438 info_ptr
->vrsave_size
= 4;
20440 info_ptr
->vrsave_size
= 0;
20442 compute_save_world_info (info_ptr
);
20444 /* Calculate the offsets. */
20445 switch (DEFAULT_ABI
)
20449 gcc_unreachable ();
20454 info_ptr
->fp_save_offset
= - info_ptr
->fp_size
;
20455 info_ptr
->gp_save_offset
= info_ptr
->fp_save_offset
- info_ptr
->gp_size
;
20457 if (TARGET_ALTIVEC_ABI
)
20459 info_ptr
->vrsave_save_offset
20460 = info_ptr
->gp_save_offset
- info_ptr
->vrsave_size
;
20462 /* Align stack so vector save area is on a quadword boundary.
20463 The padding goes above the vectors. */
20464 if (info_ptr
->altivec_size
!= 0)
20465 info_ptr
->altivec_padding_size
20466 = info_ptr
->vrsave_save_offset
& 0xF;
20468 info_ptr
->altivec_padding_size
= 0;
20470 info_ptr
->altivec_save_offset
20471 = info_ptr
->vrsave_save_offset
20472 - info_ptr
->altivec_padding_size
20473 - info_ptr
->altivec_size
;
20474 gcc_assert (info_ptr
->altivec_size
== 0
20475 || info_ptr
->altivec_save_offset
% 16 == 0);
20477 /* Adjust for AltiVec case. */
20478 info_ptr
->ehrd_offset
= info_ptr
->altivec_save_offset
- ehrd_size
;
20481 info_ptr
->ehrd_offset
= info_ptr
->gp_save_offset
- ehrd_size
;
20483 info_ptr
->ehcr_offset
= info_ptr
->ehrd_offset
- ehcr_size
;
20484 info_ptr
->cr_save_offset
= reg_size
; /* first word when 64-bit. */
20485 info_ptr
->lr_save_offset
= 2*reg_size
;
20489 info_ptr
->fp_save_offset
= - info_ptr
->fp_size
;
20490 info_ptr
->gp_save_offset
= info_ptr
->fp_save_offset
- info_ptr
->gp_size
;
20491 info_ptr
->cr_save_offset
= info_ptr
->gp_save_offset
- info_ptr
->cr_size
;
20493 if (TARGET_SPE_ABI
&& info_ptr
->spe_64bit_regs_used
!= 0)
20495 /* Align stack so SPE GPR save area is aligned on a
20496 double-word boundary. */
20497 if (info_ptr
->spe_gp_size
!= 0 && info_ptr
->cr_save_offset
!= 0)
20498 info_ptr
->spe_padding_size
20499 = 8 - (-info_ptr
->cr_save_offset
% 8);
20501 info_ptr
->spe_padding_size
= 0;
20503 info_ptr
->spe_gp_save_offset
20504 = info_ptr
->cr_save_offset
20505 - info_ptr
->spe_padding_size
20506 - info_ptr
->spe_gp_size
;
20508 /* Adjust for SPE case. */
20509 info_ptr
->ehrd_offset
= info_ptr
->spe_gp_save_offset
;
20511 else if (TARGET_ALTIVEC_ABI
)
20513 info_ptr
->vrsave_save_offset
20514 = info_ptr
->cr_save_offset
- info_ptr
->vrsave_size
;
20516 /* Align stack so vector save area is on a quadword boundary. */
20517 if (info_ptr
->altivec_size
!= 0)
20518 info_ptr
->altivec_padding_size
20519 = 16 - (-info_ptr
->vrsave_save_offset
% 16);
20521 info_ptr
->altivec_padding_size
= 0;
20523 info_ptr
->altivec_save_offset
20524 = info_ptr
->vrsave_save_offset
20525 - info_ptr
->altivec_padding_size
20526 - info_ptr
->altivec_size
;
20528 /* Adjust for AltiVec case. */
20529 info_ptr
->ehrd_offset
= info_ptr
->altivec_save_offset
;
20532 info_ptr
->ehrd_offset
= info_ptr
->cr_save_offset
;
20533 info_ptr
->ehrd_offset
-= ehrd_size
;
20534 info_ptr
->lr_save_offset
= reg_size
;
20538 save_align
= (TARGET_ALTIVEC_ABI
|| DEFAULT_ABI
== ABI_DARWIN
) ? 16 : 8;
20539 info_ptr
->save_size
= RS6000_ALIGN (info_ptr
->fp_size
20540 + info_ptr
->gp_size
20541 + info_ptr
->altivec_size
20542 + info_ptr
->altivec_padding_size
20543 + info_ptr
->spe_gp_size
20544 + info_ptr
->spe_padding_size
20547 + info_ptr
->cr_size
20548 + info_ptr
->vrsave_size
,
20551 non_fixed_size
= (info_ptr
->vars_size
20552 + info_ptr
->parm_size
20553 + info_ptr
->save_size
);
20555 info_ptr
->total_size
= RS6000_ALIGN (non_fixed_size
+ info_ptr
->fixed_size
,
20556 ABI_STACK_BOUNDARY
/ BITS_PER_UNIT
);
20558 /* Determine if we need to save the link register. */
20559 if (info_ptr
->calls_p
20560 || ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
20562 && !TARGET_PROFILE_KERNEL
)
20563 || (DEFAULT_ABI
== ABI_V4
&& cfun
->calls_alloca
)
20564 #ifdef TARGET_RELOCATABLE
20565 || (TARGET_RELOCATABLE
&& (get_pool_size () != 0))
20567 || rs6000_ra_ever_killed ())
20568 info_ptr
->lr_save_p
= 1;
20570 using_static_chain_p
= (cfun
->static_chain_decl
!= NULL_TREE
20571 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM
)
20572 && call_used_regs
[STATIC_CHAIN_REGNUM
]);
20573 info_ptr
->savres_strategy
= rs6000_savres_strategy (info_ptr
,
20574 using_static_chain_p
);
20576 if (!(info_ptr
->savres_strategy
& SAVE_INLINE_GPRS
)
20577 || !(info_ptr
->savres_strategy
& SAVE_INLINE_FPRS
)
20578 || !(info_ptr
->savres_strategy
& SAVE_INLINE_VRS
)
20579 || !(info_ptr
->savres_strategy
& REST_INLINE_GPRS
)
20580 || !(info_ptr
->savres_strategy
& REST_INLINE_FPRS
)
20581 || !(info_ptr
->savres_strategy
& REST_INLINE_VRS
))
20582 info_ptr
->lr_save_p
= 1;
20584 if (info_ptr
->lr_save_p
)
20585 df_set_regs_ever_live (LR_REGNO
, true);
20587 /* Determine if we need to allocate any stack frame:
20589 For AIX we need to push the stack if a frame pointer is needed
20590 (because the stack might be dynamically adjusted), if we are
20591 debugging, if we make calls, or if the sum of fp_save, gp_save,
20592 and local variables are more than the space needed to save all
20593 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
20594 + 18*8 = 288 (GPR13 reserved).
20596 For V.4 we don't have the stack cushion that AIX uses, but assume
20597 that the debugger can handle stackless frames. */
20599 if (info_ptr
->calls_p
)
20600 info_ptr
->push_p
= 1;
20602 else if (DEFAULT_ABI
== ABI_V4
)
20603 info_ptr
->push_p
= non_fixed_size
!= 0;
20605 else if (frame_pointer_needed
)
20606 info_ptr
->push_p
= 1;
20608 else if (TARGET_XCOFF
&& write_symbols
!= NO_DEBUG
)
20609 info_ptr
->push_p
= 1;
20612 info_ptr
->push_p
= non_fixed_size
> (TARGET_32BIT
? 220 : 288);
20614 /* Zero offsets if we're not saving those registers. */
20615 if (info_ptr
->fp_size
== 0)
20616 info_ptr
->fp_save_offset
= 0;
20618 if (info_ptr
->gp_size
== 0)
20619 info_ptr
->gp_save_offset
= 0;
20621 if (! TARGET_ALTIVEC_ABI
|| info_ptr
->altivec_size
== 0)
20622 info_ptr
->altivec_save_offset
= 0;
20624 /* Zero VRSAVE offset if not saved and restored. */
20625 if (! TARGET_ALTIVEC_VRSAVE
|| info_ptr
->vrsave_mask
== 0)
20626 info_ptr
->vrsave_save_offset
= 0;
20628 if (! TARGET_SPE_ABI
20629 || info_ptr
->spe_64bit_regs_used
== 0
20630 || info_ptr
->spe_gp_size
== 0)
20631 info_ptr
->spe_gp_save_offset
= 0;
20633 if (! info_ptr
->lr_save_p
)
20634 info_ptr
->lr_save_offset
= 0;
20636 if (! info_ptr
->cr_save_p
)
20637 info_ptr
->cr_save_offset
= 0;
20642 /* Return true if the current function uses any GPRs in 64-bit SIMD
20646 spe_func_has_64bit_regs_p (void)
20650 /* Functions that save and restore all the call-saved registers will
20651 need to save/restore the registers in 64-bits. */
20652 if (crtl
->calls_eh_return
20653 || cfun
->calls_setjmp
20654 || crtl
->has_nonlocal_goto
)
20657 insns
= get_insns ();
20659 for (insn
= NEXT_INSN (insns
); insn
!= NULL_RTX
; insn
= NEXT_INSN (insn
))
20665 /* FIXME: This should be implemented with attributes...
20667 (set_attr "spe64" "true")....then,
20668 if (get_spe64(insn)) return true;
20670 It's the only reliable way to do the stuff below. */
20672 i
= PATTERN (insn
);
20673 if (GET_CODE (i
) == SET
)
20675 enum machine_mode mode
= GET_MODE (SET_SRC (i
));
20677 if (SPE_VECTOR_MODE (mode
))
20679 if (TARGET_E500_DOUBLE
&& (mode
== DFmode
|| mode
== TFmode
))
20689 debug_stack_info (rs6000_stack_t
*info
)
20691 const char *abi_string
;
20694 info
= rs6000_stack_info ();
20696 fprintf (stderr
, "\nStack information for function %s:\n",
20697 ((current_function_decl
&& DECL_NAME (current_function_decl
))
20698 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl
))
20703 default: abi_string
= "Unknown"; break;
20704 case ABI_NONE
: abi_string
= "NONE"; break;
20705 case ABI_AIX
: abi_string
= "AIX"; break;
20706 case ABI_ELFv2
: abi_string
= "ELFv2"; break;
20707 case ABI_DARWIN
: abi_string
= "Darwin"; break;
20708 case ABI_V4
: abi_string
= "V.4"; break;
20711 fprintf (stderr
, "\tABI = %5s\n", abi_string
);
20713 if (TARGET_ALTIVEC_ABI
)
20714 fprintf (stderr
, "\tALTIVEC ABI extensions enabled.\n");
20716 if (TARGET_SPE_ABI
)
20717 fprintf (stderr
, "\tSPE ABI extensions enabled.\n");
20719 if (info
->first_gp_reg_save
!= 32)
20720 fprintf (stderr
, "\tfirst_gp_reg_save = %5d\n", info
->first_gp_reg_save
);
20722 if (info
->first_fp_reg_save
!= 64)
20723 fprintf (stderr
, "\tfirst_fp_reg_save = %5d\n", info
->first_fp_reg_save
);
20725 if (info
->first_altivec_reg_save
<= LAST_ALTIVEC_REGNO
)
20726 fprintf (stderr
, "\tfirst_altivec_reg_save = %5d\n",
20727 info
->first_altivec_reg_save
);
20729 if (info
->lr_save_p
)
20730 fprintf (stderr
, "\tlr_save_p = %5d\n", info
->lr_save_p
);
20732 if (info
->cr_save_p
)
20733 fprintf (stderr
, "\tcr_save_p = %5d\n", info
->cr_save_p
);
20735 if (info
->vrsave_mask
)
20736 fprintf (stderr
, "\tvrsave_mask = 0x%x\n", info
->vrsave_mask
);
20739 fprintf (stderr
, "\tpush_p = %5d\n", info
->push_p
);
20742 fprintf (stderr
, "\tcalls_p = %5d\n", info
->calls_p
);
20744 if (info
->gp_save_offset
)
20745 fprintf (stderr
, "\tgp_save_offset = %5d\n", info
->gp_save_offset
);
20747 if (info
->fp_save_offset
)
20748 fprintf (stderr
, "\tfp_save_offset = %5d\n", info
->fp_save_offset
);
20750 if (info
->altivec_save_offset
)
20751 fprintf (stderr
, "\taltivec_save_offset = %5d\n",
20752 info
->altivec_save_offset
);
20754 if (info
->spe_gp_save_offset
)
20755 fprintf (stderr
, "\tspe_gp_save_offset = %5d\n",
20756 info
->spe_gp_save_offset
);
20758 if (info
->vrsave_save_offset
)
20759 fprintf (stderr
, "\tvrsave_save_offset = %5d\n",
20760 info
->vrsave_save_offset
);
20762 if (info
->lr_save_offset
)
20763 fprintf (stderr
, "\tlr_save_offset = %5d\n", info
->lr_save_offset
);
20765 if (info
->cr_save_offset
)
20766 fprintf (stderr
, "\tcr_save_offset = %5d\n", info
->cr_save_offset
);
20768 if (info
->varargs_save_offset
)
20769 fprintf (stderr
, "\tvarargs_save_offset = %5d\n", info
->varargs_save_offset
);
20771 if (info
->total_size
)
20772 fprintf (stderr
, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC
"\n",
20775 if (info
->vars_size
)
20776 fprintf (stderr
, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC
"\n",
20779 if (info
->parm_size
)
20780 fprintf (stderr
, "\tparm_size = %5d\n", info
->parm_size
);
20782 if (info
->fixed_size
)
20783 fprintf (stderr
, "\tfixed_size = %5d\n", info
->fixed_size
);
20786 fprintf (stderr
, "\tgp_size = %5d\n", info
->gp_size
);
20788 if (info
->spe_gp_size
)
20789 fprintf (stderr
, "\tspe_gp_size = %5d\n", info
->spe_gp_size
);
20792 fprintf (stderr
, "\tfp_size = %5d\n", info
->fp_size
);
20794 if (info
->altivec_size
)
20795 fprintf (stderr
, "\taltivec_size = %5d\n", info
->altivec_size
);
20797 if (info
->vrsave_size
)
20798 fprintf (stderr
, "\tvrsave_size = %5d\n", info
->vrsave_size
);
20800 if (info
->altivec_padding_size
)
20801 fprintf (stderr
, "\taltivec_padding_size= %5d\n",
20802 info
->altivec_padding_size
);
20804 if (info
->spe_padding_size
)
20805 fprintf (stderr
, "\tspe_padding_size = %5d\n",
20806 info
->spe_padding_size
);
20809 fprintf (stderr
, "\tcr_size = %5d\n", info
->cr_size
);
20811 if (info
->save_size
)
20812 fprintf (stderr
, "\tsave_size = %5d\n", info
->save_size
);
20814 if (info
->reg_size
!= 4)
20815 fprintf (stderr
, "\treg_size = %5d\n", info
->reg_size
);
20817 fprintf (stderr
, "\tsave-strategy = %04x\n", info
->savres_strategy
);
20819 fprintf (stderr
, "\n");
rtx
rs6000_return_addr (int count, rtx frame)
{
  /* Currently we don't optimize very well between prolog and body
     code and for PIC code the code can be actually quite bad, so
     don't try to be too clever here.  */
  if (count != 0
      || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
    {
      cfun->machine->ra_needs_full_frame = 1;

      return
        gen_rtx_MEM
          (Pmode,
           memory_address
             (Pmode,
              plus_constant (Pmode,
                             copy_to_reg
                               (gen_rtx_MEM (Pmode,
                                             memory_address (Pmode, frame))),
                             RETURN_ADDRESS_OFFSET)));
    }

  cfun->machine->ra_need_lr = 1;
  return get_hard_reg_initial_val (Pmode, LR_REGNO);
}
20849 /* Say whether a function is a candidate for sibcall handling or not. */
20852 rs6000_function_ok_for_sibcall (tree decl
, tree exp
)
20857 fntype
= TREE_TYPE (decl
);
20859 fntype
= TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp
)));
20861 /* We can't do it if the called function has more vector parameters
20862 than the current function; there's nowhere to put the VRsave code. */
20863 if (TARGET_ALTIVEC_ABI
20864 && TARGET_ALTIVEC_VRSAVE
20865 && !(decl
&& decl
== current_function_decl
))
20867 function_args_iterator args_iter
;
20871 /* Functions with vector parameters are required to have a
20872 prototype, so the argument type info must be available
20874 FOREACH_FUNCTION_ARGS(fntype
, type
, args_iter
)
20875 if (TREE_CODE (type
) == VECTOR_TYPE
20876 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type
)))
20879 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl
), type
, args_iter
)
20880 if (TREE_CODE (type
) == VECTOR_TYPE
20881 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type
)))
20888 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
20889 functions, because the callee may have a different TOC pointer to
20890 the caller and there's no way to ensure we restore the TOC when
20891 we return. With the secure-plt SYSV ABI we can't make non-local
20892 calls when -fpic/PIC because the plt call stubs use r30. */
20893 if (DEFAULT_ABI
== ABI_DARWIN
20894 || ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
20896 && !DECL_EXTERNAL (decl
)
20897 && (*targetm
.binds_local_p
) (decl
))
20898 || (DEFAULT_ABI
== ABI_V4
20899 && (!TARGET_SECURE_PLT
20902 && (*targetm
.binds_local_p
) (decl
)))))
20904 tree attr_list
= TYPE_ATTRIBUTES (fntype
);
20906 if (!lookup_attribute ("longcall", attr_list
)
20907 || lookup_attribute ("shortcall", attr_list
))
20915 rs6000_ra_ever_killed (void)
20921 if (cfun
->is_thunk
)
20924 if (cfun
->machine
->lr_save_state
)
20925 return cfun
->machine
->lr_save_state
- 1;
20927 /* regs_ever_live has LR marked as used if any sibcalls are present,
20928 but this should not force saving and restoring in the
20929 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
20930 clobbers LR, so that is inappropriate. */
20932 /* Also, the prologue can generate a store into LR that
20933 doesn't really count, like this:
20936 bcl to set PIC register
20940 When we're called from the epilogue, we need to avoid counting
20941 this as a store. */
20943 push_topmost_sequence ();
20944 top
= get_insns ();
20945 pop_topmost_sequence ();
20946 reg
= gen_rtx_REG (Pmode
, LR_REGNO
);
20948 for (insn
= NEXT_INSN (top
); insn
!= NULL_RTX
; insn
= NEXT_INSN (insn
))
20954 if (!SIBLING_CALL_P (insn
))
20957 else if (find_regno_note (insn
, REG_INC
, LR_REGNO
))
20959 else if (set_of (reg
, insn
) != NULL_RTX
20960 && !prologue_epilogue_contains (insn
))
20967 /* Emit instructions needed to load the TOC register.
20968 This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
20969 a constant pool; or for SVR4 -fpic. */
20972 rs6000_emit_load_toc_table (int fromprolog
)
20975 dest
= gen_rtx_REG (Pmode
, RS6000_PIC_OFFSET_TABLE_REGNUM
);
20977 if (TARGET_ELF
&& TARGET_SECURE_PLT
&& DEFAULT_ABI
== ABI_V4
&& flag_pic
)
20980 rtx lab
, tmp1
, tmp2
, got
;
20982 lab
= gen_label_rtx ();
20983 ASM_GENERATE_INTERNAL_LABEL (buf
, "L", CODE_LABEL_NUMBER (lab
));
20984 lab
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (buf
));
20986 got
= gen_rtx_SYMBOL_REF (Pmode
, toc_label_name
);
20988 got
= rs6000_got_sym ();
20989 tmp1
= tmp2
= dest
;
20992 tmp1
= gen_reg_rtx (Pmode
);
20993 tmp2
= gen_reg_rtx (Pmode
);
20995 emit_insn (gen_load_toc_v4_PIC_1 (lab
));
20996 emit_move_insn (tmp1
, gen_rtx_REG (Pmode
, LR_REGNO
));
20997 emit_insn (gen_load_toc_v4_PIC_3b (tmp2
, tmp1
, got
, lab
));
20998 emit_insn (gen_load_toc_v4_PIC_3c (dest
, tmp2
, got
, lab
));
21000 else if (TARGET_ELF
&& DEFAULT_ABI
== ABI_V4
&& flag_pic
== 1)
21002 emit_insn (gen_load_toc_v4_pic_si ());
21003 emit_move_insn (dest
, gen_rtx_REG (Pmode
, LR_REGNO
));
21005 else if (TARGET_ELF
&& DEFAULT_ABI
== ABI_V4
&& flag_pic
== 2)
21008 rtx temp0
= (fromprolog
21009 ? gen_rtx_REG (Pmode
, 0)
21010 : gen_reg_rtx (Pmode
));
21016 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCF", rs6000_pic_labelno
);
21017 symF
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (buf
));
21019 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCL", rs6000_pic_labelno
);
21020 symL
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (buf
));
21022 emit_insn (gen_load_toc_v4_PIC_1 (symF
));
21023 emit_move_insn (dest
, gen_rtx_REG (Pmode
, LR_REGNO
));
21024 emit_insn (gen_load_toc_v4_PIC_2 (temp0
, dest
, symL
, symF
));
21030 tocsym
= gen_rtx_SYMBOL_REF (Pmode
, toc_label_name
);
21031 lab
= gen_label_rtx ();
21032 emit_insn (gen_load_toc_v4_PIC_1b (tocsym
, lab
));
21033 emit_move_insn (dest
, gen_rtx_REG (Pmode
, LR_REGNO
));
21034 if (TARGET_LINK_STACK
)
21035 emit_insn (gen_addsi3 (dest
, dest
, GEN_INT (4)));
21036 emit_move_insn (temp0
, gen_rtx_MEM (Pmode
, dest
));
21038 emit_insn (gen_addsi3 (dest
, temp0
, dest
));
21040 else if (TARGET_ELF
&& !TARGET_AIX
&& flag_pic
== 0 && TARGET_MINIMAL_TOC
)
21042 /* This is for AIX code running in non-PIC ELF32. */
21045 ASM_GENERATE_INTERNAL_LABEL (buf
, "LCTOC", 1);
21046 realsym
= gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (buf
));
21048 emit_insn (gen_elf_high (dest
, realsym
));
21049 emit_insn (gen_elf_low (dest
, dest
, realsym
));
21053 gcc_assert (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
);
21056 emit_insn (gen_load_toc_aix_si (dest
));
21058 emit_insn (gen_load_toc_aix_di (dest
));
21062 /* Emit instructions to restore the link register after determining where
21063 its value has been stored. */
21066 rs6000_emit_eh_reg_restore (rtx source
, rtx scratch
)
21068 rs6000_stack_t
*info
= rs6000_stack_info ();
21071 operands
[0] = source
;
21072 operands
[1] = scratch
;
21074 if (info
->lr_save_p
)
21076 rtx frame_rtx
= stack_pointer_rtx
;
21077 HOST_WIDE_INT sp_offset
= 0;
21080 if (frame_pointer_needed
21081 || cfun
->calls_alloca
21082 || info
->total_size
> 32767)
21084 tmp
= gen_frame_mem (Pmode
, frame_rtx
);
21085 emit_move_insn (operands
[1], tmp
);
21086 frame_rtx
= operands
[1];
21088 else if (info
->push_p
)
21089 sp_offset
= info
->total_size
;
21091 tmp
= plus_constant (Pmode
, frame_rtx
,
21092 info
->lr_save_offset
+ sp_offset
);
21093 tmp
= gen_frame_mem (Pmode
, tmp
);
21094 emit_move_insn (tmp
, operands
[0]);
21097 emit_move_insn (gen_rtx_REG (Pmode
, LR_REGNO
), operands
[0]);
21099 /* Freeze lr_save_p. We've just emitted rtl that depends on the
21100 state of lr_save_p so any change from here on would be a bug. In
21101 particular, stop rs6000_ra_ever_killed from considering the SET
21102 of lr we may have added just above. */
21103 cfun
->machine
->lr_save_state
= info
->lr_save_p
+ 1;
static GTY(()) alias_set_type set = -1;

alias_set_type
get_TOC_alias_set (void)
{
  if (set == -1)
    set = new_alias_set ();
  return set;
}
21116 /* This returns nonzero if the current function uses the TOC. This is
21117 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
21118 is generated by the ABI_V4 load_toc_* patterns. */
21125 for (insn
= get_insns (); insn
; insn
= NEXT_INSN (insn
))
21128 rtx pat
= PATTERN (insn
);
21131 if (GET_CODE (pat
) == PARALLEL
)
21132 for (i
= 0; i
< XVECLEN (pat
, 0); i
++)
21134 rtx sub
= XVECEXP (pat
, 0, i
);
21135 if (GET_CODE (sub
) == USE
)
21137 sub
= XEXP (sub
, 0);
21138 if (GET_CODE (sub
) == UNSPEC
21139 && XINT (sub
, 1) == UNSPEC_TOC
)
21149 create_TOC_reference (rtx symbol
, rtx largetoc_reg
)
21151 rtx tocrel
, tocreg
, hi
;
21153 if (TARGET_DEBUG_ADDR
)
21155 if (GET_CODE (symbol
) == SYMBOL_REF
)
21156 fprintf (stderr
, "\ncreate_TOC_reference, (symbol_ref %s)\n",
21160 fprintf (stderr
, "\ncreate_TOC_reference, code %s:\n",
21161 GET_RTX_NAME (GET_CODE (symbol
)));
21162 debug_rtx (symbol
);
21166 if (!can_create_pseudo_p ())
21167 df_set_regs_ever_live (TOC_REGISTER
, true);
21169 tocreg
= gen_rtx_REG (Pmode
, TOC_REGISTER
);
21170 tocrel
= gen_rtx_UNSPEC (Pmode
, gen_rtvec (2, symbol
, tocreg
), UNSPEC_TOCREL
);
21171 if (TARGET_CMODEL
== CMODEL_SMALL
|| can_create_pseudo_p ())
21174 hi
= gen_rtx_HIGH (Pmode
, copy_rtx (tocrel
));
21175 if (largetoc_reg
!= NULL
)
21177 emit_move_insn (largetoc_reg
, hi
);
21180 return gen_rtx_LO_SUM (Pmode
, hi
, tocrel
);
/* Issue assembly directives that create a reference to the given DWARF
   FRAME_TABLE_LABEL from the current function section.  */

void
rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
{
  fprintf (asm_out_file, "\t.ref %s\n",
           (* targetm.strip_name_encoding) (frame_table_label));
}
21192 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
21193 and the change to the stack pointer. */
21196 rs6000_emit_stack_tie (rtx fp
, bool hard_frame_needed
)
21203 regs
[i
++] = gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
21204 if (hard_frame_needed
)
21205 regs
[i
++] = gen_rtx_REG (Pmode
, HARD_FRAME_POINTER_REGNUM
);
21206 if (!(REGNO (fp
) == STACK_POINTER_REGNUM
21207 || (hard_frame_needed
21208 && REGNO (fp
) == HARD_FRAME_POINTER_REGNUM
)))
21211 p
= rtvec_alloc (i
);
21214 rtx mem
= gen_frame_mem (BLKmode
, regs
[i
]);
21215 RTVEC_ELT (p
, i
) = gen_rtx_SET (VOIDmode
, mem
, const0_rtx
);
21218 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode
, p
)));
21221 /* Emit the correct code for allocating stack space, as insns.
21222 If COPY_REG, make sure a copy of the old frame is left there.
21223 The generated code may use hard register 0 as a temporary. */
21226 rs6000_emit_allocate_stack (HOST_WIDE_INT size
, rtx copy_reg
, int copy_off
)
21229 rtx stack_reg
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
21230 rtx tmp_reg
= gen_rtx_REG (Pmode
, 0);
21231 rtx todec
= gen_int_mode (-size
, Pmode
);
21234 if (INTVAL (todec
) != -size
)
21236 warning (0, "stack frame too large");
21237 emit_insn (gen_trap ());
21241 if (crtl
->limit_stack
)
21243 if (REG_P (stack_limit_rtx
)
21244 && REGNO (stack_limit_rtx
) > 1
21245 && REGNO (stack_limit_rtx
) <= 31)
21247 emit_insn (gen_add3_insn (tmp_reg
, stack_limit_rtx
, GEN_INT (size
)));
21248 emit_insn (gen_cond_trap (LTU
, stack_reg
, tmp_reg
,
21251 else if (GET_CODE (stack_limit_rtx
) == SYMBOL_REF
21253 && DEFAULT_ABI
== ABI_V4
)
21255 rtx toload
= gen_rtx_CONST (VOIDmode
,
21256 gen_rtx_PLUS (Pmode
,
21260 emit_insn (gen_elf_high (tmp_reg
, toload
));
21261 emit_insn (gen_elf_low (tmp_reg
, tmp_reg
, toload
));
21262 emit_insn (gen_cond_trap (LTU
, stack_reg
, tmp_reg
,
21266 warning (0, "stack limit expression is not supported");
21272 emit_insn (gen_add3_insn (copy_reg
, stack_reg
, GEN_INT (copy_off
)));
21274 emit_move_insn (copy_reg
, stack_reg
);
21279 /* Need a note here so that try_split doesn't get confused. */
21280 if (get_last_insn () == NULL_RTX
)
21281 emit_note (NOTE_INSN_DELETED
);
21282 insn
= emit_move_insn (tmp_reg
, todec
);
21283 try_split (PATTERN (insn
), insn
, 0);
21287 insn
= emit_insn (TARGET_32BIT
21288 ? gen_movsi_update_stack (stack_reg
, stack_reg
,
21290 : gen_movdi_di_update_stack (stack_reg
, stack_reg
,
21291 todec
, stack_reg
));
21292 /* Since we didn't use gen_frame_mem to generate the MEM, grab
21293 it now and set the alias set/attributes. The above gen_*_update
21294 calls will generate a PARALLEL with the MEM set being the first
21296 par
= PATTERN (insn
);
21297 gcc_assert (GET_CODE (par
) == PARALLEL
);
21298 set
= XVECEXP (par
, 0, 0);
21299 gcc_assert (GET_CODE (set
) == SET
);
21300 mem
= SET_DEST (set
);
21301 gcc_assert (MEM_P (mem
));
21302 MEM_NOTRAP_P (mem
) = 1;
21303 set_mem_alias_set (mem
, get_frame_alias_set ());
21305 RTX_FRAME_RELATED_P (insn
) = 1;
21306 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
21307 gen_rtx_SET (VOIDmode
, stack_reg
,
21308 gen_rtx_PLUS (Pmode
, stack_reg
,
21309 GEN_INT (-size
))));
21312 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
21314 #if PROBE_INTERVAL > 32768
#error Cannot use indexed addressing mode for stack probing
#endif
21318 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
21319 inclusive. These are offsets from the current stack pointer. */
21322 rs6000_emit_probe_stack_range (HOST_WIDE_INT first
, HOST_WIDE_INT size
)
21324 /* See if we have a constant small number of probes to generate. If so,
21325 that's the easy case. */
21326 if (first
+ size
<= 32768)
21330 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
21331 it exceeds SIZE. If only one probe is needed, this will not
21332 generate any code. Then probe at FIRST + SIZE. */
21333 for (i
= PROBE_INTERVAL
; i
< size
; i
+= PROBE_INTERVAL
)
21334 emit_stack_probe (plus_constant (Pmode
, stack_pointer_rtx
,
21337 emit_stack_probe (plus_constant (Pmode
, stack_pointer_rtx
,
21341 /* Otherwise, do the same as above, but in a loop. Note that we must be
21342 extra careful with variables wrapping around because we might be at
21343 the very top (or the very bottom) of the address space and we have
21344 to be able to handle this case properly; in particular, we use an
21345 equality test for the loop condition. */
21348 HOST_WIDE_INT rounded_size
;
21349 rtx r12
= gen_rtx_REG (Pmode
, 12);
21350 rtx r0
= gen_rtx_REG (Pmode
, 0);
21352 /* Sanity check for the addressing mode we're going to use. */
21353 gcc_assert (first
<= 32768);
21355 /* Step 1: round SIZE to the previous multiple of the interval. */
21357 rounded_size
= size
& -PROBE_INTERVAL
;
21360 /* Step 2: compute initial and final value of the loop counter. */
21362 /* TEST_ADDR = SP + FIRST. */
21363 emit_insn (gen_rtx_SET (VOIDmode
, r12
,
21364 plus_constant (Pmode
, stack_pointer_rtx
,
21367 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
21368 if (rounded_size
> 32768)
21370 emit_move_insn (r0
, GEN_INT (-rounded_size
));
21371 emit_insn (gen_rtx_SET (VOIDmode
, r0
,
21372 gen_rtx_PLUS (Pmode
, r12
, r0
)));
21375 emit_insn (gen_rtx_SET (VOIDmode
, r0
,
21376 plus_constant (Pmode
, r12
, -rounded_size
)));
21379 /* Step 3: the loop
21381 while (TEST_ADDR != LAST_ADDR)
21383 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
21387 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
21388 until it is equal to ROUNDED_SIZE. */
21391 emit_insn (gen_probe_stack_rangedi (r12
, r12
, r0
));
21393 emit_insn (gen_probe_stack_rangesi (r12
, r12
, r0
));
21396 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
21397 that SIZE is equal to ROUNDED_SIZE. */
21399 if (size
!= rounded_size
)
21400 emit_stack_probe (plus_constant (Pmode
, r12
, rounded_size
- size
));
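  /* Illustrative example, assuming the default 4 KiB PROBE_INTERVAL:
     for FIRST == 16384 and SIZE == 10000 the unrolled branch above
     emits probes at sp+20480 and sp+24576, followed by a final probe
     at sp+26384.  */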
/* Probe a range of stack addresses from REG1 to REG2 inclusive.  These are
   absolute addresses.  */

const char *
output_probe_stack_range (rtx reg1, rtx reg2)
{
  static int labelno = 0;
  char loop_lab[32], end_lab[32];
  rtx xops[2];

  ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
  ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);

  /* Jump to END_LAB if TEST_ADDR == LAST_ADDR.  */
  xops[0] = reg1;
  xops[1] = reg2;
  if (TARGET_64BIT)
    output_asm_insn ("cmpd 0,%0,%1", xops);
  else
    output_asm_insn ("cmpw 0,%0,%1", xops);

  fputs ("\tbeq 0,", asm_out_file);
  assemble_name_raw (asm_out_file, end_lab);
  fputc ('\n', asm_out_file);

  /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL.  */
  xops[1] = GEN_INT (-PROBE_INTERVAL);
  output_asm_insn ("addi %0,%0,%1", xops);

  /* Probe at TEST_ADDR and branch.  */
  xops[1] = gen_rtx_REG (Pmode, 0);
  output_asm_insn ("stw %1,0(%0)", xops);
  fprintf (asm_out_file, "\tb ");
  assemble_name_raw (asm_out_file, loop_lab);
  fputc ('\n', asm_out_file);

  ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);

  return "";
}
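/* The loop emitted above looks roughly like this (32-bit case,
   assuming the default 4 KiB probe interval; illustrative only):

        .LPSRL0: cmpw 0,rTEST,rLAST
                 beq 0,.LPSRE0
                 addi rTEST,rTEST,-4096
                 stw r0,0(rTEST)
                 b .LPSRL0
        .LPSRE0:  */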
21447 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
21448 with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
21449 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
21450 deduce these equivalences by itself so it wasn't necessary to hold
21451 its hand so much. Don't be tempted to always supply d2_f_d_e with
21452 the actual cfa register, ie. r31 when we are using a hard frame
21453 pointer. That fails when saving regs off r1, and sched moves the
21454 r31 setup past the reg saves. */
21457 rs6000_frame_related (rtx insn
, rtx reg
, HOST_WIDE_INT val
,
21458 rtx reg2
, rtx rreg
, rtx split_reg
)
21462 if (REGNO (reg
) == STACK_POINTER_REGNUM
&& reg2
== NULL_RTX
)
21464 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
21467 gcc_checking_assert (val
== 0);
21468 real
= PATTERN (insn
);
21469 if (GET_CODE (real
) == PARALLEL
)
21470 for (i
= 0; i
< XVECLEN (real
, 0); i
++)
21471 if (GET_CODE (XVECEXP (real
, 0, i
)) == SET
)
21473 rtx set
= XVECEXP (real
, 0, i
);
21475 RTX_FRAME_RELATED_P (set
) = 1;
21477 RTX_FRAME_RELATED_P (insn
) = 1;
21481 /* copy_rtx will not make unique copies of registers, so we need to
21482 ensure we don't have unwanted sharing here. */
21484 reg
= gen_raw_REG (GET_MODE (reg
), REGNO (reg
));
21487 reg
= gen_raw_REG (GET_MODE (reg
), REGNO (reg
));
21489 real
= copy_rtx (PATTERN (insn
));
21491 if (reg2
!= NULL_RTX
)
21492 real
= replace_rtx (real
, reg2
, rreg
);
21494 if (REGNO (reg
) == STACK_POINTER_REGNUM
)
21495 gcc_checking_assert (val
== 0);
21497 real
= replace_rtx (real
, reg
,
21498 gen_rtx_PLUS (Pmode
, gen_rtx_REG (Pmode
,
21499 STACK_POINTER_REGNUM
),
21502 /* We expect that 'real' is either a SET or a PARALLEL containing
21503 SETs (and possibly other stuff). In a PARALLEL, all the SETs
21504 are important so they all have to be marked RTX_FRAME_RELATED_P. */
21506 if (GET_CODE (real
) == SET
)
21510 temp
= simplify_rtx (SET_SRC (set
));
21512 SET_SRC (set
) = temp
;
21513 temp
= simplify_rtx (SET_DEST (set
));
21515 SET_DEST (set
) = temp
;
21516 if (GET_CODE (SET_DEST (set
)) == MEM
)
21518 temp
= simplify_rtx (XEXP (SET_DEST (set
), 0));
21520 XEXP (SET_DEST (set
), 0) = temp
;
21527 gcc_assert (GET_CODE (real
) == PARALLEL
);
21528 for (i
= 0; i
< XVECLEN (real
, 0); i
++)
21529 if (GET_CODE (XVECEXP (real
, 0, i
)) == SET
)
21531 rtx set
= XVECEXP (real
, 0, i
);
21533 temp
= simplify_rtx (SET_SRC (set
));
21535 SET_SRC (set
) = temp
;
21536 temp
= simplify_rtx (SET_DEST (set
));
21538 SET_DEST (set
) = temp
;
21539 if (GET_CODE (SET_DEST (set
)) == MEM
)
21541 temp
= simplify_rtx (XEXP (SET_DEST (set
), 0));
21543 XEXP (SET_DEST (set
), 0) = temp
;
21545 RTX_FRAME_RELATED_P (set
) = 1;
21549 /* If a store insn has been split into multiple insns, the
21550 true source register is given by split_reg. */
21551 if (split_reg
!= NULL_RTX
)
21552 real
= gen_rtx_SET (VOIDmode
, SET_DEST (real
), split_reg
);
21554 RTX_FRAME_RELATED_P (insn
) = 1;
21555 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, real
);
21560 /* Returns an insn that has a vrsave set operation with the
21561 appropriate CLOBBERs. */
21564 generate_set_vrsave (rtx reg
, rs6000_stack_t
*info
, int epiloguep
)
21567 rtx insn
, clobs
[TOTAL_ALTIVEC_REGS
+ 1];
21568 rtx vrsave
= gen_rtx_REG (SImode
, VRSAVE_REGNO
);
21571 = gen_rtx_SET (VOIDmode
,
21573 gen_rtx_UNSPEC_VOLATILE (SImode
,
21574 gen_rtvec (2, reg
, vrsave
),
21575 UNSPECV_SET_VRSAVE
));
21579 /* We need to clobber the registers in the mask so the scheduler
21580 does not move sets to VRSAVE before sets of AltiVec registers.
21582 However, if the function receives nonlocal gotos, reload will set
21583 all call saved registers live. We will end up with:
21585 (set (reg 999) (mem))
21586 (parallel [ (set (reg vrsave) (unspec blah))
21587 (clobber (reg 999))])
21589 The clobber will cause the store into reg 999 to be dead, and
21590 flow will attempt to delete an epilogue insn. In this case, we
21591 need an unspec use/set of the register. */
21593 for (i
= FIRST_ALTIVEC_REGNO
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
21594 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
21596 if (!epiloguep
|| call_used_regs
[i
])
21597 clobs
[nclobs
++] = gen_rtx_CLOBBER (VOIDmode
,
21598 gen_rtx_REG (V4SImode
, i
));
21601 rtx reg
= gen_rtx_REG (V4SImode
, i
);
21604 = gen_rtx_SET (VOIDmode
,
21606 gen_rtx_UNSPEC (V4SImode
,
21607 gen_rtvec (1, reg
), 27));
21611 insn
= gen_rtx_PARALLEL (VOIDmode
, rtvec_alloc (nclobs
));
21613 for (i
= 0; i
< nclobs
; ++i
)
21614 XVECEXP (insn
, 0, i
) = clobs
[i
];
static rtx
gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
{
  rtx addr, mem;

  addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
  mem = gen_frame_mem (GET_MODE (reg), addr);
  return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
}

static rtx
gen_frame_load (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, false);
}

static rtx
gen_frame_store (rtx reg, rtx frame_reg, int offset)
{
  return gen_frame_set (reg, frame_reg, offset, true);
}
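/* Usage sketch (illustrative): a prologue register save is emitted as
   emit_insn (gen_frame_store (gen_rtx_REG (reg_mode, regno),
                               frame_reg_rtx, offset));
   and the matching epilogue restore uses gen_frame_load with the same
   frame register and offset.  */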
/* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
   Save REGNO into [FRAME_REG + OFFSET] in mode MODE.  */

static rtx
emit_frame_save (rtx frame_reg, enum machine_mode mode,
                 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
{
  rtx reg, insn;

  /* Some cases that need register indexed addressing.  */
  gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
                         || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
                         || (TARGET_E500_DOUBLE && mode == DFmode)
                         || (TARGET_SPE_ABI
                             && SPE_VECTOR_MODE (mode)
                             && !SPE_CONST_OFFSET_OK (offset))));

  reg = gen_rtx_REG (mode, regno);
  insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
  return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
                               NULL_RTX, NULL_RTX, NULL_RTX);
}
/* Emit an offset memory reference suitable for a frame store, while
   converting to a valid addressing mode.  */

static rtx
gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
{
  rtx int_rtx, offset_rtx;

  int_rtx = GEN_INT (offset);

  if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
      || (TARGET_E500_DOUBLE && mode == DFmode))
    {
      offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
      emit_move_insn (offset_rtx, int_rtx);
    }
  else
    offset_rtx = int_rtx;

  return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
}
21686 #ifndef TARGET_FIX_AND_CONTINUE
#define TARGET_FIX_AND_CONTINUE 0
#endif
21690 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
21691 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
21692 #define LAST_SAVRES_REGISTER 31
21693 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
21704 static GTY(()) rtx savres_routine_syms
[N_SAVRES_REGISTERS
][12];
21706 /* Temporary holding space for an out-of-line register save/restore
21708 static char savres_routine_name
[30];
21710 /* Return the name for an out-of-line register save/restore routine.
21711 We are saving/restoring GPRs if GPR is true. */
21714 rs6000_savres_routine_name (rs6000_stack_t
*info
, int regno
, int sel
)
21716 const char *prefix
= "";
21717 const char *suffix
= "";
21719 /* Different targets are supposed to define
21720 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
21721 routine name could be defined with:
21723 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
21725 This is a nice idea in practice, but in reality, things are
21726 complicated in several ways:
21728 - ELF targets have save/restore routines for GPRs.
21730 - SPE targets use different prefixes for 32/64-bit registers, and
21731 neither of them fit neatly in the FOO_{PREFIX,SUFFIX} regimen.
21733 - PPC64 ELF targets have routines for save/restore of GPRs that
21734 differ in what they do with the link register, so having a set
21735 prefix doesn't work. (We only use one of the save routines at
21736 the moment, though.)
21738 - PPC32 elf targets have "exit" versions of the restore routines
21739 that restore the link register and can save some extra space.
21740 These require an extra suffix. (There are also "tail" versions
21741 of the restore routines and "GOT" versions of the save routines,
21742 but we don't generate those at present. Same problems apply,
21745 We deal with all this by synthesizing our own prefix/suffix and
21746 using that for the simple sprintf call shown above. */
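/* Illustrative examples of the resulting names: 64-bit ELF GPR saves
   that also save LR come out as "_savegpr0_29", the PPC32 SVR4 "exit"
   restores look like "_restgpr_29_x", and Darwin instead emits an
   offset into its single save/rest sequence (e.g. "*saveFP+8").  */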
21749 /* No floating point saves on the SPE. */
21750 gcc_assert ((sel
& SAVRES_REG
) == SAVRES_GPR
);
21752 if ((sel
& SAVRES_SAVE
))
21753 prefix
= info
->spe_64bit_regs_used
? "_save64gpr_" : "_save32gpr_";
21755 prefix
= info
->spe_64bit_regs_used
? "_rest64gpr_" : "_rest32gpr_";
21757 if ((sel
& SAVRES_LR
))
21760 else if (DEFAULT_ABI
== ABI_V4
)
21765 if ((sel
& SAVRES_REG
) == SAVRES_GPR
)
21766 prefix
= (sel
& SAVRES_SAVE
) ? "_savegpr_" : "_restgpr_";
21767 else if ((sel
& SAVRES_REG
) == SAVRES_FPR
)
21768 prefix
= (sel
& SAVRES_SAVE
) ? "_savefpr_" : "_restfpr_";
21769 else if ((sel
& SAVRES_REG
) == SAVRES_VR
)
21770 prefix
= (sel
& SAVRES_SAVE
) ? "_savevr_" : "_restvr_";
21774 if ((sel
& SAVRES_LR
))
21777 else if (DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
21779 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
21780 /* No out-of-line save/restore routines for GPRs on AIX. */
21781 gcc_assert (!TARGET_AIX
|| (sel
& SAVRES_REG
) != SAVRES_GPR
);
21785 if ((sel
& SAVRES_REG
) == SAVRES_GPR
)
21786 prefix
= ((sel
& SAVRES_SAVE
)
21787 ? ((sel
& SAVRES_LR
) ? "_savegpr0_" : "_savegpr1_")
21788 : ((sel
& SAVRES_LR
) ? "_restgpr0_" : "_restgpr1_"));
21789 else if ((sel
& SAVRES_REG
) == SAVRES_FPR
)
21791 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
21792 if ((sel
& SAVRES_LR
))
21793 prefix
= ((sel
& SAVRES_SAVE
) ? "_savefpr_" : "_restfpr_");
21797 prefix
= (sel
& SAVRES_SAVE
) ? SAVE_FP_PREFIX
: RESTORE_FP_PREFIX
;
21798 suffix
= (sel
& SAVRES_SAVE
) ? SAVE_FP_SUFFIX
: RESTORE_FP_SUFFIX
;
21801 else if ((sel
& SAVRES_REG
) == SAVRES_VR
)
21802 prefix
= (sel
& SAVRES_SAVE
) ? "_savevr_" : "_restvr_";
21807 if (DEFAULT_ABI
== ABI_DARWIN
)
21809 /* The Darwin approach is (slightly) different, in order to be
21810 compatible with code generated by the system toolchain. There is a
21811 single symbol for the start of save sequence, and the code here
21812 embeds an offset into that code on the basis of the first register
21814 prefix
= (sel
& SAVRES_SAVE
) ? "save" : "rest" ;
21815 if ((sel
& SAVRES_REG
) == SAVRES_GPR
)
21816 sprintf (savres_routine_name
, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix
,
21817 ((sel
& SAVRES_LR
) ? "x" : ""), (regno
== 13 ? "" : "+"),
21818 (regno
- 13) * 4, prefix
, regno
);
21819 else if ((sel
& SAVRES_REG
) == SAVRES_FPR
)
21820 sprintf (savres_routine_name
, "*%sFP%s%.0d ; %s f%d-f31", prefix
,
21821 (regno
== 14 ? "" : "+"), (regno
- 14) * 4, prefix
, regno
);
21822 else if ((sel
& SAVRES_REG
) == SAVRES_VR
)
21823 sprintf (savres_routine_name
, "*%sVEC%s%.0d ; %s v%d-v31", prefix
,
21824 (regno
== 20 ? "" : "+"), (regno
- 20) * 8, prefix
, regno
);
21829 sprintf (savres_routine_name
, "%s%d%s", prefix
, regno
, suffix
);
21831 return savres_routine_name
;
21834 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
21835 We are saving/restoring GPRs if GPR is true. */
21838 rs6000_savres_routine_sym (rs6000_stack_t
*info
, int sel
)
21840 int regno
= ((sel
& SAVRES_REG
) == SAVRES_GPR
21841 ? info
->first_gp_reg_save
21842 : (sel
& SAVRES_REG
) == SAVRES_FPR
21843 ? info
->first_fp_reg_save
- 32
21844 : (sel
& SAVRES_REG
) == SAVRES_VR
21845 ? info
->first_altivec_reg_save
- FIRST_ALTIVEC_REGNO
21850 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
21851 versions of the gpr routines. */
21852 if (TARGET_SPE_ABI
&& (sel
& SAVRES_REG
) == SAVRES_GPR
21853 && info
->spe_64bit_regs_used
)
21854 select
^= SAVRES_FPR
^ SAVRES_GPR
;
21856 /* Don't generate bogus routine names. */
21857 gcc_assert (FIRST_SAVRES_REGISTER
<= regno
21858 && regno
<= LAST_SAVRES_REGISTER
21859 && select
>= 0 && select
<= 12);
21861 sym
= savres_routine_syms
[regno
-FIRST_SAVRES_REGISTER
][select
];
21867 name
= rs6000_savres_routine_name (info
, regno
, sel
);
21869 sym
= savres_routine_syms
[regno
-FIRST_SAVRES_REGISTER
][select
]
21870 = gen_rtx_SYMBOL_REF (Pmode
, ggc_strdup (name
));
21871 SYMBOL_REF_FLAGS (sym
) |= SYMBOL_FLAG_FUNCTION
;
/* Emit a sequence of insns, including a stack tie if needed, for
   resetting the stack pointer.  If UPDT_REGNO is not 1, then don't
   reset the stack pointer, but move the base of the frame into
   reg UPDT_REGNO for use by out-of-line register restore routines.  */

static rtx
rs6000_emit_stack_reset (rs6000_stack_t *info,
                         rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
                         unsigned updt_regno)
{
  rtx updt_reg_rtx;

  /* This blockage is needed so that sched doesn't decide to move
     the sp change before the register restores.  */
  if (DEFAULT_ABI == ABI_V4
      || (TARGET_SPE_ABI
          && info->spe_64bit_regs_used != 0
          && info->first_gp_reg_save != 32))
    rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);

  /* If we are restoring registers out-of-line, we will be using the
     "exit" variants of the restore routines, which will reset the
     stack for us.  But we do need to point updt_reg into the
     right place for those routines.  */
  updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);

  if (frame_off != 0)
    return emit_insn (gen_add3_insn (updt_reg_rtx,
                                     frame_reg_rtx, GEN_INT (frame_off)));
  else if (REGNO (frame_reg_rtx) != updt_regno)
    return emit_move_insn (updt_reg_rtx, frame_reg_rtx);

  return NULL_RTX;
}
/* Return the register number used as a pointer by out-of-line
   save/restore functions.  */

static inline unsigned
ptr_regno_for_savres (int sel)
{
  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
  return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
}
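/* Summary of the mapping above (illustrative): under the AIX/ELFv2
   ABIs the out-of-line GPR routines that do not touch LR are
   addressed off r12, while the FPR routines and the LR variants use
   r1; the V.4 and Darwin routines use r11, except Darwin FPR saves,
   which are addressed off r1.  */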
21923 /* Construct a parallel rtx describing the effect of a call to an
21924 out-of-line register save/restore routine, and emit the insn
21925 or jump_insn as appropriate. */
21928 rs6000_emit_savres_rtx (rs6000_stack_t
*info
,
21929 rtx frame_reg_rtx
, int save_area_offset
, int lr_offset
,
21930 enum machine_mode reg_mode
, int sel
)
21933 int offset
, start_reg
, end_reg
, n_regs
, use_reg
;
21934 int reg_size
= GET_MODE_SIZE (reg_mode
);
21940 start_reg
= ((sel
& SAVRES_REG
) == SAVRES_GPR
21941 ? info
->first_gp_reg_save
21942 : (sel
& SAVRES_REG
) == SAVRES_FPR
21943 ? info
->first_fp_reg_save
21944 : (sel
& SAVRES_REG
) == SAVRES_VR
21945 ? info
->first_altivec_reg_save
21947 end_reg
= ((sel
& SAVRES_REG
) == SAVRES_GPR
21949 : (sel
& SAVRES_REG
) == SAVRES_FPR
21951 : (sel
& SAVRES_REG
) == SAVRES_VR
21952 ? LAST_ALTIVEC_REGNO
+ 1
21954 n_regs
= end_reg
- start_reg
;
21955 p
= rtvec_alloc (3 + ((sel
& SAVRES_LR
) ? 1 : 0)
21956 + ((sel
& SAVRES_REG
) == SAVRES_VR
? 1 : 0)
21959 if (!(sel
& SAVRES_SAVE
) && (sel
& SAVRES_LR
))
21960 RTVEC_ELT (p
, offset
++) = ret_rtx
;
21962 RTVEC_ELT (p
, offset
++)
21963 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, LR_REGNO
));
21965 sym
= rs6000_savres_routine_sym (info
, sel
);
21966 RTVEC_ELT (p
, offset
++) = gen_rtx_USE (VOIDmode
, sym
);
21968 use_reg
= ptr_regno_for_savres (sel
);
21969 if ((sel
& SAVRES_REG
) == SAVRES_VR
)
21971 /* Vector regs are saved/restored using [reg+reg] addressing. */
21972 RTVEC_ELT (p
, offset
++)
21973 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, use_reg
));
21974 RTVEC_ELT (p
, offset
++)
21975 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (Pmode
, 0));
21978 RTVEC_ELT (p
, offset
++)
21979 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (Pmode
, use_reg
));
21981 for (i
= 0; i
< end_reg
- start_reg
; i
++)
21982 RTVEC_ELT (p
, i
+ offset
)
21983 = gen_frame_set (gen_rtx_REG (reg_mode
, start_reg
+ i
),
21984 frame_reg_rtx
, save_area_offset
+ reg_size
* i
,
21985 (sel
& SAVRES_SAVE
) != 0);
21987 if ((sel
& SAVRES_SAVE
) && (sel
& SAVRES_LR
))
21988 RTVEC_ELT (p
, i
+ offset
)
21989 = gen_frame_store (gen_rtx_REG (Pmode
, 0), frame_reg_rtx
, lr_offset
);
21991 par
= gen_rtx_PARALLEL (VOIDmode
, p
);
21993 if (!(sel
& SAVRES_SAVE
) && (sel
& SAVRES_LR
))
21995 insn
= emit_jump_insn (par
);
21996 JUMP_LABEL (insn
) = ret_rtx
;
21999 insn
= emit_insn (par
);
/* Emit code to store CR fields that need to be saved into REG.  */

static void
rs6000_emit_move_from_cr (rtx reg)
{
  /* Only the ELFv2 ABI allows storing only selected fields.  */
  if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
    {
      int i, cr_reg[8], count = 0;

      /* Collect CR fields that must be saved.  */
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  cr_reg[count++] = i;

      /* If it's just a single one, use mfcrf.  */
      if (count == 1)
	{
	  rtvec p = rtvec_alloc (1);
	  rtvec r = rtvec_alloc (2);
	  RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
	  RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
	  RTVEC_ELT (p, 0)
	    = gen_rtx_SET (VOIDmode, reg,
			   gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));

	  emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
	  return;
	}

      /* ??? It might be better to handle count == 2 / 3 cases here
	 as well, using logical operations to combine the values.  */
    }

  emit_insn (gen_movesi_from_cr (reg));
}
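/* Editor's note (worked example, not from the original source): if only CR2
   must be saved, cr_reg[0] == 2 above and the mfcrf field-mask operand is
   GEN_INT (1 << (7 - 2)) == 0x20, i.e. the single bit selecting CR
   field 2.  */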
/* Determine whether the gp REG is really used.  */

static bool
rs6000_reg_live_or_pic_offset_p (int reg)
{
  /* If the function calls eh_return, claim used all the registers that would
     be checked for liveness otherwise.  This is required for the PIC offset
     register with -mminimal-toc on AIX, as it is advertised as "fixed" for
     register allocation purposes in this case.  */

  return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
	   && (!call_used_regs[reg]
	       || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
		   && !TARGET_SINGLE_PIC_BASE
		   && TARGET_TOC && TARGET_MINIMAL_TOC)))
	  || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
	      && !TARGET_SINGLE_PIC_BASE
	      && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
		  || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
}
/* Emit function prologue as insns.  */

void
rs6000_emit_prologue (void)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  enum machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx frame_reg_rtx = sp_reg_rtx;
  unsigned int cr_save_regno;
  rtx cr_save_rtx = NULL_RTX;
  rtx insn;
  int strategy;
  int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
			      && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
			      && call_used_regs[STATIC_CHAIN_REGNUM]);
  /* Offset to top of frame for frame_reg and sp respectively.  */
  HOST_WIDE_INT frame_off = 0;
  HOST_WIDE_INT sp_off = 0;

#ifdef ENABLE_CHECKING
  /* Track and check usage of r0, r11, r12.  */
  int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
#define START_USE(R) do \
  {						\
    gcc_assert ((reg_inuse & (1 << (R))) == 0);	\
    reg_inuse |= 1 << (R);			\
  } while (0)
#define END_USE(R) do \
  {						\
    gcc_assert ((reg_inuse & (1 << (R))) != 0);	\
    reg_inuse &= ~(1 << (R));			\
  } while (0)
#define NOT_INUSE(R) do \
  {						\
    gcc_assert ((reg_inuse & (1 << (R))) == 0);	\
  } while (0)
#else
#define START_USE(R) do {} while (0)
#define END_USE(R) do {} while (0)
#define NOT_INUSE(R) do {} while (0)
#endif

  if (DEFAULT_ABI == ABI_ELFv2)
    {
      cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);

      /* With -mminimal-toc we may generate an extra use of r2 below.  */
      if (!TARGET_SINGLE_PIC_BASE
	  && TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
	cfun->machine->r2_setup_needed = true;
    }

  if (flag_stack_usage_info)
    current_function_static_stack_size = info->total_size;

  if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
    {
      HOST_WIDE_INT size = info->total_size;

      if (crtl->is_leaf && !cfun->calls_alloca)
	{
	  if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
	    rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
					   size - STACK_CHECK_PROTECT);
	}
      else if (size > 0)
	rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
    }

  if (TARGET_FIX_AND_CONTINUE)
    {
      /* gdb on darwin arranges to forward a function from the old
	 address by modifying the first 5 instructions of the function
	 to branch to the overriding function.  This is necessary to
	 permit function pointers that point to the old function to
	 actually forward to the new function.  */
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
      emit_insn (gen_nop ());
    }

  if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
    {
      reg_mode = V2SImode;
      reg_size = 8;
    }
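  /* Editor's note (illustrative): under the SPE ABI with 64-bit GPRs in use,
     GPR saves below are emitted in V2SImode, so reg_size is 8 even for a
     32-bit target and every save-area offset computed from reg_size scales
     accordingly.  */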
22153 /* Handle world saves specially here. */
22154 if (WORLD_SAVE_P (info
))
22161 /* save_world expects lr in r0. */
22162 reg0
= gen_rtx_REG (Pmode
, 0);
22163 if (info
->lr_save_p
)
22165 insn
= emit_move_insn (reg0
,
22166 gen_rtx_REG (Pmode
, LR_REGNO
));
22167 RTX_FRAME_RELATED_P (insn
) = 1;
22170 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
22171 assumptions about the offsets of various bits of the stack
22173 gcc_assert (info
->gp_save_offset
== -220
22174 && info
->fp_save_offset
== -144
22175 && info
->lr_save_offset
== 8
22176 && info
->cr_save_offset
== 4
22179 && (!crtl
->calls_eh_return
22180 || info
->ehrd_offset
== -432)
22181 && info
->vrsave_save_offset
== -224
22182 && info
->altivec_save_offset
== -416);
22184 treg
= gen_rtx_REG (SImode
, 11);
22185 emit_move_insn (treg
, GEN_INT (-info
->total_size
));
22187 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
22188 in R11. It also clobbers R12, so beware! */
22190 /* Preserve CR2 for save_world prologues */
22192 sz
+= 32 - info
->first_gp_reg_save
;
22193 sz
+= 64 - info
->first_fp_reg_save
;
22194 sz
+= LAST_ALTIVEC_REGNO
- info
->first_altivec_reg_save
+ 1;
22195 p
= rtvec_alloc (sz
);
22197 RTVEC_ELT (p
, j
++) = gen_rtx_CLOBBER (VOIDmode
,
22198 gen_rtx_REG (SImode
,
22200 RTVEC_ELT (p
, j
++) = gen_rtx_USE (VOIDmode
,
22201 gen_rtx_SYMBOL_REF (Pmode
,
22203 /* We do floats first so that the instruction pattern matches
22205 for (i
= 0; i
< 64 - info
->first_fp_reg_save
; i
++)
22207 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
22209 info
->first_fp_reg_save
+ i
),
22211 info
->fp_save_offset
+ frame_off
+ 8 * i
);
22212 for (i
= 0; info
->first_altivec_reg_save
+ i
<= LAST_ALTIVEC_REGNO
; i
++)
22214 = gen_frame_store (gen_rtx_REG (V4SImode
,
22215 info
->first_altivec_reg_save
+ i
),
22217 info
->altivec_save_offset
+ frame_off
+ 16 * i
);
22218 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
22220 = gen_frame_store (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
22222 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
22224 /* CR register traditionally saved as CR2. */
22226 = gen_frame_store (gen_rtx_REG (SImode
, CR2_REGNO
),
22227 frame_reg_rtx
, info
->cr_save_offset
+ frame_off
);
22228 /* Explain about use of R0. */
22229 if (info
->lr_save_p
)
22231 = gen_frame_store (reg0
,
22232 frame_reg_rtx
, info
->lr_save_offset
+ frame_off
);
22233 /* Explain what happens to the stack pointer. */
22235 rtx newval
= gen_rtx_PLUS (Pmode
, sp_reg_rtx
, treg
);
22236 RTVEC_ELT (p
, j
++) = gen_rtx_SET (VOIDmode
, sp_reg_rtx
, newval
);
22239 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
22240 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
22241 treg
, GEN_INT (-info
->total_size
), NULL_RTX
);
22242 sp_off
= frame_off
= info
->total_size
;
22245 strategy
= info
->savres_strategy
;
22247 /* For V.4, update stack before we do any saving and set back pointer. */
22248 if (! WORLD_SAVE_P (info
)
22250 && (DEFAULT_ABI
== ABI_V4
22251 || crtl
->calls_eh_return
))
22253 bool need_r11
= (TARGET_SPE
22254 ? (!(strategy
& SAVE_INLINE_GPRS
)
22255 && info
->spe_64bit_regs_used
== 0)
22256 : (!(strategy
& SAVE_INLINE_FPRS
)
22257 || !(strategy
& SAVE_INLINE_GPRS
)
22258 || !(strategy
& SAVE_INLINE_VRS
)));
22259 int ptr_regno
= -1;
22260 rtx ptr_reg
= NULL_RTX
;
22263 if (info
->total_size
< 32767)
22264 frame_off
= info
->total_size
;
22267 else if (info
->cr_save_p
22269 || info
->first_fp_reg_save
< 64
22270 || info
->first_gp_reg_save
< 32
22271 || info
->altivec_size
!= 0
22272 || info
->vrsave_mask
!= 0
22273 || crtl
->calls_eh_return
)
22277 /* The prologue won't be saving any regs so there is no need
22278 to set up a frame register to access any frame save area.
22279 We also won't be using frame_off anywhere below, but set
22280 the correct value anyway to protect against future
22281 changes to this function. */
22282 frame_off
= info
->total_size
;
22284 if (ptr_regno
!= -1)
22286 /* Set up the frame offset to that needed by the first
22287 out-of-line save function. */
22288 START_USE (ptr_regno
);
22289 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
22290 frame_reg_rtx
= ptr_reg
;
22291 if (!(strategy
& SAVE_INLINE_FPRS
) && info
->fp_size
!= 0)
22292 gcc_checking_assert (info
->fp_save_offset
+ info
->fp_size
== 0);
22293 else if (!(strategy
& SAVE_INLINE_GPRS
) && info
->first_gp_reg_save
< 32)
22294 ptr_off
= info
->gp_save_offset
+ info
->gp_size
;
22295 else if (!(strategy
& SAVE_INLINE_VRS
) && info
->altivec_size
!= 0)
22296 ptr_off
= info
->altivec_save_offset
+ info
->altivec_size
;
22297 frame_off
= -ptr_off
;
22299 rs6000_emit_allocate_stack (info
->total_size
, ptr_reg
, ptr_off
);
22300 sp_off
= info
->total_size
;
22301 if (frame_reg_rtx
!= sp_reg_rtx
)
22302 rs6000_emit_stack_tie (frame_reg_rtx
, false);
  /* If we use the link register, get it into r0.  */
  if (!WORLD_SAVE_P (info) && info->lr_save_p)
    {
      rtx addr, reg, mem;

      reg = gen_rtx_REG (Pmode, 0);
      START_USE (0);
      insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
      RTX_FRAME_RELATED_P (insn) = 1;

      if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
			| SAVE_NOINLINE_FPRS_SAVES_LR)))
	{
	  addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
			       GEN_INT (info->lr_save_offset + frame_off));
	  mem = gen_rtx_MEM (Pmode, addr);
	  /* This should not be of rs6000_sr_alias_set, because of
	     __builtin_return_address.  */

	  insn = emit_move_insn (mem, reg);
	  rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
				NULL_RTX, NULL_RTX, NULL_RTX);
	  END_USE (0);
	}
    }

  /* If we need to save CR, put it into r12 or r11.  Choose r12 except when
     r12 will be needed by out-of-line gpr restore.  */
  cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
		   && !(strategy & (SAVE_INLINE_GPRS
				    | SAVE_NOINLINE_GPRS_SAVES_LR))
		   ? 11 : 12);
  if (!WORLD_SAVE_P (info)
      && info->cr_save_p
      && REGNO (frame_reg_rtx) != cr_save_regno
      && !(using_static_chain_p && cr_save_regno == 11))
    {
      cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
      START_USE (cr_save_regno);
      rs6000_emit_move_from_cr (cr_save_rtx);
    }

  /* Do any required saving of fpr's.  If only one or two to save, do
     it ourselves.  Otherwise, call function.  */
  if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
    {
      int i;
      for (i = 0; i < 64 - info->first_fp_reg_save; i++)
	if (save_reg_p (info->first_fp_reg_save + i))
	  emit_frame_save (frame_reg_rtx,
			   (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
			    ? DFmode : SFmode),
			   info->first_fp_reg_save + i,
			   info->fp_save_offset + frame_off + 8 * i,
			   sp_off - frame_off);
    }
  else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
    {
      bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
      int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
      unsigned ptr_regno = ptr_regno_for_savres (sel);
      rtx ptr_reg = frame_reg_rtx;

      if (REGNO (frame_reg_rtx) == ptr_regno)
	gcc_checking_assert (frame_off == 0);
      else
	{
	  ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
	  NOT_INUSE (ptr_regno);
	  emit_insn (gen_add3_insn (ptr_reg,
				    frame_reg_rtx, GEN_INT (frame_off)));
	}
      insn = rs6000_emit_savres_rtx (info, ptr_reg,
				     info->fp_save_offset,
				     info->lr_save_offset,
				     DFmode, sel);
      rs6000_frame_related (insn, ptr_reg, sp_off,
			    NULL_RTX, NULL_RTX, NULL_RTX);
      if (lr)
	END_USE (0);
    }
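  /* Editor's note (illustrative): when the FPRs are saved out of line, the
     pointer register chosen by ptr_regno_for_savres must hold the address
     that makes info->fp_save_offset a valid displacement, which is why the
     gen_add3_insn of frame_off is emitted above whenever frame_reg_rtx is
     not already that register.  */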
22387 /* Save GPRs. This is done as a PARALLEL if we are using
22388 the store-multiple instructions. */
22389 if (!WORLD_SAVE_P (info
)
22391 && info
->spe_64bit_regs_used
!= 0
22392 && info
->first_gp_reg_save
!= 32)
22395 rtx spe_save_area_ptr
;
22396 HOST_WIDE_INT save_off
;
22397 int ool_adjust
= 0;
22399 /* Determine whether we can address all of the registers that need
22400 to be saved with an offset from frame_reg_rtx that fits in
22401 the small const field for SPE memory instructions. */
22402 int spe_regs_addressable
22403 = (SPE_CONST_OFFSET_OK (info
->spe_gp_save_offset
+ frame_off
22404 + reg_size
* (32 - info
->first_gp_reg_save
- 1))
22405 && (strategy
& SAVE_INLINE_GPRS
));
22407 if (spe_regs_addressable
)
22409 spe_save_area_ptr
= frame_reg_rtx
;
22410 save_off
= frame_off
;
22414 /* Make r11 point to the start of the SPE save area. We need
22415 to be careful here if r11 is holding the static chain. If
22416 it is, then temporarily save it in r0. */
22417 HOST_WIDE_INT offset
;
22419 if (!(strategy
& SAVE_INLINE_GPRS
))
22420 ool_adjust
= 8 * (info
->first_gp_reg_save
- FIRST_SAVED_GP_REGNO
);
22421 offset
= info
->spe_gp_save_offset
+ frame_off
- ool_adjust
;
22422 spe_save_area_ptr
= gen_rtx_REG (Pmode
, 11);
22423 save_off
= frame_off
- offset
;
22425 if (using_static_chain_p
)
22427 rtx r0
= gen_rtx_REG (Pmode
, 0);
22430 gcc_assert (info
->first_gp_reg_save
> 11);
22432 emit_move_insn (r0
, spe_save_area_ptr
);
22434 else if (REGNO (frame_reg_rtx
) != 11)
22437 emit_insn (gen_addsi3 (spe_save_area_ptr
,
22438 frame_reg_rtx
, GEN_INT (offset
)));
22439 if (!using_static_chain_p
&& REGNO (frame_reg_rtx
) == 11)
22440 frame_off
= -info
->spe_gp_save_offset
+ ool_adjust
;
22443 if ((strategy
& SAVE_INLINE_GPRS
))
22445 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
22446 if (rs6000_reg_live_or_pic_offset_p (info
->first_gp_reg_save
+ i
))
22447 emit_frame_save (spe_save_area_ptr
, reg_mode
,
22448 info
->first_gp_reg_save
+ i
,
22449 (info
->spe_gp_save_offset
+ save_off
22451 sp_off
- save_off
);
22455 insn
= rs6000_emit_savres_rtx (info
, spe_save_area_ptr
,
22456 info
->spe_gp_save_offset
+ save_off
,
22458 SAVRES_SAVE
| SAVRES_GPR
);
22460 rs6000_frame_related (insn
, spe_save_area_ptr
, sp_off
- save_off
,
22461 NULL_RTX
, NULL_RTX
, NULL_RTX
);
22464 /* Move the static chain pointer back. */
22465 if (!spe_regs_addressable
)
22467 if (using_static_chain_p
)
22469 emit_move_insn (spe_save_area_ptr
, gen_rtx_REG (Pmode
, 0));
22472 else if (REGNO (frame_reg_rtx
) != 11)
22476 else if (!WORLD_SAVE_P (info
) && !(strategy
& SAVE_INLINE_GPRS
))
22478 bool lr
= (strategy
& SAVE_NOINLINE_GPRS_SAVES_LR
) != 0;
22479 int sel
= SAVRES_SAVE
| SAVRES_GPR
| (lr
? SAVRES_LR
: 0);
22480 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
22481 rtx ptr_reg
= frame_reg_rtx
;
22482 bool ptr_set_up
= REGNO (ptr_reg
) == ptr_regno
;
22483 int end_save
= info
->gp_save_offset
+ info
->gp_size
;
22487 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
22489 /* Need to adjust r11 (r12) if we saved any FPRs. */
22490 if (end_save
+ frame_off
!= 0)
22492 rtx offset
= GEN_INT (end_save
+ frame_off
);
22495 frame_off
= -end_save
;
22497 NOT_INUSE (ptr_regno
);
22498 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
22500 else if (!ptr_set_up
)
22502 NOT_INUSE (ptr_regno
);
22503 emit_move_insn (ptr_reg
, frame_reg_rtx
);
22505 ptr_off
= -end_save
;
22506 insn
= rs6000_emit_savres_rtx (info
, ptr_reg
,
22507 info
->gp_save_offset
+ ptr_off
,
22508 info
->lr_save_offset
+ ptr_off
,
22510 rs6000_frame_related (insn
, ptr_reg
, sp_off
- ptr_off
,
22511 NULL_RTX
, NULL_RTX
, NULL_RTX
);
22515 else if (!WORLD_SAVE_P (info
) && (strategy
& SAVRES_MULTIPLE
))
22519 p
= rtvec_alloc (32 - info
->first_gp_reg_save
);
22520 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
22522 = gen_frame_store (gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
),
22524 info
->gp_save_offset
+ frame_off
+ reg_size
* i
);
22525 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
22526 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
22527 NULL_RTX
, NULL_RTX
, NULL_RTX
);
22529 else if (!WORLD_SAVE_P (info
))
22532 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
22533 if (rs6000_reg_live_or_pic_offset_p (info
->first_gp_reg_save
+ i
))
22534 emit_frame_save (frame_reg_rtx
, reg_mode
,
22535 info
->first_gp_reg_save
+ i
,
22536 info
->gp_save_offset
+ frame_off
+ reg_size
* i
,
22537 sp_off
- frame_off
);
22540 if (crtl
->calls_eh_return
)
22547 unsigned int regno
= EH_RETURN_DATA_REGNO (i
);
22548 if (regno
== INVALID_REGNUM
)
22552 p
= rtvec_alloc (i
);
22556 unsigned int regno
= EH_RETURN_DATA_REGNO (i
);
22557 if (regno
== INVALID_REGNUM
)
22561 = gen_frame_store (gen_rtx_REG (reg_mode
, regno
),
22563 info
->ehrd_offset
+ sp_off
+ reg_size
* (int) i
);
22564 RTVEC_ELT (p
, i
) = insn
;
22565 RTX_FRAME_RELATED_P (insn
) = 1;
22568 insn
= emit_insn (gen_blockage ());
22569 RTX_FRAME_RELATED_P (insn
) = 1;
22570 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, gen_rtx_PARALLEL (VOIDmode
, p
));
22573 /* In AIX ABI we need to make sure r2 is really saved. */
22574 if (TARGET_AIX
&& crtl
->calls_eh_return
)
22576 rtx tmp_reg
, tmp_reg_si
, hi
, lo
, compare_result
, toc_save_done
, jump
;
22577 rtx save_insn
, join_insn
, note
;
22578 long toc_restore_insn
;
22580 tmp_reg
= gen_rtx_REG (Pmode
, 11);
22581 tmp_reg_si
= gen_rtx_REG (SImode
, 11);
22582 if (using_static_chain_p
)
22585 emit_move_insn (gen_rtx_REG (Pmode
, 0), tmp_reg
);
22589 emit_move_insn (tmp_reg
, gen_rtx_REG (Pmode
, LR_REGNO
));
22590 /* Peek at instruction to which this function returns. If it's
22591 restoring r2, then we know we've already saved r2. We can't
22592 unconditionally save r2 because the value we have will already
22593 be updated if we arrived at this function via a plt call or
22594 toc adjusting stub. */
22595 emit_move_insn (tmp_reg_si
, gen_rtx_MEM (SImode
, tmp_reg
));
22596 toc_restore_insn
= ((TARGET_32BIT
? 0x80410000 : 0xE8410000)
22597 + RS6000_TOC_SAVE_SLOT
);
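      /* Editor's note (decoding aid, not from the original source): the
	 constants above are the encodings of "lwz r2,N(r1)" (0x80410000,
	 32-bit) and "ld r2,N(r1)" (0xE8410000, 64-bit) with
	 N = RS6000_TOC_SAVE_SLOT, i.e. the TOC-restore instruction that a
	 plt call stub's caller is expected to contain.  */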
22598 hi
= gen_int_mode (toc_restore_insn
& ~0xffff, SImode
);
22599 emit_insn (gen_xorsi3 (tmp_reg_si
, tmp_reg_si
, hi
));
22600 compare_result
= gen_rtx_REG (CCUNSmode
, CR0_REGNO
);
22601 validate_condition_mode (EQ
, CCUNSmode
);
22602 lo
= gen_int_mode (toc_restore_insn
& 0xffff, SImode
);
22603 emit_insn (gen_rtx_SET (VOIDmode
, compare_result
,
22604 gen_rtx_COMPARE (CCUNSmode
, tmp_reg_si
, lo
)));
22605 toc_save_done
= gen_label_rtx ();
22606 jump
= gen_rtx_IF_THEN_ELSE (VOIDmode
,
22607 gen_rtx_EQ (VOIDmode
, compare_result
,
22609 gen_rtx_LABEL_REF (VOIDmode
, toc_save_done
),
22611 jump
= emit_jump_insn (gen_rtx_SET (VOIDmode
, pc_rtx
, jump
));
22612 JUMP_LABEL (jump
) = toc_save_done
;
22613 LABEL_NUSES (toc_save_done
) += 1;
22615 save_insn
= emit_frame_save (frame_reg_rtx
, reg_mode
,
22616 TOC_REGNUM
, frame_off
+ RS6000_TOC_SAVE_SLOT
,
22617 sp_off
- frame_off
);
22619 emit_label (toc_save_done
);
22621 /* ??? If we leave SAVE_INSN as marked as saving R2, then we'll
22622 have a CFG that has different saves along different paths.
22623 Move the note to a dummy blockage insn, which describes that
22624 R2 is unconditionally saved after the label. */
22625 /* ??? An alternate representation might be a special insn pattern
22626 containing both the branch and the store. That might let the
22627 code that minimizes the number of DW_CFA_advance opcodes better
22628 freedom in placing the annotations. */
22629 note
= find_reg_note (save_insn
, REG_FRAME_RELATED_EXPR
, NULL
);
22631 remove_note (save_insn
, note
);
22633 note
= alloc_reg_note (REG_FRAME_RELATED_EXPR
,
22634 copy_rtx (PATTERN (save_insn
)), NULL_RTX
);
22635 RTX_FRAME_RELATED_P (save_insn
) = 0;
22637 join_insn
= emit_insn (gen_blockage ());
22638 REG_NOTES (join_insn
) = note
;
22639 RTX_FRAME_RELATED_P (join_insn
) = 1;
22641 if (using_static_chain_p
)
22643 emit_move_insn (tmp_reg
, gen_rtx_REG (Pmode
, 0));
22650 /* Save CR if we use any that must be preserved. */
22651 if (!WORLD_SAVE_P (info
) && info
->cr_save_p
)
22653 rtx addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
22654 GEN_INT (info
->cr_save_offset
+ frame_off
));
22655 rtx mem
= gen_frame_mem (SImode
, addr
);
22657 /* If we didn't copy cr before, do so now using r0. */
22658 if (cr_save_rtx
== NULL_RTX
)
22661 cr_save_rtx
= gen_rtx_REG (SImode
, 0);
22662 rs6000_emit_move_from_cr (cr_save_rtx
);
22665 /* Saving CR requires a two-instruction sequence: one instruction
22666 to move the CR to a general-purpose register, and a second
22667 instruction that stores the GPR to memory.
22669 We do not emit any DWARF CFI records for the first of these,
22670 because we cannot properly represent the fact that CR is saved in
22671 a register. One reason is that we cannot express that multiple
22672 CR fields are saved; another reason is that on 64-bit, the size
22673 of the CR register in DWARF (4 bytes) differs from the size of
22674 a general-purpose register.
22676 This means if any intervening instruction were to clobber one of
22677 the call-saved CR fields, we'd have incorrect CFI. To prevent
22678 this from happening, we mark the store to memory as a use of
22679 those CR fields, which prevents any such instruction from being
22680 scheduled in between the two instructions. */
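      /* Editor's sketch (illustrative only): the emitted pair is roughly
	     mfcr  rN                      ; rN = cr_save_regno or r0
	     stw   rN,cr_save_offset(r1)
	 with the USEs of the saved CR fields attached to the store to keep
	 the scheduler from separating the two instructions.  */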
22685 crsave_v
[n_crsave
++] = gen_rtx_SET (VOIDmode
, mem
, cr_save_rtx
);
22686 for (i
= 0; i
< 8; i
++)
22687 if (save_reg_p (CR0_REGNO
+ i
))
22688 crsave_v
[n_crsave
++]
22689 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (CCmode
, CR0_REGNO
+ i
));
22691 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
,
22692 gen_rtvec_v (n_crsave
, crsave_v
)));
22693 END_USE (REGNO (cr_save_rtx
));
22695 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
22696 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
22697 so we need to construct a frame expression manually. */
22698 RTX_FRAME_RELATED_P (insn
) = 1;
22700 /* Update address to be stack-pointer relative, like
22701 rs6000_frame_related would do. */
22702 addr
= gen_rtx_PLUS (Pmode
, gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
),
22703 GEN_INT (info
->cr_save_offset
+ sp_off
));
22704 mem
= gen_frame_mem (SImode
, addr
);
22706 if (DEFAULT_ABI
== ABI_ELFv2
)
22708 /* In the ELFv2 ABI we generate separate CFI records for each
22709 CR field that was actually saved. They all point to the
22710 same 32-bit stack slot. */
22714 for (i
= 0; i
< 8; i
++)
22715 if (save_reg_p (CR0_REGNO
+ i
))
22718 = gen_rtx_SET (VOIDmode
, mem
,
22719 gen_rtx_REG (SImode
, CR0_REGNO
+ i
));
22721 RTX_FRAME_RELATED_P (crframe
[n_crframe
]) = 1;
22725 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
22726 gen_rtx_PARALLEL (VOIDmode
,
22727 gen_rtvec_v (n_crframe
, crframe
)));
22731 /* In other ABIs, by convention, we use a single CR regnum to
22732 represent the fact that all call-saved CR fields are saved.
22733 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
22734 rtx set
= gen_rtx_SET (VOIDmode
, mem
,
22735 gen_rtx_REG (SImode
, CR2_REGNO
));
22736 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, set
);
22740 /* In the ELFv2 ABI we need to save all call-saved CR fields into
22741 *separate* slots if the routine calls __builtin_eh_return, so
22742 that they can be independently restored by the unwinder. */
22743 if (DEFAULT_ABI
== ABI_ELFv2
&& crtl
->calls_eh_return
)
22745 int i
, cr_off
= info
->ehcr_offset
;
22748 /* ??? We might get better performance by using multiple mfocrf
22750 crsave
= gen_rtx_REG (SImode
, 0);
22751 emit_insn (gen_movesi_from_cr (crsave
));
22753 for (i
= 0; i
< 8; i
++)
22754 if (!call_used_regs
[CR0_REGNO
+ i
])
22756 rtvec p
= rtvec_alloc (2);
22758 = gen_frame_store (crsave
, frame_reg_rtx
, cr_off
+ frame_off
);
22760 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (CCmode
, CR0_REGNO
+ i
));
22762 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
22764 RTX_FRAME_RELATED_P (insn
) = 1;
22765 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
,
22766 gen_frame_store (gen_rtx_REG (SImode
, CR0_REGNO
+ i
),
22767 sp_reg_rtx
, cr_off
+ sp_off
));
22769 cr_off
+= reg_size
;
22773 /* Update stack and set back pointer unless this is V.4,
22774 for which it was done previously. */
22775 if (!WORLD_SAVE_P (info
) && info
->push_p
22776 && !(DEFAULT_ABI
== ABI_V4
|| crtl
->calls_eh_return
))
22778 rtx ptr_reg
= NULL
;
22781 /* If saving altivec regs we need to be able to address all save
22782 locations using a 16-bit offset. */
22783 if ((strategy
& SAVE_INLINE_VRS
) == 0
22784 || (info
->altivec_size
!= 0
22785 && (info
->altivec_save_offset
+ info
->altivec_size
- 16
22786 + info
->total_size
- frame_off
) > 32767)
22787 || (info
->vrsave_size
!= 0
22788 && (info
->vrsave_save_offset
22789 + info
->total_size
- frame_off
) > 32767))
22791 int sel
= SAVRES_SAVE
| SAVRES_VR
;
22792 unsigned ptr_regno
= ptr_regno_for_savres (sel
);
22794 if (using_static_chain_p
22795 && ptr_regno
== STATIC_CHAIN_REGNUM
)
22797 if (REGNO (frame_reg_rtx
) != ptr_regno
)
22798 START_USE (ptr_regno
);
22799 ptr_reg
= gen_rtx_REG (Pmode
, ptr_regno
);
22800 frame_reg_rtx
= ptr_reg
;
22801 ptr_off
= info
->altivec_save_offset
+ info
->altivec_size
;
22802 frame_off
= -ptr_off
;
22804 else if (REGNO (frame_reg_rtx
) == 1)
22805 frame_off
= info
->total_size
;
22806 rs6000_emit_allocate_stack (info
->total_size
, ptr_reg
, ptr_off
);
22807 sp_off
= info
->total_size
;
22808 if (frame_reg_rtx
!= sp_reg_rtx
)
22809 rs6000_emit_stack_tie (frame_reg_rtx
, false);
  /* Set frame pointer, if needed.  */
  if (frame_pointer_needed)
    {
      insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
			     sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
22820 /* Save AltiVec registers if needed. Save here because the red zone does
22821 not always include AltiVec registers. */
22822 if (!WORLD_SAVE_P (info
) && TARGET_ALTIVEC_ABI
22823 && info
->altivec_size
!= 0 && (strategy
& SAVE_INLINE_VRS
) == 0)
22825 int end_save
= info
->altivec_save_offset
+ info
->altivec_size
;
22827 /* Oddly, the vector save/restore functions point r0 at the end
22828 of the save area, then use r11 or r12 to load offsets for
22829 [reg+reg] addressing. */
22830 rtx ptr_reg
= gen_rtx_REG (Pmode
, 0);
22831 int scratch_regno
= ptr_regno_for_savres (SAVRES_SAVE
| SAVRES_VR
);
22832 rtx scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
22834 gcc_checking_assert (scratch_regno
== 11 || scratch_regno
== 12);
22836 if (end_save
+ frame_off
!= 0)
22838 rtx offset
= GEN_INT (end_save
+ frame_off
);
22840 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
22843 emit_move_insn (ptr_reg
, frame_reg_rtx
);
22845 ptr_off
= -end_save
;
22846 insn
= rs6000_emit_savres_rtx (info
, scratch_reg
,
22847 info
->altivec_save_offset
+ ptr_off
,
22848 0, V4SImode
, SAVRES_SAVE
| SAVRES_VR
);
22849 rs6000_frame_related (insn
, scratch_reg
, sp_off
- ptr_off
,
22850 NULL_RTX
, NULL_RTX
, NULL_RTX
);
22851 if (REGNO (frame_reg_rtx
) == REGNO (scratch_reg
))
22853 /* The oddity mentioned above clobbered our frame reg. */
22854 emit_move_insn (frame_reg_rtx
, ptr_reg
);
22855 frame_off
= ptr_off
;
22858 else if (!WORLD_SAVE_P (info
) && TARGET_ALTIVEC_ABI
22859 && info
->altivec_size
!= 0)
22863 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
22864 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
22866 rtx areg
, savereg
, mem
, split_reg
;
22869 offset
= (info
->altivec_save_offset
+ frame_off
22870 + 16 * (i
- info
->first_altivec_reg_save
));
22872 savereg
= gen_rtx_REG (V4SImode
, i
);
22875 areg
= gen_rtx_REG (Pmode
, 0);
22876 emit_move_insn (areg
, GEN_INT (offset
));
22878 /* AltiVec addressing mode is [reg+reg]. */
22879 mem
= gen_frame_mem (V4SImode
,
22880 gen_rtx_PLUS (Pmode
, frame_reg_rtx
, areg
));
22882 insn
= emit_move_insn (mem
, savereg
);
22884 /* When we split a VSX store into two insns, we need to make
22885 sure the DWARF info knows which register we are storing.
22886 Pass it in to be used on the appropriate note. */
22887 if (!BYTES_BIG_ENDIAN
22888 && GET_CODE (PATTERN (insn
)) == SET
22889 && GET_CODE (SET_SRC (PATTERN (insn
))) == VEC_SELECT
)
22890 split_reg
= savereg
;
22892 split_reg
= NULL_RTX
;
22894 rs6000_frame_related (insn
, frame_reg_rtx
, sp_off
- frame_off
,
22895 areg
, GEN_INT (offset
), split_reg
);
22899 /* VRSAVE is a bit vector representing which AltiVec registers
22900 are used. The OS uses this to determine which vector
22901 registers to save on a context switch. We need to save
22902 VRSAVE on the stack frame, add whatever AltiVec registers we
22903 used in this function, and do the corresponding magic in the
22906 if (!WORLD_SAVE_P (info
)
22908 && TARGET_ALTIVEC_VRSAVE
22909 && info
->vrsave_mask
!= 0)
22915 /* Get VRSAVE onto a GPR. Note that ABI_V4 and ABI_DARWIN might
22916 be using r12 as frame_reg_rtx and r11 as the static chain
22917 pointer for nested functions. */
22919 if ((DEFAULT_ABI
== ABI_AIX
|| DEFAULT_ABI
== ABI_ELFv2
)
22920 && !using_static_chain_p
)
22922 else if (REGNO (frame_reg_rtx
) == 12)
22925 if (using_static_chain_p
)
22929 NOT_INUSE (save_regno
);
22930 reg
= gen_rtx_REG (SImode
, save_regno
);
22931 vrsave
= gen_rtx_REG (SImode
, VRSAVE_REGNO
);
22933 emit_insn (gen_get_vrsave_internal (reg
));
22935 emit_insn (gen_rtx_SET (VOIDmode
, reg
, vrsave
));
22938 offset
= info
->vrsave_save_offset
+ frame_off
;
22939 insn
= emit_insn (gen_frame_store (reg
, frame_reg_rtx
, offset
));
22941 /* Include the registers in the mask. */
22942 emit_insn (gen_iorsi3 (reg
, reg
, GEN_INT ((int) info
->vrsave_mask
)));
22944 insn
= emit_insn (generate_set_vrsave (reg
, info
, 0));
22947 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
22948 if (!TARGET_SINGLE_PIC_BASE
22949 && ((TARGET_TOC
&& TARGET_MINIMAL_TOC
&& get_pool_size () != 0)
22950 || (DEFAULT_ABI
== ABI_V4
22951 && (flag_pic
== 1 || (flag_pic
&& TARGET_SECURE_PLT
))
22952 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM
))))
22954 /* If emit_load_toc_table will use the link register, we need to save
22955 it. We use R12 for this purpose because emit_load_toc_table
22956 can use register 0. This allows us to use a plain 'blr' to return
22957 from the procedure more often. */
22958 int save_LR_around_toc_setup
= (TARGET_ELF
22959 && DEFAULT_ABI
== ABI_V4
22961 && ! info
->lr_save_p
22962 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun
)->preds
) > 0);
22963 if (save_LR_around_toc_setup
)
22965 rtx lr
= gen_rtx_REG (Pmode
, LR_REGNO
);
22966 rtx tmp
= gen_rtx_REG (Pmode
, 12);
22968 insn
= emit_move_insn (tmp
, lr
);
22969 RTX_FRAME_RELATED_P (insn
) = 1;
22971 rs6000_emit_load_toc_table (TRUE
);
22973 insn
= emit_move_insn (lr
, tmp
);
22974 add_reg_note (insn
, REG_CFA_RESTORE
, lr
);
22975 RTX_FRAME_RELATED_P (insn
) = 1;
22978 rs6000_emit_load_toc_table (TRUE
);
22982 if (!TARGET_SINGLE_PIC_BASE
22983 && DEFAULT_ABI
== ABI_DARWIN
22984 && flag_pic
&& crtl
->uses_pic_offset_table
)
22986 rtx lr
= gen_rtx_REG (Pmode
, LR_REGNO
);
22987 rtx src
= gen_rtx_SYMBOL_REF (Pmode
, MACHOPIC_FUNCTION_BASE_NAME
);
22989 /* Save and restore LR locally around this call (in R0). */
22990 if (!info
->lr_save_p
)
22991 emit_move_insn (gen_rtx_REG (Pmode
, 0), lr
);
22993 emit_insn (gen_load_macho_picbase (src
));
22995 emit_move_insn (gen_rtx_REG (Pmode
,
22996 RS6000_PIC_OFFSET_TABLE_REGNUM
),
22999 if (!info
->lr_save_p
)
23000 emit_move_insn (lr
, gen_rtx_REG (Pmode
, 0));
  /* If we need to, save the TOC register after doing the stack setup.
     Do not emit eh frame info for this save.  The unwinder wants info,
     conceptually attached to instructions in this function, about
     register values in the caller of this function.  This R2 may have
     already been changed from the value in the caller.
     We don't attempt to write accurate DWARF EH frame info for R2
     because code emitted by gcc for a (non-pointer) function call
     doesn't save and restore R2.  Instead, R2 is managed out-of-line
     by a linker generated plt call stub when the function resides in
     a shared library.  This behaviour is costly to describe in DWARF,
     both in terms of the size of DWARF info and the time taken in the
     unwinder to interpret it.  R2 changes, apart from the
     calls_eh_return case earlier in this function, are handled by
     linux-unwind.h frob_update_context.  */
  if (rs6000_save_toc_in_prologue_p ())
    {
      rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
      emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
    }
}
/* Write function prologue.  */

static void
rs6000_output_function_prologue (FILE *file,
				 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  if (TARGET_DEBUG_STACK)
    debug_stack_info (info);

  /* Write .extern for any function we will call to save and restore
     fp values.  */
  if (info->first_fp_reg_save < 64
      && !TARGET_MACHO
      && !TARGET_ELF)
    {
      char *name;
      int regno = info->first_fp_reg_save - 32;

      if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
	{
	  bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
	  int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
	  name = rs6000_savres_routine_name (info, regno, sel);
	  fprintf (file, "\t.extern %s\n", name);
	}
      if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
	{
	  bool lr = (info->savres_strategy
		     & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
	  int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
	  name = rs6000_savres_routine_name (info, regno, sel);
	  fprintf (file, "\t.extern %s\n", name);
	}
    }

  /* ELFv2 ABI r2 setup code and local entry point.  This must follow
     immediately after the global entry point label.  */
  if (DEFAULT_ABI == ABI_ELFv2 && cfun->machine->r2_setup_needed)
    {
      const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);

      fprintf (file, "0:\taddis 2,12,.TOC.-0b@ha\n");
      fprintf (file, "\taddi 2,2,.TOC.-0b@l\n");

      fputs ("\t.localentry\t", file);
      assemble_name (file, name);
      fputs (",.-", file);
      assemble_name (file, name);
      fputs ("\n", file);
    }

  /* Output -mprofile-kernel code.  This needs to be done here instead of
     in output_function_profile since it must go after the ELFv2 ABI
     local entry point.  */
  if (TARGET_PROFILE_KERNEL)
    {
      gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
      gcc_assert (!TARGET_32BIT);

      asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
      asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);

      /* In the ELFv2 ABI we have no compiler stack word.  It must be
	 the responsibility of _mcount to preserve the static chain
	 register if required.  */
      if (DEFAULT_ABI != ABI_ELFv2
	  && cfun->static_chain_decl != NULL)
	{
	  asm_fprintf (file, "\tstd %s,24(%s)\n",
		       reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
	  fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
	  asm_fprintf (file, "\tld %s,24(%s)\n",
		       reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
	}
      else
	fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
    }

  rs6000_pic_labelno++;
}
/* Non-zero if vmx regs are restored before the frame pop, zero if
   we restore after the pop when possible.  */
#define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0

/* Restoring cr is a two step process: loading a reg from the frame
   save, then moving the reg to cr.  For ABI_V4 we must let the
   unwinder know that the stack location is no longer valid at or
   before the stack deallocation, but we can't emit a cfa_restore for
   cr at the stack deallocation like we do for other registers.
   The trouble is that it is possible for the move to cr to be
   scheduled after the stack deallocation.  So say exactly where cr
   is located on each of the two insns.  */

static rtx
load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
{
  rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (SImode, regno);
  rtx insn = emit_move_insn (reg, mem);

  if (!exit_func && DEFAULT_ABI == ABI_V4)
    {
      rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
      rtx set = gen_rtx_SET (VOIDmode, reg, cr);

      add_reg_note (insn, REG_CFA_REGISTER, set);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  return reg;
}
23142 restore_saved_cr (rtx reg
, int using_mfcr_multiple
, bool exit_func
)
23147 if (using_mfcr_multiple
)
23149 for (i
= 0; i
< 8; i
++)
23150 if (save_reg_p (CR0_REGNO
+ i
))
23152 gcc_assert (count
);
23155 if (using_mfcr_multiple
&& count
> 1)
23161 p
= rtvec_alloc (count
);
23164 for (i
= 0; i
< 8; i
++)
23165 if (save_reg_p (CR0_REGNO
+ i
))
23167 rtvec r
= rtvec_alloc (2);
23168 RTVEC_ELT (r
, 0) = reg
;
23169 RTVEC_ELT (r
, 1) = GEN_INT (1 << (7-i
));
23170 RTVEC_ELT (p
, ndx
) =
23171 gen_rtx_SET (VOIDmode
, gen_rtx_REG (CCmode
, CR0_REGNO
+ i
),
23172 gen_rtx_UNSPEC (CCmode
, r
, UNSPEC_MOVESI_TO_CR
));
23175 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
23176 gcc_assert (ndx
== count
);
23178 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
23179 CR field separately. */
23180 if (!exit_func
&& DEFAULT_ABI
== ABI_ELFv2
&& flag_shrink_wrap
)
23182 for (i
= 0; i
< 8; i
++)
23183 if (save_reg_p (CR0_REGNO
+ i
))
23184 add_reg_note (insn
, REG_CFA_RESTORE
,
23185 gen_rtx_REG (SImode
, CR0_REGNO
+ i
));
23187 RTX_FRAME_RELATED_P (insn
) = 1;
23191 for (i
= 0; i
< 8; i
++)
23192 if (save_reg_p (CR0_REGNO
+ i
))
23194 rtx insn
= emit_insn (gen_movsi_to_cr_one
23195 (gen_rtx_REG (CCmode
, CR0_REGNO
+ i
), reg
));
23197 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
23198 CR field separately, attached to the insn that in fact
23199 restores this particular CR field. */
23200 if (!exit_func
&& DEFAULT_ABI
== ABI_ELFv2
&& flag_shrink_wrap
)
23202 add_reg_note (insn
, REG_CFA_RESTORE
,
23203 gen_rtx_REG (SImode
, CR0_REGNO
+ i
));
23205 RTX_FRAME_RELATED_P (insn
) = 1;
23209 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
23210 if (!exit_func
&& DEFAULT_ABI
!= ABI_ELFv2
23211 && (DEFAULT_ABI
== ABI_V4
|| flag_shrink_wrap
))
23213 rtx insn
= get_last_insn ();
23214 rtx cr
= gen_rtx_REG (SImode
, CR2_REGNO
);
23216 add_reg_note (insn
, REG_CFA_RESTORE
, cr
);
23217 RTX_FRAME_RELATED_P (insn
) = 1;
/* Like cr, the move to lr instruction can be scheduled after the
   stack deallocation, but unlike cr, its stack frame save is still
   valid.  So we only need to emit the cfa_restore on the correct
   instruction.  */

static void
load_lr_save (int regno, rtx frame_reg_rtx, int offset)
{
  rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
  rtx reg = gen_rtx_REG (Pmode, regno);

  emit_move_insn (reg, mem);
}

static void
restore_saved_lr (int regno, bool exit_func)
{
  rtx reg = gen_rtx_REG (Pmode, regno);
  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
  rtx insn = emit_move_insn (lr, reg);

  if (!exit_func && flag_shrink_wrap)
    {
      add_reg_note (insn, REG_CFA_RESTORE, lr);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
}
static rtx
add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
{
  if (DEFAULT_ABI == ABI_ELFv2)
    {
      int i;
      for (i = 0; i < 8; i++)
	if (save_reg_p (CR0_REGNO + i))
	  {
	    rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
					   cfa_restores);
	  }
    }
  else if (info->cr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
				   gen_rtx_REG (SImode, CR2_REGNO),
				   cfa_restores);

  if (info->lr_save_p)
    cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
				   gen_rtx_REG (Pmode, LR_REGNO),
				   cfa_restores);
  return cfa_restores;
}
/* Return true if OFFSET from stack pointer can be clobbered by signals.
   V.4 doesn't have any stack cushion, AIX ABIs have 220 or 288 bytes
   below stack pointer not clobbered by signals.  */

static bool
offset_below_red_zone_p (HOST_WIDE_INT offset)
{
  return offset < (DEFAULT_ABI == ABI_V4
		   ? 0
		   : TARGET_32BIT ? -220 : -288);
}
/* Append CFA_RESTORES to any existing REG_NOTES on the last insn.  */

static void
emit_cfa_restores (rtx cfa_restores)
{
  rtx insn = get_last_insn ();
  rtx *loc = &REG_NOTES (insn);

  while (*loc)
    loc = &XEXP (*loc, 1);
  *loc = cfa_restores;
  RTX_FRAME_RELATED_P (insn) = 1;
}
/* Emit function epilogue as insns.  */

void
rs6000_emit_epilogue (int sibcall)
{
  rs6000_stack_t *info;
  int restoring_GPRs_inline;
  int restoring_FPRs_inline;
  int using_load_multiple;
  int using_mtcr_multiple;
  int use_backchain_to_restore_sp;
  int restore_lr;
  int strategy;
  HOST_WIDE_INT frame_off = 0;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
  rtx frame_reg_rtx = sp_reg_rtx;
  rtx cfa_restores = NULL_RTX;
  rtx insn;
  rtx cr_save_reg = NULL_RTX;
  enum machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int i;
  bool exit_func;
  unsigned ptr_regno;

  info = rs6000_stack_info ();

  if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
    {
      reg_mode = V2SImode;
      reg_size = 8;
    }

  strategy = info->savres_strategy;
  using_load_multiple = strategy & SAVRES_MULTIPLE;
  restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
  restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
  using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
			 || rs6000_cpu == PROCESSOR_PPC603
			 || rs6000_cpu == PROCESSOR_PPC750
			 || optimize_size);
  /* Restore via the backchain when we have a large frame, since this
     is more efficient than an addis, addi pair.  The second condition
     here will not trigger at the moment;  We don't actually need a
     frame pointer for alloca, but the generic parts of the compiler
     give us one anyway.  */
  use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
				 || (cfun->calls_alloca
				     && !frame_pointer_needed));
  restore_lr = (info->lr_save_p
		&& (restoring_FPRs_inline
		    || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
		&& (restoring_GPRs_inline
		    || info->first_fp_reg_save < 64));
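  /* Editor's note (illustrative): for frames larger than roughly 32 KB the
     displacement of a single "addi r1,r1,size" no longer fits in 16 bits,
     so the code below reloads the old stack pointer from the back chain
     word at 0(r1) rather than materializing the frame size with an
     addis/addi pair.  */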
23356 if (WORLD_SAVE_P (info
))
23360 const char *alloc_rname
;
23363 /* eh_rest_world_r10 will return to the location saved in the LR
23364 stack slot (which is not likely to be our caller.)
23365 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
23366 rest_world is similar, except any R10 parameter is ignored.
23367 The exception-handling stuff that was here in 2.95 is no
23368 longer necessary. */
23372 + 32 - info
->first_gp_reg_save
23373 + LAST_ALTIVEC_REGNO
+ 1 - info
->first_altivec_reg_save
23374 + 63 + 1 - info
->first_fp_reg_save
);
23376 strcpy (rname
, ((crtl
->calls_eh_return
) ?
23377 "*eh_rest_world_r10" : "*rest_world"));
23378 alloc_rname
= ggc_strdup (rname
);
23381 RTVEC_ELT (p
, j
++) = ret_rtx
;
23382 RTVEC_ELT (p
, j
++) = gen_rtx_USE (VOIDmode
,
23383 gen_rtx_REG (Pmode
,
23386 = gen_rtx_USE (VOIDmode
, gen_rtx_SYMBOL_REF (Pmode
, alloc_rname
));
23387 /* The instruction pattern requires a clobber here;
23388 it is shared with the restVEC helper. */
23390 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, 11));
23393 /* CR register traditionally saved as CR2. */
23394 rtx reg
= gen_rtx_REG (SImode
, CR2_REGNO
);
23396 = gen_frame_load (reg
, frame_reg_rtx
, info
->cr_save_offset
);
23397 if (flag_shrink_wrap
)
23399 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
,
23400 gen_rtx_REG (Pmode
, LR_REGNO
),
23402 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
23406 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
23408 rtx reg
= gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
);
23410 = gen_frame_load (reg
,
23411 frame_reg_rtx
, info
->gp_save_offset
+ reg_size
* i
);
23412 if (flag_shrink_wrap
)
23413 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
23415 for (i
= 0; info
->first_altivec_reg_save
+ i
<= LAST_ALTIVEC_REGNO
; i
++)
23417 rtx reg
= gen_rtx_REG (V4SImode
, info
->first_altivec_reg_save
+ i
);
23419 = gen_frame_load (reg
,
23420 frame_reg_rtx
, info
->altivec_save_offset
+ 16 * i
);
23421 if (flag_shrink_wrap
)
23422 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
23424 for (i
= 0; info
->first_fp_reg_save
+ i
<= 63; i
++)
23426 rtx reg
= gen_rtx_REG ((TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
23427 ? DFmode
: SFmode
),
23428 info
->first_fp_reg_save
+ i
);
23430 = gen_frame_load (reg
, frame_reg_rtx
, info
->fp_save_offset
+ 8 * i
);
23431 if (flag_shrink_wrap
)
23432 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
23435 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, 0));
23437 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 12));
23439 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 7));
23441 = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (SImode
, 8));
23443 = gen_rtx_USE (VOIDmode
, gen_rtx_REG (SImode
, 10));
23444 insn
= emit_jump_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
23446 if (flag_shrink_wrap
)
23448 REG_NOTES (insn
) = cfa_restores
;
23449 add_reg_note (insn
, REG_CFA_DEF_CFA
, sp_reg_rtx
);
23450 RTX_FRAME_RELATED_P (insn
) = 1;
23455 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
23457 frame_off
= info
->total_size
;
23459 /* Restore AltiVec registers if we must do so before adjusting the
23461 if (TARGET_ALTIVEC_ABI
23462 && info
->altivec_size
!= 0
23463 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
23464 || (DEFAULT_ABI
!= ABI_V4
23465 && offset_below_red_zone_p (info
->altivec_save_offset
))))
23468 int scratch_regno
= ptr_regno_for_savres (SAVRES_VR
);
23470 gcc_checking_assert (scratch_regno
== 11 || scratch_regno
== 12);
23471 if (use_backchain_to_restore_sp
)
23473 int frame_regno
= 11;
23475 if ((strategy
& REST_INLINE_VRS
) == 0)
23477 /* Of r11 and r12, select the one not clobbered by an
23478 out-of-line restore function for the frame register. */
23479 frame_regno
= 11 + 12 - scratch_regno
;
23481 frame_reg_rtx
= gen_rtx_REG (Pmode
, frame_regno
);
23482 emit_move_insn (frame_reg_rtx
,
23483 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
23486 else if (frame_pointer_needed
)
23487 frame_reg_rtx
= hard_frame_pointer_rtx
;
23489 if ((strategy
& REST_INLINE_VRS
) == 0)
23491 int end_save
= info
->altivec_save_offset
+ info
->altivec_size
;
23493 rtx ptr_reg
= gen_rtx_REG (Pmode
, 0);
23494 rtx scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
23496 if (end_save
+ frame_off
!= 0)
23498 rtx offset
= GEN_INT (end_save
+ frame_off
);
23500 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
23503 emit_move_insn (ptr_reg
, frame_reg_rtx
);
23505 ptr_off
= -end_save
;
23506 insn
= rs6000_emit_savres_rtx (info
, scratch_reg
,
23507 info
->altivec_save_offset
+ ptr_off
,
23508 0, V4SImode
, SAVRES_VR
);
23512 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
23513 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
23515 rtx addr
, areg
, mem
, reg
;
23517 areg
= gen_rtx_REG (Pmode
, 0);
23519 (areg
, GEN_INT (info
->altivec_save_offset
23521 + 16 * (i
- info
->first_altivec_reg_save
)));
23523 /* AltiVec addressing mode is [reg+reg]. */
23524 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
, areg
);
23525 mem
= gen_frame_mem (V4SImode
, addr
);
23527 reg
= gen_rtx_REG (V4SImode
, i
);
23528 emit_move_insn (reg
, mem
);
23532 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
23533 if (((strategy
& REST_INLINE_VRS
) == 0
23534 || (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
)) != 0)
23535 && (flag_shrink_wrap
23536 || (offset_below_red_zone_p
23537 (info
->altivec_save_offset
23538 + 16 * (i
- info
->first_altivec_reg_save
)))))
23540 rtx reg
= gen_rtx_REG (V4SImode
, i
);
23541 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
23545 /* Restore VRSAVE if we must do so before adjusting the stack. */
23547 && TARGET_ALTIVEC_VRSAVE
23548 && info
->vrsave_mask
!= 0
23549 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
23550 || (DEFAULT_ABI
!= ABI_V4
23551 && offset_below_red_zone_p (info
->vrsave_save_offset
))))
23555 if (frame_reg_rtx
== sp_reg_rtx
)
23557 if (use_backchain_to_restore_sp
)
23559 frame_reg_rtx
= gen_rtx_REG (Pmode
, 11);
23560 emit_move_insn (frame_reg_rtx
,
23561 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
23564 else if (frame_pointer_needed
)
23565 frame_reg_rtx
= hard_frame_pointer_rtx
;
23568 reg
= gen_rtx_REG (SImode
, 12);
23569 emit_insn (gen_frame_load (reg
, frame_reg_rtx
,
23570 info
->vrsave_save_offset
+ frame_off
));
23572 emit_insn (generate_set_vrsave (reg
, info
, 1));
23576 /* If we have a large stack frame, restore the old stack pointer
23577 using the backchain. */
23578 if (use_backchain_to_restore_sp
)
23580 if (frame_reg_rtx
== sp_reg_rtx
)
23582 /* Under V.4, don't reset the stack pointer until after we're done
23583 loading the saved registers. */
23584 if (DEFAULT_ABI
== ABI_V4
)
23585 frame_reg_rtx
= gen_rtx_REG (Pmode
, 11);
23587 insn
= emit_move_insn (frame_reg_rtx
,
23588 gen_rtx_MEM (Pmode
, sp_reg_rtx
));
23591 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
23592 && DEFAULT_ABI
== ABI_V4
)
23593 /* frame_reg_rtx has been set up by the altivec restore. */
23597 insn
= emit_move_insn (sp_reg_rtx
, frame_reg_rtx
);
23598 frame_reg_rtx
= sp_reg_rtx
;
23601 /* If we have a frame pointer, we can restore the old stack pointer
23603 else if (frame_pointer_needed
)
23605 frame_reg_rtx
= sp_reg_rtx
;
23606 if (DEFAULT_ABI
== ABI_V4
)
23607 frame_reg_rtx
= gen_rtx_REG (Pmode
, 11);
23608 /* Prevent reordering memory accesses against stack pointer restore. */
23609 else if (cfun
->calls_alloca
23610 || offset_below_red_zone_p (-info
->total_size
))
23611 rs6000_emit_stack_tie (frame_reg_rtx
, true);
23613 insn
= emit_insn (gen_add3_insn (frame_reg_rtx
, hard_frame_pointer_rtx
,
23614 GEN_INT (info
->total_size
)));
23617 else if (info
->push_p
23618 && DEFAULT_ABI
!= ABI_V4
23619 && !crtl
->calls_eh_return
)
23621 /* Prevent reordering memory accesses against stack pointer restore. */
23622 if (cfun
->calls_alloca
23623 || offset_below_red_zone_p (-info
->total_size
))
23624 rs6000_emit_stack_tie (frame_reg_rtx
, false);
23625 insn
= emit_insn (gen_add3_insn (sp_reg_rtx
, sp_reg_rtx
,
23626 GEN_INT (info
->total_size
)));
23629 if (insn
&& frame_reg_rtx
== sp_reg_rtx
)
23633 REG_NOTES (insn
) = cfa_restores
;
23634 cfa_restores
= NULL_RTX
;
23636 add_reg_note (insn
, REG_CFA_DEF_CFA
, sp_reg_rtx
);
23637 RTX_FRAME_RELATED_P (insn
) = 1;
23640 /* Restore AltiVec registers if we have not done so already. */
23641 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
23642 && TARGET_ALTIVEC_ABI
23643 && info
->altivec_size
!= 0
23644 && (DEFAULT_ABI
== ABI_V4
23645 || !offset_below_red_zone_p (info
->altivec_save_offset
)))
23649 if ((strategy
& REST_INLINE_VRS
) == 0)
23651 int end_save
= info
->altivec_save_offset
+ info
->altivec_size
;
23653 rtx ptr_reg
= gen_rtx_REG (Pmode
, 0);
23654 int scratch_regno
= ptr_regno_for_savres (SAVRES_VR
);
23655 rtx scratch_reg
= gen_rtx_REG (Pmode
, scratch_regno
);
23657 if (end_save
+ frame_off
!= 0)
23659 rtx offset
= GEN_INT (end_save
+ frame_off
);
23661 emit_insn (gen_add3_insn (ptr_reg
, frame_reg_rtx
, offset
));
23664 emit_move_insn (ptr_reg
, frame_reg_rtx
);
23666 ptr_off
= -end_save
;
23667 insn
= rs6000_emit_savres_rtx (info
, scratch_reg
,
23668 info
->altivec_save_offset
+ ptr_off
,
23669 0, V4SImode
, SAVRES_VR
);
23670 if (REGNO (frame_reg_rtx
) == REGNO (scratch_reg
))
23672 /* Frame reg was clobbered by out-of-line save. Restore it
23673 from ptr_reg, and if we are calling out-of-line gpr or
23674 fpr restore set up the correct pointer and offset. */
23675 unsigned newptr_regno
= 1;
23676 if (!restoring_GPRs_inline
)
23678 bool lr
= info
->gp_save_offset
+ info
->gp_size
== 0;
23679 int sel
= SAVRES_GPR
| (lr
? SAVRES_LR
: 0);
23680 newptr_regno
= ptr_regno_for_savres (sel
);
23681 end_save
= info
->gp_save_offset
+ info
->gp_size
;
23683 else if (!restoring_FPRs_inline
)
23685 bool lr
= !(strategy
& REST_NOINLINE_FPRS_DOESNT_RESTORE_LR
);
23686 int sel
= SAVRES_FPR
| (lr
? SAVRES_LR
: 0);
23687 newptr_regno
= ptr_regno_for_savres (sel
);
23688 end_save
= info
->gp_save_offset
+ info
->gp_size
;
23691 if (newptr_regno
!= 1 && REGNO (frame_reg_rtx
) != newptr_regno
)
23692 frame_reg_rtx
= gen_rtx_REG (Pmode
, newptr_regno
);
23694 if (end_save
+ ptr_off
!= 0)
23696 rtx offset
= GEN_INT (end_save
+ ptr_off
);
23698 frame_off
= -end_save
;
23699 emit_insn (gen_add3_insn (frame_reg_rtx
, ptr_reg
, offset
));
23703 frame_off
= ptr_off
;
23704 emit_move_insn (frame_reg_rtx
, ptr_reg
);
23710 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
23711 if (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
))
23713 rtx addr
, areg
, mem
, reg
;
23715 areg
= gen_rtx_REG (Pmode
, 0);
23717 (areg
, GEN_INT (info
->altivec_save_offset
23719 + 16 * (i
- info
->first_altivec_reg_save
)));
23721 /* AltiVec addressing mode is [reg+reg]. */
23722 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
, areg
);
23723 mem
= gen_frame_mem (V4SImode
, addr
);
23725 reg
= gen_rtx_REG (V4SImode
, i
);
23726 emit_move_insn (reg
, mem
);
23730 for (i
= info
->first_altivec_reg_save
; i
<= LAST_ALTIVEC_REGNO
; ++i
)
23731 if (((strategy
& REST_INLINE_VRS
) == 0
23732 || (info
->vrsave_mask
& ALTIVEC_REG_BIT (i
)) != 0)
23733 && (DEFAULT_ABI
== ABI_V4
|| flag_shrink_wrap
))
23735 rtx reg
= gen_rtx_REG (V4SImode
, i
);
23736 cfa_restores
= alloc_reg_note (REG_CFA_RESTORE
, reg
, cfa_restores
);
23740 /* Restore VRSAVE if we have not done so already. */
23741 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
23743 && TARGET_ALTIVEC_VRSAVE
23744 && info
	  ->vrsave_mask != 0
      && (DEFAULT_ABI == ABI_V4
	  || !offset_below_red_zone_p (info->vrsave_save_offset)))
    {
      reg = gen_rtx_REG (SImode, 12);
      emit_insn (gen_frame_load (reg, frame_reg_rtx,
				 info->vrsave_save_offset + frame_off));
      emit_insn (generate_set_vrsave (reg, info, 1));
    }

  /* If we exit by an out-of-line restore function on ABI_V4 then that
     function will deallocate the stack, so we don't need to worry
     about the unwinder restoring cr from an invalid stack frame
     position.  */
  exit_func = (!restoring_FPRs_inline
	       || (!restoring_GPRs_inline
		   && info->first_fp_reg_save == 64));

  /* In the ELFv2 ABI we need to restore all call-saved CR fields from
     *separate* slots if the routine calls __builtin_eh_return, so
     that they can be independently restored by the unwinder.  */
  if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
    {
      int i, cr_off = info->ehcr_offset;

      for (i = 0; i < 8; i++)
	if (!call_used_regs[CR0_REGNO + i])
	  {
	    rtx reg = gen_rtx_REG (SImode, 0);
	    emit_insn (gen_frame_load (reg, frame_reg_rtx,
				       cr_off + frame_off));

	    insn = emit_insn (gen_movsi_to_cr_one
				(gen_rtx_REG (CCmode, CR0_REGNO + i), reg));

	    if (!exit_func && flag_shrink_wrap)
	      {
		add_reg_note (insn, REG_CFA_RESTORE,
			      gen_rtx_REG (SImode, CR0_REGNO + i));
		RTX_FRAME_RELATED_P (insn) = 1;
	      }

	    cr_off += reg_size;
	  }
    }

  /* Get the old lr if we saved it.  If we are restoring registers
     out-of-line, then the out-of-line routines can do this for us.  */
  if (restore_lr && restoring_GPRs_inline)
    load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);

  /* Get the old cr if we saved it.  */
  if (info->cr_save_p)
    {
      unsigned cr_save_regno = 12;

      if (!restoring_GPRs_inline)
	{
	  /* Ensure we don't use the register used by the out-of-line
	     gpr register restore below.  */
	  bool lr = info->gp_save_offset + info->gp_size == 0;
	  int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
	  int gpr_ptr_regno = ptr_regno_for_savres (sel);

	  if (gpr_ptr_regno == 12)
	    cr_save_regno = 11;
	  gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
	}
      else if (REGNO (frame_reg_rtx) == 12)
	cr_save_regno = 11;

      cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
				  info->cr_save_offset + frame_off,
				  exit_func);
    }

  /* Set LR here to try to overlap restores below.  */
  if (restore_lr && restoring_GPRs_inline)
    restore_saved_lr (0, exit_func);

  /* Load exception handler data registers, if needed.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i, regno;

	  rtx reg = gen_rtx_REG (reg_mode, 2);
	  emit_insn (gen_frame_load (reg, frame_reg_rtx,
				     frame_off + RS6000_TOC_SAVE_SLOT));

	  regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;

	  /* Note: possible use of r0 here to address SPE regs.  */
	  mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
				      info->ehrd_offset + frame_off
				      + reg_size * (int) i);

	  emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
    }

  /* Restore GPRs.  This is done as a PARALLEL if we are using
     the load-multiple instructions.  */
      && info->spe_64bit_regs_used
      && info->first_gp_reg_save != 32)
    {
      /* Determine whether we can address all of the registers that need
	 to be saved with an offset from frame_reg_rtx that fits in
	 the small const field for SPE memory instructions.  */
      int spe_regs_addressable
	= (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
				+ reg_size * (32 - info->first_gp_reg_save - 1))
	   && restoring_GPRs_inline);

      if (!spe_regs_addressable)
	{
	  int ool_adjust = 0;
	  rtx old_frame_reg_rtx = frame_reg_rtx;
	  /* Make r11 point to the start of the SPE save area.  We worried about
	     not clobbering it when we were saving registers in the prologue.
	     There's no need to worry here because the static chain is passed
	     anew to every function.  */

	  if (!restoring_GPRs_inline)
	    ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
	  frame_reg_rtx = gen_rtx_REG (Pmode, 11);
	  emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
				 GEN_INT (info->spe_gp_save_offset
					  + frame_off)));
	  /* Keep the invariant that frame_reg_rtx + frame_off points
	     at the top of the stack frame.  */
	  frame_off = -info->spe_gp_save_offset + ool_adjust;
	}

      if (restoring_GPRs_inline)
	{
	  HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;

	  for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	    if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
	      {
		rtx offset, addr, mem, reg;

		/* We're doing all this to ensure that the immediate offset
		   fits into the immediate field of 'evldd'.  */
		gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));

		offset = GEN_INT (spe_offset + reg_size * i);
		addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
		mem = gen_rtx_MEM (V2SImode, addr);
		reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);

		emit_move_insn (reg, mem);
	      }
	}
      else
	rs6000_emit_savres_rtx (info, frame_reg_rtx,
				info->spe_gp_save_offset + frame_off,
				info->lr_save_offset + frame_off,
				SAVRES_GPR | SAVRES_LR);
    }
  else if (!restoring_GPRs_inline)
    {
      /* We are jumping to an out-of-line function.  */
      int end_save = info->gp_save_offset + info->gp_size;
      bool can_use_exit = end_save == 0;
      int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);

      /* Emit stack reset code if we need it.  */
      ptr_regno = ptr_regno_for_savres (sel);
      ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
	rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
      else if (end_save + frame_off != 0)
	emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
				  GEN_INT (end_save + frame_off)));
      else if (REGNO (frame_reg_rtx) != ptr_regno)
	emit_move_insn (ptr_reg, frame_reg_rtx);
      if (REGNO (frame_reg_rtx) == ptr_regno)
	frame_off = -end_save;

      if (can_use_exit && info->cr_save_p)
	restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);

      ptr_off = -end_save;
      rs6000_emit_savres_rtx (info, ptr_reg,
			      info->gp_save_offset + ptr_off,
			      info->lr_save_offset + ptr_off,
    }
  else if (using_load_multiple)
    {
      p = rtvec_alloc (32 - info->first_gp_reg_save);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	RTVEC_ELT (p, i)
	  = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
			    frame_reg_rtx,
			    info->gp_save_offset + frame_off + reg_size * i);
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }
  else
    {
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
	  emit_insn (gen_frame_load
		     (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
		      frame_reg_rtx,
		      info->gp_save_offset + frame_off + reg_size * i));
    }

  if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
    {
      /* If the frame pointer was used then we can't delay emitting
	 a REG_CFA_DEF_CFA note.  This must happen on the insn that
	 restores the frame pointer, r31.  We may have already emitted
	 a REG_CFA_DEF_CFA note, but that's OK;  A duplicate is
	 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
	 be harmless if emitted.  */
      if (frame_pointer_needed)
	{
	  insn = get_last_insn ();
	  add_reg_note (insn, REG_CFA_DEF_CFA,
			plus_constant (Pmode, frame_reg_rtx, frame_off));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}

      /* Set up cfa_restores.  We always need these when
	 shrink-wrapping.  If not shrink-wrapping then we only need
	 the cfa_restore when the stack location is no longer valid.
	 The cfa_restores must be emitted on or before the insn that
	 invalidates the stack, and of course must not be emitted
	 before the insn that actually does the restore.  The latter
	 is why it is a bad idea to emit the cfa_restores as a group
	 on the last instruction here that actually does a restore:
	 That insn may be reordered with respect to others doing
	 restores.  */
      if (flag_shrink_wrap
	  && !restoring_GPRs_inline
	  && info->first_fp_reg_save == 64)
	cfa_restores = add_crlr_cfa_restore (info, cfa_restores);

      for (i = info->first_gp_reg_save; i < 32; i++)
	if (!restoring_GPRs_inline
	    || using_load_multiple
	    || rs6000_reg_live_or_pic_offset_p (i))
	  {
	    rtx reg = gen_rtx_REG (reg_mode, i);

	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	  }
    }

  if (!restoring_GPRs_inline
      && info->first_fp_reg_save == 64)
    {
      /* We are jumping to an out-of-line function.  */
	emit_cfa_restores (cfa_restores);
    }

  if (restore_lr && !restoring_GPRs_inline)
    {
      load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
      restore_saved_lr (0, exit_func);
    }

  /* Restore fpr's if we need to do it without calling a function.  */
  if (restoring_FPRs_inline)
    for (i = 0; i < 64 - info->first_fp_reg_save; i++)
      if (save_reg_p (info->first_fp_reg_save + i))
	{
	  rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
				  ? DFmode : SFmode),
				 info->first_fp_reg_save + i);
	  emit_insn (gen_frame_load (reg, frame_reg_rtx,
				     info->fp_save_offset + frame_off + 8 * i));
	  if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
	}

  /* If we saved cr, restore it here.  Just those that were used.  */
  if (info->cr_save_p)
    restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);

  /* If this is V.4, unwind the stack pointer after all of the loads
     have been done, or set up r11 if we are restoring fp out of line.  */
  if (!restoring_FPRs_inline)
    {
      bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
      int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
      ptr_regno = ptr_regno_for_savres (sel);
    }

  insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
  if (REGNO (frame_reg_rtx) == ptr_regno)

  if (insn && restoring_FPRs_inline)
    {
	  REG_NOTES (insn) = cfa_restores;
	  cfa_restores = NULL_RTX;
      add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  if (crtl->calls_eh_return)
    {
      rtx sa = EH_RETURN_STACKADJ_RTX;
      emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
    }

      bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
      if (! restoring_FPRs_inline)
	{
	  p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
	  RTVEC_ELT (p, 0) = ret_rtx;
	}
      else
	{
	  /* We can't hang the cfa_restores off a simple return,
	     since the shrink-wrap code sometimes uses an existing
	     return.  This means there might be a path from
	     pre-prologue code to this return, and dwarf2cfi code
	     wants the eh_frame unwinder state to be the same on
	     all paths to any point.  So we need to emit the
	     cfa_restores before the return.  For -m64 we really
	     don't need epilogue cfa_restores at all, except for
	     this irritating dwarf2cfi with shrink-wrap
	     requirement;  The stack red-zone means eh_frame info
	     from the prologue telling the unwinder to restore
	     from the stack is perfectly good right to the end of
	     the function.  */
	  emit_insn (gen_blockage ());
	  emit_cfa_restores (cfa_restores);
	  cfa_restores = NULL_RTX;

	  p = rtvec_alloc (2);
	  RTVEC_ELT (p, 0) = simple_return_rtx;
	}

      RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
			  ? gen_rtx_USE (VOIDmode,
					 gen_rtx_REG (Pmode, LR_REGNO))
			  : gen_rtx_CLOBBER (VOIDmode,
					     gen_rtx_REG (Pmode, LR_REGNO)));

      /* If we have to restore more than two FP registers, branch to the
	 restore function.  It will return to our caller.  */
      if (! restoring_FPRs_inline)
	{
	  if (flag_shrink_wrap)
	    cfa_restores = add_crlr_cfa_restore (info, cfa_restores);

	  sym = rs6000_savres_routine_sym (info,
					   SAVRES_FPR | (lr ? SAVRES_LR : 0));
	  RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
	  reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)? 1 : 11;
	  RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));

	  for (i = 0; i < 64 - info->first_fp_reg_save; i++)
	    {
	      rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);

	      RTVEC_ELT (p, i + 4)
		= gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
	      if (flag_shrink_wrap)
		cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
					       cfa_restores);
	    }
	}

      emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));

      /* Ensure the cfa_restores are hung off an insn that won't
	 be reordered above other restores.  */
      emit_insn (gen_blockage ());

      emit_cfa_restores (cfa_restores);
/* Write function epilogue.  */

rs6000_output_function_epilogue (FILE *file,
				 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
      macho_branch_islands ();
      /* Mach-O doesn't support labels at the end of objects, so if
	 it looks like we might want one, insert a NOP.  */
      {
	rtx insn = get_last_insn ();
	rtx deleted_debug_label = NULL_RTX;
	       && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
	  {
	    /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
	       notes only, instead set their CODE_LABEL_NUMBER to -1,
	       otherwise there would be code generation differences
	       in between -g and -g0.  */
	    if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
	      deleted_debug_label = insn;
	    insn = PREV_INSN (insn);
	  }
		    && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
	  fputs ("\tnop\n", file);
	else if (deleted_debug_label)
	  for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
	    if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
	      CODE_LABEL_NUMBER (insn) = -1;
      }

  /* Output a traceback table here.  See /usr/include/sys/debug.h for info
     on its fields.

     We don't output a traceback table if -finhibit-size-directive was
     used.  The documentation for -finhibit-size-directive reads
     ``don't output a @code{.size} assembler directive, or anything
     else that would cause trouble if the function is split in the
     middle, and the two halves are placed at locations far apart in
     memory.''  The traceback table has this property, since it
     includes the offset from the start of the function to the
     traceback table itself.

     System V.4 Powerpc's (and the embedded ABI derived from it) use a
     different traceback table.  */
  if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
      && ! flag_inhibit_size_directive
      && rs6000_traceback != traceback_none && !cfun->is_thunk)
    {
      const char *fname = NULL;
      const char *language_string = lang_hooks.name;
      int fixed_parms = 0, float_parms = 0, parm_info = 0;
      int optional_tbtab;
      rs6000_stack_t *info = rs6000_stack_info ();

      if (rs6000_traceback == traceback_full)
	optional_tbtab = 1;
      else if (rs6000_traceback == traceback_part)
	optional_tbtab = 0;
      else
	optional_tbtab = !optimize_size && !TARGET_ELF;

      if (optional_tbtab)
	{
	  fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
	  while (*fname == '.')	/* V.4 encodes . in the name */
	    fname++;

	  /* Need label immediately before tbtab, so we can compute
	     its offset from the function start.  */
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
	  ASM_OUTPUT_LABEL (file, fname);
	}

      /* The .tbtab pseudo-op can only be used for the first eight
	 expressions, since it can't handle the possibly variable
	 length fields that follow.  However, if you omit the optional
	 fields, the assembler outputs zeros for all optional fields
	 anyway, giving each variable length field its minimum length
	 (as defined in sys/debug.h).  Thus we can not use the .tbtab
	 pseudo-op at all.  */

      /* An all-zero word flags the start of the tbtab, for debuggers
	 that have to find it by searching forward from the entry
	 point or from the current pc.  */
      fputs ("\t.long 0\n", file);

      /* Tbtab format type.  Use format type 0.  */
      fputs ("\t.byte 0,", file);

      /* Language type.  Unfortunately, there does not seem to be any
	 official way to discover the language being compiled, so we
	 use language_string.
	 C is 0.  Fortran is 1.  Pascal is 2.  Ada is 3.  C++ is 9.
	 Java is 13.  Objective-C is 14.  Objective-C++ isn't assigned
	 a number, so for now use 9.  LTO and Go aren't assigned numbers
	 either, so for now use 0.  */
      if (! strcmp (language_string, "GNU C")
	  || ! strcmp (language_string, "GNU GIMPLE")
	  || ! strcmp (language_string, "GNU Go"))
      else if (! strcmp (language_string, "GNU F77")
	       || ! strcmp (language_string, "GNU Fortran"))
      else if (! strcmp (language_string, "GNU Pascal"))
      else if (! strcmp (language_string, "GNU Ada"))
      else if (! strcmp (language_string, "GNU C++")
	       || ! strcmp (language_string, "GNU Objective-C++"))
      else if (! strcmp (language_string, "GNU Java"))
      else if (! strcmp (language_string, "GNU Objective-C"))
      else
	gcc_unreachable ();
      fprintf (file, "%d,", i);

      /* 8 single bit fields: global linkage (not set for C extern linkage,
	 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
	 from start of procedure stored in tbtab, internal function, function
	 has controlled storage, function has no toc, function uses fp,
	 function logs/aborts fp operations.  */
      /* Assume that fp operations are used if any fp reg must be saved.  */
      fprintf (file, "%d,",
	       (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));

      /* 6 bitfields: function is interrupt handler, name present in
	 proc table, function calls alloca, on condition directives
	 (controls stack walks, 3 bits), saves condition reg, saves
	 link reg.  */
      /* The `function calls alloca' bit seems to be set whenever reg 31 is
	 set up as a frame pointer, even when there is no alloca call.  */
      fprintf (file, "%d,",
	       ((optional_tbtab << 6)
		| ((optional_tbtab & frame_pointer_needed) << 5)
		| (info->cr_save_p << 1)
		| (info->lr_save_p)));

      /* 3 bitfields: saves backchain, fixup code, number of fpr saved
	 (6 bits).  */
      fprintf (file, "%d,",
	       (info->push_p << 7) | (64 - info->first_fp_reg_save));

      /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits).  */
      fprintf (file, "%d,", (32 - first_reg_to_save ()));

      if (optional_tbtab)
	{
	  /* Compute the parameter info from the function decl argument
	     list.  */
	  int next_parm_info_bit = 31;

	  for (decl = DECL_ARGUMENTS (current_function_decl);
	       decl; decl = DECL_CHAIN (decl))
	    {
	      rtx parameter = DECL_INCOMING_RTL (decl);
	      enum machine_mode mode = GET_MODE (parameter);

	      if (GET_CODE (parameter) == REG)
		{
		  if (SCALAR_FLOAT_MODE_P (mode))
		      gcc_unreachable ();

		  /* If only one bit will fit, don't or in this entry.  */
		  if (next_parm_info_bit > 0)
		    parm_info |= (bits << (next_parm_info_bit - 1));
		  next_parm_info_bit -= 2;
		}
	      else
		{
		  fixed_parms += ((GET_MODE_SIZE (mode)
				   + (UNITS_PER_WORD - 1))
		  next_parm_info_bit -= 1;
		}
	    }
	}

      /* Number of fixed point parameters.  */
      /* This is actually the number of words of fixed point parameters; thus
	 an 8 byte struct counts as 2; and thus the maximum value is 8.  */
      fprintf (file, "%d,", fixed_parms);

      /* 2 bitfields: number of floating point parameters (7 bits), parameters
	 on stack (1 bit).  */
      /* This is actually the number of fp registers that hold parameters;
	 and thus the maximum value is 13.  */
      /* Set parameters on stack bit if parameters are not in their original
	 registers, regardless of whether they are on the stack?  Xlc
	 seems to set the bit when not optimizing.  */
      fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));

      if (! optional_tbtab)
	return;

      /* Optional fields follow.  Some are variable length.  */

      /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
	 11 double float.  */
      /* There is an entry for each parameter in a register, in the order that
	 they occur in the parameter list.  Any intervening arguments on the
	 stack are ignored.  If the list overflows a long (max possible length
	 34 bits) then completely leave off all elements that don't fit.  */
      /* Only emit this long if there was at least one parameter.  */
      if (fixed_parms || float_parms)
	fprintf (file, "\t.long %d\n", parm_info);

      /* Offset from start of code to tb table.  */
      fputs ("\t.long ", file);
      ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
      RS6000_OUTPUT_BASENAME (file, fname);
      rs6000_output_function_entry (file, fname);

      /* Interrupt handler mask.  */
      /* Omit this long, since we never set the interrupt handler bit
	 above.  */

      /* Number of CTL (controlled storage) anchors.  */
      /* Omit this long, since the has_ctl bit is never set above.  */

      /* Displacement into stack of each CTL anchor.  */
      /* Omit this list of longs, because there are no CTL anchors.  */

      /* Length of function name.  */
      fprintf (file, "\t.short %d\n", (int) strlen (fname));

      /* Function name.  */
      assemble_string (fname, strlen (fname));

      /* Register for alloca automatic storage; this is always reg 31.
	 Only emit this if the alloca bit was set above.  */
      if (frame_pointer_needed)
	fputs ("\t.byte 31\n", file);

      fputs ("\t.align 2\n", file);
    }
}
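
/* Illustrative sketch (not part of the original sources): for a small C
   function "foo" that gets a traceback table, the directives emitted by
   rs6000_output_function_epilogue above take roughly this shape in the
   assembler output:

	LT..foo:
		.long 0			# all-zero word marking the tbtab start
		.byte 0,<lang>,<flags...>	# format type, language code, bitfield bytes
		.long <offset of LT..foo from the function entry>
		.short 3		# strlen ("foo")
		.byte "foo"		# the function name
		.byte 31		# only if the alloca/frame-pointer bit was set
		.align 2

   The concrete byte values depend on optional_tbtab, the cr/lr save flags
   and the saved-register counts computed above; the placeholders here are
   invented purely to show the layout.  */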
/* A C compound statement that outputs the assembler code for a thunk
   function, used to implement C++ virtual function calls with
   multiple inheritance.  The thunk acts as a wrapper around a virtual
   function, adjusting the implicit object parameter before handing
   control off to the real function.

   First, emit code to add the integer DELTA to the location that
   contains the incoming first argument.  Assume that this argument
   contains a pointer, and is the one used to pass the `this' pointer
   in C++.  This is the incoming argument *before* the function
   prologue, e.g. `%o0' on a sparc.  The addition must preserve the
   values of all other incoming arguments.

   After the addition, emit code to jump to FUNCTION, which is a
   `FUNCTION_DECL'.  This is a direct pure jump, not a call, and does
   not touch the return address.  Hence returning from FUNCTION will
   return to whoever called the current `thunk'.

   The effect must be as if FUNCTION had been called directly with the
   adjusted first argument.  This macro is responsible for emitting
   all of the code for a thunk function; output_function_prologue()
   and output_function_epilogue() are not invoked.

   The THUNK_FNDECL is redundant.  (DELTA and FUNCTION have already
   been extracted from it.)  It might possibly be useful on some
   targets, but probably not.

   If you do not define this macro, the target-independent code in the
   C++ frontend will generate a less efficient heavyweight thunk that
   calls FUNCTION instead of jumping to it.  The generic approach does
   not support varargs.  */
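
/* Illustrative sketch (not part of the original sources): the adjustment
   performed by the thunk emitted below corresponds roughly to this C
   pseudo-code, where DELTA and VCALL_OFFSET are the compile-time constants
   passed to rs6000_output_mi_thunk:

     adjusted_this = this + DELTA;
     if (VCALL_OFFSET != 0)
       adjusted_this += *(ptrdiff_t *) (*(char **) adjusted_this
					+ VCALL_OFFSET);
     goto FUNCTION;	(emitted as a sibling call, not a real call)

   so FUNCTION returns directly to the thunk's caller.  */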
rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
			HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
			tree function)
{
  rtx this_rtx, insn, funexp;

  reload_completed = 1;
  epilogue_completed = 1;

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in r3.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, 4);
  else
    this_rtx = gen_rtx_REG (Pmode, 3);

  /* Apply the constant offset, if required.  */
  if (delta)
    emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));

  /* Apply the offset from the vtable, if required.  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx tmp = gen_rtx_REG (Pmode, 12);

      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
      if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
	{
	  emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
	}
      else
	{
	  rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);

	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
	}
      emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (!TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);

  if (MACHOPIC_INDIRECT)
    funexp = machopic_indirect_call_target (funexp);

  /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
     generate sibcall RTL explicitly.  */
  insn = emit_call_insn (
	   gen_rtx_PARALLEL (VOIDmode,
	     gen_rtvec (4,
			gen_rtx_CALL (VOIDmode,
				      funexp, const0_rtx),
			gen_rtx_USE (VOIDmode, const0_rtx),
			gen_rtx_USE (VOIDmode,
				     gen_rtx_REG (SImode,
						  LR_REGNO)),
			simple_return_rtx)));
  SIBLING_CALL_P (insn) = 1;

  /* Ensure we have a global entry point for the thunk.  ??? We could
     avoid that if the target routine doesn't need a global entry point,
     but we do not know whether this is the case at this point.  */
  if (DEFAULT_ABI == ABI_ELFv2)
    cfun->machine->r2_setup_needed = true;

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();

  reload_completed = 0;
  epilogue_completed = 0;
}
/* A quick summary of the various types of 'constant-pool tables'
   under PowerPC:

   Target	Flags		Name		One table per
   AIX		(none)		AIX TOC		object file
   AIX		-mfull-toc	AIX TOC		object file
   AIX		-mminimal-toc	AIX minimal TOC	translation unit
   SVR4/EABI	(none)		SVR4 SDATA	object file
   SVR4/EABI	-fpic		SVR4 pic	object file
   SVR4/EABI	-fPIC		SVR4 PIC	translation unit
   SVR4/EABI	-mrelocatable	EABI TOC	function
   SVR4/EABI	-maix		AIX TOC		object file
   SVR4/EABI	-maix -mminimal-toc
				AIX minimal TOC	translation unit

   Name			Reg.	Set by	entries	contains:
				made by	addrs?	fp?	sum?

   AIX TOC		2	crt0	as	Y	option	option
   AIX minimal TOC	30	prolog	gcc	Y	Y	option
   SVR4 SDATA		13	crt0	gcc	N	Y	N
   SVR4 pic		30	prolog	ld	Y	not yet	N
   SVR4 PIC		30	prolog	gcc	Y	option	option
   EABI TOC		30	prolog	gcc	Y	option	option  */
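
/* Illustrative note (not part of the original sources): with an AIX-style
   TOC, a pooled constant or global address is reached through a single load
   relative to the table register listed above, e.g. roughly

	lwz 9,LC..0(2)		# r2 is the TOC pointer set up by crt0

   whereas the SVR4 -fPIC scheme performs the equivalent load through r30,
   which the function prologue initialises.  The mnemonic and label spelling
   here are made up purely for illustration.  */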
/* Hash functions for the hash table.  */

rs6000_hash_constant (rtx k)
{
  enum rtx_code code = GET_CODE (k);
  enum machine_mode mode = GET_MODE (k);
  unsigned result = (code << 3) ^ mode;
  const char *format;

  format = GET_RTX_FORMAT (code);
  flen = strlen (format);

      return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));

      if (mode != VOIDmode)
	return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;

  for (; fidx < flen; fidx++)
    switch (format[fidx])
	  const char *str = XSTR (k, fidx);
	  len = strlen (str);
	  result = result * 613 + len;
	  for (i = 0; i < len; i++)
	    result = result * 613 + (unsigned) str[i];

	result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));

	result = result * 613 + (unsigned) XINT (k, fidx);

	if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
	  result = result * 613 + (unsigned) XWINT (k, fidx);
	else
	  for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
	    result = result * 613 + (unsigned) (XWINT (k, fidx)

	gcc_unreachable ();
}

toc_hash_function (const void *hash_entry)
{
  const struct toc_hash_struct *thc =
    (const struct toc_hash_struct *) hash_entry;
  return rs6000_hash_constant (thc->key) ^ thc->key_mode;
}

/* Compare H1 and H2 for equivalence.  */

toc_hash_eq (const void *h1, const void *h2)
{
  rtx r1 = ((const struct toc_hash_struct *) h1)->key;
  rtx r2 = ((const struct toc_hash_struct *) h2)->key;

  if (((const struct toc_hash_struct *) h1)->key_mode
      != ((const struct toc_hash_struct *) h2)->key_mode)

  return rtx_equal_p (r1, r2);
}
/* These are the names given by the C++ front-end to vtables, and
   vtable-like objects.  Ideally, this logic should not be here;
   instead, there should be some programmatic way of inquiring as
   to whether or not an object is a vtable.  */

#define VTABLE_NAME_P(NAME)				\
  (strncmp ("_vt.", name, strlen ("_vt.")) == 0		\
   || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0	\
   || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0	\
   || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0	\
   || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)

#ifdef NO_DOLLAR_IN_LABEL
/* Return a GGC-allocated character string translating dollar signs in
   input NAME to underscores.  Used by XCOFF ASM_OUTPUT_LABELREF.  */

rs6000_xcoff_strip_dollar (const char *name)
{
  q = (const char *) strchr (name, '$');

  if (q == 0 || q == name)

  len = strlen (name);
  strip = XALLOCAVEC (char, len + 1);
  strcpy (strip, name);
  p = strip + (q - name);
      p = strchr (p + 1, '$');

  return ggc_alloc_string (strip, len);
}
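
/* Worked example (illustrative, not from the original sources): on a
   target that defines NO_DOLLAR_IN_LABEL, a name such as "f$o$o" comes
   back from rs6000_xcoff_strip_dollar as "f_o_o", while a name whose
   first '$' is its first character, or that contains no '$' at all, is
   returned unchanged.  */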
rs6000_output_symbol_ref (FILE *file, rtx x)
{
  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  const char *name = XSTR (x, 0);

  if (VTABLE_NAME_P (name))
      RS6000_OUTPUT_BASENAME (file, name);
  else
    assemble_name (file, name);
}
/* Output a TOC entry.  We derive the entry name from what is being
   written.  */

output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
{
  const char *name = buf;
  HOST_WIDE_INT offset = 0;

  gcc_assert (!TARGET_NO_TOC);

  /* When the linker won't eliminate them, don't output duplicate
     TOC entries (this happens on AIX if there is any kind of TOC,
     and on SVR4 under -fPIC or -mrelocatable).  Don't do this for
     labels.  */
  if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
    {
      struct toc_hash_struct *h;

      /* Create toc_hash_table.  This can't be done at TARGET_OPTION_OVERRIDE
	 time because GGC is not initialized at that point.  */
      if (toc_hash_table == NULL)
	toc_hash_table = htab_create_ggc (1021, toc_hash_function,
					  toc_hash_eq, NULL);

      h = ggc_alloc_toc_hash_struct ();
      h->key_mode = mode;
      h->labelno = labelno;

      found = htab_find_slot (toc_hash_table, h, INSERT);
      if (*found == NULL)
	*found = h;
      else  /* This is indeed a duplicate.
	       Set this label equal to that label.  */
	{
	  fputs ("\t.set ", file);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d,", labelno);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
				   found)->labelno));

	  if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
	      && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
		  || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
	    {
	      fputs ("\t.set ", file);
	      ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
	      fprintf (file, "%d,", labelno);
	      ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
	      fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
				       found)->labelno));
	    }
	}
    }

  /* If we're going to put a double constant in the TOC, make sure it's
     aligned properly when strict alignment is on.  */
  if (GET_CODE (x) == CONST_DOUBLE
      && STRICT_ALIGNMENT
      && GET_MODE_BITSIZE (mode) >= 64
      && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
    ASM_OUTPUT_ALIGN (file, 3);
  }

  (*targetm.asm_out.internal_label) (file, "LC", labelno);

  /* Handle FP constants specially.  Note that if we have a minimal
     TOC, things we put here aren't actually in the TOC, so we can allow
     FP constants.  */
  if (GET_CODE (x) == CONST_DOUBLE &&
      (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
    {
      REAL_VALUE_TYPE rv;

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
      else
	REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);

	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff,
		     k[2] & 0xffffffff, k[3] & 0xffffffff);
	  fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
		   k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
		   k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
		   k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
		   k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);

	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff,
		     k[2] & 0xffffffff, k[3] & 0xffffffff);
	  fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff,
		   k[2] & 0xffffffff, k[3] & 0xffffffff);
    }
  else if (GET_CODE (x) == CONST_DOUBLE &&
	   (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
    {
      REAL_VALUE_TYPE rv;

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);

      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
      else
	REAL_VALUE_TO_TARGET_DOUBLE (rv, k);

	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FD_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff);
	  fprintf (file, "0x%lx%08lx\n",
		   k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
		   k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);

	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FD_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff);
	  fprintf (file, "0x%lx,0x%lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff);
    }
  else if (GET_CODE (x) == CONST_DOUBLE &&
	   (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
    {
      REAL_VALUE_TYPE rv;

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
      else
	REAL_VALUE_TO_TARGET_SINGLE (rv, l);

	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
	  if (WORDS_BIG_ENDIAN)
	    fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
	  else
	    fprintf (file, "0x%lx\n", l & 0xffffffff);

	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
	  fprintf (file, "0x%lx\n", l & 0xffffffff);
    }
  else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
    {
      unsigned HOST_WIDE_INT low;
      HOST_WIDE_INT high;

      low = INTVAL (x) & 0xffffffff;
      high = (HOST_WIDE_INT) INTVAL (x) >> 32;

      /* TOC entries are always Pmode-sized, so when big-endian
	 smaller integer constants in the TOC need to be padded.
	 (This is still a win over putting the constants in
	 a separate constant pool, because then we'd have
	 to have both a TOC entry _and_ the actual constant.)

	 For a 32-bit target, CONST_INT values are loaded and shifted
	 entirely within `low' and can be stored in one TOC entry.  */

      /* It would be easy to make this work, but it doesn't now.  */
      gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));

      if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
	{
	  low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
	  high = (HOST_WIDE_INT) low >> 32;
	}

	  if (TARGET_ELF || TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc ID_%lx_%lx[TC],",
		     (long) high & 0xffffffff, (long) low & 0xffffffff);
	  fprintf (file, "0x%lx%08lx\n",
		   (long) high & 0xffffffff, (long) low & 0xffffffff);

	  if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
	    {
	      if (TARGET_ELF || TARGET_MINIMAL_TOC)
		fputs ("\t.long ", file);
	      else
		fprintf (file, "\t.tc ID_%lx_%lx[TC],",
			 (long) high & 0xffffffff, (long) low & 0xffffffff);
	      fprintf (file, "0x%lx,0x%lx\n",
		       (long) high & 0xffffffff, (long) low & 0xffffffff);
	    }
	  else
	    {
	      if (TARGET_ELF || TARGET_MINIMAL_TOC)
		fputs ("\t.long ", file);
	      else
		fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
	      fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
	    }
    }

  if (GET_CODE (x) == CONST)
    {
      gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
		  && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);

      base = XEXP (XEXP (x, 0), 0);
      offset = INTVAL (XEXP (XEXP (x, 0), 1));
    }

  switch (GET_CODE (base))
      name = XSTR (base, 0);

      ASM_GENERATE_INTERNAL_LABEL (buf, "L",
				   CODE_LABEL_NUMBER (XEXP (base, 0)));

      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));

      gcc_unreachable ();

  if (TARGET_ELF || TARGET_MINIMAL_TOC)
    fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
  else
    {
      fputs ("\t.tc ", file);
      RS6000_OUTPUT_BASENAME (file, name);

      if (offset < 0)
	fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
      else if (offset)
	fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);

      /* Mark large TOC symbols on AIX with [TE] so they are mapped
	 after other TOC symbols, reducing overflow of small TOC access
	 to [TC] symbols.  */
      fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
	     ? "[TE]," : "[TC],", file);
    }

  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a TOC reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  if (VTABLE_NAME_P (name))
    {
      RS6000_OUTPUT_BASENAME (file, name);
      if (offset < 0)
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
      else if (offset > 0)
	fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
    }
  else
    output_addr_const (file, x);

  if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF
      && SYMBOL_REF_TLS_MODEL (base) != 0)
    {
      if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_EXEC)
	fputs ("@le", file);
      else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_INITIAL_EXEC)
	fputs ("@ie", file);
      /* Use global-dynamic for local-dynamic.  */
      else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_GLOBAL_DYNAMIC
	       || SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_DYNAMIC)
	{
	  (*targetm.asm_out.internal_label) (file, "LCM", labelno);
	  fputs ("\t.tc .", file);
	  RS6000_OUTPUT_BASENAME (file, name);
	  fputs ("[TC],", file);
	  output_addr_const (file, x);
	  fputs ("@m", file);
	}
    }
}
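
/* Worked example (illustrative): for the DFmode constant 1.0, whose image
   is 0x3ff00000 00000000, the code above produces roughly

	.tc FD_3ff00000_0[TC],0x3ff0000000000000

   for an AIX-style full TOC, and DOUBLE_INT_ASM_OP followed by the same
   0x3ff0000000000000 value on 64-bit ELF or with -mminimal-toc.  The label
   spelling in front of the entry comes from the internal_label hook and is
   not shown here.  */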
/* Output an assembler pseudo-op to write an ASCII string of N characters
   starting at P to FILE.

   On the RS/6000, we have to do this using the .byte operation and
   write out special characters outside the quoted string.
   Also, the assembler is broken; very long strings are truncated,
   so we must artificially break them up early.  */

output_ascii (FILE *file, const char *p, int n)
{
  int i, count_string;
  const char *for_string = "\t.byte \"";
  const char *for_decimal = "\t.byte ";
  const char *to_close = NULL;

  for (i = 0; i < n; i++)
    {
      if (c >= ' ' && c < 0177)
	{
	  if (for_string)
	    fputs (for_string, file);

	  /* Write two quotes to get one.  */

	  for_decimal = "\"\n\t.byte ";

	  if (count_string >= 512)
	    {
	      fputs (to_close, file);

	      for_string = "\t.byte \"";
	      for_decimal = "\t.byte ";
	    }
	}
      else
	{
	  if (for_decimal)
	    fputs (for_decimal, file);
	  fprintf (file, "%d", c);

	  for_string = "\n\t.byte \"";
	  for_decimal = ", ";
	}
    }

  /* Now close the string if we have written one.  Then end the line.  */
  if (to_close)
    fputs (to_close, file);
}
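
/* Worked example (illustrative): called with the three bytes "Hi\n",
   output_ascii above writes the printable characters inside a quoted
   .byte string and the newline as a decimal byte outside it, i.e.
   roughly

	.byte "Hi"
	.byte 10

   in the assembler output.  */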
/* Generate a unique section name for FILENAME for a section type
   represented by SECTION_DESC.  Output goes into BUF.

   SECTION_DESC can be any string, as long as it is different for each
   possible section type.

   We name the section in the same manner as xlc.  The name begins with an
   underscore followed by the filename (after stripping any leading directory
   names) with the last period replaced by the string SECTION_DESC.  If
   FILENAME does not contain a period, SECTION_DESC is appended to the end of
   the name.  */

rs6000_gen_section_name (char **buf, const char *filename,
			 const char *section_desc)
{
  const char *q, *after_last_slash, *last_period = 0;

  after_last_slash = filename;
  for (q = filename; *q; q++)
    {
      if (*q == '/')
	after_last_slash = q + 1;
      else if (*q == '.')
    }

  len = strlen (after_last_slash) + strlen (section_desc) + 2;
  *buf = (char *) xmalloc (len);

  for (q = after_last_slash; *q; q++)
    {
      if (q == last_period)
	{
	  strcpy (p, section_desc);
	  p += strlen (section_desc);
	}

      else if (ISALNUM (*q))
    }

  if (last_period == 0)
    strcpy (p, section_desc);
}
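
/* Worked example (illustrative): with FILENAME "src/foo.c" and a
   hypothetical SECTION_DESC of ".ro_", the routine above strips the
   directory part, prepends an underscore and replaces everything from the
   final period onward, producing roughly "_foo.ro_".  The SECTION_DESC
   strings actually used by callers are not shown here.  */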
/* Emit profile function.  */

output_profile_hook (int labelno ATTRIBUTE_UNUSED)
{
  /* Non-standard profiling for kernels, which just saves LR then calls
     _mcount without worrying about arg saves.  The idea is to change
     the function prologue as little as possible as it isn't easy to
     account for arg save/restore code added just for _mcount.  */
  if (TARGET_PROFILE_KERNEL)

  if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    {
#ifndef NO_PROFILE_COUNTERS
# define NO_PROFILE_COUNTERS 0
#endif
      if (NO_PROFILE_COUNTERS)
	emit_library_call (init_one_libfunc (RS6000_MCOUNT),
			   LCT_NORMAL, VOIDmode, 0);
      else
	{
	  const char *label_name;

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
	  label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
	  fun = gen_rtx_SYMBOL_REF (Pmode, label_name);

	  emit_library_call (init_one_libfunc (RS6000_MCOUNT),
			     LCT_NORMAL, VOIDmode, 1, fun, Pmode);
	}
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      const char *mcount_name = RS6000_MCOUNT;
      int caller_addr_regno = LR_REGNO;

      /* Be conservative and always set this, at least for now.  */
      crtl->uses_pic_offset_table = 1;

      /* For PIC code, set up a stub and collect the caller's address
	 from r0, which is where the prologue puts it.  */
      if (MACHOPIC_INDIRECT
	  && crtl->uses_pic_offset_table)
	caller_addr_regno = 0;

      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
			 LCT_NORMAL, VOIDmode, 1,
			 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
    }
}

/* Write function profiler code.  */

output_function_profiler (FILE *file, int labelno)
{
  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

      warning (0, "no profiling of 64-bit code for this ABI");

      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
      fprintf (file, "\tmflr %s\n", reg_names[0]);
      if (NO_PROFILE_COUNTERS)
	{
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	}
      else if (TARGET_SECURE_PLT && flag_pic)
	{
	  if (TARGET_LINK_STACK)
	    {
	      get_ppc476_thunk_name (name);
	      asm_fprintf (file, "\tbl %s\n", name);
	    }
	  else
	    asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
	  asm_fprintf (file, "\taddis %s,%s,",
		       reg_names[12], reg_names[12]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
	}
      else if (flag_pic == 1)
	{
	  fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
	  asm_fprintf (file, "\tlwz %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "@got(%s)\n", reg_names[12]);
	}
      else if (flag_pic > 1)
	{
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  /* Now, we need to get the address of the label.  */
	  if (TARGET_LINK_STACK)
	    {
	      get_ppc476_thunk_name (name);
	      asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
	      assemble_name (file, buf);
	      fputs ("-.\n1:", file);
	      asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
	      asm_fprintf (file, "\taddi %s,%s,4\n",
			   reg_names[11], reg_names[11]);
	    }
	  else
	    {
	      fputs ("\tbcl 20,31,1f\n\t.long ", file);
	      assemble_name (file, buf);
	      fputs ("-.\n1:", file);
	      asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
	    }
	  asm_fprintf (file, "\tlwz %s,0(%s)\n",
		       reg_names[0], reg_names[11]);
	  asm_fprintf (file, "\tadd %s,%s,%s\n",
		       reg_names[0], reg_names[0], reg_names[11]);
	}
      else
	{
	  asm_fprintf (file, "\tlis %s,", reg_names[12]);
	  assemble_name (file, buf);
	  fputs ("@ha\n", file);
	  asm_fprintf (file, "\tstw %s,4(%s)\n",
		       reg_names[0], reg_names[1]);
	  asm_fprintf (file, "\tla %s,", reg_names[0]);
	  assemble_name (file, buf);
	  asm_fprintf (file, "@l(%s)\n", reg_names[12]);
	}

      /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH.  */
      fprintf (file, "\tbl %s%s\n",
	       RS6000_MCOUNT, flag_pic ? "@plt" : "");

      /* Don't do anything, done in output_profile_hook ().  */
    }
}
/* The following variable value is the last issued insn.  */

static rtx last_scheduled_insn;

/* The following variable helps to balance issuing of load and
   store instructions */

static int load_store_pendulum;

/* Power4 load update and store update instructions are cracked into a
   load or store and an integer insn which are executed in the same cycle.
   Branches have their own dispatch slot which does not count against the
   GCC issue rate, but it changes the program flow so there are no other
   instructions to issue in this cycle.  */

rs6000_variable_issue_1 (rtx insn, int more)
{
  last_scheduled_insn = insn;
  if (GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    {
      cached_can_issue_more = more;
      return cached_can_issue_more;
    }

  if (insn_terminates_group_p (insn, current_group))
    {
      cached_can_issue_more = 0;
      return cached_can_issue_more;
    }

  /* If no reservation, but reach here */
  if (recog_memoized (insn) < 0)

  if (rs6000_sched_groups)
    {
      if (is_microcoded_insn (insn))
	cached_can_issue_more = 0;
      else if (is_cracked_insn (insn))
	cached_can_issue_more = more > 2 ? more - 2 : 0;

	cached_can_issue_more = more - 1;

      return cached_can_issue_more;
    }

  if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))

  cached_can_issue_more = more - 1;
  return cached_can_issue_more;
}

rs6000_variable_issue (FILE *stream, int verbose, rtx insn, int more)
{
  int r = rs6000_variable_issue_1 (insn, more);

    fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
}
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type attr_type;

  if (! recog_memoized (insn))

  switch (REG_NOTE_KIND (link))
    {
      /* Data dependency; DEP_INSN writes a register that INSN reads
	 some cycles later.  */

      /* Separate a load from a narrower, dependent store.  */
      if (rs6000_sched_groups
	  && GET_CODE (PATTERN (insn)) == SET
	  && GET_CODE (PATTERN (dep_insn)) == SET
	  && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
	  && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
	  && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
	      > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))

      attr_type = get_attr_type (insn);

	  /* Tell the first scheduling pass about the latency between
	     a mtctr and bctr (and mtlr and br/blr).  The first
	     scheduling pass will not know about this latency since
	     the mtctr instruction, which has the latency associated
	     to it, will be generated by reload.  */

	  /* Leave some extra cycles between a compare and its
	     dependent branch, to inhibit expensive mispredicts.  */
	  if ((rs6000_cpu_attr == CPU_PPC603
	       || rs6000_cpu_attr == CPU_PPC604
	       || rs6000_cpu_attr == CPU_PPC604E
	       || rs6000_cpu_attr == CPU_PPC620
	       || rs6000_cpu_attr == CPU_PPC630
	       || rs6000_cpu_attr == CPU_PPC750
	       || rs6000_cpu_attr == CPU_PPC7400
	       || rs6000_cpu_attr == CPU_PPC7450
	       || rs6000_cpu_attr == CPU_PPCE5500
	       || rs6000_cpu_attr == CPU_PPCE6500
	       || rs6000_cpu_attr == CPU_POWER4
	       || rs6000_cpu_attr == CPU_POWER5
	       || rs6000_cpu_attr == CPU_POWER7
	       || rs6000_cpu_attr == CPU_POWER8
	       || rs6000_cpu_attr == CPU_CELL)
	      && recog_memoized (dep_insn)
	      && (INSN_CODE (dep_insn) >= 0))

	    switch (get_attr_type (dep_insn))
	      {
	      case TYPE_DELAYED_COMPARE:
	      case TYPE_IMUL_COMPARE:
	      case TYPE_LMUL_COMPARE:
	      case TYPE_FPCOMPARE:
	      case TYPE_CR_LOGICAL:
	      case TYPE_DELAYED_CR:
	      }

	case TYPE_STORE_UX:
	case TYPE_FPSTORE_U:
	case TYPE_FPSTORE_UX:
	  if ((rs6000_cpu == PROCESSOR_POWER6)
	      && recog_memoized (dep_insn)
	      && (INSN_CODE (dep_insn) >= 0))
	    {
	      if (GET_CODE (PATTERN (insn)) != SET)
		/* If this happens, we have to extend this to schedule
		   optimally.  Return default for now.  */

	      /* Adjust the cost for the case where the value written
		 by a fixed point operation is used as the address
		 gen value on a store.  */
	      switch (get_attr_type (dep_insn))
		{
		  if (! store_data_bypass_p (dep_insn, insn))

		case TYPE_LOAD_EXT:
		case TYPE_LOAD_EXT_U:
		case TYPE_LOAD_EXT_UX:
		case TYPE_VAR_SHIFT_ROTATE:
		case TYPE_VAR_DELAYED_COMPARE:
		  if (! store_data_bypass_p (dep_insn, insn))

		case TYPE_FAST_COMPARE:
		case TYPE_INSERT_WORD:
		case TYPE_INSERT_DWORD:
		case TYPE_FPLOAD_U:
		case TYPE_FPLOAD_UX:
		case TYPE_STORE_UX:
		case TYPE_FPSTORE_U:
		case TYPE_FPSTORE_UX:
		  if (! store_data_bypass_p (dep_insn, insn))

		case TYPE_IMUL_COMPARE:
		case TYPE_LMUL_COMPARE:
		  if (! store_data_bypass_p (dep_insn, insn))

		  if (! store_data_bypass_p (dep_insn, insn))

		  if (! store_data_bypass_p (dep_insn, insn))
		}
	    }

	case TYPE_LOAD_EXT:
	case TYPE_LOAD_EXT_U:
	case TYPE_LOAD_EXT_UX:
	  if ((rs6000_cpu == PROCESSOR_POWER6)
	      && recog_memoized (dep_insn)
	      && (INSN_CODE (dep_insn) >= 0))
	    {
	      /* Adjust the cost for the case where the value written
		 by a fixed point instruction is used within the address
		 gen portion of a subsequent load(u)(x) */
	      switch (get_attr_type (dep_insn))
		{
		  if (set_to_load_agen (dep_insn, insn))

		case TYPE_LOAD_EXT:
		case TYPE_LOAD_EXT_U:
		case TYPE_LOAD_EXT_UX:
		case TYPE_VAR_SHIFT_ROTATE:
		case TYPE_VAR_DELAYED_COMPARE:
		  if (set_to_load_agen (dep_insn, insn))

		case TYPE_FAST_COMPARE:
		case TYPE_INSERT_WORD:
		case TYPE_INSERT_DWORD:
		case TYPE_FPLOAD_U:
		case TYPE_FPLOAD_UX:
		case TYPE_STORE_UX:
		case TYPE_FPSTORE_U:
		case TYPE_FPSTORE_UX:
		  if (set_to_load_agen (dep_insn, insn))

		case TYPE_IMUL_COMPARE:
		case TYPE_LMUL_COMPARE:
		  if (set_to_load_agen (dep_insn, insn))

		  if (set_to_load_agen (dep_insn, insn))

		  if (set_to_load_agen (dep_insn, insn))
		}
	    }

	  if ((rs6000_cpu == PROCESSOR_POWER6)
	      && recog_memoized (dep_insn)
	      && (INSN_CODE (dep_insn) >= 0)
	      && (get_attr_type (dep_insn) == TYPE_MFFGPR))

	  /* Fall out to return default cost.  */

    case REG_DEP_OUTPUT:
      /* Output dependency; DEP_INSN writes a register that INSN writes some
	 cycles later.  */
      if ((rs6000_cpu == PROCESSOR_POWER6)
	  && recog_memoized (dep_insn)
	  && (INSN_CODE (dep_insn) >= 0))
	{
	  attr_type = get_attr_type (insn);

	    if (get_attr_type (dep_insn) == TYPE_FP)

	    if (get_attr_type (dep_insn) == TYPE_MFFGPR)
	}

      /* Anti dependency; DEP_INSN reads a register that INSN writes some
	 cycles later.  */

    default:
      gcc_unreachable ();
    }
}

/* Debug version of rs6000_adjust_cost.  */

rs6000_debug_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);

      switch (REG_NOTE_KIND (link))
	{
	default:	     dep = "unknown dependency";  break;
	case REG_DEP_TRUE:   dep = "data dependency";	  break;
	case REG_DEP_OUTPUT: dep = "output dependency";	  break;
	case REG_DEP_ANTI:   dep = "anti dependency";	  break;
	}

	       "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
	       "%s, insn:\n", ret, cost, dep);
}
/* The function returns true if INSN is microcoded.
   Return false otherwise.  */

is_microcoded_insn (rtx insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)

  if (rs6000_cpu_attr == CPU_CELL)
    return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;

  if (rs6000_sched_groups
      && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_LOAD_EXT_U
	  || type == TYPE_LOAD_EXT_UX
	  || type == TYPE_LOAD_UX
	  || type == TYPE_STORE_UX
	  || type == TYPE_MFCR)
    }
}

/* The function returns true if INSN is cracked into 2 instructions
   by the processor (and therefore occupies 2 issue slots).  */

is_cracked_insn (rtx insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)

  if (rs6000_sched_groups
      && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_LOAD_U || type == TYPE_STORE_U
	  || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
	  || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
	  || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
	  || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
	  || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
	  || type == TYPE_IDIV || type == TYPE_LDIV
	  || type == TYPE_INSERT_WORD)
    }
}

/* The function returns true if INSN can be issued only from
   the branch slot.  */

is_branch_slot_insn (rtx insn)
{
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_BRANCH || type == TYPE_JMPREG)
    }
}
/* The function returns true if out_insn sets a value that is
   used in the address generation computation of in_insn */

set_to_load_agen (rtx out_insn, rtx in_insn)
{
  rtx out_set, in_set;

  /* For performance reasons, only handle the simple case where
     both loads are a single_set.  */
  out_set = single_set (out_insn);

      in_set = single_set (in_insn);

	return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
}

/* Try to determine base/offset/size parts of the given MEM.
   Return true if successful, false if all the values couldn't
   be determined.

   This function only looks for REG or REG+CONST address forms.
   REG+REG address form will return false.  */

get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
		  HOST_WIDE_INT *size)
{
  if (MEM_SIZE_KNOWN_P (mem))
    *size = MEM_SIZE (mem);

  if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
    addr_rtx = XEXP (XEXP (mem, 0), 1);
  else
    addr_rtx = (XEXP (mem, 0));

  if (GET_CODE (addr_rtx) == REG)

  else if (GET_CODE (addr_rtx) == PLUS
	   && CONST_INT_P (XEXP (addr_rtx, 1)))
    {
      *base = XEXP (addr_rtx, 0);
      *offset = INTVAL (XEXP (addr_rtx, 1));
    }
}

/* The function returns true if the target storage location of
   mem1 is adjacent to the target storage location of mem2 */

/* Return 1 if memory locations are adjacent.  */

adjacent_mem_locations (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
	    && ((off1 + size1 == off2)
		|| (off2 + size2 == off1)));
}

/* This function returns true if it can be determined that the two MEM
   locations overlap by at least 1 byte based on base reg/offset/size.  */

mem_locations_overlap (rtx mem1, rtx mem2)
{
  rtx reg1, reg2;
  HOST_WIDE_INT off1, size1, off2, size2;

  if (get_memref_parts (mem1, &reg1, &off1, &size1)
      && get_memref_parts (mem2, &reg2, &off2, &size2))
    return ((REGNO (reg1) == REGNO (reg2))
	    && (((off1 <= off2) && (off1 + size1 > off2))
		|| ((off2 <= off1) && (off2 + size2 > off1))));
}
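
/* Worked example (illustrative): two 4-byte loads from 16(r9) and 20(r9)
   decompose via get_memref_parts into the same base register with
   (offset, size) pairs (16, 4) and (20, 4).  Since 16 + 4 == 20,
   adjacent_mem_locations above reports them as adjacent, while
   mem_locations_overlap reports no overlap because neither byte range
   intersects the other.  */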
/* A C statement (sans semicolon) to update the integer scheduling
   priority INSN_PRIORITY (INSN).  Increase the priority to execute the
   INSN earlier, reduce the priority to execute INSN later.  Do not
   define this macro if you do not need to adjust the scheduling
   priorities of insns.  */

static int
rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
{
  rtx load_mem, str_mem;
  /* On machines (like the 750) which have asymmetric integer units,
     where one integer unit can do multiply and divides and the other
     can't, reduce the priority of multiply/divide so it is scheduled
     before other integer operations.  */

#if 0
  if (! INSN_P (insn))
    return priority;

  if (GET_CODE (PATTERN (insn)) == USE)
    return priority;

  switch (rs6000_cpu_attr) {
  case CPU_PPC750:
    switch (get_attr_type (insn))
      {
      default:
        break;

      case TYPE_IMUL:
      case TYPE_IDIV:
        fprintf (stderr, "priority was %#x (%d) before adjustment\n",
                 priority, priority);
        if (priority >= 0 && priority < 0x01000000)
          priority >>= 3;
        break;
      }
  }
#endif

  if (insn_must_be_first_in_group (insn)
      && reload_completed
      && current_sched_info->sched_max_insns_priority
      && rs6000_sched_restricted_insns_priority)
    {
      /* Prioritize insns that can be dispatched only in the first
         dispatch slot.  */
      if (rs6000_sched_restricted_insns_priority == 1)
        /* Attach highest priority to insn.  This means that in
           haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
           precede 'priority' (critical path) considerations.  */
        return current_sched_info->sched_max_insns_priority;
      else if (rs6000_sched_restricted_insns_priority == 2)
        /* Increase priority of insn by a minimal amount.  This means that in
           haifa-sched.c:ready_sort(), only 'priority' (critical path)
           considerations precede dispatch-slot restriction considerations.  */
        return (priority + 1);
    }

  if (rs6000_cpu == PROCESSOR_POWER6
      && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
          || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
    /* Attach highest priority to insn if the scheduler has just issued two
       stores and this instruction is a load, or two loads and this instruction
       is a store.  Power6 wants loads and stores scheduled alternately
       when possible.  */
    return current_sched_info->sched_max_insns_priority;

  return priority;
}
/* Return true if the instruction is nonpipelined on the Cell.  */
static bool
is_nonpipeline_insn (rtx insn)
{
  enum attr_type type;
  if (!insn || !NONDEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  type = get_attr_type (insn);
  if (type == TYPE_IMUL
      || type == TYPE_IMUL2
      || type == TYPE_IMUL3
      || type == TYPE_LMUL
      || type == TYPE_IDIV
      || type == TYPE_LDIV
      || type == TYPE_SDIV
      || type == TYPE_DDIV
      || type == TYPE_SSQRT
      || type == TYPE_DSQRT
      || type == TYPE_MFCR
      || type == TYPE_MFCRF
      || type == TYPE_MFJMPR)
    return true;

  return false;
}
/* Return how many instructions the machine can issue per cycle.  */

static int
rs6000_issue_rate (void)
{
  /* Unless scheduling for register pressure, use issue rate of 1 for
     first scheduling pass to decrease degradation.  */
  if (!reload_completed && !flag_sched_pressure)
    return 1;

  switch (rs6000_cpu_attr) {
  case CPU_PPC601: /* ? */
    return 3;
  case CPU_PPCE300C2:
  case CPU_PPCE300C3:
  case CPU_PPCE500MC:
  case CPU_PPCE500MC64:
    return 2;
  default:
    return 1;
  }
}
/* Return how many instructions to look ahead for better insn
   scheduling.  */

static int
rs6000_use_sched_lookahead (void)
{
  switch (rs6000_cpu_attr)
    {
    case CPU_CELL:
      return (reload_completed ? 8 : 0);

    default:
      return 0;
    }
}
/* We are choosing insn from the ready queue.  Return nonzero if INSN can be
   chosen.  */
static int
rs6000_use_sched_lookahead_guard (rtx insn)
{
  if (rs6000_cpu_attr != CPU_CELL)
    return 1;

  if (insn == NULL_RTX || !INSN_P (insn))
    abort ();

  if (!reload_completed
      || is_nonpipeline_insn (insn)
      || is_microcoded_insn (insn))
    return 0;

  return 1;
}
/* Determine if PAT refers to memory.  If so, set MEM_REF to the MEM rtx
   and return true.  */

static bool
find_mem_ref (rtx pat, rtx *mem_ref)
{
  const char *fmt;
  int i, j;

  /* stack_tie does not produce any real memory traffic.  */
  if (tie_operand (pat, VOIDmode))
    return false;

  if (GET_CODE (pat) == MEM)
    {
      *mem_ref = pat;
      return true;
    }

  /* Recursively process the pattern.  */
  fmt = GET_RTX_FORMAT (GET_CODE (pat));

  for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (find_mem_ref (XEXP (pat, i), mem_ref))
            return true;
        }
      else if (fmt[i] == 'E')
        for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
          {
            if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
              return true;
          }
    }

  return false;
}
/* Determine if PAT is a PATTERN of a load insn.  */

static bool
is_load_insn1 (rtx pat, rtx *load_mem)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return find_mem_ref (SET_SRC (pat), load_mem);

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
        if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
          return true;
    }

  return false;
}

/* Determine if INSN loads from memory.  */

static bool
is_load_insn (rtx insn, rtx *load_mem)
{
  if (!insn || !INSN_P (insn))
    return false;

  return is_load_insn1 (PATTERN (insn), load_mem);
}

/* Determine if PAT is a PATTERN of a store insn.  */

static bool
is_store_insn1 (rtx pat, rtx *str_mem)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return find_mem_ref (SET_DEST (pat), str_mem);

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
        if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
          return true;
    }

  return false;
}

/* Determine if INSN stores to memory.  */

static bool
is_store_insn (rtx insn, rtx *str_mem)
{
  if (!insn || !INSN_P (insn))
    return false;

  return is_store_insn1 (PATTERN (insn), str_mem);
}
/* Returns whether the dependence between INSN and NEXT is considered
   costly by the given target.  */

static bool
rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
{
  rtx insn;
  rtx next;
  rtx load_mem, str_mem;

  /* If the flag is not enabled - no dependence is considered costly;
     allow all dependent insns in the same group.
     This is the most aggressive option.  */
  if (rs6000_sched_costly_dep == no_dep_costly)
    return false;

  /* If the flag is set to 1 - a dependence is always considered costly;
     do not allow dependent instructions in the same group.
     This is the most conservative option.  */
  if (rs6000_sched_costly_dep == all_deps_costly)
    return true;

  insn = DEP_PRO (dep);
  next = DEP_CON (dep);

  if (rs6000_sched_costly_dep == store_to_load_dep_costly
      && is_load_insn (next, &load_mem)
      && is_store_insn (insn, &str_mem))
    /* Prevent load after store in the same group.  */
    return true;

  if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
      && is_load_insn (next, &load_mem)
      && is_store_insn (insn, &str_mem)
      && DEP_TYPE (dep) == REG_DEP_TRUE
      && mem_locations_overlap (str_mem, load_mem))
    /* Prevent load after store in the same group if it is a true
       dependence.  */
    return true;

  /* The flag is set to X; dependences with latency >= X are considered costly,
     and will not be scheduled in the same group.  */
  if (rs6000_sched_costly_dep <= max_dep_latency
      && ((cost - distance) >= (int) rs6000_sched_costly_dep))
    return true;

  return false;
}
/* Return the next insn after INSN that is found before TAIL is reached,
   skipping any "non-active" insns - insns that will not actually occupy
   an issue slot.  Return NULL_RTX if such an insn is not found.  */

static rtx
get_next_active_insn (rtx insn, rtx tail)
{
  if (insn == NULL_RTX || insn == tail)
    return NULL_RTX;

  while (1)
    {
      insn = NEXT_INSN (insn);
      if (insn == NULL_RTX || insn == tail)
        return NULL_RTX;

      if (CALL_P (insn)
          || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
          || (NONJUMP_INSN_P (insn)
              && GET_CODE (PATTERN (insn)) != USE
              && GET_CODE (PATTERN (insn)) != CLOBBER
              && INSN_CODE (insn) != CODE_FOR_stack_tie))
        break;
    }
  return insn;
}
/* We are about to begin issuing insns for this clock cycle.  */

static int
rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
                      rtx *ready ATTRIBUTE_UNUSED,
                      int *pn_ready ATTRIBUTE_UNUSED,
                      int clock_var ATTRIBUTE_UNUSED)
{
  int n_ready = *pn_ready;

  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder :\n");

  /* Reorder the ready list, if the second to last ready insn
     is a non-pipelined insn.  */
  if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
    {
      if (is_nonpipeline_insn (ready[n_ready - 1])
          && (recog_memoized (ready[n_ready - 2]) > 0))
        /* Simply swap first two insns.  */
        {
          rtx tmp = ready[n_ready - 1];
          ready[n_ready - 1] = ready[n_ready - 2];
          ready[n_ready - 2] = tmp;
        }
    }

  if (rs6000_cpu == PROCESSOR_POWER6)
    load_store_pendulum = 0;

  return rs6000_issue_rate ();
}
/* Like rs6000_sched_reorder, but called after issuing each insn.  */

static int
rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
                       int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
{
  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder2 :\n");

  /* For Power6, we need to handle some special cases to try and keep the
     store queue from overflowing and triggering expensive flushes.

     This code monitors how load and store instructions are being issued
     and skews the ready list one way or the other to increase the likelihood
     that a desired instruction is issued at the proper time.

     A couple of things are done.  First, we maintain a "load_store_pendulum"
     to track the current state of load/store issue.

       - If the pendulum is at zero, then no loads or stores have been
         issued in the current cycle so we do nothing.

       - If the pendulum is 1, then a single load has been issued in this
         cycle and we attempt to locate another load in the ready list to
         issue with it.

       - If the pendulum is -2, then two stores have already been
         issued in this cycle, so we increase the priority of the first load
         in the ready list to increase its likelihood of being chosen first
         in the next cycle.

       - If the pendulum is -1, then a single store has been issued in this
         cycle and we attempt to locate another store in the ready list to
         issue with it, preferring a store to an adjacent memory location to
         facilitate store pairing in the store queue.

       - If the pendulum is 2, then two loads have already been
         issued in this cycle, so we increase the priority of the first store
         in the ready list to increase its likelihood of being chosen first
         in the next cycle.

       - If the pendulum < -2 or > 2, then do nothing.

     Note: This code covers the most common scenarios.  There exist non
     load/store instructions which make use of the LSU and which
     would need to be accounted for to strictly model the behavior
     of the machine.  Those instructions are currently unaccounted
     for to help minimize compile time overhead of this code.  */
  if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
    {
      int pos;
      int i;
      rtx tmp, load_mem, str_mem;

      if (is_store_insn (last_scheduled_insn, &str_mem))
        /* Issuing a store, swing the load_store_pendulum to the left.  */
        load_store_pendulum--;
      else if (is_load_insn (last_scheduled_insn, &load_mem))
        /* Issuing a load, swing the load_store_pendulum to the right.  */
        load_store_pendulum++;
      else
        return cached_can_issue_more;

      /* If the pendulum is balanced, or there is only one instruction on
         the ready list, then all is well, so return.  */
      if ((load_store_pendulum == 0) || (*pn_ready <= 1))
        return cached_can_issue_more;

      if (load_store_pendulum == 1)
        {
          /* A load has been issued in this cycle.  Scan the ready list
             for another load to issue with it.  */
          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_load_insn (ready[pos], &load_mem))
                {
                  /* Found a load.  Move it to the head of the ready list,
                     and adjust its priority so that it is more likely to
                     stay there.  */
                  tmp = ready[pos];
                  for (i = pos; i < *pn_ready - 1; i++)
                    ready[i] = ready[i + 1];
                  ready[*pn_ready - 1] = tmp;

                  if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                    INSN_PRIORITY (tmp)++;
                  break;
                }
              pos--;
            }
        }
      else if (load_store_pendulum == -2)
        {
          /* Two stores have been issued in this cycle.  Increase the
             priority of the first load in the ready list to favor it for
             issuing in the next cycle.  */
          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_load_insn (ready[pos], &load_mem)
                  && !sel_sched_p ()
                  && INSN_PRIORITY_KNOWN (ready[pos]))
                {
                  INSN_PRIORITY (ready[pos])++;

                  /* Adjust the pendulum to account for the fact that a load
                     was found and increased in priority.  This is to prevent
                     increasing the priority of multiple loads.  */
                  load_store_pendulum--;

                  break;
                }
              pos--;
            }
        }
      else if (load_store_pendulum == -1)
        {
          /* A store has been issued in this cycle.  Scan the ready list for
             another store to issue with it, preferring a store to an adjacent
             memory location.  */
          int first_store_pos = -1;

          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_store_insn (ready[pos], &str_mem))
                {
                  rtx str_mem2;

                  /* Maintain the index of the first store found on the
                     list.  */
                  if (first_store_pos == -1)
                    first_store_pos = pos;

                  if (is_store_insn (last_scheduled_insn, &str_mem2)
                      && adjacent_mem_locations (str_mem, str_mem2))
                    {
                      /* Found an adjacent store.  Move it to the head of the
                         ready list, and adjust its priority so that it is
                         more likely to stay there.  */
                      tmp = ready[pos];
                      for (i = pos; i < *pn_ready - 1; i++)
                        ready[i] = ready[i + 1];
                      ready[*pn_ready - 1] = tmp;

                      if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                        INSN_PRIORITY (tmp)++;

                      first_store_pos = -1;

                      break;
                    }
                }
              pos--;
            }

          if (first_store_pos >= 0)
            {
              /* An adjacent store wasn't found, but a non-adjacent store was,
                 so move the non-adjacent store to the front of the ready
                 list, and adjust its priority so that it is more likely to
                 stay there.  */
              tmp = ready[first_store_pos];
              for (i = first_store_pos; i < *pn_ready - 1; i++)
                ready[i] = ready[i + 1];
              ready[*pn_ready - 1] = tmp;
              if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                INSN_PRIORITY (tmp)++;
            }
        }
      else if (load_store_pendulum == 2)
        {
          /* Two loads have been issued in this cycle.  Increase the priority
             of the first store in the ready list to favor it for issuing in
             the next cycle.  */
          pos = *pn_ready - 1;

          while (pos >= 0)
            {
              if (is_store_insn (ready[pos], &str_mem)
                  && !sel_sched_p ()
                  && INSN_PRIORITY_KNOWN (ready[pos]))
                {
                  INSN_PRIORITY (ready[pos])++;

                  /* Adjust the pendulum to account for the fact that a store
                     was found and increased in priority.  This is to prevent
                     increasing the priority of multiple stores.  */
                  load_store_pendulum++;

                  break;
                }
              pos--;
            }
        }
    }

  return cached_can_issue_more;
}
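/* Worked example of the pendulum: if a store has just been issued the
   pendulum moves to -1 and the code above tries to pair a second
   (preferably adjacent) store with it; once a second store issues the
   pendulum reaches -2 and the first load on the ready list gets its
   priority bumped, so the next cycle is more likely to start with a load,
   keeping loads and stores alternating as Power6 prefers.  */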
/* Return whether the presence of INSN causes a dispatch group termination
   of group WHICH_GROUP.

   If WHICH_GROUP == current_group, this function will return true if INSN
   causes the termination of the current group (i.e, the dispatch group to
   which INSN belongs).  This means that INSN will be the last insn in the
   group it belongs to.

   If WHICH_GROUP == previous_group, this function will return true if INSN
   causes the termination of the previous group (i.e, the dispatch group that
   precedes the group to which INSN belongs).  This means that INSN will be
   the first insn in the group it belongs to.  */

static bool
insn_terminates_group_p (rtx insn, enum group_termination which_group)
{
  bool first, last;

  if (! insn)
    return false;

  first = insn_must_be_first_in_group (insn);
  last = insn_must_be_last_in_group (insn);

  if (first && last)
    return true;

  if (which_group == current_group)
    return last;
  else if (which_group == previous_group)
    return first;

  return false;
}
static bool
insn_must_be_first_in_group (rtx insn)
{
  enum attr_type type;

  if (!insn
      || DEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_cpu)
    {
    case PROCESSOR_POWER5:
      if (is_cracked_insn (insn))
        return true;
    case PROCESSOR_POWER4:
      if (is_microcoded_insn (insn))
        return true;

      if (!rs6000_sched_groups)
        return false;

      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_DELAYED_CR:
        case TYPE_CR_LOGICAL:
          return true;
        default:
          break;
        }
      break;

    case PROCESSOR_POWER6:
      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_INSERT_DWORD:
        case TYPE_VAR_SHIFT_ROTATE:
        case TYPE_INSERT_WORD:
        case TYPE_DELAYED_COMPARE:
        case TYPE_IMUL_COMPARE:
        case TYPE_LMUL_COMPARE:
        case TYPE_FPCOMPARE:
        case TYPE_LOAD_EXT_UX:
        case TYPE_STORE_UX:
        case TYPE_FPLOAD_U:
        case TYPE_FPLOAD_UX:
        case TYPE_FPSTORE_U:
        case TYPE_FPSTORE_UX:
          return true;
        default:
          break;
        }
      break;

    case PROCESSOR_POWER7:
      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_CR_LOGICAL:
        case TYPE_DELAYED_COMPARE:
        case TYPE_VAR_DELAYED_COMPARE:
        case TYPE_LOAD_EXT:
        case TYPE_LOAD_EXT_U:
        case TYPE_LOAD_EXT_UX:
        case TYPE_STORE_UX:
        case TYPE_FPLOAD_U:
        case TYPE_FPLOAD_UX:
        case TYPE_FPSTORE_U:
        case TYPE_FPSTORE_UX:
          return true;
        default:
          break;
        }
      break;

    case PROCESSOR_POWER8:
      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_CR_LOGICAL:
        case TYPE_DELAYED_CR:
        case TYPE_DELAYED_COMPARE:
        case TYPE_VAR_DELAYED_COMPARE:
        case TYPE_IMUL_COMPARE:
        case TYPE_LMUL_COMPARE:
        case TYPE_LOAD_EXT:
        case TYPE_LOAD_EXT_U:
        case TYPE_LOAD_EXT_UX:
        case TYPE_STORE_UX:
        case TYPE_VECSTORE:
          return true;
        default:
          break;
        }
      break;

    default:
      break;
    }

  return false;
}
static bool
insn_must_be_last_in_group (rtx insn)
{
  enum attr_type type;

  if (!insn
      || DEBUG_INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_cpu) {
  case PROCESSOR_POWER4:
  case PROCESSOR_POWER5:
    if (is_microcoded_insn (insn))
      return true;

    if (is_branch_slot_insn (insn))
      return true;

    break;
  case PROCESSOR_POWER6:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_VAR_SHIFT_ROTATE:
      case TYPE_DELAYED_COMPARE:
      case TYPE_IMUL_COMPARE:
      case TYPE_LMUL_COMPARE:
      case TYPE_FPCOMPARE:
        return true;
      default:
        break;
      }
    break;
  case PROCESSOR_POWER7:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_LOAD_EXT_U:
      case TYPE_LOAD_EXT_UX:
      case TYPE_STORE_UX:
        return true;
      default:
        break;
      }
    break;
  case PROCESSOR_POWER8:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_LOAD_EXT_U:
      case TYPE_LOAD_EXT_UX:
      case TYPE_STORE_UX:
        return true;
      default:
        break;
      }
    break;
  default:
    break;
  }

  return false;
}
/* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
   dispatch group) from the insns in GROUP_INSNS.  Return false otherwise.  */

static bool
is_costly_group (rtx *group_insns, rtx next_insn)
{
  int i;
  int issue_rate = rs6000_issue_rate ();

  for (i = 0; i < issue_rate; i++)
    {
      sd_iterator_def sd_it;
      dep_t dep;
      rtx insn = group_insns[i];

      if (!insn)
        continue;

      FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
        {
          rtx next = DEP_CON (dep);

          if (next == next_insn
              && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
            return true;
        }
    }

  return false;
}
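/* For example, with -msched-costly-dep=store_to_load, a group that already
   contains a store feeding NEXT_INSN makes the group costly when NEXT_INSN
   is a load, so force_new_group below will push that load into a separate
   dispatch group.  */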
/* Utility of the function redefine_groups.
   Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
   in the same dispatch group.  If so, insert nops before NEXT_INSN, in order
   to keep it "far" (in a separate group) from GROUP_INSNS, following
   one of the following schemes, depending on the value of the flag
   -minsert_sched_nops = X:
   (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
       in order to force NEXT_INSN into a separate group.
   (2) X < sched_finish_regroup_exact: insert exactly X nops.
   GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
   insertion (has a group just ended, how many vacant issue slots remain in the
   last group, and how many dispatch groups were encountered so far).  */

static int
force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
                 rtx next_insn, bool *group_end, int can_issue_more,
                 int *group_count)
{
  rtx nop;
  bool force;
  int issue_rate = rs6000_issue_rate ();
  bool end = *group_end;
  int i;

  if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
    return can_issue_more;

  if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
    return can_issue_more;

  force = is_costly_group (group_insns, next_insn);
  if (!force)
    return can_issue_more;

  if (sched_verbose > 6)
    fprintf (dump, "force: group count = %d, can_issue_more = %d\n",
             *group_count, can_issue_more);

  if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
    {
      if (*group_end)
        can_issue_more = 0;

      /* Since only a branch can be issued in the last issue_slot, it is
         sufficient to insert 'can_issue_more - 1' nops if next_insn is not
         a branch.  If next_insn is a branch, we insert 'can_issue_more' nops;
         in this case the last nop will start a new group and the branch
         will be forced to the new group.  */
      if (can_issue_more && !is_branch_slot_insn (next_insn))
        can_issue_more--;

      /* Do we have a special group ending nop?  */
      if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
          || rs6000_cpu_attr == CPU_POWER8)
        {
          nop = gen_group_ending_nop ();
          emit_insn_before (nop, next_insn);
          can_issue_more = 0;
        }
      else
        while (can_issue_more > 0)
          {
            nop = gen_nop ();
            emit_insn_before (nop, next_insn);
            can_issue_more--;
          }

      *group_end = true;
      for (i = 0; i < issue_rate; i++)
        group_insns[i] = 0;
      return 0;
    }

  if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
    {
      int n_nops = rs6000_sched_insert_nops;

      /* Nops can't be issued from the branch slot, so the effective
         issue_rate for nops is 'issue_rate - 1'.  */
      if (can_issue_more == 0)
        can_issue_more = issue_rate;
      can_issue_more--;
      if (can_issue_more == 0)
        {
          can_issue_more = issue_rate - 1;
          (*group_count)++;
          end = true;
          for (i = 0; i < issue_rate; i++)
            group_insns[i] = 0;
        }

      while (n_nops > 0)
        {
          nop = gen_nop ();
          emit_insn_before (nop, next_insn);
          if (can_issue_more == issue_rate - 1) /* new group begins */
            end = false;
          can_issue_more--;
          if (can_issue_more == 0)
            {
              can_issue_more = issue_rate - 1;
              (*group_count)++;
              end = true;
              for (i = 0; i < issue_rate; i++)
                group_insns[i] = 0;
            }

          n_nops--;
        }

      /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1').  */
      can_issue_more++;

      /* Is next_insn going to start a new group?  */
      *group_end
        = (end
           || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
           || (can_issue_more <= 2 && is_cracked_insn (next_insn))
           || (can_issue_more < issue_rate &&
               insn_terminates_group_p (next_insn, previous_group)));
      if (*group_end && end)
        (*group_count)--;

      if (sched_verbose > 6)
        fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
                 *group_count, can_issue_more);
      return can_issue_more;
    }

  return can_issue_more;
}
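/* For example, with -minsert-sched-nops=sched_finish_regroup_exact and a
   non-branch NEXT_INSN that still has two vacant slots ahead of it, one nop
   is emitted (can_issue_more - 1) on CPUs without a group-ending nop, while
   -minsert-sched-nops=2 always emits exactly two nops regardless of how many
   slots remain.  */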
/* This function tries to synch the dispatch groups that the compiler "sees"
   with the dispatch groups that the processor dispatcher is expected to
   form in practice.  It tries to achieve this synchronization by forcing the
   estimated processor grouping on the compiler (as opposed to the function
   'pad_groups' which tries to force the scheduler's grouping on the processor).

   The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
   examines the (estimated) dispatch groups that will be formed by the processor
   dispatcher.  It marks these group boundaries to reflect the estimated
   processor grouping, overriding the grouping that the scheduler had marked.
   Depending on the value of the flag '-minsert-sched-nops' this function can
   force certain insns into separate groups or force a certain distance between
   them by inserting nops, for example, if there exists a "costly dependence"
   between the insns.

   The function estimates the group boundaries that the processor will form as
   follows:  It keeps track of how many vacant issue slots are available after
   each insn.  A subsequent insn will start a new group if one of the following
   4 cases applies:
   - no more vacant issue slots remain in the current dispatch group.
   - only the last issue slot, which is the branch slot, is vacant, but the next
     insn is not a branch.
   - only the last 2 or less issue slots, including the branch slot, are vacant,
     which means that a cracked insn (which occupies two issue slots) can't be
     issued in this group.
   - less than 'issue_rate' slots are vacant, and the next insn always needs to
     start a new group.  */

static int
redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
{
  rtx insn, next_insn;
  int issue_rate;
  int can_issue_more;
  int slot, i;
  bool group_end;
  int group_count = 0;
  rtx *group_insns;

  /* Initialize.  */
  issue_rate = rs6000_issue_rate ();
  group_insns = XALLOCAVEC (rtx, issue_rate);
  for (i = 0; i < issue_rate; i++)
    group_insns[i] = 0;
  can_issue_more = issue_rate;
  slot = 0;
  insn = get_next_active_insn (prev_head_insn, tail);
  group_end = false;

  while (insn != NULL_RTX)
    {
      slot = (issue_rate - can_issue_more);
      group_insns[slot] = insn;
      can_issue_more =
        rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
      if (insn_terminates_group_p (insn, current_group))
        can_issue_more = 0;

      next_insn = get_next_active_insn (insn, tail);
      if (next_insn == NULL_RTX)
        return group_count + 1;

      /* Is next_insn going to start a new group?  */
      group_end
        = (can_issue_more == 0
           || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
           || (can_issue_more <= 2 && is_cracked_insn (next_insn))
           || (can_issue_more < issue_rate &&
               insn_terminates_group_p (next_insn, previous_group)));

      can_issue_more = force_new_group (sched_verbose, dump, group_insns,
                                        next_insn, &group_end, can_issue_more,
                                        &group_count);

      if (group_end)
        {
          group_count++;
          can_issue_more = 0;
          for (i = 0; i < issue_rate; i++)
            group_insns[i] = 0;
        }

      if (GET_MODE (next_insn) == TImode && can_issue_more)
        PUT_MODE (next_insn, VOIDmode);
      else if (!can_issue_more && GET_MODE (next_insn) != TImode)
        PUT_MODE (next_insn, TImode);

      insn = next_insn;
      if (can_issue_more == 0)
        can_issue_more = issue_rate;
    }

  return group_count;
}
/* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
   dispatch group boundaries that the scheduler had marked.  Pad with nops
   any dispatch groups which have vacant issue slots, in order to force the
   scheduler's grouping on the processor dispatcher.  The function
   returns the number of dispatch groups found.  */

static int
pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
{
  rtx insn, next_insn;
  rtx nop;
  int issue_rate;
  int can_issue_more;
  int group_end;
  int group_count = 0;

  /* Initialize issue_rate.  */
  issue_rate = rs6000_issue_rate ();
  can_issue_more = issue_rate;

  insn = get_next_active_insn (prev_head_insn, tail);
  next_insn = get_next_active_insn (insn, tail);

  while (insn != NULL_RTX)
    {
      can_issue_more =
        rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);

      group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);

      if (next_insn == NULL_RTX)
        break;

      if (group_end)
        {
          /* If the scheduler had marked group termination at this location
             (between insn and next_insn), and neither insn nor next_insn will
             force group termination, pad the group with nops to force group
             termination.  */
          if (can_issue_more
              && (rs6000_sched_insert_nops == sched_finish_pad_groups)
              && !insn_terminates_group_p (insn, current_group)
              && !insn_terminates_group_p (next_insn, previous_group))
            {
              if (!is_branch_slot_insn (next_insn))
                can_issue_more--;

              while (can_issue_more)
                {
                  nop = gen_nop ();
                  emit_insn_before (nop, next_insn);
                  can_issue_more--;
                }
            }

          can_issue_more = issue_rate;
          group_count++;
        }

      insn = next_insn;
      next_insn = get_next_active_insn (insn, tail);
    }

  return group_count;
}
/* We're beginning a new block.  Initialize data structures as necessary.  */

static void
rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
                   int sched_verbose ATTRIBUTE_UNUSED,
                   int max_ready ATTRIBUTE_UNUSED)
{
  last_scheduled_insn = NULL_RTX;
  load_store_pendulum = 0;
}
/* The following function is called at the end of scheduling BB.
   After reload, it inserts nops at insn group bundling.  */

static void
rs6000_sched_finish (FILE *dump, int sched_verbose)
{
  int n_groups;

  if (sched_verbose)
    fprintf (dump, "=== Finishing schedule.\n");

  if (reload_completed && rs6000_sched_groups)
    {
      /* Do not run sched_finish hook when selective scheduling enabled.  */
      if (sel_sched_p ())
        return;

      if (rs6000_sched_insert_nops == sched_finish_none)
        return;

      if (rs6000_sched_insert_nops == sched_finish_pad_groups)
        n_groups = pad_groups (dump, sched_verbose,
                               current_sched_info->prev_head,
                               current_sched_info->next_tail);
      else
        n_groups = redefine_groups (dump, sched_verbose,
                                    current_sched_info->prev_head,
                                    current_sched_info->next_tail);

      if (sched_verbose >= 6)
        {
          fprintf (dump, "ngroups = %d\n", n_groups);
          print_rtl (dump, current_sched_info->prev_head);
          fprintf (dump, "Done finish_sched\n");
        }
    }
}
struct _rs6000_sched_context
{
  short cached_can_issue_more;
  rtx last_scheduled_insn;
  int load_store_pendulum;
};

typedef struct _rs6000_sched_context rs6000_sched_context_def;
typedef rs6000_sched_context_def *rs6000_sched_context_t;

/* Allocate store for new scheduling context.  */
static void *
rs6000_alloc_sched_context (void)
{
  return xmalloc (sizeof (rs6000_sched_context_def));
}

/* If CLEAN_P is true then initializes _SC with clean data,
   and from the global context otherwise.  */
static void
rs6000_init_sched_context (void *_sc, bool clean_p)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  if (clean_p)
    {
      sc->cached_can_issue_more = 0;
      sc->last_scheduled_insn = NULL_RTX;
      sc->load_store_pendulum = 0;
    }
  else
    {
      sc->cached_can_issue_more = cached_can_issue_more;
      sc->last_scheduled_insn = last_scheduled_insn;
      sc->load_store_pendulum = load_store_pendulum;
    }
}

/* Sets the global scheduling context to the one pointed to by _SC.  */
static void
rs6000_set_sched_context (void *_sc)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  gcc_assert (sc != NULL);

  cached_can_issue_more = sc->cached_can_issue_more;
  last_scheduled_insn = sc->last_scheduled_insn;
  load_store_pendulum = sc->load_store_pendulum;
}

/* Free _SC.  */
static void
rs6000_free_sched_context (void *_sc)
{
  gcc_assert (_sc != NULL);

  free (_sc);
}
/* Length in units of the trampoline for entering a nested function.  */

int
rs6000_trampoline_size (void)
{
  int ret = 0;

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_AIX:
      ret = (TARGET_32BIT) ? 12 : 24;
      break;

    case ABI_ELFv2:
      gcc_assert (!TARGET_32BIT);
      ret = 32;
      break;

    case ABI_DARWIN:
    case ABI_V4:
      ret = (TARGET_32BIT) ? 40 : 48;
      break;
    }

  return ret;
}
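/* For example, the AIX trampoline is the 3-word function descriptor
   (12 bytes for 32-bit, 24 for 64-bit code), while the V.4/Darwin
   trampoline set up by __trampoline_setup is 40 or 48 bytes.  */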
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */

static void
rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
{
  int regsize = (TARGET_32BIT) ? 4 : 8;
  rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
  rtx ctx_reg = force_reg (Pmode, cxt);
  rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    /* Under AIX, just build the 3 word function descriptor.  */
    case ABI_AIX:
      {
        rtx fnmem, fn_reg, toc_reg;

        if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
          error ("You cannot take the address of a nested function if you use "
                 "the -mno-pointers-to-nested-functions option.");

        fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
        fn_reg = gen_reg_rtx (Pmode);
        toc_reg = gen_reg_rtx (Pmode);

  /* Macro to shorten the code expansions below.  */
# define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)

        m_tramp = replace_equiv_address (m_tramp, addr);

        emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
        emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
        emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
        emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
        emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
# undef MEM_PLUS
      }
      break;

    /* Under V.4/eabi/darwin, __trampoline_setup does the real work.  */
    case ABI_ELFv2:
    case ABI_DARWIN:
    case ABI_V4:
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
                         LCT_NORMAL, VOIDmode, 4,
                         addr, Pmode,
                         GEN_INT (rs6000_trampoline_size ()), SImode,
                         fnaddr, Pmode,
                         ctx_reg, Pmode);
      break;
    }
}
/* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
   identifier as an argument, so the front end shouldn't look it up.  */

static bool
rs6000_attribute_takes_identifier_p (const_tree attr_id)
{
  return is_attribute_p ("altivec", attr_id);
}
/* Handle the "altivec" attribute.  The attribute may have
   arguments as follows:

	__attribute__((altivec(vector__)))
	__attribute__((altivec(pixel__)))	(always followed by 'unsigned short')
	__attribute__((altivec(bool__)))	(always followed by 'unsigned')

   and may appear more than once (e.g., 'vector bool char') in a
   given declaration.  */

static tree
rs6000_handle_altivec_attribute (tree *node,
                                 tree name ATTRIBUTE_UNUSED,
                                 tree args,
                                 int flags ATTRIBUTE_UNUSED,
                                 bool *no_add_attrs)
{
  tree type = *node, result = NULL_TREE;
  enum machine_mode mode;
  int unsigned_p;
  char altivec_type
    = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
        && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
       ? *IDENTIFIER_POINTER (TREE_VALUE (args))
       : '?');

  while (POINTER_TYPE_P (type)
         || TREE_CODE (type) == FUNCTION_TYPE
         || TREE_CODE (type) == METHOD_TYPE
         || TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  mode = TYPE_MODE (type);

  /* Check for invalid AltiVec type qualifiers.  */
  if (type == long_double_type_node)
    error ("use of %<long double%> in AltiVec types is invalid");
  else if (type == boolean_type_node)
    error ("use of boolean types in AltiVec types is invalid");
  else if (TREE_CODE (type) == COMPLEX_TYPE)
    error ("use of %<complex%> in AltiVec types is invalid");
  else if (DECIMAL_FLOAT_MODE_P (mode))
    error ("use of decimal floating point types in AltiVec types is invalid");
  else if (!TARGET_VSX)
    {
      if (type == long_unsigned_type_node || type == long_integer_type_node)
        {
          if (TARGET_64BIT)
            error ("use of %<long%> in AltiVec types is invalid for "
                   "64-bit code without -mvsx");
          else if (rs6000_warn_altivec_long)
            warning (0, "use of %<long%> in AltiVec types is deprecated; "
                     "use %<int%>");
        }
      else if (type == long_long_unsigned_type_node
               || type == long_long_integer_type_node)
        error ("use of %<long long%> in AltiVec types is invalid without "
               "-mvsx");
      else if (type == double_type_node)
        error ("use of %<double%> in AltiVec types is invalid without -mvsx");
    }

  switch (altivec_type)
    {
    case 'v':
      unsigned_p = TYPE_UNSIGNED (type);
      switch (mode)
        {
        case DImode:
          result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
          break;
        case SImode:
          result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
          break;
        case HImode:
          result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
          break;
        case QImode:
          result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
          break;
        case SFmode: result = V4SF_type_node; break;
        case DFmode: result = V2DF_type_node; break;
          /* If the user says 'vector int bool', we may be handed the 'bool'
             attribute _before_ the 'vector' attribute, and so select the
             proper type in the 'b' case below.  */
        case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
        case V2DImode: case V2DFmode:
          result = type;
        default: break;
        }
      break;
    case 'b':
      switch (mode)
        {
        case DImode: case V2DImode: result = bool_V2DI_type_node; break;
        case SImode: case V4SImode: result = bool_V4SI_type_node; break;
        case HImode: case V8HImode: result = bool_V8HI_type_node; break;
        case QImode: case V16QImode: result = bool_V16QI_type_node;
        default: break;
        }
      break;
    case 'p':
      switch (mode)
        {
        case V8HImode: result = pixel_V8HI_type_node;
        default: break;
        }
    default: break;
    }

  /* Propagate qualifiers attached to the element type
     onto the vector type.  */
  if (result && result != type && TYPE_QUALS (type))
    result = build_qualified_type (result, TYPE_QUALS (type));

  *no_add_attrs = true;  /* No need to hang on to the attribute.  */

  if (result)
    *node = lang_hooks.types.reconstruct_complex_type (*node, result);

  return NULL_TREE;
}
/* AltiVec defines four built-in scalar types that serve as vector
   elements; we must teach the compiler how to mangle them.  */

static const char *
rs6000_mangle_type (const_tree type)
{
  type = TYPE_MAIN_VARIANT (type);

  if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
      && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
    return NULL;

  if (type == bool_char_type_node) return "U6__boolc";
  if (type == bool_short_type_node) return "U6__bools";
  if (type == pixel_type_node) return "u7__pixel";
  if (type == bool_int_type_node) return "U6__booli";
  if (type == bool_long_type_node) return "U6__booll";

  /* Mangle IBM extended float long double as `g' (__float128) on
     powerpc*-linux where long-double-64 previously was the default.  */
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_LONG_DOUBLE_128
      && !TARGET_IEEEQUAD)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
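/* For example, the AltiVec element type '__bool int' (bool_int_type_node)
   mangles as "U6__booli" and '__pixel' (pixel_type_node) as "u7__pixel";
   every other scalar element type falls through to the default C++
   mangling by returning NULL.  */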
/* Handle a "longcall" or "shortcall" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_longcall_attribute (tree *node, tree name,
                                  tree args ATTRIBUTE_UNUSED,
                                  int flags ATTRIBUTE_UNUSED,
                                  bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

/* Set longcall attributes on all functions declared when
   rs6000_default_long_calls is true.  */
static void
rs6000_set_default_type_attributes (tree type)
{
  if (rs6000_default_long_calls
      && (TREE_CODE (type) == FUNCTION_TYPE
          || TREE_CODE (type) == METHOD_TYPE))
    TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
                                        NULL_TREE,
                                        TYPE_ATTRIBUTES (type));

#if TARGET_MACHO
  darwin_set_default_type_attributes (type);
#endif
}
/* Return a reference suitable for calling a function with the
   longcall attribute.  */

rtx
rs6000_longcall_ref (rtx call_ref)
{
  const char *call_name;
  tree node;

  if (GET_CODE (call_ref) != SYMBOL_REF)
    return call_ref;

  /* System V adds '.' to the internal name, so skip them.  */
  call_name = XSTR (call_ref, 0);
  if (*call_name == '.')
    {
      while (*call_name == '.')
        call_name++;

      node = get_identifier (call_name);
      call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
    }

  return force_reg (Pmode, call_ref);
}
#ifndef TARGET_USE_MS_BITFIELD_LAYOUT
#define TARGET_USE_MS_BITFIELD_LAYOUT 0
#endif

/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
   struct attribute_spec.handler.  */
static tree
rs6000_handle_struct_attribute (tree *node, tree name,
                                tree args ATTRIBUTE_UNUSED,
                                int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree *type = NULL;
  if (DECL_P (*node))
    {
      if (TREE_CODE (*node) == TYPE_DECL)
        type = &TREE_TYPE (*node);
    }
  else
    type = node;

  if (!(type && (TREE_CODE (*type) == RECORD_TYPE
                 || TREE_CODE (*type) == UNION_TYPE)))
    {
      warning (OPT_Wattributes, "%qE attribute ignored", name);
      *no_add_attrs = true;
    }

  else if ((is_attribute_p ("ms_struct", name)
            && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
           || ((is_attribute_p ("gcc_struct", name)
                && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
    {
      warning (OPT_Wattributes, "%qE incompatible attribute ignored",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

static bool
rs6000_ms_bitfield_layout_p (const_tree record_type)
{
  return (TARGET_USE_MS_BITFIELD_LAYOUT &&
          !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
    || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
}
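/* For example, a record declared with __attribute__((ms_struct)) gets the
   Microsoft bit-field layout even when TARGET_USE_MS_BITFIELD_LAYOUT is 0,
   while __attribute__((gcc_struct)) keeps the GCC layout when it is 1.  */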
#ifdef USING_ELFOS_H

/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
      && TARGET_MINIMAL_TOC
      && !TARGET_RELOCATABLE)
    {
      if (!toc_initialized)
        {
          toc_initialized = 1;
          fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
          (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
          fprintf (asm_out_file, "\t.tc ");
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
          fprintf (asm_out_file, "\n");

          fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
          fprintf (asm_out_file, " = .+32768\n");
        }
      else
        fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
    }
  else if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
           && !TARGET_RELOCATABLE)
    fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
  else
    {
      fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
      if (!toc_initialized)
        {
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
          fprintf (asm_out_file, " = .+32768\n");
          toc_initialized = 1;
        }
    }
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_elf_asm_init_sections (void)
{
  toc_section
    = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);

  sdata2_section
    = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
                           SDATA2_SECTION_ASM_OP);
}

/* Implement TARGET_SELECT_RTX_SECTION.  */

static section *
rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
                               unsigned HOST_WIDE_INT align)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}
/* For a SYMBOL_REF, set generic flags and then perform some
   target-specific processing.

   When the AIX ABI is requested on a non-AIX system, replace the
   function name with the real name (with a leading .) rather than the
   function descriptor name.  This saves a lot of overriding code to
   read the prefixes.  */

static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (first
      && TREE_CODE (decl) == FUNCTION_DECL
      && DEFAULT_ABI == ABI_AIX)
    {
      rtx sym_ref = XEXP (rtl, 0);
      size_t len = strlen (XSTR (sym_ref, 0));
      char *str = XALLOCAVEC (char, len + 2);
      str[0] = '.';
      memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
      XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
    }
}
static inline bool
compare_section_name (const char *section, const char *templ)
{
  int len;

  len = strlen (templ);
  return (strncmp (section, templ, len) == 0
          && (section[len] == 0 || section[len] == '.'));
}
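/* For example, compare_section_name (".sdata.foo", ".sdata") and
   compare_section_name (".sdata", ".sdata") both return true, but
   compare_section_name (".sdata2", ".sdata") returns false because the
   character after the matched prefix is neither '\0' nor '.'.  */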
bool
rs6000_elf_in_small_data_p (const_tree decl)
{
  if (rs6000_sdata == SDATA_NONE)
    return false;

  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (decl) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
      if (compare_section_name (section, ".sdata")
          || compare_section_name (section, ".sdata2")
          || compare_section_name (section, ".gnu.linkonce.s")
          || compare_section_name (section, ".sbss")
          || compare_section_name (section, ".sbss2")
          || compare_section_name (section, ".gnu.linkonce.sb")
          || strcmp (section, ".PPC.EMB.sdata0") == 0
          || strcmp (section, ".PPC.EMB.sbss0") == 0)
        return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));

      if (size > 0
          && size <= g_switch_value
          /* If it's not public, and we're not going to reference it there,
             there's no need to put it in the small data section.  */
          && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
        return true;
    }

  return false;
}

#endif /* USING_ELFOS_H */

/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  */

static bool
rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
{
  return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
}

/* Do not place thread-local symbols refs in the object blocks.  */

static bool
rs6000_use_blocks_for_decl_p (const_tree decl)
{
  return !DECL_THREAD_LOCAL_P (decl);
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.

   r0 is special and we must not select it as an address
   register by this routine since our caller will try to
   increment the returned register via an "la" instruction.  */

rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG
          && REGNO (XEXP (addr, 0)) != 0)
        addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG
               && REGNO (XEXP (addr, 1)) != 0)
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
        addr = XEXP (addr, 0);
      else
        gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
  return addr;
}

void
rs6000_fatal_bad_address (rtx op)
{
  fatal_insn ("bad address", op);
}
#if TARGET_MACHO

typedef struct branch_island_d {
  tree function_name;
  tree label_name;
  int line_number;
} branch_island;

static vec<branch_island, va_gc> *branch_islands;

/* Remember to generate a branch island for far calls to the given
   function.  */

static void
add_compiler_branch_island (tree label_name, tree function_name,
                            int line_number)
{
  branch_island bi = {function_name, label_name, line_number};
  vec_safe_push (branch_islands, bi);
}
/* Generate far-jump branch islands for everything recorded in
   branch_islands.  Invoked immediately after the last instruction of
   the epilogue has been emitted; the branch islands must be appended
   to, and contiguous with, the function body.  Mach-O stubs are
   generated in machopic_output_stub().  */

static void
macho_branch_islands (void)
{
  char tmp_buf[512];

  while (!vec_safe_is_empty (branch_islands))
    {
      branch_island *bi = &branch_islands->last ();
      const char *label = IDENTIFIER_POINTER (bi->label_name);
      const char *name = IDENTIFIER_POINTER (bi->function_name);
      char name_buf[512];
      /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF().  */
      if (name[0] == '*' || name[0] == '&')
        strcpy (name_buf, name + 1);
      else
        {
          name_buf[0] = '_';
          strcpy (name_buf + 1, name);
        }
      strcpy (tmp_buf, "\n");
      strcat (tmp_buf, label);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
        dbxout_stabd (N_SLINE, bi->line_number);
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
      if (flag_pic)
        {
          if (TARGET_LINK_STACK)
            {
              char name[32];
              get_ppc476_thunk_name (name);
              strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
              strcat (tmp_buf, name);
              strcat (tmp_buf, "\n");
              strcat (tmp_buf, label);
              strcat (tmp_buf, "_pic:\n\tmflr r11\n");
            }
          else
            {
              strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
              strcat (tmp_buf, label);
              strcat (tmp_buf, "_pic\n");
              strcat (tmp_buf, label);
              strcat (tmp_buf, "_pic:\n\tmflr r11\n");
            }

          strcat (tmp_buf, "\taddis r11,r11,ha16(");
          strcat (tmp_buf, name_buf);
          strcat (tmp_buf, " - ");
          strcat (tmp_buf, label);
          strcat (tmp_buf, "_pic)\n");

          strcat (tmp_buf, "\tmtlr r0\n");

          strcat (tmp_buf, "\taddi r12,r11,lo16(");
          strcat (tmp_buf, name_buf);
          strcat (tmp_buf, " - ");
          strcat (tmp_buf, label);
          strcat (tmp_buf, "_pic)\n");

          strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
        }
      else
        {
          strcat (tmp_buf, ":\nlis r12,hi16(");
          strcat (tmp_buf, name_buf);
          strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
          strcat (tmp_buf, name_buf);
          strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
        }
      output_asm_insn (tmp_buf, 0);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
        dbxout_stabd (N_SLINE, bi->line_number);
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
      branch_islands->pop ();
    }
}
/* NO_PREVIOUS_DEF checks in the link list whether the function name is
   already there or not.  */

static bool
no_previous_def (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return false;
  return true;
}

/* GET_PREV_LABEL gets the label name from the previous definition of
   the function.  */

static tree
get_prev_label (tree function_name)
{
  branch_island *bi;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
    if (function_name == bi->function_name)
      return bi->label_name;
  return NULL_TREE;
}
/* INSN is either a function call or a millicode call.  It may have an
   unconditional jump in its delay slot.

   CALL_DEST is the routine we are calling.  */

char *
output_call (rtx insn, rtx *operands, int dest_operand_number,
             int cookie_operand_number)
{
  static char buf[256];
  if (darwin_emit_branch_islands
      && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
      && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
    {
      tree labelname;
      tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));

      if (no_previous_def (funname))
        {
          rtx label_rtx = gen_label_rtx ();
          char *label_buf, temp_buf[256];
          ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
                                       CODE_LABEL_NUMBER (label_rtx));
          label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
          labelname = get_identifier (label_buf);
          add_compiler_branch_island (labelname, funname, insn_line (insn));
        }
      else
        labelname = get_prev_label (funname);

      /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
         instruction will reach 'foo', otherwise link as 'bl L42'".
         "L42" should be a 'branch island', that will do a far jump to
         'foo'.  Branch islands are generated in
         macho_branch_islands().  */
      sprintf (buf, "jbsr %%z%d,%.246s",
               dest_operand_number, IDENTIFIER_POINTER (labelname));
    }
  else
    sprintf (buf, "bl %%z%d", dest_operand_number);
  return buf;
}
/* Generate PIC and indirect symbol stubs.  */

void
machopic_output_stub (FILE *file, const char *symb, const char *stub)
{
  unsigned int length;
  char *symbol_name, *lazy_ptr_name;
  char *local_label_0;
  static int label = 0;

  /* Lose our funky encoding stuff so it doesn't contaminate the stub.  */
  symb = (*targetm.strip_name_encoding) (symb);

  length = strlen (symb);
  symbol_name = XALLOCAVEC (char, length + 32);
  GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);

  lazy_ptr_name = XALLOCAVEC (char, length + 32);
  GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);

  if (flag_pic == 2)
    switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
  else
    switch_to_section (darwin_sections[machopic_symbol_stub1_section]);

  if (flag_pic == 2)
    {
      fprintf (file, "\t.align 5\n");

      fprintf (file, "%s:\n", stub);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

      label++;
      local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
      sprintf (local_label_0, "\"L%011d$spb\"", label);

      fprintf (file, "\tmflr r0\n");
      if (TARGET_LINK_STACK)
        {
          char name[32];
          get_ppc476_thunk_name (name);
          fprintf (file, "\tbl %s\n", name);
          fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
        }
      else
        {
          fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
          fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
        }
      fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
               lazy_ptr_name, local_label_0);
      fprintf (file, "\tmtlr r0\n");
      fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
               (TARGET_64BIT ? "ldu" : "lwzu"),
               lazy_ptr_name, local_label_0);
      fprintf (file, "\tmtctr r12\n");
      fprintf (file, "\tbctr\n");
    }
  else
    {
      fprintf (file, "\t.align 4\n");

      fprintf (file, "%s:\n", stub);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

      fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
      fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
               (TARGET_64BIT ? "ldu" : "lwzu"),
               lazy_ptr_name);
      fprintf (file, "\tmtctr r12\n");
      fprintf (file, "\tbctr\n");
    }

  switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
  fprintf (file, "%s:\n", lazy_ptr_name);
  fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
  fprintf (file, "%sdyld_stub_binding_helper\n",
           (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
}
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go into a reg.  This is REG if non
   zero, otherwise we allocate register(s) as necessary.  */

#define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)

rtx
rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
                                        rtx reg)
{
  rtx base, offset;

  if (reg == NULL && ! reload_in_progress && ! reload_completed)
    reg = gen_reg_rtx (Pmode);

  if (GET_CODE (orig) == CONST)
    {
      rtx reg_temp;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
          && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
        return orig;

      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      /* Use a different reg for the intermediate value, as
         it will be marked UNCHANGING.  */
      reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
      base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
                                                     Pmode, reg_temp);
      offset =
        rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
                                                Pmode, reg);

      if (GET_CODE (offset) == CONST_INT)
        {
          if (SMALL_INT (offset))
            return plus_constant (Pmode, base, INTVAL (offset));
          else if (! reload_in_progress && ! reload_completed)
            offset = force_reg (Pmode, offset);
          else
            {
              rtx mem = force_const_mem (Pmode, orig);
              return machopic_legitimize_pic_address (mem, Pmode, reg);
            }
        }
      return gen_rtx_PLUS (Pmode, base, offset);
    }

  /* Fall back on generic machopic code.  */
  return machopic_legitimize_pic_address (orig, mode, reg);
}
/* Output a .machine directive for the Darwin assembler, and call
   the generic start_file routine.  */

static void
rs6000_darwin_file_start (void)
{
  static const struct
  {
    const char *arg;
    const char *name;
    HOST_WIDE_INT if_set;
  } mapping[] = {
    { "ppc64", "ppc64", MASK_64BIT },
    { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
    { "power4", "ppc970", 0 },
    { "G5", "ppc970", 0 },
    { "7450", "ppc7450", 0 },
    { "7400", "ppc7400", MASK_ALTIVEC },
    { "G4", "ppc7400", 0 },
    { "750", "ppc750", 0 },
    { "740", "ppc750", 0 },
    { "G3", "ppc750", 0 },
    { "604e", "ppc604e", 0 },
    { "604", "ppc604", 0 },
    { "603e", "ppc603", 0 },
    { "603", "ppc603", 0 },
    { "601", "ppc601", 0 },
    { NULL, "ppc", 0 } };
  const char *cpu_id = "";
  size_t i;

  rs6000_file_start ();
  darwin_file_start ();

  /* Determine the argument to -mcpu=.  Default to G3 if not specified.  */

  if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
    cpu_id = rs6000_default_cpu;

  if (global_options_set.x_rs6000_cpu_index)
    cpu_id = processor_target_table[rs6000_cpu_index].name;

  /* Look through the mapping array.  Pick the first name that either
     matches the argument, has a bit set in IF_SET that is also set
     in the target flags, or has a NULL name.  */

  i = 0;
  while (mapping[i].arg != NULL
         && strcmp (mapping[i].arg, cpu_id) != 0
         && (mapping[i].if_set & rs6000_isa_flags) == 0)
    i++;

  fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
}

#endif /* TARGET_MACHO */
#if TARGET_ELF
static int
rs6000_elf_reloc_rw_mask (void)
{
  if (flag_pic)
    return 3;
  else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
    return 2;
  else
    return 0;
}
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   This differs from default_named_section_asm_out_constructor in
   that we have special handling for -mrelocatable.  */

static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_asm_out_constructor (rtx symbol, int priority)
{
  const char *section = ".ctors";
  char buf[16];

  if (priority != DEFAULT_INIT_PRIORITY)
    {
      sprintf (buf, ".ctors.%.5u",
               /* Invert the numbering so the linker puts us in the proper
                  order; constructors are run from right to left, and the
                  linker sorts in increasing order.  */
               MAX_INIT_PRIORITY - priority);
      section = buf;
    }

  switch_to_section (get_section (section, SECTION_WRITE, NULL));
  assemble_align (POINTER_SIZE);

  if (TARGET_RELOCATABLE)
    {
      fputs ("\t.long (", asm_out_file);
      output_addr_const (asm_out_file, symbol);
      fputs (")@fixup\n", asm_out_file);
    }
  else
    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}

static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
static void
rs6000_elf_asm_out_destructor (rtx symbol, int priority)
{
  const char *section = ".dtors";
  char buf[16];

  if (priority != DEFAULT_INIT_PRIORITY)
    {
      sprintf (buf, ".dtors.%.5u",
               /* Invert the numbering so the linker puts us in the proper
                  order; constructors are run from right to left, and the
                  linker sorts in increasing order.  */
               MAX_INIT_PRIORITY - priority);
      section = buf;
    }

  switch_to_section (get_section (section, SECTION_WRITE, NULL));
  assemble_align (POINTER_SIZE);

  if (TARGET_RELOCATABLE)
    {
      fputs ("\t.long (", asm_out_file);
      output_addr_const (asm_out_file, symbol);
      fputs (")@fixup\n", asm_out_file);
    }
  else
    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}
void
rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
{
  if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
    {
      fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
      ASM_OUTPUT_LABEL (file, name);
      fputs (DOUBLE_INT_ASM_OP, file);
      rs6000_output_function_entry (file, name);
      fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
      if (DOT_SYMBOLS)
        {
          fputs ("\t.size\t", file);
          assemble_name (file, name);
          fputs (",24\n\t.type\t.", file);
          assemble_name (file, name);
          fputs (",@function\n", file);
          if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
            {
              fputs ("\t.globl\t.", file);
              assemble_name (file, name);
              putc ('\n', file);
            }
        }
      else
        ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
      ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
      rs6000_output_function_entry (file, name);
      fputs (":\n", file);
      return;
    }

  if (TARGET_RELOCATABLE
      && !TARGET_SECURE_PLT
      && (get_pool_size () != 0 || crtl->profile)
      && uses_TOC ())
    {
      char buf[256];

      (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);

      ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
      fprintf (file, "\t.long ");
      assemble_name (file, buf);
      putc ('-', file);
      ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
      assemble_name (file, buf);
      putc ('\n', file);
    }

  ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
  ASM_DECLARE_RESULT (file, DECL_RESULT (decl));

  if (DEFAULT_ABI == ABI_AIX)
    {
      const char *desc_name, *orig_name;

      orig_name = (*targetm.strip_name_encoding) (name);
      desc_name = orig_name;
      while (*desc_name == '.')
        desc_name++;

      if (TREE_PUBLIC (decl))
        fprintf (file, "\t.globl %s\n", desc_name);

      fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
      fprintf (file, "%s:\n", desc_name);
      fprintf (file, "\t.long %s\n", orig_name);
      fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
      fputs ("\t.long 0\n", file);
      fprintf (file, "\t.previous\n");
    }
  ASM_OUTPUT_LABEL (file, name);
}
static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
static void
rs6000_elf_file_end (void)
{
#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
    {
      if (rs6000_passes_float)
        fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
                 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
                  : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
                  : 2));
      if (rs6000_passes_vector)
        fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
                 (TARGET_ALTIVEC_ABI ? 2
                  : TARGET_SPE_ABI ? 3
                  : 1));
      if (rs6000_returns_struct)
        fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
                 aix_struct_return ? 2 : 1);
    }
#endif
#if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
  if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
    file_end_indicate_exec_stack ();
#endif
}
static void
rs6000_xcoff_asm_output_anchor (rtx symbol)
{
  char buffer[100];

  sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
           SYMBOL_REF_BLOCK_OFFSET (symbol));
  ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
}

static void
rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
{
  fputs (GLOBAL_ASM_OP, stream);
  RS6000_OUTPUT_BASENAME (stream, name);
  putc ('\n', stream);
}

/* A get_unnamed_decl callback, used for read-only sections.  PTR
   points to the section string variable.  */

static void
rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
           *(const char *const *) directive,
           XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

/* Likewise for read-write sections.  */

static void
rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
           *(const char *const *) directive,
           XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

static void
rs6000_xcoff_output_tls_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
           *(const char *const *) directive,
           XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if (TARGET_MINIMAL_TOC)
    {
      /* toc_section is always selected at least once from
         rs6000_xcoff_file_start, so this is guaranteed to
         always be defined once and only once in each file.  */
      if (!toc_initialized)
        {
          fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
          fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
          toc_initialized = 1;
        }
      fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
               (TARGET_32BIT ? "" : ",3"));
    }
  else
    fputs ("\t.toc\n", asm_out_file);
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_xcoff_asm_init_sections (void)
{
  read_only_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
                           &xcoff_read_only_section_name);

  private_data_section
    = get_unnamed_section (SECTION_WRITE,
                           rs6000_xcoff_output_readwrite_section_asm_op,
                           &xcoff_private_data_section_name);

  tls_data_section
    = get_unnamed_section (SECTION_TLS,
                           rs6000_xcoff_output_tls_section_asm_op,
                           &xcoff_tls_data_section_name);

  tls_private_data_section
    = get_unnamed_section (SECTION_TLS,
                           rs6000_xcoff_output_tls_section_asm_op,
                           &xcoff_private_data_section_name);

  read_only_private_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
                           &xcoff_private_data_section_name);

  toc_section
    = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);

  readonly_data_section = read_only_data_section;
  exception_section = data_section;
}
static int
rs6000_xcoff_reloc_rw_mask (void)
{
  return 3;
}

static void
rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
                                tree decl ATTRIBUTE_UNUSED)
{
  int smclass;
  static const char * const suffix[4] = { "PR", "RO", "RW", "TL" };

  if (flags & SECTION_CODE)
    smclass = 0;
  else if (flags & SECTION_TLS)
    smclass = 3;
  else if (flags & SECTION_WRITE)
    smclass = 2;
  else
    smclass = 1;

  fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
           (flags & SECTION_CODE) ? "." : "",
           name, suffix[smclass], flags & SECTION_ENTSIZE);
}
static section *
rs6000_xcoff_select_section (tree decl, int reloc,
                             unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (decl_readonly_section (decl, reloc))
    {
      if (TREE_PUBLIC (decl))
        return read_only_data_section;
      else
        return read_only_private_data_section;
    }
  else
    {
      if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
        {
          if (TREE_PUBLIC (decl))
            return tls_data_section;
          else if (bss_initializer_p (decl))
            {
              /* Convert to COMMON to emit in BSS.  */
              DECL_COMMON (decl) = 1;
              return tls_comm_section;
            }
          else
            return tls_private_data_section;
        }

      if (TREE_PUBLIC (decl))
        return data_section;
      else
        return private_data_section;
    }
}

static void
rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;

  /* Use select_section for private and uninitialized data.  */
  if (!TREE_PUBLIC (decl)
      || DECL_COMMON (decl)
      || DECL_INITIAL (decl) == NULL_TREE
      || DECL_INITIAL (decl) == error_mark_node
      || (flag_zero_initialized_in_bss
          && initializer_zerop (DECL_INITIAL (decl))))
    return;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = (*targetm.strip_name_encoding) (name);
  DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
}
/* Select section for constant in constant pool.

   On RS/6000, all constants are in the private read-only data area.
   However, if this is being placed in the TOC it must be output as a
   toc entry.  */

static section *
rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
                                 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return read_only_private_data_section;
}
/* Remove any trailing [DS] or the like from the symbol name.  */

static const char *
rs6000_xcoff_strip_name_encoding (const char *name)
{
  size_t len;

  len = strlen (name);
  if (name[len - 1] == ']')
    return ggc_alloc_string (name, len - 4);
  else
    return name;
}
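/* Illustrative sketch (exposition only, not part of GCC): the effect of the
   stripping above on a plain C string.  An XCOFF name such as "foo[DS]"
   loses its four-character "[..]" storage-mapping-class suffix, yielding
   "foo".  Unlike the hook, this version truncates the string in place.  */
static void
example_strip_xcoff_suffix (char *name)
{
  size_t len = strlen (name);

  if (len >= 4 && name[len - 1] == ']')
    name[len - 4] = '\0';
}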
/* Section attributes.  AIX is always PIC.  */

static unsigned int
rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int align;
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  /* Align to at least UNIT size.  */
  if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
    align = MIN_UNITS_PER_WORD;
  else
    /* Increase alignment of large objects if not already stricter.  */
    align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
                 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
                 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);

  return flags | (exact_log2 (align) & SECTION_ENTSIZE);
}
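/* Illustrative sketch (exposition only, not part of GCC): how the return
   statement above folds an alignment into the flag word as a log2 value.
   The 0xff mask is a stand-in assumption for SECTION_ENTSIZE; the point is
   only that, e.g., a 16-byte alignment is recorded as the exponent 4.  */
static unsigned int
example_encode_alignment (unsigned int flags, unsigned int align_bytes)
{
  unsigned int log2_align = 0;

  while ((1u << log2_align) < align_bytes)
    log2_align++;

  return flags | (log2_align & 0xff);
}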
/* Output at beginning of assembler file.

   Initialize the section names for the RS/6000 at this point.

   Specify filename, including full path, to assembler.

   We want to go into the TOC section so at least one .toc will be emitted.
   Also, in order to output proper .bs/.es pairs, we need at least one static
   [RW] section emitted.

   Finally, declare mcount when profiling to make the assembler happy.  */

static void
rs6000_xcoff_file_start (void)
{
  rs6000_gen_section_name (&xcoff_bss_section_name,
                           main_input_filename, ".bss_");
  rs6000_gen_section_name (&xcoff_private_data_section_name,
                           main_input_filename, ".rw_");
  rs6000_gen_section_name (&xcoff_read_only_section_name,
                           main_input_filename, ".ro_");
  rs6000_gen_section_name (&xcoff_tls_data_section_name,
                           main_input_filename, ".tls_");
  rs6000_gen_section_name (&xcoff_tbss_section_name,
                           main_input_filename, ".tbss_[UL]");

  fputs ("\t.file\t", asm_out_file);
  output_quoted_string (asm_out_file, main_input_filename);
  fputc ('\n', asm_out_file);
  if (write_symbols != NO_DEBUG)
    switch_to_section (private_data_section);
  switch_to_section (text_section);
  if (profile_flag)
    fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
  rs6000_file_start ();
}

/* Output at end of assembler file.
   On the RS/6000, referencing data should automatically pull in text.  */

static void
rs6000_xcoff_file_end (void)
{
  switch_to_section (text_section);
  fputs ("_section_.text:\n", asm_out_file);
  switch_to_section (data_section);
  fputs (TARGET_32BIT
         ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
         asm_out_file);
}
static void
rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
{
  rtx symbol;
  int flags;

  default_encode_section_info (decl, rtl, first);

  /* Careful not to prod global register variables.  */
  symbol = XEXP (rtl, 0);
  if (GET_CODE (symbol) != SYMBOL_REF)
    return;

  flags = SYMBOL_REF_FLAGS (symbol);

  if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
    flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;

  SYMBOL_REF_FLAGS (symbol) = flags;
}
#endif /* HAVE_AS_TLS */
#endif /* TARGET_XCOFF */
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                  int *total, bool speed)
  enum machine_mode mode = GET_MODE (x);

      /* On the RS/6000, if it is valid in the insn, it is free.  */
      if (((outer_code == SET
            || outer_code == PLUS
            || outer_code == MINUS)
           && (satisfies_constraint_I (x)
               || satisfies_constraint_L (x)))
          || (outer_code == AND
              && (satisfies_constraint_K (x)
                  ? satisfies_constraint_L (x)
                  : satisfies_constraint_J (x))
                  || mask_operand (x, mode)
                  && mask64_operand (x, DImode))))
          || ((outer_code == IOR || outer_code == XOR)
              && (satisfies_constraint_K (x)
                  ? satisfies_constraint_L (x)
                  : satisfies_constraint_J (x))))
          || outer_code == ASHIFT
          || outer_code == ASHIFTRT
          || outer_code == LSHIFTRT
          || outer_code == ROTATE
          || outer_code == ROTATERT
          || outer_code == ZERO_EXTRACT
          || (outer_code == MULT
              && satisfies_constraint_I (x))
          || ((outer_code == DIV || outer_code == UDIV
               || outer_code == MOD || outer_code == UMOD)
              && exact_log2 (INTVAL (x)) >= 0)
          || (outer_code == COMPARE
              && (satisfies_constraint_I (x)
                  || satisfies_constraint_K (x)))
          || ((outer_code == EQ || outer_code == NE)
              && (satisfies_constraint_I (x)
                  || satisfies_constraint_K (x)
                  ? satisfies_constraint_L (x)
                  : satisfies_constraint_J (x))))
          || (outer_code == GTU
              && satisfies_constraint_I (x))
          || (outer_code == LTU
              && satisfies_constraint_P (x)))
      else if ((outer_code == PLUS
                && reg_or_add_cint_operand (x, VOIDmode))
               || (outer_code == MINUS
                   && reg_or_sub_cint_operand (x, VOIDmode))
               || ((outer_code == SET
                    || outer_code == IOR
                    || outer_code == XOR)
                   & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
        *total = COSTS_N_INSNS (1);

      /* When optimizing for size, MEM should be slightly more expensive
         than generating address, e.g., (plus (reg) (const)).
         L1 cache latency is about two instructions.  */
      *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);

      if (FLOAT_MODE_P (mode))
        *total = rs6000_cost->fp;
        *total = COSTS_N_INSNS (1);

      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && satisfies_constraint_I (XEXP (x, 1)))
          if (INTVAL (XEXP (x, 1)) >= -256
              && INTVAL (XEXP (x, 1)) <= 255)
            *total = rs6000_cost->mulsi_const9;
            *total = rs6000_cost->mulsi_const;
      else if (mode == SFmode)
        *total = rs6000_cost->fp;
      else if (FLOAT_MODE_P (mode))
        *total = rs6000_cost->dmul;
      else if (mode == DImode)
        *total = rs6000_cost->muldi;
        *total = rs6000_cost->mulsi;

      if (mode == SFmode)
        *total = rs6000_cost->fp;
        *total = rs6000_cost->dmul;

      if (FLOAT_MODE_P (mode))
          *total = mode == DFmode ? rs6000_cost->ddiv
                                  : rs6000_cost->sdiv;

      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
          if (code == DIV || code == MOD)
            *total = COSTS_N_INSNS (2);
            *total = COSTS_N_INSNS (1);
          if (GET_MODE (XEXP (x, 1)) == DImode)
            *total = rs6000_cost->divdi;
            *total = rs6000_cost->divsi;
      /* Add in shift and subtract for MOD.  */
      if (code == MOD || code == UMOD)
        *total += COSTS_N_INSNS (2);

      *total = COSTS_N_INSNS (4);

      *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);

      *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);

      if (outer_code == AND || outer_code == IOR || outer_code == XOR)

      *total = COSTS_N_INSNS (1);

      /* Handle mul_highpart.  */
      if (outer_code == TRUNCATE
          && GET_CODE (XEXP (x, 0)) == MULT)
          if (mode == DImode)
            *total = rs6000_cost->muldi;
            *total = rs6000_cost->mulsi;
      else if (outer_code == AND)
        *total = COSTS_N_INSNS (1);

      if (GET_CODE (XEXP (x, 0)) == MEM)
        *total = COSTS_N_INSNS (1);

      if (!FLOAT_MODE_P (mode))
        *total = COSTS_N_INSNS (1);

    case UNSIGNED_FLOAT:
    case FLOAT_TRUNCATE:
      *total = rs6000_cost->fp;

      if (mode == DFmode)
        *total = rs6000_cost->fp;

      switch (XINT (x, 1))
        *total = rs6000_cost->fp;

      *total = COSTS_N_INSNS (1);
      else if (FLOAT_MODE_P (mode)
               && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
        *total = rs6000_cost->fp;

      /* Carry bit requires mode == Pmode.
         NEG or PLUS already counted so only add one.  */
          && (outer_code == NEG || outer_code == PLUS))
        *total = COSTS_N_INSNS (1);
      if (outer_code == SET)
          if (XEXP (x, 1) == const0_rtx)
              if (TARGET_ISEL && !TARGET_MFCRF)
                *total = COSTS_N_INSNS (8);
                *total = COSTS_N_INSNS (2);
          else if (mode == Pmode)
              *total = COSTS_N_INSNS (3);

      if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
          if (TARGET_ISEL && !TARGET_MFCRF)
            *total = COSTS_N_INSNS (8);
            *total = COSTS_N_INSNS (2);
      if (outer_code == COMPARE)
/* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost.  */

static bool
rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
                        bool speed)
{
  bool ret = rs6000_rtx_costs (x, code, outer_code, opno, total, speed);

  fprintf (stderr,
           "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
           "opno = %d, total = %d, speed = %s, x:\n",
           ret ? "complete" : "scan inner",
           GET_RTX_NAME (code),
           GET_RTX_NAME (outer_code),
           opno, *total,
           speed ? "true" : "false");

  debug_rtx (x);

  return ret;
}

/* Debug form of ADDRESS_COST that is selected if -mdebug=cost.  */

static int
rs6000_debug_address_cost (rtx x, enum machine_mode mode,
                           addr_space_t as, bool speed)
{
  int ret = TARGET_ADDRESS_COST (x, mode, as, speed);

  fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
           ret, speed ? "true" : "false");
  debug_rtx (x);

  return ret;
}
/* A C expression returning the cost of moving data from a register of class
   CLASS1 to one of CLASS2.  */

static int
rs6000_register_move_cost (enum machine_mode mode,
                           reg_class_t from, reg_class_t to)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  /* Moves from/to GENERAL_REGS.  */
  if (reg_classes_intersect_p (to, GENERAL_REGS)
      || reg_classes_intersect_p (from, GENERAL_REGS))
    {
      reg_class_t rclass = from;

      if (! reg_classes_intersect_p (to, GENERAL_REGS))
        rclass = to;

      if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
        ret = (rs6000_memory_move_cost (mode, rclass, false)
               + rs6000_memory_move_cost (mode, GENERAL_REGS, false));

      /* It's more expensive to move CR_REGS than CR0_REGS because of the
         shift.  */
      else if (rclass == CR_REGS)
        ret = 4;

      /* For those processors that have slow LR/CTR moves, make them more
         expensive than memory in order to bias spills to memory.  */
      else if ((rs6000_cpu == PROCESSOR_POWER6
                || rs6000_cpu == PROCESSOR_POWER7
                || rs6000_cpu == PROCESSOR_POWER8)
               && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
        ret = 6 * hard_regno_nregs[0][mode];

      else
        /* A move will cost one instruction per GPR moved.  */
        ret = 2 * hard_regno_nregs[0][mode];
    }

  /* If we have VSX, we can easily move between FPR or Altivec registers.  */
  else if (VECTOR_MEM_VSX_P (mode)
           && reg_classes_intersect_p (to, VSX_REGS)
           && reg_classes_intersect_p (from, VSX_REGS))
    ret = 2 * hard_regno_nregs[32][mode];

  /* Moving between two similar registers is just one instruction.  */
  else if (reg_classes_intersect_p (to, from))
    ret = (mode == TFmode || mode == TDmode) ? 4 : 2;

  /* Everything else has to go through GENERAL_REGS.  */
  else
    ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
           + rs6000_register_move_cost (mode, from, GENERAL_REGS));

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
        fprintf (stderr,
                 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
                 ret, GET_MODE_NAME (mode), reg_class_names[from],
                 reg_class_names[to]);
      dbg_cost_ctrl--;
    }

  return ret;
}
/* A C expression returning the cost of moving data of MODE from a register to
   or from memory.  */

static int
rs6000_memory_move_cost (enum machine_mode mode, reg_class_t rclass,
                         bool in ATTRIBUTE_UNUSED)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  if (reg_classes_intersect_p (rclass, GENERAL_REGS))
    ret = 4 * hard_regno_nregs[0][mode];
  else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
            || reg_classes_intersect_p (rclass, VSX_REGS)))
    ret = 4 * hard_regno_nregs[32][mode];
  else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
    ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
  else
    ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
        fprintf (stderr,
                 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
                 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
      dbg_cost_ctrl--;
    }

  return ret;
}
/* Returns a code for a target-specific builtin that implements
   reciprocal of the function, or NULL_TREE if not available.  */

static tree
rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
                           bool sqrt ATTRIBUTE_UNUSED)
{
  if (optimize_insn_for_size_p ())
    return NULL_TREE;

  if (md_fn)
    switch (fn)
      {
      case VSX_BUILTIN_XVSQRTDP:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
          return NULL_TREE;

        return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];

      case VSX_BUILTIN_XVSQRTSP:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
          return NULL_TREE;

        return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];

      default:
        return NULL_TREE;
      }
  else
    switch (fn)
      {
      case BUILT_IN_SQRT:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
          return NULL_TREE;

        return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];

      case BUILT_IN_SQRTF:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
          return NULL_TREE;

        return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];

      default:
        return NULL_TREE;
      }
}
/* Load up a constant.  If the mode is a vector mode, splat the value across
   all of the vector elements.  */

static rtx
rs6000_load_constant_and_splat (enum machine_mode mode, REAL_VALUE_TYPE dconst)
{
  rtx reg;

  if (mode == SFmode || mode == DFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
      reg = force_reg (mode, d);
    }
  else if (mode == V4SFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
      rtvec v = gen_rtvec (4, d, d, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else if (mode == V2DFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
      rtvec v = gen_rtvec (2, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else
    gcc_unreachable ();

  return reg;
}
/* Generate an FMA instruction.  */

static void
rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (target);
  rtx dst;

  dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}

/* Generate a FMSUB instruction: dst = fma(m1, m2, -a).  */

static void
rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (target);
  rtx dst;

  /* Altivec does not support fms directly;
     generate in terms of fma in that case.  */
  if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
    dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
  else
    {
      a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
      dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
    }
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}
/* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a).  */

static void
rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx r;

  /* This is a tad more complicated, since the fnma_optab is for
     a different expression: fma(-m1, m2, a), which is the same
     thing except in the case of signed zeros.

     Fortunately we know that if FMA is supported that FNMSUB is
     also supported in the ISA.  Just expand it directly.  */

  gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);

  r = gen_rtx_NEG (mode, a);
  r = gen_rtx_FMA (mode, m1, m2, r);
  r = gen_rtx_NEG (mode, r);
  emit_insn (gen_rtx_SET (VOIDmode, dst, r));
}
/* Newton-Raphson approximation of floating point divide DST = N/D.  If NOTE_P,
   add a reg_note saying that this was a division.  Support both scalar and
   vector divide.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
  int i;

  /* Low precision estimates guarantee 5 bits of accuracy.  High
     precision estimates guarantee 14 bits of accuracy.  SFmode
     requires 23 bits of accuracy.  DFmode requires 52 bits of
     accuracy.  Each pass at least doubles the accuracy, leading
     to the following.  */
  int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
  if (mode == DFmode || mode == V2DFmode)
    passes += 2;

  enum insn_code code = optab_handler (smul_optab, mode);
  insn_gen_fn gen_mul = GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  one = rs6000_load_constant_and_splat (mode, dconst1);

  /* x0 = 1./d estimate */
  x0 = gen_reg_rtx (mode);
  emit_insn (gen_rtx_SET (VOIDmode, x0,
                          gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
                                          UNSPEC_FRES)));

  /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i).  */
  if (passes > 1) {

    /* e0 = 1. - d * x0  */
    e0 = gen_reg_rtx (mode);
    rs6000_emit_nmsub (e0, d, x0, one);

    /* x1 = x0 + e0 * x0  */
    x1 = gen_reg_rtx (mode);
    rs6000_emit_madd (x1, e0, x0, x0);

    for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
         ++i, xprev = xnext, eprev = enext) {

      /* enext = eprev * eprev  */
      enext = gen_reg_rtx (mode);
      emit_insn (gen_mul (enext, eprev, eprev));

      /* xnext = xprev + enext * xprev  */
      xnext = gen_reg_rtx (mode);
      rs6000_emit_madd (xnext, enext, xprev, xprev);
    }

  } else
    xprev = x0;

  /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i).  */

  /* u = n * xprev  */
  u = gen_reg_rtx (mode);
  emit_insn (gen_mul (u, n, xprev));

  /* v = n - (d * u)  */
  v = gen_reg_rtx (mode);
  rs6000_emit_nmsub (v, d, u, n);

  /* dst = (v * xprev) + u  */
  rs6000_emit_madd (dst, v, xprev, u);

  if (note_p)
    add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
}
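/* Illustrative sketch (exposition only, not part of GCC): the same
   Newton-Raphson refinement in plain scalar C, assuming an initial
   reciprocal estimate is supplied.  Each pass roughly doubles the number of
   correct bits, which is why the expander above uses only a handful of
   passes depending on the precision of the hardware estimate.  */
static double
example_swdiv (double n, double d, double estimate, int passes)
{
  double x = estimate;          /* x ~= 1/d */
  double u;
  int i;

  /* x_(i+1) = x_i * (2 - d * x_i), written as x_i + (1 - d*x_i)*x_i.  */
  for (i = 0; i < passes - 1; i++)
    x = x + (1.0 - d * x) * x;

  /* Final step folds in the numerator: u = n*x; result = u + (n - d*u)*x.  */
  u = n * x;
  return u + (n - d * u) * x;
}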
/* Newton-Raphson approximation of single/double-precision floating point
   rsqrt.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swrsqrt (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (src);
  rtx x0 = gen_reg_rtx (mode);
  rtx y = gen_reg_rtx (mode);

  /* Low precision estimates guarantee 5 bits of accuracy.  High
     precision estimates guarantee 14 bits of accuracy.  SFmode
     requires 23 bits of accuracy.  DFmode requires 52 bits of
     accuracy.  Each pass at least doubles the accuracy, leading
     to the following.  */
  int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
  if (mode == DFmode || mode == V2DFmode)
    passes += 2;

  int i;
  rtx halfthree;
  REAL_VALUE_TYPE dconst3_2;

  enum insn_code code = optab_handler (smul_optab, mode);
  insn_gen_fn gen_mul = GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  /* Load up the constant 1.5 either as a scalar, or as a vector.  */
  real_from_integer (&dconst3_2, VOIDmode, 3, 0, 0);
  SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);

  halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);

  /* x0 = rsqrt estimate */
  emit_insn (gen_rtx_SET (VOIDmode, x0,
                          gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
                                          UNSPEC_RSQRT)));

  /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
  rs6000_emit_msub (y, src, halfthree, src);

  for (i = 0; i < passes; i++)
    {
      rtx x1 = gen_reg_rtx (mode);
      rtx u = gen_reg_rtx (mode);
      rtx v = gen_reg_rtx (mode);

      /* x1 = x0 * (1.5 - y * (x0 * x0)) */
      emit_insn (gen_mul (u, x0, x0));
      rs6000_emit_nmsub (v, y, u, halfthree);
      emit_insn (gen_mul (x1, x0, v));
      x0 = x1;
    }

  emit_move_insn (dst, x0);
}
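/* Illustrative sketch (exposition only, not part of GCC): the scalar form of
   the reciprocal-square-root refinement above, assuming a hardware-style
   initial estimate.  Precomputing y = 0.5 * src lets each iteration use the
   classic x *= (1.5 - 0.5*src*x*x) step with one constant fewer.  */
static double
example_swrsqrt (double src, double estimate, int passes)
{
  double x = estimate;          /* x ~= 1/sqrt(src) */
  double y = 0.5 * src;
  int i;

  for (i = 0; i < passes; i++)
    x = x * (1.5 - y * (x * x));

  return x;
}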
/* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
   (Power7) targets.  DST is the target, and SRC is the argument operand.  */

void
rs6000_emit_popcount (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx tmp1, tmp2;

  /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can.  */
  if (TARGET_POPCNTD)
    {
      if (mode == SImode)
        emit_insn (gen_popcntdsi2 (dst, src));
      else
        emit_insn (gen_popcntddi2 (dst, src));
      return;
    }

  tmp1 = gen_reg_rtx (mode);

  if (mode == SImode)
    {
      emit_insn (gen_popcntbsi2 (tmp1, src));
      tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
                          NULL_RTX, 0);
      tmp2 = force_reg (SImode, tmp2);
      emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
    }
  else
    {
      emit_insn (gen_popcntbdi2 (tmp1, src));
      tmp2 = expand_mult (DImode, tmp1,
                          GEN_INT ((HOST_WIDE_INT)
                                   0x01010101 << 32 | 0x01010101),
                          NULL_RTX, 0);
      tmp2 = force_reg (DImode, tmp2);
      emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
    }
}
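/* Illustrative sketch (exposition only, not part of GCC): the multiply-and-
   shift trick used above when only popcntb is available.  popcntb leaves a
   per-byte population count in each byte; multiplying by 0x01010101 sums
   those bytes into the most significant byte, which the final shift
   extracts.  Plain C computes the per-byte counts here instead.  */
static unsigned int
example_popcount32 (unsigned int x)
{
  unsigned int per_byte = 0;    /* stands in for the popcntb result */
  int i;

  for (i = 0; i < 4; i++)
    {
      unsigned int byte = (x >> (8 * i)) & 0xff, count = 0;
      while (byte)
        {
          count += byte & 1;
          byte >>= 1;
        }
      per_byte |= count << (8 * i);
    }

  /* The same reduction rs6000_emit_popcount emits: multiply, then shift.  */
  return (per_byte * 0x01010101u) >> 24;
}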
/* Emit parity intrinsic on TARGET_POPCNTB targets.  DST is the
   target, and SRC is the argument operand.  */

void
rs6000_emit_parity (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx tmp;

  tmp = gen_reg_rtx (mode);

  /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can.  */
  if (TARGET_CMPB)
    {
      if (mode == SImode)
        {
          emit_insn (gen_popcntbsi2 (tmp, src));
          emit_insn (gen_paritysi2_cmpb (dst, tmp));
        }
      else
        {
          emit_insn (gen_popcntbdi2 (tmp, src));
          emit_insn (gen_paritydi2_cmpb (dst, tmp));
        }
      return;
    }

  if (mode == SImode)
    {
      /* Is mult+shift >= shift+xor+shift+xor?  */
      if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
        {
          rtx tmp1, tmp2, tmp3, tmp4;

          tmp1 = gen_reg_rtx (SImode);
          emit_insn (gen_popcntbsi2 (tmp1, src));

          tmp2 = gen_reg_rtx (SImode);
          emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
          tmp3 = gen_reg_rtx (SImode);
          emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));

          tmp4 = gen_reg_rtx (SImode);
          emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
          emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
        }
      else
        rs6000_emit_popcount (tmp, src);
      emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
    }
  else
    {
      /* Is mult+shift >= shift+xor+shift+xor+shift+xor?  */
      if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
        {
          rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;

          tmp1 = gen_reg_rtx (DImode);
          emit_insn (gen_popcntbdi2 (tmp1, src));

          tmp2 = gen_reg_rtx (DImode);
          emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
          tmp3 = gen_reg_rtx (DImode);
          emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));

          tmp4 = gen_reg_rtx (DImode);
          emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
          tmp5 = gen_reg_rtx (DImode);
          emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));

          tmp6 = gen_reg_rtx (DImode);
          emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
          emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
        }
      else
        rs6000_emit_popcount (tmp, src);
      emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
    }
}
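/* Illustrative sketch (exposition only, not part of GCC): the shift/xor
   fallback idea used above.  XORing the upper half of a word onto its lower
   half preserves the parity of the set bits, so repeating the fold down to
   a single bit yields the parity directly; the expander applies the same
   fold to popcntb byte counts before masking with 1.  */
static unsigned int
example_parity32 (unsigned int x)
{
  x ^= x >> 16;
  x ^= x >> 8;
  x ^= x >> 4;
  x ^= x >> 2;
  x ^= x >> 1;
  return x & 1;
}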
/* Expand an Altivec constant permutation for little endian mode.
   There are two issues: First, the two input operands must be
   swapped so that together they form a double-wide array in LE
   order.  Second, the vperm instruction has surprising behavior
   in LE mode: it interprets the elements of the source vectors
   in BE mode ("left to right") and interprets the elements of
   the destination vector in LE mode ("right to left").  To
   correct for this, we must subtract each element of the permute
   control vector from 31.

   For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
   with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
   We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
   serve as the permute control vector.  Then, in BE mode,

     vperm 9,10,11,12

   places the desired result in vr9.  However, in LE mode the
   vector contents will be

     vr10 = 00000003 00000002 00000001 00000000
     vr11 = 00000007 00000006 00000005 00000004

   The result of the vperm using the same permute control vector is

     vr9 = 05000000 07000000 01000000 03000000

   That is, the leftmost 4 bytes of vr10 are interpreted as the
   source for the rightmost 4 bytes of vr9, and so on.

   If we change the permute control vector to

     vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}

   and issue

     vperm 9,11,10,12

   we get the desired

     vr9 = 00000006 00000004 00000002 00000000.  */

static void
altivec_expand_vec_perm_const_le (rtx operands[4])
{
  unsigned int i;
  rtx perm[16];
  rtx constv, unspec;
  rtx target = operands[0];
  rtx op0 = operands[1];
  rtx op1 = operands[2];
  rtx sel = operands[3];

  /* Unpack and adjust the constant selector.  */
  for (i = 0; i < 16; ++i)
    {
      rtx e = XVECEXP (sel, 0, i);
      unsigned int elt = 31 - (INTVAL (e) & 31);
      perm[i] = GEN_INT (elt);
    }

  /* Expand to a permute, swapping the inputs and using the
     adjusted selector.  */
  if (!REG_P (op0))
    op0 = force_reg (V16QImode, op0);
  if (!REG_P (op1))
    op1 = force_reg (V16QImode, op1);

  constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
  constv = force_reg (V16QImode, constv);
  unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
                           UNSPEC_VPERM);
  if (!REG_P (target))
    {
      rtx tmp = gen_reg_rtx (V16QImode);
      emit_move_insn (tmp, unspec);
      unspec = tmp;
    }

  emit_move_insn (target, unspec);
}
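/* Illustrative sketch (exposition only, not part of GCC): the selector
   adjustment performed above, on plain C arrays.  Each element of the
   permute control vector is reduced modulo 32 and subtracted from 31, which
   is what makes a big-endian-numbered vperm selector pick the same bytes
   once the two input vectors are also swapped for little endian.  */
static void
example_adjust_le_selector (const unsigned char sel[16], unsigned char out[16])
{
  int i;

  for (i = 0; i < 16; i++)
    out[i] = 31 - (sel[i] & 31);
}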
/* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
   permute control vector.  But here it's not a constant, so we must
   generate a vector splat/subtract to do the adjustment.  */

void
altivec_expand_vec_perm_le (rtx operands[4])
{
  rtx splat, unspec;
  rtx target = operands[0];
  rtx op0 = operands[1];
  rtx op1 = operands[2];
  rtx sel = operands[3];
  rtx tmp = target;

  /* Get everything in regs so the pattern matches.  */
  if (!REG_P (op0))
    op0 = force_reg (V16QImode, op0);
  if (!REG_P (op1))
    op1 = force_reg (V16QImode, op1);
  if (!REG_P (sel))
    sel = force_reg (V16QImode, sel);
  if (!REG_P (target))
    tmp = gen_reg_rtx (V16QImode);

  /* SEL = splat(31) - SEL.  */
  /* We want to subtract from 31, but we can't vspltisb 31 since
     it's out of range.  -1 works as well because only the low-order
     five bits of the permute control vector elements are used.  */
  splat = gen_rtx_VEC_DUPLICATE (V16QImode,
                                 gen_rtx_CONST_INT (QImode, -1));
  emit_move_insn (tmp, splat);
  sel = gen_rtx_MINUS (V16QImode, tmp, sel);
  emit_move_insn (tmp, sel);

  /* Permute with operands reversed and adjusted selector.  */
  unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, tmp),
                           UNSPEC_VPERM);

  /* Copy into target, possibly by way of a register.  */
  if (!REG_P (target))
    {
      emit_move_insn (tmp, unspec);
      unspec = tmp;
    }

  emit_move_insn (target, unspec);
}
/* Expand an Altivec constant permutation.  Return true if we match
   an efficient implementation; false to fall back to VPERM.  */

static bool
altivec_expand_vec_perm_const (rtx operands[4])
{
  struct altivec_perm_insn {
    HOST_WIDE_INT mask;
    enum insn_code impl;
    unsigned char perm[16];
  };
  static const struct altivec_perm_insn patterns[] = {
    { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum,
      { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
    { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum,
      { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
    { OPTION_MASK_ALTIVEC,
      BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb : CODE_FOR_altivec_vmrglb,
      { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
    { OPTION_MASK_ALTIVEC,
      BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh : CODE_FOR_altivec_vmrglh,
      { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
    { OPTION_MASK_ALTIVEC,
      BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw : CODE_FOR_altivec_vmrglw,
      { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
    { OPTION_MASK_ALTIVEC,
      BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb : CODE_FOR_altivec_vmrghb,
      { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
    { OPTION_MASK_ALTIVEC,
      BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh : CODE_FOR_altivec_vmrghh,
      { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
    { OPTION_MASK_ALTIVEC,
      BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw : CODE_FOR_altivec_vmrghw,
      { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
    { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew,
      { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
    { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgow,
      { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
  };

  unsigned int i, j, elt, which;
  unsigned char perm[16];
  rtx target, op0, op1, sel, x;
  bool one_vec;

  target = operands[0];
  op0 = operands[1];
  op1 = operands[2];
  sel = operands[3];

  /* Unpack the constant selector.  */
  for (i = which = 0; i < 16; ++i)
    {
      rtx e = XVECEXP (sel, 0, i);
      elt = INTVAL (e) & 31;
      which |= (elt < 16 ? 1 : 2);
      perm[i] = elt;
    }

  /* Simplify the constant selector based on operands.  */
  switch (which)
    {
    default:
      gcc_unreachable ();

    case 3:
      one_vec = false;
      if (!rtx_equal_p (op0, op1))
        break;
      /* FALLTHRU */

    case 2:
      for (i = 0; i < 16; ++i)
        perm[i] &= 15;
      op0 = op1;
      one_vec = true;
      break;

    case 1:
      op1 = op0;
      one_vec = true;
      break;
    }

  /* Look for splat patterns.  */
  if (one_vec)
    {
      elt = perm[0];

      for (i = 0; i < 16; ++i)
        if (perm[i] != elt)
          break;
      if (i == 16)
        {
          if (!BYTES_BIG_ENDIAN)
            elt = 15 - elt;
          emit_insn (gen_altivec_vspltb (target, op0, GEN_INT (elt)));
          return true;
        }

      if (elt % 2 == 0)
        {
          for (i = 0; i < 16; i += 2)
            if (perm[i] != elt || perm[i + 1] != elt + 1)
              break;
          if (i == 16)
            {
              int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
              x = gen_reg_rtx (V8HImode);
              emit_insn (gen_altivec_vsplth (x, gen_lowpart (V8HImode, op0),
                                             GEN_INT (field)));
              emit_move_insn (target, gen_lowpart (V16QImode, x));
              return true;
            }
        }

      if (elt % 4 == 0)
        {
          for (i = 0; i < 16; i += 4)
            if (perm[i] != elt
                || perm[i + 1] != elt + 1
                || perm[i + 2] != elt + 2
                || perm[i + 3] != elt + 3)
              break;
          if (i == 16)
            {
              int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
              x = gen_reg_rtx (V4SImode);
              emit_insn (gen_altivec_vspltw (x, gen_lowpart (V4SImode, op0),
                                             GEN_INT (field)));
              emit_move_insn (target, gen_lowpart (V16QImode, x));
              return true;
            }
        }
    }

  /* Look for merge and pack patterns.  */
  for (j = 0; j < ARRAY_SIZE (patterns); ++j)
    {
      bool swapped;

      if ((patterns[j].mask & rs6000_isa_flags) == 0)
        continue;

      elt = patterns[j].perm[0];
      if (perm[0] == elt)
        swapped = false;
      else if (perm[0] == elt + 16)
        swapped = true;
      else
        continue;
      for (i = 1; i < 16; ++i)
        {
          elt = patterns[j].perm[i];
          if (swapped)
            elt = (elt >= 16 ? elt - 16 : elt + 16);
          else if (one_vec && elt >= 16)
            elt -= 16;
          if (perm[i] != elt)
            break;
        }
      if (i == 16)
        {
          enum insn_code icode = patterns[j].impl;
          enum machine_mode omode = insn_data[icode].operand[0].mode;
          enum machine_mode imode = insn_data[icode].operand[1].mode;

          /* For little-endian, don't use vpkuwum and vpkuhum if the
             underlying vector type is not V4SI and V8HI, respectively.
             For example, using vpkuwum with a V8HI picks up the even
             halfwords (BE numbering) when the even halfwords (LE
             numbering) are what we need.  */
          if (!BYTES_BIG_ENDIAN
              && icode == CODE_FOR_altivec_vpkuwum
              && ((GET_CODE (op0) == REG
                   && GET_MODE (op0) != V4SImode)
                  || (GET_CODE (op0) == SUBREG
                      && GET_MODE (XEXP (op0, 0)) != V4SImode)))
            continue;
          if (!BYTES_BIG_ENDIAN
              && icode == CODE_FOR_altivec_vpkuhum
              && ((GET_CODE (op0) == REG
                   && GET_MODE (op0) != V8HImode)
                  || (GET_CODE (op0) == SUBREG
                      && GET_MODE (XEXP (op0, 0)) != V8HImode)))
            continue;

          /* For little-endian, the two input operands must be swapped
             (or swapped back) to ensure proper right-to-left numbering.  */
          if (swapped ^ !BYTES_BIG_ENDIAN)
            x = op0, op0 = op1, op1 = x;
          if (imode != V16QImode)
            {
              op0 = gen_lowpart (imode, op0);
              op1 = gen_lowpart (imode, op1);
            }
          if (omode == V16QImode)
            x = target;
          else
            x = gen_reg_rtx (omode);
          emit_insn (GEN_FCN (icode) (x, op0, op1));
          if (omode != V16QImode)
            emit_move_insn (target, gen_lowpart (V16QImode, x));
          return true;
        }
    }

  if (!BYTES_BIG_ENDIAN)
    {
      altivec_expand_vec_perm_const_le (operands);
      return true;
    }

  return false;
}
/* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
   Return true if we match an efficient implementation.  */

static bool
rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
                                unsigned char perm0, unsigned char perm1)
{
  rtx x;

  /* If both selectors come from the same operand, fold to single op.  */
  if ((perm0 & 2) == (perm1 & 2))
    {
      if (perm0 & 2)
        op0 = op1;
      else
        op1 = op0;
    }
  /* If both operands are equal, fold to simpler permutation.  */
  if (rtx_equal_p (op0, op1))
    {
      perm0 = perm0 & 1;
      perm1 = (perm1 & 1) + 2;
    }
  /* If the first selector comes from the second operand, swap.  */
  else if (perm0 & 2)
    {
      if (perm1 & 2)
        return false;
      perm0 -= 2;
      perm1 += 2;
      x = op0, op0 = op1, op1 = x;
    }
  /* If the second selector does not come from the second operand, fail.  */
  else if ((perm1 & 2) == 0)
    return false;

  /* Success!  */
  if (target != NULL)
    {
      enum machine_mode vmode, dmode;
      rtvec v;

      vmode = GET_MODE (target);
      gcc_assert (GET_MODE_NUNITS (vmode) == 2);
      dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);

      x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
      v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
      x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
      emit_insn (gen_rtx_SET (VOIDmode, target, x));
    }
  return true;
}

bool
rs6000_expand_vec_perm_const (rtx operands[4])
{
  rtx target, op0, op1, sel;
  unsigned char perm0, perm1;

  target = operands[0];
  op0 = operands[1];
  op1 = operands[2];
  sel = operands[3];

  /* Unpack the constant selector.  */
  perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
  perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;

  return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
}
/* Test whether a constant permutation is supported.  */

static bool
rs6000_vectorize_vec_perm_const_ok (enum machine_mode vmode,
                                    const unsigned char *sel)
{
  /* AltiVec (and thus VSX) can handle arbitrary permutations.  */
  if (TARGET_ALTIVEC)
    return true;

  /* Check for ps_merge* or evmerge* insns.  */
  if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
      || (TARGET_SPE && vmode == V2SImode))
    {
      rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
      rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
      return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
    }

  return false;
}
/* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.  */

static void
rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
                           enum machine_mode vmode, unsigned nelt, rtx perm[])
{
  enum machine_mode imode;
  rtx x;

  imode = vmode;
  if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
    {
      imode = GET_MODE_INNER (vmode);
      imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
      imode = mode_for_vector (imode, nelt);
    }

  x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
  x = expand_vec_perm (vmode, op0, op1, x, target);
  if (x != target)
    emit_move_insn (target, x);
}

/* Expand an extract even operation.  */

void
rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
{
  enum machine_mode vmode = GET_MODE (target);
  unsigned i, nelt = GET_MODE_NUNITS (vmode);
  rtx perm[16];

  for (i = 0; i < nelt; i++)
    perm[i] = GEN_INT (i * 2);

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
}

/* Expand a vector interleave operation.  */

void
rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
{
  enum machine_mode vmode = GET_MODE (target);
  unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
  rtx perm[32];

  high = (highp == BYTES_BIG_ENDIAN ? 0 : nelt / 2);
  for (i = 0; i < nelt / 2; i++)
    {
      perm[i * 2] = GEN_INT (i + high);
      perm[i * 2 + 1] = GEN_INT (i + nelt + high);
    }

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
}
/* Return an RTX representing where to find the function value of a
   function returning MODE.  */

static rtx
rs6000_complex_function_value (enum machine_mode mode)
{
  unsigned int regno;
  rtx r1, r2;
  enum machine_mode inner = GET_MODE_INNER (mode);
  unsigned int inner_bytes = GET_MODE_SIZE (inner);

  if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    regno = FP_ARG_RETURN;
  else
    {
      regno = GP_ARG_RETURN;

      /* 32-bit is OK since it'll go in r3/r4.  */
      if (TARGET_32BIT && inner_bytes >= 4)
        return gen_rtx_REG (mode, regno);
    }

  if (inner_bytes >= 8)
    return gen_rtx_REG (mode, regno);

  r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
                          const0_rtx);
  r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
                          GEN_INT (inner_bytes));
  return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
}
/* Target hook for TARGET_FUNCTION_VALUE.

   On the SPE, both FPs and vectors are returned in r3.

   On RS/6000 an integer value is in r3 and a floating-point value is in
   fp1, unless -msoft-float.  */

static rtx
rs6000_function_value (const_tree valtype,
                       const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
                       bool outgoing ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  unsigned int regno;
  enum machine_mode elt_mode;
  int n_elts;

  /* Special handling for structs in darwin64.  */
  if (TARGET_MACHO
      && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed as
         an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, valtype, true, /* retval= */ true);
      if (valret)
        return valret;
      /* Otherwise fall through to standard ABI rules.  */
    }

  /* The ELFv2 ABI returns homogeneous VFP aggregates in registers.  */
  if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (valtype), valtype,
                                             &elt_mode, &n_elts))
    {
      int first_reg, n_regs, i;
      rtx par;

      if (SCALAR_FLOAT_MODE_P (elt_mode))
        {
          /* _Decimal128 must use even/odd register pairs.  */
          first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
          n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
        }
      else
        {
          first_reg = ALTIVEC_ARG_RETURN;
          n_regs = 1;
        }

      par = gen_rtx_PARALLEL (TYPE_MODE (valtype), rtvec_alloc (n_elts));
      for (i = 0; i < n_elts; i++)
        {
          rtx r = gen_rtx_REG (elt_mode, first_reg + i * n_regs);
          rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
          XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
        }

      return par;
    }

  if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
    {
      /* Long long return value need be split in -mpowerpc64, 32bit ABI.  */
      return gen_rtx_PARALLEL (DImode,
                               gen_rtvec (2,
                                          gen_rtx_EXPR_LIST (VOIDmode,
                                                             gen_rtx_REG (SImode, GP_ARG_RETURN),
                                                             const0_rtx),
                                          gen_rtx_EXPR_LIST (VOIDmode,
                                                             gen_rtx_REG (SImode,
                                                                          GP_ARG_RETURN + 1),
                                                             GEN_INT (4))));
    }
  if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
    {
      return gen_rtx_PARALLEL (DCmode,
                               gen_rtvec (4,
                                          gen_rtx_EXPR_LIST (VOIDmode,
                                                             gen_rtx_REG (SImode, GP_ARG_RETURN),
                                                             const0_rtx),
                                          gen_rtx_EXPR_LIST (VOIDmode,
                                                             gen_rtx_REG (SImode,
                                                                          GP_ARG_RETURN + 1),
                                                             GEN_INT (4)),
                                          gen_rtx_EXPR_LIST (VOIDmode,
                                                             gen_rtx_REG (SImode,
                                                                          GP_ARG_RETURN + 2),
                                                             GEN_INT (8)),
                                          gen_rtx_EXPR_LIST (VOIDmode,
                                                             gen_rtx_REG (SImode,
                                                                          GP_ARG_RETURN + 3),
                                                             GEN_INT (12))));
    }

  mode = TYPE_MODE (valtype);
  if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
      || POINTER_TYPE_P (valtype))
    mode = TARGET_32BIT ? SImode : DImode;

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
           && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
    regno = FP_ARG_RETURN;
  else if (TREE_CODE (valtype) == COMPLEX_TYPE
           && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if (TREE_CODE (valtype) == VECTOR_TYPE
           && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
           && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
    regno = ALTIVEC_ARG_RETURN;
  else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
           && (mode == DFmode || mode == DCmode
               || mode == TFmode || mode == TCmode))
    return spe_build_register_parallel (mode, GP_ARG_RETURN);
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}
/* Define how to find the value returned by a library function
   assuming the value has mode MODE.  */

static rtx
rs6000_libcall_value (enum machine_mode mode)
{
  unsigned int regno;

  if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
    {
      /* Long long return value need be split in -mpowerpc64, 32bit ABI.  */
      return gen_rtx_PARALLEL (DImode,
                               gen_rtvec (2,
                                          gen_rtx_EXPR_LIST (VOIDmode,
                                                             gen_rtx_REG (SImode, GP_ARG_RETURN),
                                                             const0_rtx),
                                          gen_rtx_EXPR_LIST (VOIDmode,
                                                             gen_rtx_REG (SImode,
                                                                          GP_ARG_RETURN + 1),
                                                             GEN_INT (4))));
    }

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_MODE_P (mode)
           && TARGET_HARD_FLOAT && TARGET_FPRS
           && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
    regno = FP_ARG_RETURN;
  /* VSX is a superset of Altivec and adds V2DImode/V2DFmode.  Since the same
     return register is used in both cases, and we won't see V2DImode/V2DFmode
     for pure altivec, combine the two cases.  */
  else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
           && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
    regno = ALTIVEC_ARG_RETURN;
  else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
           && (mode == DFmode || mode == DCmode
               || mode == TFmode || mode == TCmode))
    return spe_build_register_parallel (mode, GP_ARG_RETURN);
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}
/* Return true if we use LRA instead of reload pass.  */

static bool
rs6000_lra_p (void)
{
  return rs6000_lra_flag;
}

/* Given FROM and TO register numbers, say whether this elimination is allowed.
   Frame pointer elimination is automatically handled.

   For the RS/6000, if frame pointer elimination is being done, we would like
   to convert ap into fp, not sp.

   We need r30 if -mminimal-toc was specified, and there are constant pool
   references.  */

static bool
rs6000_can_eliminate (const int from, const int to)
{
  return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
          ? ! frame_pointer_needed
          : from == RS6000_PIC_OFFSET_TABLE_REGNUM
            ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
            : true);
}
/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */

HOST_WIDE_INT
rs6000_initial_elimination_offset (int from, int to)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  HOST_WIDE_INT offset;

  if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? 0 : -info->total_size;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    {
      offset = info->push_p ? 0 : -info->total_size;
      if (FRAME_GROWS_DOWNWARD)
        offset += info->fixed_size + info->vars_size + info->parm_size;
    }
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = FRAME_GROWS_DOWNWARD
             ? info->fixed_size + info->vars_size + info->parm_size
             : 0;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = info->total_size;
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? info->total_size : 0;
  else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}
static rtx
rs6000_dwarf_register_span (rtx reg)
{
  rtx parts[8];
  int i, words;
  unsigned regno = REGNO (reg);
  enum machine_mode mode = GET_MODE (reg);

  if (TARGET_SPE
      && (SPE_VECTOR_MODE (GET_MODE (reg))
          || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
              && mode != SFmode && mode != SDmode && mode != SCmode)))
    ;
  else
    return NULL_RTX;

  regno = REGNO (reg);

  /* The duality of the SPE register size wreaks all kinds of havoc.
     This is a way of distinguishing r0 in 32-bits from r0 in
     64-bits.  */
  words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
  gcc_assert (words <= 4);
  for (i = 0; i < words; i++, regno++)
    {
      if (BYTES_BIG_ENDIAN)
        {
          parts[2 * i] = gen_rtx_REG (SImode, regno + 1200);
          parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
        }
      else
        {
          parts[2 * i] = gen_rtx_REG (SImode, regno);
          parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200);
        }
    }

  return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
}
/* Fill in sizes for SPE register high parts in table used by unwinder.  */

static void
rs6000_init_dwarf_reg_sizes_extra (tree address)
{
  if (TARGET_SPE)
    {
      int i;
      enum machine_mode mode = TYPE_MODE (char_type_node);
      rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
      rtx mem = gen_rtx_MEM (BLKmode, addr);
      rtx value = gen_int_mode (4, mode);

      for (i = 1201; i < 1232; i++)
        {
          int column = DWARF_REG_TO_UNWIND_COLUMN (i);
          HOST_WIDE_INT offset
            = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);

          emit_move_insn (adjust_address (mem, mode, offset), value);
        }
    }

  if (TARGET_MACHO && ! TARGET_ALTIVEC)
    {
      int i;
      enum machine_mode mode = TYPE_MODE (char_type_node);
      rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
      rtx mem = gen_rtx_MEM (BLKmode, addr);
      rtx value = gen_int_mode (16, mode);

      /* On Darwin, libgcc may be built to run on both G3 and G4/5.
         The unwinder still needs to know the size of Altivec registers.  */

      for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
        {
          int column = DWARF_REG_TO_UNWIND_COLUMN (i);
          HOST_WIDE_INT offset
            = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);

          emit_move_insn (adjust_address (mem, mode, offset), value);
        }
    }
}
30444 rs6000_dbx_register_number (unsigned int regno
)
30446 if (regno
<= 63 || write_symbols
!= DWARF2_DEBUG
)
30448 if (regno
== LR_REGNO
)
30450 if (regno
== CTR_REGNO
)
30452 if (CR_REGNO_P (regno
))
30453 return regno
- CR0_REGNO
+ 86;
30454 if (regno
== CA_REGNO
)
30455 return 101; /* XER */
30456 if (ALTIVEC_REGNO_P (regno
))
30457 return regno
- FIRST_ALTIVEC_REGNO
+ 1124;
30458 if (regno
== VRSAVE_REGNO
)
30460 if (regno
== VSCR_REGNO
)
30462 if (regno
== SPE_ACC_REGNO
)
30464 if (regno
== SPEFSCR_REGNO
)
30466 /* SPE high reg number. We get these values of regno from
30467 rs6000_dwarf_register_span. */
30468 gcc_assert (regno
>= 1200 && regno
< 1232);
30472 /* target hook eh_return_filter_mode */
30473 static enum machine_mode
30474 rs6000_eh_return_filter_mode (void)
30476 return TARGET_32BIT
? SImode
: word_mode
;
/* Target hook for scalar_mode_supported_p.  */
static bool
rs6000_scalar_mode_supported_p (enum machine_mode mode)
{
  if (DECIMAL_FLOAT_MODE_P (mode))
    return default_decimal_float_supported_p ();
  else
    return default_scalar_mode_supported_p (mode);
}

/* Target hook for vector_mode_supported_p.  */
static bool
rs6000_vector_mode_supported_p (enum machine_mode mode)
{
  if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
    return true;

  if (TARGET_SPE && SPE_VECTOR_MODE (mode))
    return true;

  else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
    return true;

  else
    return false;
}

/* Target hook for invalid_arg_for_unprototyped_fn.  */
static const char *
invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
{
  return (!rs6000_darwin64_abi
          && typelist == 0
          && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
          && (funcdecl == NULL_TREE
              || (TREE_CODE (funcdecl) == FUNCTION_DECL
                  && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
         ? N_("AltiVec argument passed to unprototyped function")
         : NULL;
}

/* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
   setup by using __stack_chk_fail_local hidden function instead of
   calling __stack_chk_fail directly.  Otherwise it is better to call
   __stack_chk_fail directly.  */

static tree ATTRIBUTE_UNUSED
rs6000_stack_protect_fail (void)
{
  return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
         ? default_hidden_stack_protect_fail ()
         : default_external_stack_protect_fail ();
}
void
rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
                           int num_operands ATTRIBUTE_UNUSED)
{
  if (rs6000_warn_cell_microcode)
    {
      const char *temp;
      int insn_code_number = recog_memoized (insn);
      location_t location = INSN_LOCATION (insn);

      /* Punt on insns we cannot recognize.  */
      if (insn_code_number < 0)
        return;

      temp = get_insn_template (insn_code_number, insn);

      if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
        warning_at (location, OPT_mwarn_cell_microcode,
                    "emitting microcode insn %s\t[%s] #%d",
                    temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
      else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
        warning_at (location, OPT_mwarn_cell_microcode,
                    "emitting conditional microcode insn %s\t[%s] #%d",
                    temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
    }
}
/* Implement the TARGET_ASAN_SHADOW_OFFSET hook.  */

static unsigned HOST_WIDE_INT
rs6000_asan_shadow_offset (void)
{
  return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
}
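/* Sketch of how the offset is consumed (assumption: the standard
   AddressSanitizer shadow mapping, shadow = (addr >> 3) + offset; this is
   illustrative, not code used anywhere in this file):

     static inline unsigned long long
     asan_shadow_addr (unsigned long long addr, int target_64bit)
     {
       unsigned long long offset = 1ULL << (target_64bit ? 41 : 29);
       return (addr >> 3) + offset;   // one shadow byte per 8 application bytes
     }
*/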
/* Mask options that we want to support inside of attribute((target)) and
   #pragma GCC target operations.  Note, we do not include things like
   64/32-bit, endianness, hard/soft floating point, etc. that would have
   different calling sequences.  */

struct rs6000_opt_mask {
  const char *name;		/* option name */
  HOST_WIDE_INT mask;		/* mask to set */
  bool invert;			/* invert sense of mask */
  bool valid_target;		/* option is a target option */
};

static struct rs6000_opt_mask const rs6000_opt_masks[] =
{
  { "altivec",			OPTION_MASK_ALTIVEC,		false, true  },
  { "cmpb",			OPTION_MASK_CMPB,		false, true  },
  { "crypto",			OPTION_MASK_CRYPTO,		false, true  },
  { "direct-move",		OPTION_MASK_DIRECT_MOVE,	false, true  },
  { "dlmzb",			OPTION_MASK_DLMZB,		false, true  },
  { "fprnd",			OPTION_MASK_FPRND,		false, true  },
  { "hard-dfp",			OPTION_MASK_DFP,		false, true  },
  { "htm",			OPTION_MASK_HTM,		false, true  },
  { "isel",			OPTION_MASK_ISEL,		false, true  },
  { "mfcrf",			OPTION_MASK_MFCRF,		false, true  },
  { "mfpgpr",			OPTION_MASK_MFPGPR,		false, true  },
  { "mulhw",			OPTION_MASK_MULHW,		false, true  },
  { "multiple",			OPTION_MASK_MULTIPLE,		false, true  },
  { "popcntb",			OPTION_MASK_POPCNTB,		false, true  },
  { "popcntd",			OPTION_MASK_POPCNTD,		false, true  },
  { "power8-fusion",		OPTION_MASK_P8_FUSION,		false, true  },
  { "power8-fusion-sign",	OPTION_MASK_P8_FUSION_SIGN,	false, true  },
  { "power8-vector",		OPTION_MASK_P8_VECTOR,		false, true  },
  { "powerpc-gfxopt",		OPTION_MASK_PPC_GFXOPT,		false, true  },
  { "powerpc-gpopt",		OPTION_MASK_PPC_GPOPT,		false, true  },
  { "quad-memory",		OPTION_MASK_QUAD_MEMORY,	false, true  },
  { "recip-precision",		OPTION_MASK_RECIP_PRECISION,	false, true  },
  { "string",			OPTION_MASK_STRING,		false, true  },
  { "update",			OPTION_MASK_NO_UPDATE,		true , true  },
  { "upper-regs-df",		OPTION_MASK_UPPER_REGS_DF,	false, false },
  { "upper-regs-sf",		OPTION_MASK_UPPER_REGS_SF,	false, false },
  { "vsx",			OPTION_MASK_VSX,		false, true  },
  { "vsx-timode",		OPTION_MASK_VSX_TIMODE,		false, true  },
#ifdef OPTION_MASK_64BIT
#if TARGET_AIX_OS
  { "aix64",			OPTION_MASK_64BIT,		false, false },
  { "aix32",			OPTION_MASK_64BIT,		true,  false },
#else
  { "64",			OPTION_MASK_64BIT,		false, false },
  { "32",			OPTION_MASK_64BIT,		true,  false },
#endif
#endif
#ifdef OPTION_MASK_EABI
  { "eabi",			OPTION_MASK_EABI,		false, false },
#endif
#ifdef OPTION_MASK_LITTLE_ENDIAN
  { "little",			OPTION_MASK_LITTLE_ENDIAN,	false, false },
  { "big",			OPTION_MASK_LITTLE_ENDIAN,	true,  false },
#endif
#ifdef OPTION_MASK_RELOCATABLE
  { "relocatable",		OPTION_MASK_RELOCATABLE,	false, false },
#endif
#ifdef OPTION_MASK_STRICT_ALIGN
  { "strict-align",		OPTION_MASK_STRICT_ALIGN,	false, false },
#endif
  { "soft-float",		OPTION_MASK_SOFT_FLOAT,		false, false },
  { "string",			OPTION_MASK_STRING,		false, false },
};
/* Builtin mask mapping for printing the flags.  */
static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
{
  { "altivec",		 RS6000_BTM_ALTIVEC,	false, false },
  { "vsx",		 RS6000_BTM_VSX,	false, false },
  { "spe",		 RS6000_BTM_SPE,	false, false },
  { "paired",		 RS6000_BTM_PAIRED,	false, false },
  { "fre",		 RS6000_BTM_FRE,	false, false },
  { "fres",		 RS6000_BTM_FRES,	false, false },
  { "frsqrte",		 RS6000_BTM_FRSQRTE,	false, false },
  { "frsqrtes",		 RS6000_BTM_FRSQRTES,	false, false },
  { "popcntd",		 RS6000_BTM_POPCNTD,	false, false },
  { "cell",		 RS6000_BTM_CELL,	false, false },
  { "power8-vector",	 RS6000_BTM_P8_VECTOR,	false, false },
  { "crypto",		 RS6000_BTM_CRYPTO,	false, false },
  { "htm",		 RS6000_BTM_HTM,	false, false },
};
/* Option variables that we want to support inside attribute((target)) and
   #pragma GCC target operations.  */

struct rs6000_opt_var {
  const char *name;		/* option name */
  size_t global_offset;		/* offset of the option in global_options.  */
  size_t target_offset;		/* offset of the option in target options.  */
};

static struct rs6000_opt_var const rs6000_opt_vars[] =
{
  { "friz",
    offsetof (struct gcc_options, x_TARGET_FRIZ),
    offsetof (struct cl_target_option, x_TARGET_FRIZ), },
  { "avoid-indexed-addresses",
    offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
    offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
  { "paired",
    offsetof (struct gcc_options, x_rs6000_paired_float),
    offsetof (struct cl_target_option, x_rs6000_paired_float), },
  { "longcall",
    offsetof (struct gcc_options, x_rs6000_default_long_calls),
    offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
};
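/* Usage sketch (hypothetical user code): entries in rs6000_opt_vars are plain
   variable-backed options and are toggled the same way as the mask options,
   e.g.:

     __attribute__((target ("avoid-indexed-addresses")))
     void copy_loop (int *dst, const int *src, int n);
*/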
/* Inner function to handle attribute((target("..."))) and #pragma GCC target
   parsing.  Return true if there were no errors.  */

static bool
rs6000_inner_target_options (tree args, bool attr_p)
{
  bool ret = true;

  if (args == NULL_TREE)
    ;

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
	{
	  bool error_p = false;
	  bool not_valid_p = false;
	  const char *cpu_opt = NULL;

	  p = NULL;
	  if (strncmp (q, "cpu=", 4) == 0)
	    {
	      int cpu_index = rs6000_cpu_name_lookup (q+4);
	      if (cpu_index >= 0)
		rs6000_cpu_index = cpu_index;
	      else
		{
		  error_p = true;
		  cpu_opt = q+4;
		}
	    }
	  else if (strncmp (q, "tune=", 5) == 0)
	    {
	      int tune_index = rs6000_cpu_name_lookup (q+5);
	      if (tune_index >= 0)
		rs6000_tune_index = tune_index;
	      else
		{
		  error_p = true;
		  cpu_opt = q+5;
		}
	    }
	  else
	    {
	      size_t i;
	      bool invert = false;
	      char *r = q;

	      error_p = true;
	      if (strncmp (r, "no-", 3) == 0)
		{
		  invert = true;
		  r += 3;
		}

	      for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
		if (strcmp (r, rs6000_opt_masks[i].name) == 0)
		  {
		    HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;

		    if (!rs6000_opt_masks[i].valid_target)
		      not_valid_p = true;
		    else
		      {
			error_p = false;
			rs6000_isa_flags_explicit |= mask;

			/* VSX needs altivec, so -mvsx automagically sets
			   altivec.  */
			if (mask == OPTION_MASK_VSX && !invert)
			  mask |= OPTION_MASK_ALTIVEC;

			if (rs6000_opt_masks[i].invert)
			  invert = !invert;

			if (invert)
			  rs6000_isa_flags &= ~mask;
			else
			  rs6000_isa_flags |= mask;
		      }
		    break;
		  }

	      if (error_p && !not_valid_p)
		{
		  for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
		    if (strcmp (r, rs6000_opt_vars[i].name) == 0)
		      {
			size_t j = rs6000_opt_vars[i].global_offset;
			*((int *) ((char *)&global_options + j)) = !invert;
			error_p = false;
			break;
		      }
		}
	    }

	  if (error_p)
	    {
	      const char *eprefix, *esuffix;

	      ret = false;
	      if (attr_p)
		{
		  eprefix = "__attribute__((__target__(";
		  esuffix = ")))";
		}
	      else
		{
		  eprefix = "#pragma GCC target ";
		  esuffix = "";
		}

	      if (cpu_opt)
		error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
		       q, esuffix);

	      else if (not_valid_p)
		error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);

	      else
		error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
	    }
	}
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
	{
	  tree value = TREE_VALUE (args);
	  if (value)
	    {
	      bool ret2 = rs6000_inner_target_options (value, attr_p);
	      if (!ret2)
		ret = false;
	    }
	  args = TREE_CHAIN (args);
	}
      while (args != NULL_TREE);
    }

  else
    gcc_unreachable ();

  return ret;
}
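/* A minimal standalone sketch of the tokenizing convention used above
   (illustrative only; it assumes option names follow rs6000_opt_masks and
   that a "no-" prefix inverts them, as in the parser above):

     #include <stdio.h>
     #include <string.h>

     static void
     parse_target_string (char *p)
     {
       char *q;

       while ((q = strtok (p, ",")) != NULL)
	 {
	   int invert = (strncmp (q, "no-", 3) == 0);
	   p = NULL;                     // later calls continue the same scan
	   printf ("%s option \"%s\"\n",
		   invert ? "disable" : "enable",
		   invert ? q + 3 : q);
	 }
     }
*/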
/* Print out the target options as a list for -mdebug=target.  */

static void
rs6000_debug_target_options (tree args, const char *prefix)
{
  if (args == NULL_TREE)
    fprintf (stderr, "%s<NULL>", prefix);

  else if (TREE_CODE (args) == STRING_CST)
    {
      char *p = ASTRDUP (TREE_STRING_POINTER (args));
      char *q;

      while ((q = strtok (p, ",")) != NULL)
	{
	  p = NULL;
	  fprintf (stderr, "%s\"%s\"", prefix, q);
	  prefix = ", ";
	}
    }

  else if (TREE_CODE (args) == TREE_LIST)
    {
      do
	{
	  tree value = TREE_VALUE (args);
	  if (value)
	    {
	      rs6000_debug_target_options (value, prefix);
	      prefix = ", ";
	    }
	  args = TREE_CHAIN (args);
	}
      while (args != NULL_TREE);
    }

  else
    gcc_unreachable ();
}
30873 /* Hook to validate attribute((target("..."))). */
30876 rs6000_valid_attribute_p (tree fndecl
,
30877 tree
ARG_UNUSED (name
),
30881 struct cl_target_option cur_target
;
30883 tree old_optimize
= build_optimization_node (&global_options
);
30884 tree new_target
, new_optimize
;
30885 tree func_optimize
= DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl
);
30887 gcc_assert ((fndecl
!= NULL_TREE
) && (args
!= NULL_TREE
));
30889 if (TARGET_DEBUG_TARGET
)
30891 tree tname
= DECL_NAME (fndecl
);
30892 fprintf (stderr
, "\n==================== rs6000_valid_attribute_p:\n");
30894 fprintf (stderr
, "function: %.*s\n",
30895 (int) IDENTIFIER_LENGTH (tname
),
30896 IDENTIFIER_POINTER (tname
));
30898 fprintf (stderr
, "function: unknown\n");
30900 fprintf (stderr
, "args:");
30901 rs6000_debug_target_options (args
, " ");
30902 fprintf (stderr
, "\n");
30905 fprintf (stderr
, "flags: 0x%x\n", flags
);
30907 fprintf (stderr
, "--------------------\n");
30910 old_optimize
= build_optimization_node (&global_options
);
30911 func_optimize
= DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl
);
30913 /* If the function changed the optimization levels as well as setting target
30914 options, start with the optimizations specified. */
30915 if (func_optimize
&& func_optimize
!= old_optimize
)
30916 cl_optimization_restore (&global_options
,
30917 TREE_OPTIMIZATION (func_optimize
));
30919 /* The target attributes may also change some optimization flags, so update
30920 the optimization options if necessary. */
30921 cl_target_option_save (&cur_target
, &global_options
);
30922 rs6000_cpu_index
= rs6000_tune_index
= -1;
30923 ret
= rs6000_inner_target_options (args
, true);
30925 /* Set up any additional state. */
30928 ret
= rs6000_option_override_internal (false);
30929 new_target
= build_target_option_node (&global_options
);
30934 new_optimize
= build_optimization_node (&global_options
);
30941 DECL_FUNCTION_SPECIFIC_TARGET (fndecl
) = new_target
;
30943 if (old_optimize
!= new_optimize
)
30944 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl
) = new_optimize
;
30947 cl_target_option_restore (&global_options
, &cur_target
);
30949 if (old_optimize
!= new_optimize
)
30950 cl_optimization_restore (&global_options
,
30951 TREE_OPTIMIZATION (old_optimize
));
30957 /* Hook to validate the current #pragma GCC target and set the state, and
30958 update the macros based on what was changed. If ARGS is NULL, then
30959 POP_TARGET is used to reset the options. */
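/* Usage sketch (hypothetical user code): the pop path described above is
   reached via #pragma GCC pop_options, which hands the previously saved
   option tree back as POP_TARGET:

     #pragma GCC push_options
     #pragma GCC target ("altivec")
     vector int vadd (vector int a, vector int b) { return a + b; }
     #pragma GCC pop_options
*/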
30962 rs6000_pragma_target_parse (tree args
, tree pop_target
)
30964 tree prev_tree
= build_target_option_node (&global_options
);
30966 struct cl_target_option
*prev_opt
, *cur_opt
;
30967 HOST_WIDE_INT prev_flags
, cur_flags
, diff_flags
;
30968 HOST_WIDE_INT prev_bumask
, cur_bumask
, diff_bumask
;
30970 if (TARGET_DEBUG_TARGET
)
30972 fprintf (stderr
, "\n==================== rs6000_pragma_target_parse\n");
30973 fprintf (stderr
, "args:");
30974 rs6000_debug_target_options (args
, " ");
30975 fprintf (stderr
, "\n");
30979 fprintf (stderr
, "pop_target:\n");
30980 debug_tree (pop_target
);
30983 fprintf (stderr
, "pop_target: <NULL>\n");
30985 fprintf (stderr
, "--------------------\n");
30990 cur_tree
= ((pop_target
)
30992 : target_option_default_node
);
30993 cl_target_option_restore (&global_options
,
30994 TREE_TARGET_OPTION (cur_tree
));
30998 rs6000_cpu_index
= rs6000_tune_index
= -1;
30999 if (!rs6000_inner_target_options (args
, false)
31000 || !rs6000_option_override_internal (false)
31001 || (cur_tree
= build_target_option_node (&global_options
))
31004 if (TARGET_DEBUG_BUILTIN
|| TARGET_DEBUG_TARGET
)
31005 fprintf (stderr
, "invalid pragma\n");
31011 target_option_current_node
= cur_tree
;
31013 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
31014 change the macros that are defined. */
31015 if (rs6000_target_modify_macros_ptr
)
31017 prev_opt
= TREE_TARGET_OPTION (prev_tree
);
31018 prev_bumask
= prev_opt
->x_rs6000_builtin_mask
;
31019 prev_flags
= prev_opt
->x_rs6000_isa_flags
;
31021 cur_opt
= TREE_TARGET_OPTION (cur_tree
);
31022 cur_flags
= cur_opt
->x_rs6000_isa_flags
;
31023 cur_bumask
= cur_opt
->x_rs6000_builtin_mask
;
31025 diff_bumask
= (prev_bumask
^ cur_bumask
);
31026 diff_flags
= (prev_flags
^ cur_flags
);
31028 if ((diff_flags
!= 0) || (diff_bumask
!= 0))
31030 /* Delete old macros. */
31031 rs6000_target_modify_macros_ptr (false,
31032 prev_flags
& diff_flags
,
31033 prev_bumask
& diff_bumask
);
31035 /* Define new macros. */
31036 rs6000_target_modify_macros_ptr (true,
31037 cur_flags
& diff_flags
,
31038 cur_bumask
& diff_bumask
);
/* Remember the last target of rs6000_set_current_function.  */
static GTY(()) tree rs6000_previous_fndecl;
31049 /* Establish appropriate back-end context for processing the function
31050 FNDECL. The argument might be NULL to indicate processing at top
31051 level, outside of any function scope. */
31053 rs6000_set_current_function (tree fndecl
)
31055 tree old_tree
= (rs6000_previous_fndecl
31056 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl
)
31059 tree new_tree
= (fndecl
31060 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl
)
31063 if (TARGET_DEBUG_TARGET
)
31065 bool print_final
= false;
31066 fprintf (stderr
, "\n==================== rs6000_set_current_function");
31069 fprintf (stderr
, ", fndecl %s (%p)",
31070 (DECL_NAME (fndecl
)
31071 ? IDENTIFIER_POINTER (DECL_NAME (fndecl
))
31072 : "<unknown>"), (void *)fndecl
);
31074 if (rs6000_previous_fndecl
)
31075 fprintf (stderr
, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl
);
31077 fprintf (stderr
, "\n");
31080 fprintf (stderr
, "\nnew fndecl target specific options:\n");
31081 debug_tree (new_tree
);
31082 print_final
= true;
31087 fprintf (stderr
, "\nold fndecl target specific options:\n");
31088 debug_tree (old_tree
);
31089 print_final
= true;
31093 fprintf (stderr
, "--------------------\n");
31096 /* Only change the context if the function changes. This hook is called
31097 several times in the course of compiling a function, and we don't want to
31098 slow things down too much or call target_reinit when it isn't safe. */
31099 if (fndecl
&& fndecl
!= rs6000_previous_fndecl
)
31101 rs6000_previous_fndecl
= fndecl
;
31102 if (old_tree
== new_tree
)
31107 cl_target_option_restore (&global_options
,
31108 TREE_TARGET_OPTION (new_tree
));
31114 struct cl_target_option
*def
31115 = TREE_TARGET_OPTION (target_option_current_node
);
31117 cl_target_option_restore (&global_options
, def
);
/* Save the current options */

static void
rs6000_function_specific_save (struct cl_target_option *ptr,
			       struct gcc_options *opts)
{
  ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
  ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
}

/* Restore the current options */

static void
rs6000_function_specific_restore (struct gcc_options *opts,
				  struct cl_target_option *ptr)
{
  opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
  opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
  (void) rs6000_option_override_internal (false);
}
/* Print the current options */

static void
rs6000_function_specific_print (FILE *file, int indent,
				struct cl_target_option *ptr)
{
  rs6000_print_isa_options (file, indent, "Isa options set",
			    ptr->x_rs6000_isa_flags);

  rs6000_print_isa_options (file, indent, "Isa options explicit",
			    ptr->x_rs6000_isa_flags_explicit);
}
31159 /* Helper function to print the current isa or misc options on a line. */
31162 rs6000_print_options_internal (FILE *file
,
31164 const char *string
,
31165 HOST_WIDE_INT flags
,
31166 const char *prefix
,
31167 const struct rs6000_opt_mask
*opts
,
31168 size_t num_elements
)
31171 size_t start_column
= 0;
31173 size_t max_column
= 76;
31174 const char *comma
= "";
31177 start_column
+= fprintf (file
, "%*s", indent
, "");
31181 fprintf (stderr
, DEBUG_FMT_S
, string
, "<none>");
31185 start_column
+= fprintf (stderr
, DEBUG_FMT_WX
, string
, flags
);
31187 /* Print the various mask options. */
31188 cur_column
= start_column
;
31189 for (i
= 0; i
< num_elements
; i
++)
31191 if ((flags
& opts
[i
].mask
) != 0)
31193 const char *no_str
= rs6000_opt_masks
[i
].invert
? "no-" : "";
31194 size_t len
= (strlen (comma
)
31197 + strlen (rs6000_opt_masks
[i
].name
));
31200 if (cur_column
> max_column
)
31202 fprintf (stderr
, ", \\\n%*s", (int)start_column
, "");
31203 cur_column
= start_column
+ len
;
31207 fprintf (file
, "%s%s%s%s", comma
, prefix
, no_str
,
31208 rs6000_opt_masks
[i
].name
);
31209 flags
&= ~ opts
[i
].mask
;
31214 fputs ("\n", file
);
/* Helper function to print the current isa options on a line.  */

static void
rs6000_print_isa_options (FILE *file, int indent, const char *string,
			  HOST_WIDE_INT flags)
{
  rs6000_print_options_internal (file, indent, string, flags, "-m",
				 &rs6000_opt_masks[0],
				 ARRAY_SIZE (rs6000_opt_masks));
}

/* Helper function to print the current builtin options on a line.  */

static void
rs6000_print_builtin_options (FILE *file, int indent, const char *string,
			      HOST_WIDE_INT flags)
{
  rs6000_print_options_internal (file, indent, string, flags, "",
				 &rs6000_builtin_mask_names[0],
				 ARRAY_SIZE (rs6000_builtin_mask_names));
}
/* Hook to determine if one function can safely inline another.  */

static bool
rs6000_can_inline_p (tree caller, tree callee)
{
  bool ret = false;
  tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
  tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);

  /* If callee has no option attributes, then it is ok to inline.  */
  if (!callee_tree)
    ret = true;

  /* If caller has no option attributes, but callee does then it is not ok to
     inline.  */
  else if (!caller_tree)
    ret = false;

  else
    {
      struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
      struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);

      /* Callee's options should be a subset of the caller's, i.e. a vsx
	 function can inline an altivec function but a non-vsx function
	 can't inline a vsx function.  */
      if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
	  == callee_opts->x_rs6000_isa_flags)
	ret = true;
    }

  if (TARGET_DEBUG_TARGET)
    fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
	     (DECL_NAME (caller)
	      ? IDENTIFIER_POINTER (DECL_NAME (caller))
	      : "<unknown>"),
	     (DECL_NAME (callee)
	      ? IDENTIFIER_POINTER (DECL_NAME (callee))
	      : "<unknown>"),
	     (ret ? "can" : "cannot"));

  return ret;
}
/* Allocate a stack temp and fixup the address so it meets the particular
   memory requirements (either offsettable or REG+REG addressing).  */

rtx
rs6000_allocate_stack_temp (enum machine_mode mode,
			    bool offsettable_p,
			    bool reg_reg_p)
{
  rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
  rtx addr = XEXP (stack, 0);
  int strict_p = (reload_in_progress || reload_completed);

  if (!legitimate_indirect_address_p (addr, strict_p))
    {
      if (offsettable_p
	  && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
	stack = replace_equiv_address (stack, copy_addr_to_reg (addr));

      else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
	stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
    }

  return stack;
}
/* Given a memory reference, if it is not a reg or reg+reg addressing, convert
   to such a form to deal with memory reference instructions like STFIWX that
   only take reg+reg addressing.  */

rtx
rs6000_address_for_fpconvert (rtx x)
{
  int strict_p = (reload_in_progress || reload_completed);
  rtx addr;

  gcc_assert (MEM_P (x));
  addr = XEXP (x, 0);
  if (! legitimate_indirect_address_p (addr, strict_p)
      && ! legitimate_indexed_address_p (addr, strict_p))
    {
      if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
	{
	  rtx reg = XEXP (addr, 0);
	  HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
	  rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
	  gcc_assert (REG_P (reg));
	  emit_insn (gen_add3_insn (reg, reg, size_rtx));
	  addr = reg;
	}
      else if (GET_CODE (addr) == PRE_MODIFY)
	{
	  rtx reg = XEXP (addr, 0);
	  rtx expr = XEXP (addr, 1);
	  gcc_assert (REG_P (reg));
	  gcc_assert (GET_CODE (expr) == PLUS);
	  emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
	  addr = reg;
	}

      x = replace_equiv_address (x, copy_addr_to_reg (addr));
    }

  return x;
}
/* Given a memory reference, if it is not in the form for altivec memory
   reference instructions (i.e. reg or reg+reg addressing with AND of -16),
   convert to the altivec format.  */

rtx
rs6000_address_for_altivec (rtx x)
{
  gcc_assert (MEM_P (x));
  if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
    {
      rtx addr = XEXP (x, 0);
      int strict_p = (reload_in_progress || reload_completed);

      if (!legitimate_indexed_address_p (addr, strict_p)
	  && !legitimate_indirect_address_p (addr, strict_p))
	addr = copy_to_mode_reg (Pmode, addr);

      addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
      x = change_address (x, GET_MODE (x), addr);
    }

  return x;
}
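/* Worked sketch of the AND of -16 used above (illustrative only): the AltiVec
   lvx/stvx instructions ignore the low four bits of the effective address, so
   masking yields the 16-byte aligned address the hardware actually accesses,
   e.g.

     0x1003 & (unsigned long) -16  ==  0x1000
*/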
/* Implement TARGET_LEGITIMATE_CONSTANT_P.

   On the RS/6000, all integer constants are acceptable, most won't be valid
   for particular insns, though.  Only easy FP constants are acceptable.  */

static bool
rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
{
  if (TARGET_ELF && rs6000_tls_referenced_p (x))
    return false;

  return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
	  || GET_MODE (x) == VOIDmode
	  || (TARGET_POWERPC64 && mode == DImode)
	  || easy_fp_constant (x, mode)
	  || easy_vector_constant (x, mode));
}
31391 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
31394 rs6000_call_aix (rtx value
, rtx func_desc
, rtx flag
, rtx cookie
)
31396 rtx toc_reg
= gen_rtx_REG (Pmode
, TOC_REGNUM
);
31397 rtx toc_load
= NULL_RTX
;
31398 rtx toc_restore
= NULL_RTX
;
31400 rtx abi_reg
= NULL_RTX
;
31405 /* Handle longcall attributes. */
31406 if (INTVAL (cookie
) & CALL_LONG
)
31407 func_desc
= rs6000_longcall_ref (func_desc
);
31409 /* Handle indirect calls. */
31410 if (GET_CODE (func_desc
) != SYMBOL_REF
31411 || (DEFAULT_ABI
== ABI_AIX
&& !SYMBOL_REF_FUNCTION_P (func_desc
)))
31413 /* Save the TOC into its reserved slot before the call,
31414 and prepare to restore it after the call. */
31415 rtx stack_ptr
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
31416 rtx stack_toc_offset
= GEN_INT (RS6000_TOC_SAVE_SLOT
);
31417 rtx stack_toc_mem
= gen_frame_mem (Pmode
,
31418 gen_rtx_PLUS (Pmode
, stack_ptr
,
31419 stack_toc_offset
));
31420 toc_restore
= gen_rtx_SET (VOIDmode
, toc_reg
, stack_toc_mem
);
31422 /* Can we optimize saving the TOC in the prologue or
31423 do we need to do it at every call? */
31424 if (TARGET_SAVE_TOC_INDIRECT
&& !cfun
->calls_alloca
)
31425 cfun
->machine
->save_toc_in_prologue
= true;
31428 MEM_VOLATILE_P (stack_toc_mem
) = 1;
31429 emit_move_insn (stack_toc_mem
, toc_reg
);
31432 if (DEFAULT_ABI
== ABI_ELFv2
)
31434 /* A function pointer in the ELFv2 ABI is just a plain address, but
31435 the ABI requires it to be loaded into r12 before the call. */
31436 func_addr
= gen_rtx_REG (Pmode
, 12);
31437 emit_move_insn (func_addr
, func_desc
);
31438 abi_reg
= func_addr
;
31442 /* A function pointer under AIX is a pointer to a data area whose
31443 first word contains the actual address of the function, whose
31444 second word contains a pointer to its TOC, and whose third word
31445 contains a value to place in the static chain register (r11).
31446 Note that if we load the static chain, our "trampoline" need
31447 not have any executable code. */
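/* Layout sketch of the descriptor described above (an illustration, assuming
   a pointer-sized word; this is not a type used by this file):

     struct aix_func_descriptor {
       void *entry_point;    // word 0: address of the function's code
       void *toc;            // word 1: TOC pointer for the function
       void *static_chain;   // word 2: value loaded into r11
     };
*/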
31449 /* Load up address of the actual function. */
31450 func_desc
= force_reg (Pmode
, func_desc
);
31451 func_addr
= gen_reg_rtx (Pmode
);
31452 emit_move_insn (func_addr
, gen_rtx_MEM (Pmode
, func_desc
));
31454 /* Prepare to load the TOC of the called function. Note that the
31455 TOC load must happen immediately before the actual call so
31456 that unwinding the TOC registers works correctly. See the
31457 comment in frob_update_context. */
31458 rtx func_toc_offset
= GEN_INT (GET_MODE_SIZE (Pmode
));
31459 rtx func_toc_mem
= gen_rtx_MEM (Pmode
,
31460 gen_rtx_PLUS (Pmode
, func_desc
,
31462 toc_load
= gen_rtx_USE (VOIDmode
, func_toc_mem
);
31464 /* If we have a static chain, load it up. */
31465 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS
)
31467 rtx sc_reg
= gen_rtx_REG (Pmode
, STATIC_CHAIN_REGNUM
);
31468 rtx func_sc_offset
= GEN_INT (2 * GET_MODE_SIZE (Pmode
));
31469 rtx func_sc_mem
= gen_rtx_MEM (Pmode
,
31470 gen_rtx_PLUS (Pmode
, func_desc
,
31472 emit_move_insn (sc_reg
, func_sc_mem
);
31479 /* Direct calls use the TOC: for local calls, the callee will
31480 assume the TOC register is set; for non-local calls, the
31481 PLT stub needs the TOC register. */
31483 func_addr
= func_desc
;
31486 /* Create the call. */
31487 call
[0] = gen_rtx_CALL (VOIDmode
, gen_rtx_MEM (SImode
, func_addr
), flag
);
31488 if (value
!= NULL_RTX
)
31489 call
[0] = gen_rtx_SET (VOIDmode
, value
, call
[0]);
31493 call
[n_call
++] = toc_load
;
31495 call
[n_call
++] = toc_restore
;
31497 call
[n_call
++] = gen_rtx_CLOBBER (VOIDmode
, gen_rtx_REG (Pmode
, LR_REGNO
));
31499 insn
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec_v (n_call
, call
));
31500 insn
= emit_call_insn (insn
);
31502 /* Mention all registers defined by the ABI to hold information
31503 as uses in CALL_INSN_FUNCTION_USAGE. */
31505 use_reg (&CALL_INSN_FUNCTION_USAGE (insn
), abi_reg
);
/* Expand code to perform a sibling call under the AIX or ELFv2 ABI.  */

void
rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
{
  rtx call[2];
  rtx insn;

  gcc_assert (INTVAL (cookie) == 0);

  /* Create the call.  */
  call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
  if (value != NULL_RTX)
    call[0] = gen_rtx_SET (VOIDmode, value, call[0]);

  call[1] = simple_return_rtx;

  insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
  insn = emit_call_insn (insn);

  /* Note use of the TOC register.  */
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
  /* We need to also mark a use of the link register since the function we
     sibling-call to will use it to return to our caller.  */
  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, LR_REGNO));
}
/* Return whether we need to always update the saved TOC pointer when we update
   the stack pointer.  */

static bool
rs6000_save_toc_in_prologue_p (void)
{
  return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
}

#ifdef HAVE_GAS_HIDDEN
# define USE_HIDDEN_LINKONCE 1
#else
# define USE_HIDDEN_LINKONCE 0
#endif
/* Fills in the label name that should be used for a 476 link stack thunk.  */

void
get_ppc476_thunk_name (char name[32])
{
  gcc_assert (TARGET_LINK_STACK);

  if (USE_HIDDEN_LINKONCE)
    sprintf (name, "__ppc476.get_thunk");
  else
    ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
}
31563 /* This function emits the simple thunk routine that is used to preserve
31564 the link stack on the 476 cpu. */
31566 static void rs6000_code_end (void) ATTRIBUTE_UNUSED
;
31568 rs6000_code_end (void)
31573 if (!TARGET_LINK_STACK
)
31576 get_ppc476_thunk_name (name
);
31578 decl
= build_decl (BUILTINS_LOCATION
, FUNCTION_DECL
, get_identifier (name
),
31579 build_function_type_list (void_type_node
, NULL_TREE
));
31580 DECL_RESULT (decl
) = build_decl (BUILTINS_LOCATION
, RESULT_DECL
,
31581 NULL_TREE
, void_type_node
);
31582 TREE_PUBLIC (decl
) = 1;
31583 TREE_STATIC (decl
) = 1;
31586 if (USE_HIDDEN_LINKONCE
)
31588 DECL_COMDAT_GROUP (decl
) = DECL_ASSEMBLER_NAME (decl
);
31589 targetm
.asm_out
.unique_section (decl
, 0);
31590 switch_to_section (get_named_section (decl
, NULL
, 0));
31591 DECL_WEAK (decl
) = 1;
31592 ASM_WEAKEN_DECL (asm_out_file
, decl
, name
, 0);
31593 targetm
.asm_out
.globalize_label (asm_out_file
, name
);
31594 targetm
.asm_out
.assemble_visibility (decl
, VISIBILITY_HIDDEN
);
31595 ASM_DECLARE_FUNCTION_NAME (asm_out_file
, name
, decl
);
31600 switch_to_section (text_section
);
31601 ASM_OUTPUT_LABEL (asm_out_file
, name
);
31604 DECL_INITIAL (decl
) = make_node (BLOCK
);
31605 current_function_decl
= decl
;
31606 init_function_start (decl
);
31607 first_function_block_is_cold
= false;
31608 /* Make sure unwind info is emitted for the thunk if needed. */
31609 final_start_function (emit_barrier (), asm_out_file
, 1);
31611 fputs ("\tblr\n", asm_out_file
);
31613 final_end_function ();
31614 init_insn_lengths ();
31615 free_after_compilation (cfun
);
31617 current_function_decl
= NULL
;
/* Add r30 to hard reg set if the prologue sets it up and it is not
   pic_offset_table_rtx.  */

static void
rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
{
  if (!TARGET_SINGLE_PIC_BASE
      && TARGET_TOC
      && TARGET_MINIMAL_TOC
      && get_pool_size () != 0)
    add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
}
31634 /* Helper function for rs6000_split_logical to emit a logical instruction after
31635 spliting the operation to single GPR registers.
31637 DEST is the destination register.
31638 OP1 and OP2 are the input source registers.
31639 CODE is the base operation (AND, IOR, XOR, NOT).
31640 MODE is the machine mode.
31641 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
31642 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
31643 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
31644 CLOBBER_REG is either NULL or a scratch register of type CC to allow
31645 formation of the AND instructions. */
31648 rs6000_split_logical_inner (rtx dest
,
31651 enum rtx_code code
,
31652 enum machine_mode mode
,
31653 bool complement_final_p
,
31654 bool complement_op1_p
,
31655 bool complement_op2_p
,
31661 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
31662 if (op2
&& GET_CODE (op2
) == CONST_INT
31663 && (mode
== SImode
|| (mode
== DImode
&& TARGET_POWERPC64
))
31664 && !complement_final_p
&& !complement_op1_p
&& !complement_op2_p
)
31666 HOST_WIDE_INT mask
= GET_MODE_MASK (mode
);
31667 HOST_WIDE_INT value
= INTVAL (op2
) & mask
;
31669 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
31674 emit_insn (gen_rtx_SET (VOIDmode
, dest
, const0_rtx
));
31678 else if (value
== mask
)
31680 if (!rtx_equal_p (dest
, op1
))
31681 emit_insn (gen_rtx_SET (VOIDmode
, dest
, op1
));
31686 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
31687 into separate ORI/ORIS or XORI/XORIS instrucitons. */
31688 else if (code
== IOR
|| code
== XOR
)
31692 if (!rtx_equal_p (dest
, op1
))
31693 emit_insn (gen_rtx_SET (VOIDmode
, dest
, op1
));
31699 if (complement_op1_p
)
31700 op1
= gen_rtx_NOT (mode
, op1
);
31702 if (complement_op2_p
)
31703 op2
= gen_rtx_NOT (mode
, op2
);
31705 bool_rtx
= ((code
== NOT
)
31706 ? gen_rtx_NOT (mode
, op1
)
31707 : gen_rtx_fmt_ee (code
, mode
, op1
, op2
));
31709 if (complement_final_p
)
31710 bool_rtx
= gen_rtx_NOT (mode
, bool_rtx
);
31712 set_rtx
= gen_rtx_SET (VOIDmode
, dest
, bool_rtx
);
31714 /* Is this AND with an explicit clobber? */
31717 rtx clobber
= gen_rtx_CLOBBER (VOIDmode
, clobber_reg
);
31718 set_rtx
= gen_rtx_PARALLEL (VOIDmode
, gen_rtvec (2, set_rtx
, clobber
));
31721 emit_insn (set_rtx
);
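/* Worked sketch (illustrative only, mirroring the value_hi_lo[] handling in
   rs6000_split_logical_di below): a 64-bit constant is split into two 32-bit
   halves, each kept sign-extended so it remains a valid SImode operand.

     static void
     split_di_constant (long long value, long long hi_lo[2])
     {
       hi_lo[0] = value >> 32;             // high word
       hi_lo[1] = value & 0xffffffffLL;    // low word
       if (hi_lo[1] & 0x80000000LL)        // keep the low word sign-extended
	 hi_lo[1] |= ~0xffffffffLL;
     }

   For example, 0x123456789abcdef0 splits into 0x12345678 and
   0xffffffff9abcdef0; an AND with -1 then becomes a plain move and an AND
   with 0 becomes a clear, per rs6000_split_logical_inner above.  */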
31725 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
31726 operations are split immediately during RTL generation to allow for more
31727 optimizations of the AND/IOR/XOR.
31729 OPERANDS is an array containing the destination and two input operands.
31730 CODE is the base operation (AND, IOR, XOR, NOT).
31731 MODE is the machine mode.
31732 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
31733 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
31734 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
31735 CLOBBER_REG is either NULL or a scratch register of type CC to allow
31736 formation of the AND instructions. */
31739 rs6000_split_logical_di (rtx operands
[3],
31740 enum rtx_code code
,
31741 bool complement_final_p
,
31742 bool complement_op1_p
,
31743 bool complement_op2_p
,
31746 const HOST_WIDE_INT lower_32bits
= HOST_WIDE_INT_C(0xffffffff);
31747 const HOST_WIDE_INT upper_32bits
= ~ lower_32bits
;
31748 const HOST_WIDE_INT sign_bit
= HOST_WIDE_INT_C(0x80000000);
31749 enum hi_lo
{ hi
= 0, lo
= 1 };
31750 rtx op0_hi_lo
[2], op1_hi_lo
[2], op2_hi_lo
[2];
31753 op0_hi_lo
[hi
] = gen_highpart (SImode
, operands
[0]);
31754 op1_hi_lo
[hi
] = gen_highpart (SImode
, operands
[1]);
31755 op0_hi_lo
[lo
] = gen_lowpart (SImode
, operands
[0]);
31756 op1_hi_lo
[lo
] = gen_lowpart (SImode
, operands
[1]);
31759 op2_hi_lo
[hi
] = op2_hi_lo
[lo
] = NULL_RTX
;
31762 if (GET_CODE (operands
[2]) != CONST_INT
)
31764 op2_hi_lo
[hi
] = gen_highpart_mode (SImode
, DImode
, operands
[2]);
31765 op2_hi_lo
[lo
] = gen_lowpart (SImode
, operands
[2]);
31769 HOST_WIDE_INT value
= INTVAL (operands
[2]);
31770 HOST_WIDE_INT value_hi_lo
[2];
31772 gcc_assert (!complement_final_p
);
31773 gcc_assert (!complement_op1_p
);
31774 gcc_assert (!complement_op2_p
);
31776 value_hi_lo
[hi
] = value
>> 32;
31777 value_hi_lo
[lo
] = value
& lower_32bits
;
31779 for (i
= 0; i
< 2; i
++)
31781 HOST_WIDE_INT sub_value
= value_hi_lo
[i
];
31783 if (sub_value
& sign_bit
)
31784 sub_value
|= upper_32bits
;
31786 op2_hi_lo
[i
] = GEN_INT (sub_value
);
31788 /* If this is an AND instruction, check to see if we need to load
31789 the value in a register. */
31790 if (code
== AND
&& sub_value
!= -1 && sub_value
!= 0
31791 && !and_operand (op2_hi_lo
[i
], SImode
))
31792 op2_hi_lo
[i
] = force_reg (SImode
, op2_hi_lo
[i
]);
31797 for (i
= 0; i
< 2; i
++)
31799 /* Split large IOR/XOR operations. */
31800 if ((code
== IOR
|| code
== XOR
)
31801 && GET_CODE (op2_hi_lo
[i
]) == CONST_INT
31802 && !complement_final_p
31803 && !complement_op1_p
31804 && !complement_op2_p
31805 && clobber_reg
== NULL_RTX
31806 && !logical_const_operand (op2_hi_lo
[i
], SImode
))
31808 HOST_WIDE_INT value
= INTVAL (op2_hi_lo
[i
]);
31809 HOST_WIDE_INT hi_16bits
= value
& HOST_WIDE_INT_C(0xffff0000);
31810 HOST_WIDE_INT lo_16bits
= value
& HOST_WIDE_INT_C(0x0000ffff);
31811 rtx tmp
= gen_reg_rtx (SImode
);
31813 /* Make sure the constant is sign extended. */
31814 if ((hi_16bits
& sign_bit
) != 0)
31815 hi_16bits
|= upper_32bits
;
31817 rs6000_split_logical_inner (tmp
, op1_hi_lo
[i
], GEN_INT (hi_16bits
),
31818 code
, SImode
, false, false, false,
31821 rs6000_split_logical_inner (op0_hi_lo
[i
], tmp
, GEN_INT (lo_16bits
),
31822 code
, SImode
, false, false, false,
31826 rs6000_split_logical_inner (op0_hi_lo
[i
], op1_hi_lo
[i
], op2_hi_lo
[i
],
31827 code
, SImode
, complement_final_p
,
31828 complement_op1_p
, complement_op2_p
,
31835 /* Split the insns that make up boolean operations operating on multiple GPR
31836 registers. The boolean MD patterns ensure that the inputs either are
31837 exactly the same as the output registers, or there is no overlap.
31839 OPERANDS is an array containing the destination and two input operands.
31840 CODE is the base operation (AND, IOR, XOR, NOT).
31841 MODE is the machine mode.
31842 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
31843 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
31844 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT.
31845 CLOBBER_REG is either NULL or a scratch register of type CC to allow
31846 formation of the AND instructions. */
31849 rs6000_split_logical (rtx operands
[3],
31850 enum rtx_code code
,
31851 bool complement_final_p
,
31852 bool complement_op1_p
,
31853 bool complement_op2_p
,
31856 enum machine_mode mode
= GET_MODE (operands
[0]);
31857 enum machine_mode sub_mode
;
31859 int sub_size
, regno0
, regno1
, nregs
, i
;
31861 /* If this is DImode, use the specialized version that can run before
31862 register allocation. */
31863 if (mode
== DImode
&& !TARGET_POWERPC64
)
31865 rs6000_split_logical_di (operands
, code
, complement_final_p
,
31866 complement_op1_p
, complement_op2_p
,
31873 op2
= (code
== NOT
) ? NULL_RTX
: operands
[2];
31874 sub_mode
= (TARGET_POWERPC64
) ? DImode
: SImode
;
31875 sub_size
= GET_MODE_SIZE (sub_mode
);
31876 regno0
= REGNO (op0
);
31877 regno1
= REGNO (op1
);
31879 gcc_assert (reload_completed
);
31880 gcc_assert (IN_RANGE (regno0
, FIRST_GPR_REGNO
, LAST_GPR_REGNO
));
31881 gcc_assert (IN_RANGE (regno1
, FIRST_GPR_REGNO
, LAST_GPR_REGNO
));
31883 nregs
= rs6000_hard_regno_nregs
[(int)mode
][regno0
];
31884 gcc_assert (nregs
> 1);
31886 if (op2
&& REG_P (op2
))
31887 gcc_assert (IN_RANGE (REGNO (op2
), FIRST_GPR_REGNO
, LAST_GPR_REGNO
));
31889 for (i
= 0; i
< nregs
; i
++)
31891 int offset
= i
* sub_size
;
31892 rtx sub_op0
= simplify_subreg (sub_mode
, op0
, mode
, offset
);
31893 rtx sub_op1
= simplify_subreg (sub_mode
, op1
, mode
, offset
);
31894 rtx sub_op2
= ((code
== NOT
)
31896 : simplify_subreg (sub_mode
, op2
, mode
, offset
));
31898 rs6000_split_logical_inner (sub_op0
, sub_op1
, sub_op2
, code
, sub_mode
,
31899 complement_final_p
, complement_op1_p
,
31900 complement_op2_p
, clobber_reg
);
/* Return true if the peephole2 can combine a load involving a combination of
   an addis instruction and a load with an offset that can be fused together
   on a power8.

   The operands are:
	operands[0]	register set with addis
	operands[1]	value set via addis
	operands[2]	target register being loaded
	operands[3]	D-form memory reference using operands[0].

   In addition, we are passed a boolean that is true if this is a peephole2,
   and we can check whether the addis_reg is dead after the insn and can be
   replaced by the target register.  */
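/* Example of the fused sequence this enables (a sketch; the exact operands
   depend on the address form, cf. the addis/load templates produced by
   emit_fusion_gpr_load further down):

	addis 9,2,sym@toc@ha	# operands[0] <- high part of the address
	lwz 9,sym@toc@l(9)	# operands[2] (same register) <- the load

   The point of OPTION_MASK_P8_FUSION is that power8 can fuse such an adjacent
   addis/load pair when the load targets the register the addis just set.  */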
31922 fusion_gpr_load_p (rtx
*operands
, bool peep2_p
)
31924 rtx addis_reg
= operands
[0];
31925 rtx addis_value
= operands
[1];
31926 rtx target
= operands
[2];
31927 rtx mem
= operands
[3];
31931 /* Validate arguments. */
31932 if (!base_reg_operand (addis_reg
, GET_MODE (addis_reg
)))
31935 if (!base_reg_operand (target
, GET_MODE (target
)))
31938 if (!fusion_gpr_addis (addis_value
, GET_MODE (addis_value
)))
31941 if (!fusion_gpr_mem_load (mem
, GET_MODE (mem
)))
31944 /* Allow sign/zero extension. */
31945 if (GET_CODE (mem
) == ZERO_EXTEND
31946 || (GET_CODE (mem
) == SIGN_EXTEND
&& TARGET_P8_FUSION_SIGN
))
31947 mem
= XEXP (mem
, 0);
31952 addr
= XEXP (mem
, 0); /* either PLUS or LO_SUM. */
31953 if (GET_CODE (addr
) != PLUS
&& GET_CODE (addr
) != LO_SUM
)
31956 /* Validate that the register used to load the high value is either the
31957 register being loaded, or we can safely replace its use in a peephole2.
31959 If this is a peephole2, we assume that there are 2 instructions in the
31960 peephole (addis and load), so we want to check if the target register was
31961 not used in the memory address and the register to hold the addis result
31962 is dead after the peephole. */
31963 if (REGNO (addis_reg
) != REGNO (target
))
31968 if (reg_mentioned_p (target
, mem
))
31971 if (!peep2_reg_dead_p (2, addis_reg
))
31975 base_reg
= XEXP (addr
, 0);
31976 return REGNO (addis_reg
) == REGNO (base_reg
);
31979 /* During the peephole2 pass, adjust and expand the insns for a load fusion
31980 sequence. We adjust the addis register to use the target register. If the
31981 load sign extends, we adjust the code to do the zero extending load, and an
31982 explicit sign extension later since the fusion only covers zero extending
31986 operands[0] register set with addis (to be replaced with target)
31987 operands[1] value set via addis
31988 operands[2] target register being loaded
31989 operands[3] D-form memory reference using operands[0]. */
31992 expand_fusion_gpr_load (rtx
*operands
)
31994 rtx addis_value
= operands
[1];
31995 rtx target
= operands
[2];
31996 rtx orig_mem
= operands
[3];
31997 rtx new_addr
, new_mem
, orig_addr
, offset
;
31998 enum rtx_code plus_or_lo_sum
;
31999 enum machine_mode target_mode
= GET_MODE (target
);
32000 enum machine_mode extend_mode
= target_mode
;
32001 enum machine_mode ptr_mode
= Pmode
;
32002 enum rtx_code extend
= UNKNOWN
;
32003 rtx addis_reg
= ((ptr_mode
== target_mode
)
32005 : simplify_subreg (ptr_mode
, target
, target_mode
, 0));
32007 if (GET_CODE (orig_mem
) == ZERO_EXTEND
32008 || (TARGET_P8_FUSION_SIGN
&& GET_CODE (orig_mem
) == SIGN_EXTEND
))
32010 extend
= GET_CODE (orig_mem
);
32011 orig_mem
= XEXP (orig_mem
, 0);
32012 target_mode
= GET_MODE (orig_mem
);
32015 gcc_assert (MEM_P (orig_mem
));
32017 orig_addr
= XEXP (orig_mem
, 0);
32018 plus_or_lo_sum
= GET_CODE (orig_addr
);
32019 gcc_assert (plus_or_lo_sum
== PLUS
|| plus_or_lo_sum
== LO_SUM
);
32021 offset
= XEXP (orig_addr
, 1);
32022 new_addr
= gen_rtx_fmt_ee (plus_or_lo_sum
, ptr_mode
, addis_reg
, offset
);
32023 new_mem
= change_address (orig_mem
, target_mode
, new_addr
);
32025 if (extend
!= UNKNOWN
)
32026 new_mem
= gen_rtx_fmt_e (ZERO_EXTEND
, extend_mode
, new_mem
);
32028 emit_insn (gen_rtx_SET (VOIDmode
, addis_reg
, addis_value
));
32029 emit_insn (gen_rtx_SET (VOIDmode
, target
, new_mem
));
32031 if (extend
== SIGN_EXTEND
)
32033 int sub_off
= ((BYTES_BIG_ENDIAN
)
32034 ? GET_MODE_SIZE (extend_mode
) - GET_MODE_SIZE (target_mode
)
32037 = simplify_subreg (target_mode
, target
, extend_mode
, sub_off
);
32039 emit_insn (gen_rtx_SET (VOIDmode
, target
,
32040 gen_rtx_SIGN_EXTEND (extend_mode
, sign_reg
)));
32046 /* Return a string to fuse an addis instruction with a gpr load to the same
32047 register that we loaded up the addis instruction. The code is complicated,
32048 so we call output_asm_insn directly, and just return "".
32051 operands[0] register set with addis (must be same reg as target).
32052 operands[1] value set via addis
32053 operands[2] target register being loaded
32054 operands[3] D-form memory reference using operands[0]. */
32057 emit_fusion_gpr_load (rtx
*operands
)
32059 rtx addis_reg
= operands
[0];
32060 rtx addis_value
= operands
[1];
32061 rtx target
= operands
[2];
32062 rtx mem
= operands
[3];
32066 const char *addis_str
= NULL
;
32067 const char *load_str
= NULL
;
32068 const char *extend_insn
= NULL
;
32069 const char *mode_name
= NULL
;
32070 char insn_template
[80];
32071 enum machine_mode mode
;
32072 const char *comment_str
= ASM_COMMENT_START
;
32073 bool sign_p
= false;
32075 gcc_assert (REG_P (addis_reg
) && REG_P (target
));
32076 gcc_assert (REGNO (addis_reg
) == REGNO (target
));
32078 if (*comment_str
== ' ')
32081 /* Allow sign/zero extension. */
32082 if (GET_CODE (mem
) == ZERO_EXTEND
)
32083 mem
= XEXP (mem
, 0);
32085 else if (GET_CODE (mem
) == SIGN_EXTEND
&& TARGET_P8_FUSION_SIGN
)
32088 mem
= XEXP (mem
, 0);
32091 gcc_assert (MEM_P (mem
));
32092 addr
= XEXP (mem
, 0);
32093 if (GET_CODE (addr
) != PLUS
&& GET_CODE (addr
) != LO_SUM
)
32094 gcc_unreachable ();
32096 load_offset
= XEXP (addr
, 1);
32098 /* Now emit the load instruction to the same register. */
32099 mode
= GET_MODE (mem
);
32103 mode_name
= "char";
32105 extend_insn
= "extsb %0,%0";
32109 mode_name
= "short";
32111 extend_insn
= "extsh %0,%0";
32117 extend_insn
= "extsw %0,%0";
32121 if (TARGET_POWERPC64
)
32123 mode_name
= "long";
32127 gcc_unreachable ();
32131 gcc_unreachable ();
32134 /* Emit the addis instruction. */
32135 fuse_ops
[0] = target
;
32136 if (satisfies_constraint_L (addis_value
))
32138 fuse_ops
[1] = addis_value
;
32139 addis_str
= "lis %0,%v1";
32142 else if (GET_CODE (addis_value
) == PLUS
)
32144 rtx op0
= XEXP (addis_value
, 0);
32145 rtx op1
= XEXP (addis_value
, 1);
32147 if (REG_P (op0
) && CONST_INT_P (op1
)
32148 && satisfies_constraint_L (op1
))
32152 addis_str
= "addis %0,%1,%v2";
32156 else if (GET_CODE (addis_value
) == HIGH
)
32158 rtx value
= XEXP (addis_value
, 0);
32159 if (GET_CODE (value
) == UNSPEC
&& XINT (value
, 1) == UNSPEC_TOCREL
)
32161 fuse_ops
[1] = XVECEXP (value
, 0, 0); /* symbol ref. */
32162 fuse_ops
[2] = XVECEXP (value
, 0, 1); /* TOC register. */
32164 addis_str
= "addis %0,%2,%1@toc@ha";
32166 else if (TARGET_XCOFF
)
32167 addis_str
= "addis %0,%1@u(%2)";
32170 gcc_unreachable ();
32173 else if (GET_CODE (value
) == PLUS
)
32175 rtx op0
= XEXP (value
, 0);
32176 rtx op1
= XEXP (value
, 1);
32178 if (GET_CODE (op0
) == UNSPEC
32179 && XINT (op0
, 1) == UNSPEC_TOCREL
32180 && CONST_INT_P (op1
))
32182 fuse_ops
[1] = XVECEXP (op0
, 0, 0); /* symbol ref. */
32183 fuse_ops
[2] = XVECEXP (op0
, 0, 1); /* TOC register. */
32186 addis_str
= "addis %0,%2,%1+%3@toc@ha";
32188 else if (TARGET_XCOFF
)
32189 addis_str
= "addis %0,%1+%3@u(%2)";
32192 gcc_unreachable ();
32196 else if (satisfies_constraint_L (value
))
32198 fuse_ops
[1] = value
;
32199 addis_str
= "lis %0,%v1";
32202 else if (TARGET_ELF
&& !TARGET_POWERPC64
&& CONSTANT_P (value
))
32204 fuse_ops
[1] = value
;
32205 addis_str
= "lis %0,%1@ha";
32210 fatal_insn ("Could not generate addis value for fusion", addis_value
);
32212 sprintf (insn_template
, "%s\t\t%s gpr load fusion, type %s", addis_str
,
32213 comment_str
, mode_name
);
32214 output_asm_insn (insn_template
, fuse_ops
);
32216 /* Emit the D-form load instruction. */
32217 if (CONST_INT_P (load_offset
) && satisfies_constraint_I (load_offset
))
32219 sprintf (insn_template
, "%s %%0,%%1(%%0)", load_str
);
32220 fuse_ops
[1] = load_offset
;
32221 output_asm_insn (insn_template
, fuse_ops
);
32224 else if (GET_CODE (load_offset
) == UNSPEC
32225 && XINT (load_offset
, 1) == UNSPEC_TOCREL
)
32228 sprintf (insn_template
, "%s %%0,%%1@toc@l(%%0)", load_str
);
32230 else if (TARGET_XCOFF
)
32231 sprintf (insn_template
, "%s %%0,%%1@l(%%0)", load_str
);
32234 gcc_unreachable ();
32236 fuse_ops
[1] = XVECEXP (load_offset
, 0, 0);
32237 output_asm_insn (insn_template
, fuse_ops
);
32240 else if (GET_CODE (load_offset
) == PLUS
32241 && GET_CODE (XEXP (load_offset
, 0)) == UNSPEC
32242 && XINT (XEXP (load_offset
, 0), 1) == UNSPEC_TOCREL
32243 && CONST_INT_P (XEXP (load_offset
, 1)))
32245 rtx tocrel_unspec
= XEXP (load_offset
, 0);
32247 sprintf (insn_template
, "%s %%0,%%1+%%2@toc@l(%%0)", load_str
);
32249 else if (TARGET_XCOFF
)
32250 sprintf (insn_template
, "%s %%0,%%1+%%2@l(%%0)", load_str
);
32253 gcc_unreachable ();
32255 fuse_ops
[1] = XVECEXP (tocrel_unspec
, 0, 0);
32256 fuse_ops
[2] = XEXP (load_offset
, 1);
32257 output_asm_insn (insn_template
, fuse_ops
);
32260 else if (TARGET_ELF
&& !TARGET_POWERPC64
&& CONSTANT_P (load_offset
))
32262 sprintf (insn_template
, "%s %%0,%%1@l(%%0)", load_str
);
32264 fuse_ops
[1] = load_offset
;
32265 output_asm_insn (insn_template
, fuse_ops
);
32269 fatal_insn ("Unable to generate load offset for fusion", load_offset
);
32271 /* Handle sign extension. The peephole2 pass generates this as a separate
32272 insn, but we handle it just in case it got reattached. */
32275 gcc_assert (extend_insn
!= NULL
);
32276 output_asm_insn (extend_insn
, fuse_ops
);
struct gcc_target targetm = TARGET_INITIALIZER;

#include "gt-rs6000.h"