/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
   Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "basic-block.h"
#include "integrate.h"
#include "target-def.h"
#include "langhooks.h"
#include "cfglayout.h"
#include "sched-int.h"
#include "tree-flow.h"
#include "tm-constrs.h"
#include "xcoffout.h"		/* get declarations of xcoff_*_section_name */
#include "gstab.h"		/* for N_SLINE */
#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif
#define min(A,B)	((A) < (B) ? (A) : (B))
#define max(A,B)	((A) > (B) ? (A) : (B))
/* Structure used to define the rs6000 stack */
typedef struct rs6000_stack
{
  int first_gp_reg_save;	/* first callee saved GP register used */
  int first_fp_reg_save;	/* first callee saved FP register used */
  int first_altivec_reg_save;	/* first callee saved AltiVec register used */
  int lr_save_p;		/* true if the link reg needs to be saved */
  int cr_save_p;		/* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;	/* mask of vec registers to save */
  int push_p;			/* true if we need to allocate stack space */
  int calls_p;			/* true if the function makes any calls */
  int world_save_p;		/* true if we're saving *everything*:
				   r13-r31, cr, f14-f31, vrsave, v20-v31  */
  enum rs6000_abi abi;		/* which ABI to use */
  int gp_save_offset;		/* offset to save GP regs from initial SP */
  int fp_save_offset;		/* offset to save FP regs from initial SP */
  int altivec_save_offset;	/* offset to save AltiVec regs from initial SP */
  int lr_save_offset;		/* offset to save LR from initial SP */
  int cr_save_offset;		/* offset to save CR from initial SP */
  int vrsave_save_offset;	/* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;	/* offset to save spe 64-bit gprs  */
  int varargs_save_offset;	/* offset to save the varargs registers */
  int ehrd_offset;		/* offset to EH return data */
  int reg_size;			/* register size (4 or 8) */
  HOST_WIDE_INT vars_size;	/* variable save area size */
  int parm_size;		/* outgoing parameter size */
  int save_size;		/* save area size */
  int fixed_size;		/* fixed size of stack frame */
  int gp_size;			/* size of saved GP registers */
  int fp_size;			/* size of saved FP registers */
  int altivec_size;		/* size of saved AltiVec registers */
  int cr_size;			/* size to hold CR if not in save_size */
  int vrsave_size;		/* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;	/* size of altivec alignment padding if
				   not in save_size */
  int spe_gp_size;		/* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;	/* total bytes allocated for stack */
  int spe_64bit_regs_used;
} rs6000_stack_t;
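
/* A rough guide to how these fields relate (a sketch, not a normative
   layout): total_size is, after alignment, the sum of fixed_size,
   vars_size, parm_size and save_size, while the *_offset fields locate
   the individual save areas relative to the stack pointer on entry, so
   the GPR save area, for instance, begins gp_save_offset bytes from the
   initial SP.  */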
/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
} machine_function;
/* Target cpu type */

enum processor_type rs6000_cpu;
struct rs6000_cpu_select rs6000_select[3] =
{
  /* switch		name,			tune	arch */
  { (const char *)0,	"--with-cpu=",		1,	1 },
  { (const char *)0,	"-mcpu=",		1,	1 },
  { (const char *)0,	"-mtune=",		1,	0 },
};
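
/* Reading the table: --with-cpu= and -mcpu= set both the tune and arch
   fields (1, 1), while -mtune= sets only tune (1, 0); the first entry's
   string is filled in by rs6000_override_options with the configure-time
   default CPU.  */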
/* Always emit branch hint bits.  */
static GTY(()) bool rs6000_always_hint;

/* Schedule instructions for group formation.  */
static GTY(()) bool rs6000_sched_groups;

/* Align branch targets.  */
static GTY(()) bool rs6000_align_branch_targets;

/* Support for -msched-costly-dep option.  */
const char *rs6000_sched_costly_dep_str;
enum rs6000_dependence_cost rs6000_sched_costly_dep;

/* Support for -minsert-sched-nops option.  */
const char *rs6000_sched_insert_nops_str;
enum rs6000_nop_insertion rs6000_sched_insert_nops;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;
/* Size of long double.  */
int rs6000_long_double_type_size;

/* IEEE quad extended precision long double.  */
int rs6000_ieeequad;

/* Nonzero to use AltiVec ABI.  */
int rs6000_altivec_abi;

/* Nonzero if we want SPE SIMD instructions.  */
int rs6000_spe;

/* Nonzero if we want SPE ABI extensions.  */
int rs6000_spe_abi;

/* Nonzero to use isel instructions.  */
int rs6000_isel;

/* Nonzero if floating point operations are done in the GPRs.  */
int rs6000_float_gprs = 0;

/* Nonzero if we want Darwin's struct-by-value-in-regs ABI.  */
int rs6000_darwin64_abi;
/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of label created for -mrelocatable, to call to so we can
   get the address of the GOT section */
int rs6000_pic_labelno;

/* Which abi to adhere to */
const char *rs6000_abi_name;

/* Semantics of the small data area */
enum rs6000_sdata_type rs6000_sdata = SDATA_DATA;

/* Which small data model to use */
const char *rs6000_sdata_name = (char *)0;

/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;

/* Bit size of immediate TLS offsets and string from which it is decoded.  */
int rs6000_tls_size = 32;
const char *rs6000_tls_size_string;

/* ABI enumeration available for subtarget to use.  */
enum rs6000_abi rs6000_current_abi;
/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

const char *rs6000_debug_name;
int rs6000_debug_stack;		/* debug stack applications */
int rs6000_debug_arg;		/* debug argument handling */

/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];
/* Built in types.  */

tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

const char *rs6000_traceback_name;
static enum {
  traceback_default = 0,
  traceback_none,
  traceback_part,
  traceback_full
} rs6000_traceback;
/* Flag to say the TOC is initialized */
int toc_initialized;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;
static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;
/* Control alignment for fields within structures.  */
/* String from -malign-XXXXX.  */
int rs6000_alignment_flags;
/* True for any options that were explicitly set.  */
static struct {
  bool aix_struct_ret;		/* True if -maix-struct-ret was used.  */
  bool alignment;		/* True if -malign- was used.  */
  bool spe_abi;			/* True if -mabi=spe/no-spe was used.  */
  bool altivec_abi;		/* True if -mabi=altivec/no-altivec used.  */
  bool spe;			/* True if -mspe= was used.  */
  bool float_gprs;		/* True if -mfloat-gprs= was used.  */
  bool isel;			/* True if -misel was used.  */
  bool long_double;		/* True if -mlong-double- was used.  */
  bool ieee;			/* True if -mabi=ieee/ibmlongdouble used.  */
  bool vrsave;			/* True if -mvrsave was used.  */
} rs6000_explicit_options;
struct builtin_description
{
  /* mask is not const because we're going to alter it below.  This
     nonsense will go away when we rewrite the -march infrastructure
     to give us more target flag bits.  */
  unsigned int mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;	  /* cost of SImode multiplication.  */
  const int mulsi_const;  /* cost of SImode multiplication by constant.  */
  const int mulsi_const9; /* cost of SImode mult by short constant.  */
  const int muldi;	  /* cost of DImode multiplication.  */
  const int divsi;	  /* cost of SImode division.  */
  const int divdi;	  /* cost of DImode division.  */
  const int fp;		  /* cost of simple SFmode and DFmode insns.  */
  const int dmul;	  /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;	  /* cost of SFmode division (fdivs).  */
  const int ddiv;	  /* cost of DFmode division (fdiv).  */
  const int cache_line_size;	/* cache line size in bytes.  */
  const int l1_cache_size;	/* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;	/* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
					operations.  */
};

const struct processor_costs *rs6000_cost;
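
/* The entries in the cost tables below are expressed with COSTS_N_INSNS.
   Assuming the standard rtl.h definition, COSTS_N_INSNS (N) == (N) * 4,
   i.e. N instructions in GCC's rtx-cost units, so for instance a divsi
   entry of COSTS_N_INSNS (19) models an SImode divide as costing the
   same as nineteen single-cycle integer adds when the rtx-cost machinery
   compares candidate instruction sequences.  */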
/* Processor costs (relative to an add) */

/* Instruction size costs on 32bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
};
/* Instruction size costs on 64bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
};
/* Instruction costs on RIOS1 processors.  */
static const
struct processor_costs rios1_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (2),    /* fp */
  COSTS_N_INSNS (2),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (19),   /* ddiv */
  128,			/* cache line size */
};
/* Instruction costs on RIOS2 processors.  */
static const
struct processor_costs rios2_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (13),   /* divsi */
  COSTS_N_INSNS (13),   /* divdi */
  COSTS_N_INSNS (2),    /* fp */
  COSTS_N_INSNS (2),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  256,			/* cache line size */
};
/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,			/* cache line size */
};
/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,			/* cache line size */
};
/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,			/* cache line size */
};
/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),    /* mulsi_const */
  COSTS_N_INSNS (6/2),    /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),   /* divsi */
  COSTS_N_INSNS (70/2),   /* divdi */
  COSTS_N_INSNS (10/2),   /* fp */
  COSTS_N_INSNS (10/2),   /* dmul */
  COSTS_N_INSNS (74/2),   /* sdiv */
  COSTS_N_INSNS (74/2),   /* ddiv */
  128,			/* cache line size */
};
/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,			/* cache line size */
};
/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,			/* cache line size */
  32,			/* l1 cache */
  256,			/* l2 cache */
  1,			/* prefetch streams /*/
};
/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,			/* cache line size */
  16,			/* l1 cache */
  16,			/* l2 cache */
  1,			/* prefetch streams /*/
};
/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,			/* cache line size */
  32,			/* l1 cache */
  128,			/* l2 cache */
  1,			/* prefetch streams /*/
};
/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,			/* cache line size */
  32,			/* l1 cache */
  1024,			/* l2 cache */
  8,			/* prefetch streams /*/
};
/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,			/* cache line size */
  64,			/* l1 cache */
  2048,			/* l2 cache */
  16,			/* prefetch streams */
};
static bool rs6000_function_ok_for_sibcall (tree, tree);
static const char *rs6000_invalid_within_doloop (const_rtx);
static bool rs6000_legitimate_address_p (enum machine_mode, rtx, bool);
static rtx rs6000_generate_compare (rtx, enum machine_mode);
static void rs6000_emit_stack_tie (void);
static void rs6000_frame_related (rtx, rtx, HOST_WIDE_INT, rtx, rtx);
static bool spe_func_has_64bit_regs_p (void);
static void emit_frame_save (rtx, rtx, enum machine_mode, unsigned int,
			     int, HOST_WIDE_INT);
static rtx gen_frame_mem_offset (enum machine_mode, rtx, int);
static void rs6000_emit_allocate_stack (HOST_WIDE_INT, int, int);
static unsigned rs6000_hash_constant (rtx);
static unsigned toc_hash_function (const void *);
static int toc_hash_eq (const void *, const void *);
static bool constant_pool_expr_p (rtx);
static bool legitimate_small_data_p (enum machine_mode, rtx);
static bool legitimate_lo_sum_address_p (enum machine_mode, rtx, int);
static struct machine_function * rs6000_init_machine_status (void);
static bool rs6000_assemble_integer (rtx, unsigned int, int);
static bool no_global_regs_above (int, bool);
#ifdef HAVE_GAS_HIDDEN
static void rs6000_assemble_visibility (tree, int);
#endif
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static bool rs6000_ms_bitfield_layout_p (const_tree);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static void rs6000_eliminate_indexed_memrefs (rtx operands[2]);
static const char *rs6000_mangle_type (const_tree);
EXPORTED_CONST struct attribute_spec rs6000_attribute_table[];
static void rs6000_set_default_type_attributes (tree);
static rtx rs6000_savres_routine_sym (rs6000_stack_t *, bool, bool, bool);
static rtx rs6000_emit_stack_reset (rs6000_stack_t *, rtx, rtx, int, bool);
static rtx rs6000_make_savres_rtx (rs6000_stack_t *, rtx, int,
				   enum machine_mode, bool, bool, bool);
static bool rs6000_reg_live_or_pic_offset_p (int);
static int rs6000_savres_strategy (rs6000_stack_t *, bool, int, int);
static void rs6000_restore_saved_cr (rtx, int);
static void rs6000_output_function_prologue (FILE *, HOST_WIDE_INT);
static void rs6000_output_function_epilogue (FILE *, HOST_WIDE_INT);
static void rs6000_output_mi_thunk (FILE *, tree, HOST_WIDE_INT, HOST_WIDE_INT,
				    tree);
static rtx rs6000_emit_set_long_const (rtx, HOST_WIDE_INT, HOST_WIDE_INT);
static bool rs6000_return_in_memory (const_tree, const_tree);
static void rs6000_file_start (void);
static int rs6000_elf_reloc_rw_mask (void);
static void rs6000_elf_asm_out_constructor (rtx, int);
static void rs6000_elf_asm_out_destructor (rtx, int);
static void rs6000_elf_end_indicate_exec_stack (void) ATTRIBUTE_UNUSED;
static void rs6000_elf_asm_init_sections (void);
static section *rs6000_elf_select_rtx_section (enum machine_mode, rtx,
					       unsigned HOST_WIDE_INT);
static void rs6000_elf_encode_section_info (tree, rtx, int)
     ATTRIBUTE_UNUSED;
static bool rs6000_use_blocks_for_constant_p (enum machine_mode, const_rtx);
static void rs6000_alloc_sdmode_stack_slot (void);
static void rs6000_instantiate_decls (void);
static void rs6000_xcoff_asm_output_anchor (rtx);
static void rs6000_xcoff_asm_globalize_label (FILE *, const char *);
static void rs6000_xcoff_asm_init_sections (void);
static int rs6000_xcoff_reloc_rw_mask (void);
static void rs6000_xcoff_asm_named_section (const char *, unsigned int, tree);
static section *rs6000_xcoff_select_section (tree, int,
					     unsigned HOST_WIDE_INT);
static void rs6000_xcoff_unique_section (tree, int);
static section *rs6000_xcoff_select_rtx_section
  (enum machine_mode, rtx, unsigned HOST_WIDE_INT);
static const char * rs6000_xcoff_strip_name_encoding (const char *);
static unsigned int rs6000_xcoff_section_type_flags (tree, const char *, int);
static void rs6000_xcoff_file_start (void);
static void rs6000_xcoff_file_end (void);
static int rs6000_variable_issue (FILE *, int, rtx, int);
static bool rs6000_rtx_costs (rtx, int, int, int *, bool);
static int rs6000_adjust_cost (rtx, rtx, rtx, int);
static void rs6000_sched_init (FILE *, int, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_branch_slot_insn (rtx);
static bool is_load_insn (rtx);
static rtx get_store_dest (rtx pat);
static bool is_store_insn (rtx);
static bool set_to_load_agen (rtx,rtx);
static bool adjacent_mem_locations (rtx,rtx);
static int rs6000_adjust_priority (rtx, int);
static int rs6000_issue_rate (void);
static bool rs6000_is_costly_dependence (dep_t, int, int);
static rtx get_next_active_insn (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static bool is_costly_group (rtx *, rtx);
static int force_new_group (int, FILE *, rtx *, rtx, bool *, int, int *);
static int redefine_groups (FILE *, int, rtx, rtx);
static int pad_groups (FILE *, int, rtx, rtx);
static void rs6000_sched_finish (FILE *, int);
static int rs6000_sched_reorder (FILE *, int, rtx *, int *, int);
static int rs6000_sched_reorder2 (FILE *, int, rtx *, int *, int);
static int rs6000_use_sched_lookahead (void);
static int rs6000_use_sched_lookahead_guard (rtx);
static void * rs6000_alloc_sched_context (void);
static void rs6000_init_sched_context (void *, bool);
static void rs6000_set_sched_context (void *);
static void rs6000_free_sched_context (void *);
static tree rs6000_builtin_reciprocal (unsigned int, bool, bool);
static tree rs6000_builtin_mask_for_load (void);
static tree rs6000_builtin_mul_widen_even (tree);
static tree rs6000_builtin_mul_widen_odd (tree);
static tree rs6000_builtin_conversion (unsigned int, tree);
static tree rs6000_builtin_vec_perm (tree, tree *);

static void def_builtin (int, const char *, tree, int);
static bool rs6000_vector_alignment_reachable (const_tree, bool);
static void rs6000_init_builtins (void);
static rtx rs6000_expand_unop_builtin (enum insn_code, tree, rtx);
static rtx rs6000_expand_binop_builtin (enum insn_code, tree, rtx);
static rtx rs6000_expand_ternop_builtin (enum insn_code, tree, rtx);
static rtx rs6000_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void altivec_init_builtins (void);
static void rs6000_common_init_builtins (void);
static void rs6000_init_libfuncs (void);

static void paired_init_builtins (void);
static rtx paired_expand_builtin (tree, rtx, bool *);
static rtx paired_expand_lv_builtin (enum insn_code, tree, rtx);
static rtx paired_expand_stv_builtin (enum insn_code, tree);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);

static void enable_mask_for_builtins (struct builtin_description *, int,
				      enum rs6000_builtins,
				      enum rs6000_builtins);
static void spe_init_builtins (void);
static rtx spe_expand_builtin (tree, rtx, bool *);
static rtx spe_expand_stv_builtin (enum insn_code, tree);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void debug_stack_info (rs6000_stack_t *);

static rtx altivec_expand_builtin (tree, rtx, bool *);
static rtx altivec_expand_ld_builtin (tree, rtx, bool *);
static rtx altivec_expand_st_builtin (tree, rtx, bool *);
static rtx altivec_expand_dst_builtin (tree, rtx, bool *);
static rtx altivec_expand_abs_builtin (enum insn_code, tree, rtx);
static rtx altivec_expand_predicate_builtin (enum insn_code,
					     const char *, tree, rtx);
static rtx altivec_expand_stv_builtin (enum insn_code, tree);
static rtx altivec_expand_vec_init_builtin (tree, tree, rtx);
static rtx altivec_expand_vec_set_builtin (tree);
static rtx altivec_expand_vec_ext_builtin (tree, rtx);
static int get_element_number (tree, tree);
static bool rs6000_handle_option (size_t, const char *, int);
static void rs6000_parse_tls_size_option (void);
static void rs6000_parse_yes_no_option (const char *, const char *, int *);
static int first_altivec_reg_to_save (void);
static unsigned int compute_vrsave_mask (void);
static void compute_save_world_info (rs6000_stack_t *info_ptr);
static void is_altivec_return_reg (rtx, void *);
static rtx generate_set_vrsave (rtx, rs6000_stack_t *, int);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_dwarf_register_span (rtx);
static void rs6000_init_dwarf_reg_sizes_extra (tree);
static rtx rs6000_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
static rtx rs6000_tls_get_addr (void);
static rtx rs6000_got_sym (void);
static int rs6000_tls_symbol_ref_1 (rtx *, void *);
static const char *rs6000_get_some_local_dynamic_name (void);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_complex_function_value (enum machine_mode);
static rtx rs6000_spe_function_arg (CUMULATIVE_ARGS *,
				    enum machine_mode, tree);
static void rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *,
						      HOST_WIDE_INT);
static void rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *,
							tree, HOST_WIDE_INT);
static void rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *,
					      HOST_WIDE_INT,
					      rtx[], int *);
static void rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *,
						const_tree, HOST_WIDE_INT,
						rtx[], int *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree, int, bool);
static rtx rs6000_mixed_function_arg (enum machine_mode, tree, int);
static void rs6000_move_block_from_reg (int regno, rtx x, int nregs);
static void setup_incoming_varargs (CUMULATIVE_ARGS *,
				    enum machine_mode, tree,
				    int *, int);
static bool rs6000_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode,
				      const_tree, bool);
static int rs6000_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode,
				     tree, bool);
static const char *invalid_arg_for_unprototyped_fn (const_tree, const_tree, const_tree);
static void macho_branch_islands (void);
static int no_previous_def (tree function_name);
static tree get_prev_label (tree function_name);
static void rs6000_darwin_file_start (void);

static tree rs6000_build_builtin_va_list (void);
static void rs6000_va_start (tree, rtx);
static tree rs6000_gimplify_va_arg (tree, tree, gimple_seq *, gimple_seq *);
static bool rs6000_must_pass_in_stack (enum machine_mode, const_tree);
static bool rs6000_scalar_mode_supported_p (enum machine_mode);
static bool rs6000_vector_mode_supported_p (enum machine_mode);
static int get_vec_cmp_insn (enum rtx_code, enum machine_mode,
			     enum machine_mode);
static rtx rs6000_emit_vector_compare (enum rtx_code, rtx, rtx,
				       enum machine_mode);
static int get_vsel_insn (enum machine_mode);
static void rs6000_emit_vector_select (rtx, rtx, rtx, rtx);
static tree rs6000_stack_protect_fail (void);

const int INSN_NOT_AVAILABLE = -1;
static enum machine_mode rs6000_eh_return_filter_mode (void);
/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;
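
/* The table lets TOC output code re-use an existing entry when the same
   (key, key_mode) constant is emitted twice; toc_hash_function and
   toc_hash_eq, declared earlier, supply the htab callbacks.  */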
/* Default register names.  */
char rs6000_reg_names[][8] =
{
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9", "10", "11", "12", "13", "14", "15",
     "16", "17", "18", "19", "20", "21", "22", "23",
     "24", "25", "26", "27", "28", "29", "30", "31",
     "mq", "lr", "ctr","ap",
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "xer",
      /* AltiVec registers.  */
      "0",  "1",  "2",  "3",  "4",  "5",  "6",  "7",
      "8",  "9",  "10", "11", "12", "13", "14", "15",
      "16", "17", "18", "19", "20", "21", "22", "23",
      "24", "25", "26", "27", "28", "29", "30", "31",
      "vrsave", "vscr",
      /* SPE registers.  */
      "spe_acc", "spefscr",
      /* Soft frame pointer.  */
      "sfp"
};
#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
   "%r0",   "%r1",  "%r2",  "%r3",  "%r4",  "%r5",  "%r6",  "%r7",
   "%r8",   "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16",  "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24",  "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
   "%f0",   "%f1",  "%f2",  "%f3",  "%f4",  "%f5",  "%f6",  "%f7",
   "%f8",   "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16",  "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24",  "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
    "mq",    "lr",  "ctr",   "ap",
  "%cr0",  "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
   "xer",
  /* AltiVec registers.  */
   "%v0",  "%v1",  "%v2",  "%v3",  "%v4",  "%v5",  "%v6",  "%v7",
   "%v8",  "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp"
};
#endif
#ifndef MASK_STRICT_ALIGN
#define MASK_STRICT_ALIGN 0
#endif
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif
/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
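
/* For example, ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 and
   ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31) is 0x00000001, mirroring
   the architected VRSAVE layout in which %v0 occupies the most
   significant bit.  */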
/* Initialize the GCC target structure.  */
#undef TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
#undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
#define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes

#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP

/* Default unaligned ops are only provided for ELF.  Find the ops needed
   for non-ELF systems.  */
#ifndef OBJECT_FORMAT_ELF
#if TARGET_XCOFF
/* For XCOFF.  rs6000_assemble_integer will handle unaligned DIs on
   64-bit targets.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
#else
/* For Darwin.  */
#undef TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#undef TARGET_ASM_UNALIGNED_DI_OP
#define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
#undef TARGET_ASM_ALIGNED_DI_OP
#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
#endif
#endif
/* This hook deals with fixups for relocatable code and DI-mode objects
   in 64-bit code.  */
#undef TARGET_ASM_INTEGER
#define TARGET_ASM_INTEGER rs6000_assemble_integer
#ifdef HAVE_GAS_HIDDEN
#undef TARGET_ASM_ASSEMBLE_VISIBILITY
#define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
#endif
#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS HAVE_AS_TLS

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM rs6000_tls_referenced_p

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address

#undef TARGET_SCHED_VARIABLE_ISSUE
#define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue

#undef TARGET_SCHED_ISSUE_RATE
#define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
#undef TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
#undef TARGET_SCHED_ADJUST_PRIORITY
#define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
#undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
#define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
#undef TARGET_SCHED_INIT
#define TARGET_SCHED_INIT rs6000_sched_init
#undef TARGET_SCHED_FINISH
#define TARGET_SCHED_FINISH rs6000_sched_finish
#undef TARGET_SCHED_REORDER
#define TARGET_SCHED_REORDER rs6000_sched_reorder
#undef TARGET_SCHED_REORDER2
#define TARGET_SCHED_REORDER2 rs6000_sched_reorder2

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead

#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard

#undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
#define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
#undef TARGET_SCHED_INIT_SCHED_CONTEXT
#define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
#undef TARGET_SCHED_SET_SCHED_CONTEXT
#define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
#undef TARGET_SCHED_FREE_SCHED_CONTEXT
#define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context

#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
#undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN
#define TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_EVEN rs6000_builtin_mul_widen_even
#undef TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD
#define TARGET_VECTORIZE_BUILTIN_MUL_WIDEN_ODD rs6000_builtin_mul_widen_odd
#undef TARGET_VECTORIZE_BUILTIN_CONVERSION
#define TARGET_VECTORIZE_BUILTIN_CONVERSION rs6000_builtin_conversion
#undef TARGET_VECTORIZE_BUILTIN_VEC_PERM
#define TARGET_VECTORIZE_BUILTIN_VEC_PERM rs6000_builtin_vec_perm

#undef TARGET_VECTOR_ALIGNMENT_REACHABLE
#define TARGET_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS rs6000_init_builtins

#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN rs6000_expand_builtin

#undef TARGET_MANGLE_TYPE
#define TARGET_MANGLE_TYPE rs6000_mangle_type

#undef TARGET_INIT_LIBFUNCS
#define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
#if TARGET_MACHO
#undef TARGET_BINDS_LOCAL_P
#define TARGET_BINDS_LOCAL_P darwin_binds_local_p
#endif
#undef TARGET_MS_BITFIELD_LAYOUT_P
#define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p

#undef TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk

#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true

#undef TARGET_FUNCTION_OK_FOR_SIBCALL
#define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall

#undef TARGET_INVALID_WITHIN_DOLOOP
#define TARGET_INVALID_WITHIN_DOLOOP rs6000_invalid_within_doloop

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS rs6000_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_bool_0

#undef TARGET_DWARF_REGISTER_SPAN
#define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span

#undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
#define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
/* On rs6000, function arguments are promoted, as are function return
   values.  */
#undef TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_const_tree_true
#undef TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_const_tree_true
#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory

#undef TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs

/* Always strict argument naming on rs6000.  */
#undef TARGET_STRICT_ARGUMENT_NAMING
#define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
#define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
#undef TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
#undef TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start

#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg

#undef TARGET_EH_RETURN_FILTER_MODE
#define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode

#undef TARGET_SCALAR_MODE_SUPPORTED_P
#define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p

#undef TARGET_VECTOR_MODE_SUPPORTED_P
#define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p

#undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
#define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn

#undef TARGET_HANDLE_OPTION
#define TARGET_HANDLE_OPTION rs6000_handle_option
#undef TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS \
  (TARGET_DEFAULT)
#undef TARGET_STACK_PROTECT_FAIL
#define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
/* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
   The PowerPC architecture requires only weak consistency among
   processors--that is, memory accesses between processors need not be
   sequentially consistent and memory accesses among processors can occur
   in any order.  The ability to order memory accesses weakly provides
   opportunities for more efficient use of the system bus.  Unless a
   dependency exists, the 604e allows read operations to precede store
   operations.  */
#undef TARGET_RELAXED_ORDERING
#define TARGET_RELAXED_ORDERING true
#ifdef HAVE_AS_TLS
#undef TARGET_ASM_OUTPUT_DWARF_DTPREL
#define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
#endif
/* Use a 32-bit anchor range.  This leads to sequences like:

	addis	tmp,anchor,high
	add	dest,tmp,anchor
	mem	(dest),...

   where tmp itself acts as an anchor, and can be shared between
   accesses to the same 64k page.  */
#undef TARGET_MIN_ANCHOR_OFFSET
#define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
#undef TARGET_MAX_ANCHOR_OFFSET
#define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
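
/* Concretely, any two addresses within the signed 32-bit window above
   can share one anchor symbol, and because the addis result depends
   only on the high 16 bits of the offset, every access falling in the
   same 64k page can reuse that temporary.  */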
#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
#define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p

#undef TARGET_BUILTIN_RECIPROCAL
#define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal

#undef TARGET_EXPAND_TO_RTL_HOOK
#define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot

#undef TARGET_INSTANTIATE_DECLS
#define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p

struct gcc_target targetm = TARGET_INITIALIZER;
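
/* TARGET_INITIALIZER expands to an initializer covering every target
   hook, picking up each TARGET_* macro as (re)defined above; that is
   why each override appears as an #undef/#define pair before this
   single definition of targetm.  */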
/* Value is 1 if hard register REGNO can hold a value of machine-mode
   MODE.  */
static int
rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
{
  /* The GPRs can hold any mode, but values bigger than one register
     cannot go past R31.  */
  if (INT_REGNO_P (regno))
    return INT_REGNO_P (regno + HARD_REGNO_NREGS (regno, mode) - 1);

  /* The float registers can only hold floating modes and DImode.
     This excludes the 32-bit decimal float mode for now.  */
  if (FP_REGNO_P (regno))
    return
      ((SCALAR_FLOAT_MODE_P (mode)
	&& (mode != TDmode || (regno % 2) == 0)
	&& FP_REGNO_P (regno + HARD_REGNO_NREGS (regno, mode) - 1))
       || (GET_MODE_CLASS (mode) == MODE_INT
	   && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
       || (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
	   && PAIRED_VECTOR_MODE (mode)));

  /* The CR register can only hold CC modes.  */
  if (CR_REGNO_P (regno))
    return GET_MODE_CLASS (mode) == MODE_CC;

  if (XER_REGNO_P (regno))
    return mode == PSImode;

  /* AltiVec only in AltiVec registers.  */
  if (ALTIVEC_REGNO_P (regno))
    return ALTIVEC_VECTOR_MODE (mode);

  /* ...but GPRs can hold SIMD data on the SPE in one register.  */
  if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    return 1;

  /* We cannot put TImode anywhere except general register and it must be
     able to fit within the register set.  */

  return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
}
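
/* As an illustration (assuming a 32-bit target where UNITS_PER_WORD is
   4): rs6000_hard_regno_mode_ok (0, SImode) is true, since a GPR can
   hold any single-register mode, but rs6000_hard_regno_mode_ok (31,
   DImode) is false, because the second word of the DImode value would
   need the nonexistent r32.  The CR fields, by contrast, reject
   everything except CC modes.  */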
/* Initialize rs6000_hard_regno_mode_ok_p table.  */
static void
rs6000_init_hard_regno_mode_ok (void)
{
  int r, m;

  for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
    for (m = 0; m < NUM_MACHINE_MODES; ++m)
      if (rs6000_hard_regno_mode_ok (r, (enum machine_mode) m))
	rs6000_hard_regno_mode_ok_p[m][r] = true;
}
/* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS.  */
static void
darwin_rs6000_override_options (void)
{
  /* The Darwin ABI always includes AltiVec, can't be (validly) turned
     off.  */
  rs6000_altivec_abi = 1;
  TARGET_ALTIVEC_VRSAVE = 1;

  if (DEFAULT_ABI == ABI_DARWIN)
    {
      if (MACHO_DYNAMIC_NO_PIC_P)
	{
	  if (flag_pic)
	    warning (0, "-mdynamic-no-pic overrides -fpic or -fPIC");
	  flag_pic = 0;
	}
      else if (flag_pic == 1)
	flag_pic = 2;
    }

  if (TARGET_64BIT && ! TARGET_POWERPC64)
    {
      target_flags |= MASK_POWERPC64;
      warning (0, "-m64 requires PowerPC64 architecture, enabling");
    }

  if (flag_mkernel)
    {
      rs6000_default_long_calls = 1;
      target_flags |= MASK_SOFT_FLOAT;
    }

  /* Make -m64 imply -maltivec.  Darwin's 64-bit ABI includes
     Altivec.  */
  if (!flag_mkernel && !flag_apple_kext
      && TARGET_64BIT
      && ! (target_flags_explicit & MASK_ALTIVEC))
    target_flags |= MASK_ALTIVEC;

  /* Unless the user (not the configurer) has explicitly overridden
     it with -mcpu=G3 or -mno-altivec, then 10.5+ targets default to
     G4 unless targeting the kernel.  */
  if (!flag_mkernel
      && !flag_apple_kext
      && strverscmp (darwin_macosx_version_min, "10.5") >= 0
      && ! (target_flags_explicit & MASK_ALTIVEC)
      && ! rs6000_select[1].string)
    target_flags |= MASK_ALTIVEC;
}
/* If not otherwise specified by a target, make 'long double' equivalent to
   'double'.  */

#ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
#define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
#endif
/* Override command line options.  Mostly we process the processor
   type and sometimes adjust other TARGET_ options.  */

void
rs6000_override_options (const char *default_cpu)
{
  size_t i, j;
  struct rs6000_cpu_select *ptr;
  int set_masks;

  /* Simplifications for entries below.  */

  enum {
    POWERPC_BASE_MASK = MASK_POWERPC | MASK_NEW_MNEMONICS,
    POWERPC_7400_MASK = POWERPC_BASE_MASK | MASK_PPC_GFXOPT | MASK_ALTIVEC
  };
  /* This table occasionally claims that a processor does not support
     a particular feature even though it does, but the feature is slower
     than the alternative.  Thus, it shouldn't be relied on as a
     complete description of the processor's support.

     Please keep this list in order, and don't forget to update the
     documentation in invoke.texi when adding a new processor or
     feature.  */

  static struct ptt
    {
      const char *const name;		/* Canonical processor name.  */
      const enum processor_type processor; /* Processor type enum value.  */
      const int target_enable;		/* Target flags to enable.  */
    } const processor_target_table[]
1449 = {{"401", PROCESSOR_PPC403
, POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
},
1450 {"403", PROCESSOR_PPC403
,
1451 POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
| MASK_STRICT_ALIGN
},
1452 {"405", PROCESSOR_PPC405
,
1453 POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
| MASK_MULHW
| MASK_DLMZB
},
1454 {"405fp", PROCESSOR_PPC405
,
1455 POWERPC_BASE_MASK
| MASK_MULHW
| MASK_DLMZB
},
1456 {"440", PROCESSOR_PPC440
,
1457 POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
| MASK_MULHW
| MASK_DLMZB
},
1458 {"440fp", PROCESSOR_PPC440
,
1459 POWERPC_BASE_MASK
| MASK_MULHW
| MASK_DLMZB
},
1460 {"464", PROCESSOR_PPC440
,
1461 POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
| MASK_MULHW
| MASK_DLMZB
},
1462 {"464fp", PROCESSOR_PPC440
,
1463 POWERPC_BASE_MASK
| MASK_MULHW
| MASK_DLMZB
},
1464 {"505", PROCESSOR_MPCCORE
, POWERPC_BASE_MASK
},
1465 {"601", PROCESSOR_PPC601
,
1466 MASK_POWER
| POWERPC_BASE_MASK
| MASK_MULTIPLE
| MASK_STRING
},
1467 {"602", PROCESSOR_PPC603
, POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
},
1468 {"603", PROCESSOR_PPC603
, POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
},
1469 {"603e", PROCESSOR_PPC603
, POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
},
1470 {"604", PROCESSOR_PPC604
, POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
},
1471 {"604e", PROCESSOR_PPC604e
, POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
},
1472 {"620", PROCESSOR_PPC620
,
1473 POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
| MASK_POWERPC64
},
1474 {"630", PROCESSOR_PPC630
,
1475 POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
| MASK_POWERPC64
},
1476 {"740", PROCESSOR_PPC750
, POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
},
1477 {"7400", PROCESSOR_PPC7400
, POWERPC_7400_MASK
},
1478 {"7450", PROCESSOR_PPC7450
, POWERPC_7400_MASK
},
1479 {"750", PROCESSOR_PPC750
, POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
},
1480 {"801", PROCESSOR_MPCCORE
, POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
},
1481 {"821", PROCESSOR_MPCCORE
, POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
},
1482 {"823", PROCESSOR_MPCCORE
, POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
},
1483 {"8540", PROCESSOR_PPC8540
, POWERPC_BASE_MASK
| MASK_STRICT_ALIGN
},
1484 /* 8548 has a dummy entry for now. */
1485 {"8548", PROCESSOR_PPC8540
, POWERPC_BASE_MASK
| MASK_STRICT_ALIGN
},
1486 {"e300c2", PROCESSOR_PPCE300C2
, POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
},
1487 {"e300c3", PROCESSOR_PPCE300C3
, POWERPC_BASE_MASK
},
1488 {"e500mc", PROCESSOR_PPCE500MC
, POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
},
1489 {"860", PROCESSOR_MPCCORE
, POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
},
1490 {"970", PROCESSOR_POWER4
,
1491 POWERPC_7400_MASK
| MASK_PPC_GPOPT
| MASK_MFCRF
| MASK_POWERPC64
},
1492 {"cell", PROCESSOR_CELL
,
1493 POWERPC_7400_MASK
| MASK_PPC_GPOPT
| MASK_MFCRF
| MASK_POWERPC64
},
1494 {"common", PROCESSOR_COMMON
, MASK_NEW_MNEMONICS
},
1495 {"ec603e", PROCESSOR_PPC603
, POWERPC_BASE_MASK
| MASK_SOFT_FLOAT
},
1496 {"G3", PROCESSOR_PPC750
, POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
},
1497 {"G4", PROCESSOR_PPC7450
, POWERPC_7400_MASK
},
1498 {"G5", PROCESSOR_POWER4
,
1499 POWERPC_7400_MASK
| MASK_PPC_GPOPT
| MASK_MFCRF
| MASK_POWERPC64
},
1500 {"power", PROCESSOR_POWER
, MASK_POWER
| MASK_MULTIPLE
| MASK_STRING
},
1501 {"power2", PROCESSOR_POWER
,
1502 MASK_POWER
| MASK_POWER2
| MASK_MULTIPLE
| MASK_STRING
},
1503 {"power3", PROCESSOR_PPC630
,
1504 POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
| MASK_POWERPC64
},
1505 {"power4", PROCESSOR_POWER4
,
1506 POWERPC_BASE_MASK
| MASK_POWERPC64
| MASK_PPC_GPOPT
| MASK_PPC_GFXOPT
1508 {"power5", PROCESSOR_POWER5
,
1509 POWERPC_BASE_MASK
| MASK_POWERPC64
| MASK_PPC_GPOPT
| MASK_PPC_GFXOPT
1510 | MASK_MFCRF
| MASK_POPCNTB
},
1511 {"power5+", PROCESSOR_POWER5
,
1512 POWERPC_BASE_MASK
| MASK_POWERPC64
| MASK_PPC_GPOPT
| MASK_PPC_GFXOPT
1513 | MASK_MFCRF
| MASK_POPCNTB
| MASK_FPRND
},
1514 {"power6", PROCESSOR_POWER6
,
1515 POWERPC_BASE_MASK
| MASK_POWERPC64
| MASK_PPC_GPOPT
| MASK_PPC_GFXOPT
1516 | MASK_MFCRF
| MASK_POPCNTB
| MASK_FPRND
| MASK_CMPB
| MASK_DFP
},
1517 {"power6x", PROCESSOR_POWER6
,
1518 POWERPC_BASE_MASK
| MASK_POWERPC64
| MASK_PPC_GPOPT
| MASK_PPC_GFXOPT
1519 | MASK_MFCRF
| MASK_POPCNTB
| MASK_FPRND
| MASK_CMPB
| MASK_DFP
1521 {"power7", PROCESSOR_POWER5
,
1522 POWERPC_7400_MASK
| MASK_POWERPC64
| MASK_PPC_GPOPT
| MASK_MFCRF
1523 | MASK_POPCNTB
| MASK_FPRND
| MASK_CMPB
| MASK_DFP
},
1524 {"powerpc", PROCESSOR_POWERPC
, POWERPC_BASE_MASK
},
1525 {"powerpc64", PROCESSOR_POWERPC64
,
1526 POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
| MASK_POWERPC64
},
1527 {"rios", PROCESSOR_RIOS1
, MASK_POWER
| MASK_MULTIPLE
| MASK_STRING
},
1528 {"rios1", PROCESSOR_RIOS1
, MASK_POWER
| MASK_MULTIPLE
| MASK_STRING
},
1529 {"rios2", PROCESSOR_RIOS2
,
1530 MASK_POWER
| MASK_POWER2
| MASK_MULTIPLE
| MASK_STRING
},
1531 {"rsc", PROCESSOR_PPC601
, MASK_POWER
| MASK_MULTIPLE
| MASK_STRING
},
1532 {"rsc1", PROCESSOR_PPC601
, MASK_POWER
| MASK_MULTIPLE
| MASK_STRING
},
1533 {"rs64", PROCESSOR_RS64A
,
1534 POWERPC_BASE_MASK
| MASK_PPC_GFXOPT
| MASK_POWERPC64
}
  const size_t ptt_size = ARRAY_SIZE (processor_target_table);
  /* Some OSs don't support saving the high part of 64-bit registers on
     context switch.  Other OSs don't support saving Altivec registers.
     On those OSs, we don't touch the MASK_POWERPC64 or MASK_ALTIVEC
     settings; if the user wants either, the user must explicitly specify
     them and we won't interfere with the user's specification.  */

  enum {
    POWER_MASKS = MASK_POWER | MASK_POWER2 | MASK_MULTIPLE | MASK_STRING,
    POWERPC_MASKS = (POWERPC_BASE_MASK | MASK_PPC_GPOPT | MASK_STRICT_ALIGN
		     | MASK_PPC_GFXOPT | MASK_POWERPC64 | MASK_ALTIVEC
		     | MASK_MFCRF | MASK_POPCNTB | MASK_FPRND | MASK_MULHW
		     | MASK_DLMZB | MASK_CMPB | MASK_MFPGPR | MASK_DFP)
  };

  set_masks = POWER_MASKS | POWERPC_MASKS | MASK_SOFT_FLOAT;
#ifdef OS_MISSING_POWERPC64
  if (OS_MISSING_POWERPC64)
    set_masks &= ~MASK_POWERPC64;
#endif
#ifdef OS_MISSING_ALTIVEC
  if (OS_MISSING_ALTIVEC)
    set_masks &= ~MASK_ALTIVEC;
#endif
  /* Don't override by the processor default if given explicitly.  */
  set_masks &= ~target_flags_explicit;
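
  /* For example, if the user passed -mno-altivec, MASK_ALTIVEC is set
     in target_flags_explicit, so it drops out of set_masks and the
     processor default can no longer turn AltiVec back on behind the
     user's back.  */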
  /* Identify the processor type.  */
  rs6000_select[0].string = default_cpu;
  rs6000_cpu = TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT;

  for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
    {
      ptr = &rs6000_select[i];
      if (ptr->string != (char *)0 && ptr->string[0] != '\0')
	{
	  for (j = 0; j < ptt_size; j++)
	    if (! strcmp (ptr->string, processor_target_table[j].name))
	      {
		if (ptr->set_tune_p)
		  rs6000_cpu = processor_target_table[j].processor;

		if (ptr->set_arch_p)
		  {
		    target_flags &= ~set_masks;
		    target_flags |= (processor_target_table[j].target_enable
				     & set_masks);
		  }
		break;
	      }

	  if (j == ptt_size)
	    error ("bad value (%s) for %s switch", ptr->string, ptr->name);
	}
    }
  if ((TARGET_E500 || rs6000_cpu == PROCESSOR_PPCE500MC)
      && !rs6000_explicit_options.isel)
    rs6000_isel = 1;

  if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
      || rs6000_cpu == PROCESSOR_PPCE500MC)
    {
      if (TARGET_ALTIVEC)
	error ("AltiVec not supported in this target");
      if (TARGET_SPE)
	error ("Spe not supported in this target");
    }
  /* Disable Cell microcode if we are optimizing for the Cell
     and not optimizing for size.  */
  if (rs6000_gen_cell_microcode == -1)
    rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
				  && !optimize_size);

  /* If we are optimizing big endian systems for space, use the load/store
     multiple and string instructions unless we are not generating
     Cell microcode.  */
  if (BYTES_BIG_ENDIAN && optimize_size && !rs6000_gen_cell_microcode)
    target_flags |= ~target_flags_explicit & (MASK_MULTIPLE | MASK_STRING);
  /* Don't allow -mmultiple or -mstring on little endian systems
     unless the cpu is a 750, because the hardware doesn't support the
     instructions used in little endian mode, and causes an alignment
     trap.  The 750 does not cause an alignment trap (except when the
     target is unaligned).  */

  if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
    {
      if (TARGET_MULTIPLE)
	{
	  target_flags &= ~MASK_MULTIPLE;
	  if ((target_flags_explicit & MASK_MULTIPLE) != 0)
	    warning (0, "-mmultiple is not supported on little endian systems");
	}

      if (TARGET_STRING)
	{
	  target_flags &= ~MASK_STRING;
	  if ((target_flags_explicit & MASK_STRING) != 0)
	    warning (0, "-mstring is not supported on little endian systems");
	}
    }
  /* Set debug flags */
  if (rs6000_debug_name)
    {
      if (! strcmp (rs6000_debug_name, "all"))
	rs6000_debug_stack = rs6000_debug_arg = 1;
      else if (! strcmp (rs6000_debug_name, "stack"))
	rs6000_debug_stack = 1;
      else if (! strcmp (rs6000_debug_name, "arg"))
	rs6000_debug_arg = 1;
      else
	error ("unknown -mdebug-%s switch", rs6000_debug_name);
    }
  if (rs6000_traceback_name)
    {
      if (! strncmp (rs6000_traceback_name, "full", 4))
	rs6000_traceback = traceback_full;
      else if (! strncmp (rs6000_traceback_name, "part", 4))
	rs6000_traceback = traceback_part;
      else if (! strncmp (rs6000_traceback_name, "no", 2))
	rs6000_traceback = traceback_none;
      else
	error ("unknown -mtraceback arg %qs; expecting %<full%>, %<partial%> or %<none%>",
	       rs6000_traceback_name);
    }
  if (!rs6000_explicit_options.long_double)
    rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;

#ifndef POWERPC_LINUX
  if (!rs6000_explicit_options.ieee)
    rs6000_ieeequad = 1;
#endif
  /* Enable Altivec ABI for AIX -maltivec.  */
  if (TARGET_XCOFF && TARGET_ALTIVEC)
    rs6000_altivec_abi = 1;

  /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux.  For
     PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI.  It can
     be explicitly overridden in either case.  */
  if (TARGET_ELF)
    {
      if (!rs6000_explicit_options.altivec_abi
	  && (TARGET_64BIT || TARGET_ALTIVEC))
	rs6000_altivec_abi = 1;

      /* Enable VRSAVE for AltiVec ABI, unless explicitly overridden.  */
      if (!rs6000_explicit_options.vrsave)
	TARGET_ALTIVEC_VRSAVE = rs6000_altivec_abi;
    }
  /* Set the Darwin64 ABI as default for 64-bit Darwin.  */
  if (DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT)
    {
      rs6000_darwin64_abi = 1;
#if TARGET_MACHO
      darwin_one_byte_bool = 1;
#endif
      /* Default to natural alignment, for better performance.  */
      rs6000_alignment_flags = MASK_ALIGN_NATURAL;
    }
  /* Place FP constants in the constant pool instead of TOC
     if section anchors enabled.  */
  if (flag_section_anchors)
    TARGET_NO_FP_IN_TOC = 1;

  /* Handle -mtls-size option.  */
  rs6000_parse_tls_size_option ();
#ifdef SUBTARGET_OVERRIDE_OPTIONS
  SUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
  SUBSUBTARGET_OVERRIDE_OPTIONS;
#endif
#ifdef SUB3TARGET_OVERRIDE_OPTIONS
  SUB3TARGET_OVERRIDE_OPTIONS;
#endif
  if (TARGET_E500 || rs6000_cpu == PROCESSOR_PPCE500MC)
    {
      /* The e500 and e500mc do not have string instructions, and we set
	 MASK_STRING above when optimizing for size.  */
      if ((target_flags & MASK_STRING) != 0)
	target_flags = target_flags & ~MASK_STRING;
    }
  else if (rs6000_select[1].string != NULL)
    {
      /* For the powerpc-eabispe configuration, we set all these by
	 default, so let's unset them if we manually set another
	 CPU that is not the E500.  */
      if (!rs6000_explicit_options.spe_abi)
	rs6000_spe_abi = 0;
      if (!rs6000_explicit_options.spe)
	rs6000_spe = 0;
      if (!rs6000_explicit_options.float_gprs)
	rs6000_float_gprs = 0;
      if (!rs6000_explicit_options.isel)
	rs6000_isel = 0;
    }
  /* Detect invalid option combinations with E500.  */
  CHECK_E500_OPTIONS;

  rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
			&& rs6000_cpu != PROCESSOR_POWER5
			&& rs6000_cpu != PROCESSOR_POWER6
			&& rs6000_cpu != PROCESSOR_CELL);
  rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
			 || rs6000_cpu == PROCESSOR_POWER5);
  rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
				 || rs6000_cpu == PROCESSOR_POWER5
				 || rs6000_cpu == PROCESSOR_POWER6);
  rs6000_sched_restricted_insns_priority
    = (rs6000_sched_groups ? 1 : 0);
  /* Handle -msched-costly-dep option.  */
  rs6000_sched_costly_dep
    = (rs6000_sched_groups ? store_to_load_dep_costly : no_dep_costly);

  if (rs6000_sched_costly_dep_str)
    {
      if (! strcmp (rs6000_sched_costly_dep_str, "no"))
	rs6000_sched_costly_dep = no_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
	rs6000_sched_costly_dep = all_deps_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
	rs6000_sched_costly_dep = true_store_to_load_dep_costly;
      else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
	rs6000_sched_costly_dep = store_to_load_dep_costly;
      else
	rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
				   atoi (rs6000_sched_costly_dep_str));
    }
  /* Handle -minsert-sched-nops option.  */
  rs6000_sched_insert_nops
    = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);

  if (rs6000_sched_insert_nops_str)
    {
      if (! strcmp (rs6000_sched_insert_nops_str, "no"))
	rs6000_sched_insert_nops = sched_finish_none;
      else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
	rs6000_sched_insert_nops = sched_finish_pad_groups;
      else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
	rs6000_sched_insert_nops = sched_finish_regroup_exact;
      else
	rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
				    atoi (rs6000_sched_insert_nops_str));
    }
#ifdef TARGET_REGNAMES
  /* If the user desires alternate register names, copy in the
     alternate names now.  */
  if (TARGET_REGNAMES)
    memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
#endif
  /* Set aix_struct_return last, after the ABI is determined.
     If -maix-struct-return or -msvr4-struct-return was explicitly
     used, don't override with the ABI default.  */
  if (!rs6000_explicit_options.aix_struct_ret)
    aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
  if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
    REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;

  if (TARGET_TOC)
    ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
  /* We can only guarantee the availability of DI pseudo-ops when
     assembling for 64-bit targets.  */
  if (!TARGET_64BIT)
    {
      targetm.asm_out.aligned_op.di = NULL;
      targetm.asm_out.unaligned_op.di = NULL;
    }
  /* Set branch target alignment, if not optimizing for size.  */
  if (!optimize_size)
    {
      /* Cell wants to be aligned 8byte for dual issue.  */
      if (rs6000_cpu == PROCESSOR_CELL)
	{
	  if (align_functions <= 0)
	    align_functions = 8;
	  if (align_jumps <= 0)
	    align_jumps = 8;
	  if (align_loops <= 0)
	    align_loops = 8;
	}
      if (rs6000_align_branch_targets)
	{
	  if (align_functions <= 0)
	    align_functions = 16;
	  if (align_jumps <= 0)
	    align_jumps = 16;
	  if (align_loops <= 0)
	    align_loops = 16;
	}
      if (align_jumps_max_skip <= 0)
	align_jumps_max_skip = 15;
      if (align_loops_max_skip <= 0)
	align_loops_max_skip = 15;
    }
  /* Arrange to save and restore machine status around nested functions.  */
  init_machine_status = rs6000_init_machine_status;

  /* We should always be splitting complex arguments, but we can't break
     Linux and Darwin ABIs at the moment.  For now, only AIX is fixed.  */
  if (DEFAULT_ABI != ABI_AIX)
    targetm.calls.split_complex_arg = NULL;
  /* Initialize rs6000_cost with the appropriate target costs.  */
  if (optimize_size)
    rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
  else
    switch (rs6000_cpu)
      {
      case PROCESSOR_RIOS1:
	rs6000_cost = &rios1_cost;
	break;

      case PROCESSOR_RIOS2:
	rs6000_cost = &rios2_cost;
	break;

      case PROCESSOR_RS64A:
	rs6000_cost = &rs64a_cost;
	break;

      case PROCESSOR_MPCCORE:
	rs6000_cost = &mpccore_cost;
	break;

      case PROCESSOR_PPC403:
	rs6000_cost = &ppc403_cost;
	break;

      case PROCESSOR_PPC405:
	rs6000_cost = &ppc405_cost;
	break;

      case PROCESSOR_PPC440:
	rs6000_cost = &ppc440_cost;
	break;

      case PROCESSOR_PPC601:
	rs6000_cost = &ppc601_cost;
	break;

      case PROCESSOR_PPC603:
	rs6000_cost = &ppc603_cost;
	break;

      case PROCESSOR_PPC604:
	rs6000_cost = &ppc604_cost;
	break;

      case PROCESSOR_PPC604e:
	rs6000_cost = &ppc604e_cost;
	break;

      case PROCESSOR_PPC620:
	rs6000_cost = &ppc620_cost;
	break;

      case PROCESSOR_PPC630:
	rs6000_cost = &ppc630_cost;
	break;

      case PROCESSOR_CELL:
	rs6000_cost = &ppccell_cost;
	break;

      case PROCESSOR_PPC750:
      case PROCESSOR_PPC7400:
	rs6000_cost = &ppc750_cost;
	break;

      case PROCESSOR_PPC7450:
	rs6000_cost = &ppc7450_cost;
	break;

      case PROCESSOR_PPC8540:
	rs6000_cost = &ppc8540_cost;
	break;

      case PROCESSOR_PPCE300C2:
      case PROCESSOR_PPCE300C3:
	rs6000_cost = &ppce300c2c3_cost;
	break;

      case PROCESSOR_PPCE500MC:
	rs6000_cost = &ppce500mc_cost;
	break;

      case PROCESSOR_POWER4:
      case PROCESSOR_POWER5:
	rs6000_cost = &power4_cost;
	break;

      case PROCESSOR_POWER6:
	rs6000_cost = &power6_cost;
	break;

      default:
	gcc_unreachable ();
      }
  if (!PARAM_SET_P (PARAM_SIMULTANEOUS_PREFETCHES))
    set_param_value ("simultaneous-prefetches",
		     rs6000_cost->simultaneous_prefetches);
  if (!PARAM_SET_P (PARAM_L1_CACHE_SIZE))
    set_param_value ("l1-cache-size", rs6000_cost->l1_cache_size);
  if (!PARAM_SET_P (PARAM_L1_CACHE_LINE_SIZE))
    set_param_value ("l1-cache-line-size", rs6000_cost->cache_line_size);
  if (!PARAM_SET_P (PARAM_L2_CACHE_SIZE))
    set_param_value ("l2-cache-size", rs6000_cost->l2_cache_size);
  /* If using typedef char *va_list, signal that __builtin_va_start (&ap, 0)
     can be optimized to ap = __builtin_next_arg (0).  */
  if (DEFAULT_ABI != ABI_V4)
    targetm.expand_builtin_va_start = NULL;
  /* Set up single/double float flags.
     If TARGET_HARD_FLOAT is set, but neither single or double is set,
     then set both flags.  */
  if (TARGET_HARD_FLOAT && TARGET_FPRS
      && rs6000_single_float == 0 && rs6000_double_float == 0)
    rs6000_single_float = rs6000_double_float = 1;

  /* Reset single and double FP flags if target is E500.  */
  if (TARGET_E500)
    {
      rs6000_single_float = rs6000_double_float = 0;
      if (TARGET_E500_SINGLE)
	rs6000_single_float = 1;
      if (TARGET_E500_DOUBLE)
	rs6000_single_float = rs6000_double_float = 1;
    }
  /* If not explicitly specified via option, decide whether to generate indexed
     load/store instructions.  */
  if (TARGET_AVOID_XFORM == -1)
    /* Avoid indexed addressing when targeting Power6 in order to avoid
       the DERAT mispredict penalty.  */
    TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB);

  rs6000_init_hard_regno_mode_ok ();
}
/* Implement targetm.vectorize.builtin_mask_for_load.  */
static tree
rs6000_builtin_mask_for_load (void)
{
  if (TARGET_ALTIVEC)
    return altivec_builtin_mask_for_load;
  else
    return 0;
}
/* Implement targetm.vectorize.builtin_conversion.
   Returns a decl of a function that implements conversion of an integer vector
   into a floating-point vector, or vice-versa.  TYPE is the type of the integer
   side of the conversion.
   Return NULL_TREE if it is not available.  */
static tree
rs6000_builtin_conversion (unsigned int tcode, tree type)
{
  enum tree_code code = (enum tree_code) tcode;

  if (!TARGET_ALTIVEC)
    return NULL_TREE;

  switch (code)
    {
    case FIX_TRUNC_EXPR:
      switch (TYPE_MODE (type))
	{
	case V4SImode:
	  return TYPE_UNSIGNED (type)
	    ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VCTUXS]
	    : rs6000_builtin_decls[ALTIVEC_BUILTIN_VCTSXS];
	default:
	  return NULL_TREE;
	}

    case FLOAT_EXPR:
      switch (TYPE_MODE (type))
	{
	case V4SImode:
	  return TYPE_UNSIGNED (type)
	    ? rs6000_builtin_decls[ALTIVEC_BUILTIN_VCFUX]
	    : rs6000_builtin_decls[ALTIVEC_BUILTIN_VCFSX];
	default:
	  return NULL_TREE;
	}

    default:
      return NULL_TREE;
    }
}
/* Implement targetm.vectorize.builtin_mul_widen_even.  */
static tree
rs6000_builtin_mul_widen_even (tree type)
{
  if (!TARGET_ALTIVEC)
    return NULL_TREE;

  switch (TYPE_MODE (type))
    {
    case V8HImode:
      return TYPE_UNSIGNED (type)
	? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULEUH]
	: rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULESH];

    case V16QImode:
      return TYPE_UNSIGNED (type)
	? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULEUB]
	: rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULESB];

    default:
      return NULL_TREE;
    }
}
/* Implement targetm.vectorize.builtin_mul_widen_odd.  */
static tree
rs6000_builtin_mul_widen_odd (tree type)
{
  if (!TARGET_ALTIVEC)
    return NULL_TREE;

  switch (TYPE_MODE (type))
    {
    case V8HImode:
      return TYPE_UNSIGNED (type)
	? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOUH]
	: rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOSH];

    case V16QImode:
      return TYPE_UNSIGNED (type)
	? rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOUB]
	: rs6000_builtin_decls[ALTIVEC_BUILTIN_VMULOSB];

    default:
      return NULL_TREE;
    }
}
/* Return true iff, data reference of TYPE can reach vector alignment (16)
   after applying N number of iterations.  This routine does not determine
   how may iterations are required to reach desired alignment.  */

static bool
rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
{
  if (is_packed)
    return false;

  if (TARGET_32BIT)
    {
      if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
	return true;

      if (rs6000_alignment_flags == MASK_ALIGN_POWER)
	return true;

      return false;
    }
  else
    {
      if (TARGET_MACHO)
	return false;

      /* Assuming that all other types are naturally aligned.  CHECKME!  */
      return true;
    }
}
/* Implement targetm.vectorize.builtin_vec_perm.  */
tree
rs6000_builtin_vec_perm (tree type, tree *mask_element_type)
{
  tree d;

  *mask_element_type = unsigned_char_type_node;

  switch (TYPE_MODE (type))
    {
    case V16QImode:
      d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_16QI];
      break;

    case V8HImode:
      d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_8HI];
      break;

    case V4SImode:
      d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_4SI];
      break;

    case V4SFmode:
      d = rs6000_builtin_decls[ALTIVEC_BUILTIN_VPERM_4SF];
      break;

    default:
      return NULL_TREE;
    }

  gcc_assert (d);
  return d;
}
/* Handle generic options of the form -mfoo=yes/no.
   NAME is the option name.
   VALUE is the option value.
   FLAG is the pointer to the flag where to store a 1 or 0, depending on
   whether the option value is 'yes' or 'no' respectively.  */
static void
rs6000_parse_yes_no_option (const char *name, const char *value, int *flag)
{
  if (value == 0)
    return;
  else if (!strcmp (value, "yes"))
    *flag = 1;
  else if (!strcmp (value, "no"))
    *flag = 0;
  else
    error ("unknown -m%s= option specified: '%s'", name, value);
}
/* Validate and record the size specified with the -mtls-size option.  */

static void
rs6000_parse_tls_size_option (void)
{
  if (rs6000_tls_size_string == 0)
    return;
  else if (strcmp (rs6000_tls_size_string, "16") == 0)
    rs6000_tls_size = 16;
  else if (strcmp (rs6000_tls_size_string, "32") == 0)
    rs6000_tls_size = 32;
  else if (strcmp (rs6000_tls_size_string, "64") == 0)
    rs6000_tls_size = 64;
  else
    error ("bad value %qs for -mtls-size switch", rs6000_tls_size_string);
}
void
optimization_options (int level ATTRIBUTE_UNUSED, int size ATTRIBUTE_UNUSED)
{
  if (DEFAULT_ABI == ABI_DARWIN)
    /* The Darwin libraries never set errno, so we might as well
       avoid calling them when that's the only reason we would.  */
    flag_errno_math = 0;

  /* Double growth factor to counter reduced min jump length.  */
  set_param_value ("max-grow-copy-bb-insns", 16);

  /* Enable section anchors by default.
     Skip section anchors for Objective C and Objective C++
     until front-ends fixed.  */
  if (!TARGET_MACHO && lang_hooks.name[4] != 'O')
    flag_section_anchors = 2;
}
static enum fpu_type_t
rs6000_parse_fpu_option (const char *option)
{
  if (!strcmp("none", option)) return FPU_NONE;
  if (!strcmp("sp_lite", option)) return FPU_SF_LITE;
  if (!strcmp("dp_lite", option)) return FPU_DF_LITE;
  if (!strcmp("sp_full", option)) return FPU_SF_FULL;
  if (!strcmp("dp_full", option)) return FPU_DF_FULL;
  error("unknown value %s for -mfpu", option);
  return FPU_NONE;
}
/* Implement TARGET_HANDLE_OPTION.  */

static bool
rs6000_handle_option (size_t code, const char *arg, int value)
{
  enum fpu_type_t fpu_type = FPU_NONE;

  switch (code)
    {
    case OPT_mno_power:
      target_flags &= ~(MASK_POWER | MASK_POWER2
			| MASK_MULTIPLE | MASK_STRING);
      target_flags_explicit |= (MASK_POWER | MASK_POWER2
				| MASK_MULTIPLE | MASK_STRING);
      break;
    case OPT_mno_powerpc:
      target_flags &= ~(MASK_POWERPC | MASK_PPC_GPOPT
			| MASK_PPC_GFXOPT | MASK_POWERPC64);
      target_flags_explicit |= (MASK_POWERPC | MASK_PPC_GPOPT
				| MASK_PPC_GFXOPT | MASK_POWERPC64);
      break;
    case OPT_mfull_toc:
      target_flags &= ~MASK_MINIMAL_TOC;
      TARGET_NO_FP_IN_TOC = 0;
      TARGET_NO_SUM_IN_TOC = 0;
      target_flags_explicit |= MASK_MINIMAL_TOC;
#ifdef TARGET_USES_SYSV4_OPT
      /* Note, V.4 no longer uses a normal TOC, so make -mfull-toc, be
	 just the same as -mminimal-toc.  */
      target_flags |= MASK_MINIMAL_TOC;
      target_flags_explicit |= MASK_MINIMAL_TOC;
#endif
      break;

#ifdef TARGET_USES_SYSV4_OPT
    case OPT_mtoc:
      /* Make -mtoc behave like -mminimal-toc.  */
      target_flags |= MASK_MINIMAL_TOC;
      target_flags_explicit |= MASK_MINIMAL_TOC;
      break;
#endif

#ifdef TARGET_USES_AIX64_OPT
    case OPT_maix64:
#else
    case OPT_m64:
#endif
      target_flags |= MASK_POWERPC64 | MASK_POWERPC;
      target_flags |= ~target_flags_explicit & MASK_PPC_GFXOPT;
      target_flags_explicit |= MASK_POWERPC64 | MASK_POWERPC;
      break;

#ifdef TARGET_USES_AIX64_OPT
    case OPT_maix32:
#else
    case OPT_m32:
#endif
      target_flags &= ~MASK_POWERPC64;
      target_flags_explicit |= MASK_POWERPC64;
      break;

    case OPT_minsert_sched_nops_:
      rs6000_sched_insert_nops_str = arg;
      break;

    case OPT_mminimal_toc:
      if (value == 1)
	{
	  TARGET_NO_FP_IN_TOC = 0;
	  TARGET_NO_SUM_IN_TOC = 0;
	}
      break;

    case OPT_mpower:
      if (value == 1)
	{
	  target_flags |= (MASK_MULTIPLE | MASK_STRING);
	  target_flags_explicit |= (MASK_MULTIPLE | MASK_STRING);
	}
      break;

    case OPT_mpower2:
      if (value == 1)
	{
	  target_flags |= (MASK_POWER | MASK_MULTIPLE | MASK_STRING);
	  target_flags_explicit |= (MASK_POWER | MASK_MULTIPLE | MASK_STRING);
	}
      break;

    case OPT_mpowerpc_gpopt:
    case OPT_mpowerpc_gfxopt:
      if (value == 1)
	{
	  target_flags |= MASK_POWERPC;
	  target_flags_explicit |= MASK_POWERPC;
	}
      break;

    case OPT_maix_struct_return:
    case OPT_msvr4_struct_return:
      rs6000_explicit_options.aix_struct_ret = true;
      break;

    case OPT_mvrsave:
      rs6000_explicit_options.vrsave = true;
      TARGET_ALTIVEC_VRSAVE = value;
      break;

    case OPT_mvrsave_:
      rs6000_explicit_options.vrsave = true;
      rs6000_parse_yes_no_option ("vrsave", arg, &(TARGET_ALTIVEC_VRSAVE));
      break;

    case OPT_misel:
      rs6000_explicit_options.isel = true;
      rs6000_isel = value;
      break;

    case OPT_misel_:
      rs6000_explicit_options.isel = true;
      rs6000_parse_yes_no_option ("isel", arg, &(rs6000_isel));
      break;

    case OPT_mspe:
      rs6000_explicit_options.spe = true;
      rs6000_spe = value;
      break;

    case OPT_mspe_:
      rs6000_explicit_options.spe = true;
      rs6000_parse_yes_no_option ("spe", arg, &(rs6000_spe));
      break;

    case OPT_mdebug_:
      rs6000_debug_name = arg;
      break;

#ifdef TARGET_USES_SYSV4_OPT
    case OPT_mcall_:
      rs6000_abi_name = arg;
      break;

    case OPT_msdata_:
      rs6000_sdata_name = arg;
      break;

    case OPT_mtls_size_:
      rs6000_tls_size_string = arg;
      break;

    case OPT_mrelocatable:
      if (value == 1)
	{
	  target_flags |= MASK_MINIMAL_TOC;
	  target_flags_explicit |= MASK_MINIMAL_TOC;
	  TARGET_NO_FP_IN_TOC = 1;
	}
      break;

    case OPT_mrelocatable_lib:
      if (value == 1)
	{
	  target_flags |= MASK_RELOCATABLE | MASK_MINIMAL_TOC;
	  target_flags_explicit |= MASK_RELOCATABLE | MASK_MINIMAL_TOC;
	  TARGET_NO_FP_IN_TOC = 1;
	}
      else
	{
	  target_flags &= ~MASK_RELOCATABLE;
	  target_flags_explicit |= MASK_RELOCATABLE;
	}
      break;
#endif

    case OPT_mabi_:
      if (!strcmp (arg, "altivec"))
	{
	  rs6000_explicit_options.altivec_abi = true;
	  rs6000_altivec_abi = 1;

	  /* Enabling the AltiVec ABI turns off the SPE ABI.  */
	  rs6000_spe_abi = 0;
	}
      else if (! strcmp (arg, "no-altivec"))
	{
	  rs6000_explicit_options.altivec_abi = true;
	  rs6000_altivec_abi = 0;
	}
      else if (! strcmp (arg, "spe"))
	{
	  rs6000_explicit_options.spe_abi = true;
	  rs6000_spe_abi = 1;
	  rs6000_altivec_abi = 0;
	  if (!TARGET_SPE_ABI)
	    error ("not configured for ABI: '%s'", arg);
	}
      else if (! strcmp (arg, "no-spe"))
	{
	  rs6000_explicit_options.spe_abi = true;
	  rs6000_spe_abi = 0;
	}

      /* These are here for testing during development only, do not
	 document in the manual please.  */
      else if (! strcmp (arg, "d64"))
	{
	  rs6000_darwin64_abi = 1;
	  warning (0, "Using darwin64 ABI");
	}
      else if (! strcmp (arg, "d32"))
	{
	  rs6000_darwin64_abi = 0;
	  warning (0, "Using old darwin ABI");
	}

      else if (! strcmp (arg, "ibmlongdouble"))
	{
	  rs6000_explicit_options.ieee = true;
	  rs6000_ieeequad = 0;
	  warning (0, "Using IBM extended precision long double");
	}
      else if (! strcmp (arg, "ieeelongdouble"))
	{
	  rs6000_explicit_options.ieee = true;
	  rs6000_ieeequad = 1;
	  warning (0, "Using IEEE extended precision long double");
	}
      else
	{
	  error ("unknown ABI specified: '%s'", arg);
	  return false;
	}
      break;

    case OPT_mcpu_:
      rs6000_select[1].string = arg;
      break;

    case OPT_mtune_:
      rs6000_select[2].string = arg;
      break;

    case OPT_mtraceback_:
      rs6000_traceback_name = arg;
      break;

    case OPT_mfloat_gprs_:
      rs6000_explicit_options.float_gprs = true;
      if (! strcmp (arg, "yes") || ! strcmp (arg, "single"))
	rs6000_float_gprs = 1;
      else if (! strcmp (arg, "double"))
	rs6000_float_gprs = 2;
      else if (! strcmp (arg, "no"))
	rs6000_float_gprs = 0;
      else
	{
	  error ("invalid option for -mfloat-gprs: '%s'", arg);
	  return false;
	}
      break;

    case OPT_mlong_double_:
      rs6000_explicit_options.long_double = true;
      rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
      if (value != 64 && value != 128)
	{
	  error ("Unknown switch -mlong-double-%s", arg);
	  rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
	  return false;
	}
      else
	rs6000_long_double_type_size = value;
      break;

    case OPT_msched_costly_dep_:
      rs6000_sched_costly_dep_str = arg;
      break;

    case OPT_malign_:
      rs6000_explicit_options.alignment = true;
      if (! strcmp (arg, "power"))
	{
	  /* On 64-bit Darwin, power alignment is ABI-incompatible with
	     some C library functions, so warn about it.  The flag may be
	     useful for performance studies from time to time though, so
	     don't disable it entirely.  */
	  if (DEFAULT_ABI == ABI_DARWIN && TARGET_64BIT)
	    warning (0, "-malign-power is not supported for 64-bit Darwin;"
		     " it is incompatible with the installed C and C++ libraries");
	  rs6000_alignment_flags = MASK_ALIGN_POWER;
	}
      else if (! strcmp (arg, "natural"))
	rs6000_alignment_flags = MASK_ALIGN_NATURAL;
      else
	{
	  error ("unknown -malign-XXXXX option specified: '%s'", arg);
	  return false;
	}
      break;

    case OPT_msingle_float:
      if (!TARGET_SINGLE_FPU)
	warning (0, "-msingle-float option equivalent to -mhard-float");
      /* -msingle-float implies -mno-double-float and TARGET_HARD_FLOAT.  */
      rs6000_double_float = 0;
      target_flags &= ~MASK_SOFT_FLOAT;
      target_flags_explicit |= MASK_SOFT_FLOAT;
      break;

    case OPT_mdouble_float:
      /* -mdouble-float implies -msingle-float and TARGET_HARD_FLOAT.  */
      rs6000_single_float = 1;
      target_flags &= ~MASK_SOFT_FLOAT;
      target_flags_explicit |= MASK_SOFT_FLOAT;
      break;

    case OPT_msimple_fpu:
      if (!TARGET_SINGLE_FPU)
	warning (0, "-msimple-fpu option ignored");
      break;

    case OPT_mhard_float:
      /* -mhard_float implies -msingle-float and -mdouble-float.  */
      rs6000_single_float = rs6000_double_float = 1;
      break;

    case OPT_msoft_float:
      /* -msoft_float implies -mnosingle-float and -mnodouble-float.  */
      rs6000_single_float = rs6000_double_float = 0;
      break;

    case OPT_mfpu_:
      fpu_type = rs6000_parse_fpu_option(arg);
      if (fpu_type != FPU_NONE)
	/* If -mfpu is not none, then turn off SOFT_FLOAT, turn on HARD_FLOAT.  */
	{
	  target_flags &= ~MASK_SOFT_FLOAT;
	  target_flags_explicit |= MASK_SOFT_FLOAT;
	  rs6000_xilinx_fpu = 1;
	  if (fpu_type == FPU_SF_LITE || fpu_type == FPU_SF_FULL)
	    rs6000_single_float = 1;
	  if (fpu_type == FPU_DF_LITE || fpu_type == FPU_DF_FULL)
	    rs6000_single_float = rs6000_double_float = 1;
	  if (fpu_type == FPU_SF_LITE || fpu_type == FPU_DF_LITE)
	    rs6000_simple_fpu = 1;
	}
      else
	{
	  /* -mfpu=none is equivalent to -msoft-float */
	  target_flags |= MASK_SOFT_FLOAT;
	  target_flags_explicit |= MASK_SOFT_FLOAT;
	  rs6000_single_float = rs6000_double_float = 0;
	}
      break;
    }
  return true;
}
/* Do anything needed at the start of the asm file.  */

static void
rs6000_file_start (void)
{
  size_t i;
  char buffer[80];
  const char *start = buffer;
  struct rs6000_cpu_select *ptr;
  const char *default_cpu = TARGET_CPU_DEFAULT;
  FILE *file = asm_out_file;

  default_file_start ();

#ifdef TARGET_BI_ARCH
  if ((TARGET_DEFAULT ^ target_flags) & MASK_64BIT)
    default_cpu = 0;
#endif

  if (flag_verbose_asm)
    {
      sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
      rs6000_select[0].string = default_cpu;

      for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
	{
	  ptr = &rs6000_select[i];
	  if (ptr->string != (char *)0 && ptr->string[0] != '\0')
	    {
	      fprintf (file, "%s %s%s", start, ptr->name, ptr->string);
	      start = "";
	    }
	}

      if (PPC405_ERRATUM77)
	{
	  fprintf (file, "%s PPC405CR_ERRATUM77", start);
	  start = "";
	}

#ifdef USING_ELFOS_H
      switch (rs6000_sdata)
	{
	case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
	case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
	case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
	case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
	}

      if (rs6000_sdata && g_switch_value)
	{
	  fprintf (file, "%s -G " HOST_WIDE_INT_PRINT_UNSIGNED, start,
		   g_switch_value);
	  start = "";
	}
#endif

      if (*start == '\0')
	putc ('\n', file);
    }

#ifdef HAVE_AS_GNU_ATTRIBUTE
  if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
    {
      fprintf (file, "\t.gnu_attribute 4, %d\n",
	       ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
		: (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
		: 2));
      fprintf (file, "\t.gnu_attribute 8, %d\n",
	       (TARGET_ALTIVEC_ABI ? 2
		: TARGET_SPE_ABI ? 3
		: 1));
      fprintf (file, "\t.gnu_attribute 12, %d\n",
	       aix_struct_return ? 2 : 1);
    }
#endif

  if (DEFAULT_ABI == ABI_AIX || (TARGET_ELF && flag_pic == 2))
    {
      switch_to_section (toc_section);
      switch_to_section (text_section);
    }
}
/* Return nonzero if this function is known to have a null epilogue.  */

int
direct_return (void)
{
  if (reload_completed)
    {
      rs6000_stack_t *info = rs6000_stack_info ();

      if (info->first_gp_reg_save == 32
	  && info->first_fp_reg_save == 64
	  && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
	  && ! info->lr_save_p
	  && ! info->cr_save_p
	  && info->vrsave_mask == 0
	  && ! info->push_p)
	return 1;
    }

  return 0;
}
/* Return the number of instructions it takes to form a constant in an
   integer register.  */

int
num_insns_constant_wide (HOST_WIDE_INT value)
{
  /* signed constant loadable with {cal|addi} */
  if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
    return 1;

  /* constant loadable with {cau|addis} */
  else if ((value & 0xffff) == 0
	   && (value >> 31 == -1 || value >> 31 == 0))
    return 1;

#if HOST_BITS_PER_WIDE_INT == 64
  else if (TARGET_POWERPC64)
    {
      HOST_WIDE_INT low  = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
      HOST_WIDE_INT high = value >> 31;

      if (high == 0 || high == -1)
	return 2;

      high >>= 1;

      if (low == 0)
	return num_insns_constant_wide (high) + 1;
      else
	return (num_insns_constant_wide (high)
		+ num_insns_constant_wide (low) + 1);
    }
#endif

  else
    return 2;
}
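
/* Worked examples of the counting above (assuming a 64-bit host and
   TARGET_POWERPC64):
     0x7fff     -> 1 insn  (li, via the signed-16-bit test)
     0x12340000 -> 1 insn  (lis: low 16 bits zero, value >> 31 == 0)
     0x12345678 -> 2 insns (lis + ori: both halves nonzero, high word 0).  */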
int
num_insns_constant (rtx op, enum machine_mode mode)
{
  HOST_WIDE_INT low, high;

  switch (GET_CODE (op))
    {
    case CONST_INT:
#if HOST_BITS_PER_WIDE_INT == 64
      if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
	  && mask64_operand (op, mode))
	return 2;
      else
#endif
	return num_insns_constant_wide (INTVAL (op));

    case CONST_DOUBLE:
      if (mode == SFmode || mode == SDmode)
	{
	  long l;
	  REAL_VALUE_TYPE rv;

	  REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
	  if (DECIMAL_FLOAT_MODE_P (mode))
	    REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
	  else
	    REAL_VALUE_TO_TARGET_SINGLE (rv, l);
	  return num_insns_constant_wide ((HOST_WIDE_INT) l);
	}

      if (mode == VOIDmode || mode == DImode)
	{
	  high = CONST_DOUBLE_HIGH (op);
	  low  = CONST_DOUBLE_LOW (op);
	}
      else
	{
	  long l[2];
	  REAL_VALUE_TYPE rv;

	  REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
	  if (DECIMAL_FLOAT_MODE_P (mode))
	    REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
	  else
	    REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
	  high = l[WORDS_BIG_ENDIAN == 0];
	  low  = l[WORDS_BIG_ENDIAN != 0];
	}

      if (TARGET_32BIT)
	return (num_insns_constant_wide (low)
		+ num_insns_constant_wide (high));
      else
	{
	  if ((high == 0 && low >= 0)
	      || (high == -1 && low < 0))
	    return num_insns_constant_wide (low);

	  else if (mask64_operand (op, mode))
	    return 2;

	  else if (low == 0)
	    return num_insns_constant_wide (high) + 1;

	  else
	    return (num_insns_constant_wide (high)
		    + num_insns_constant_wide (low) + 1);
	}

    default:
      gcc_unreachable ();
    }
}
/* Interpret element ELT of the CONST_VECTOR OP as an integer value.
   If the mode of OP is MODE_VECTOR_INT, this simply returns the
   corresponding element of the vector, but for V4SFmode and V2SFmode,
   the corresponding "float" is interpreted as an SImode integer.  */

static HOST_WIDE_INT
const_vector_elt_as_int (rtx op, unsigned int elt)
{
  rtx tmp = CONST_VECTOR_ELT (op, elt);
  if (GET_MODE (op) == V4SFmode
      || GET_MODE (op) == V2SFmode)
    tmp = gen_lowpart (SImode, tmp);
  return INTVAL (tmp);
}
/* Return true if OP can be synthesized with a particular vspltisb, vspltish
   or vspltisw instruction.  OP is a CONST_VECTOR.  Which instruction is used
   depends on STEP and COPIES, one of which will be 1.  If COPIES > 1,
   all items are set to the same value and contain COPIES replicas of the
   vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
   operand and the others are set to the value of the operand's msb.  */

static bool
vspltis_constant (rtx op, unsigned step, unsigned copies)
{
  enum machine_mode mode = GET_MODE (op);
  enum machine_mode inner = GET_MODE_INNER (mode);

  unsigned i;
  unsigned nunits = GET_MODE_NUNITS (mode);
  unsigned bitsize = GET_MODE_BITSIZE (inner);
  unsigned mask = GET_MODE_MASK (inner);

  HOST_WIDE_INT val = const_vector_elt_as_int (op, nunits - 1);
  HOST_WIDE_INT splat_val = val;
  HOST_WIDE_INT msb_val = val > 0 ? 0 : -1;

  /* Construct the value to be splatted, if possible.  If not, return 0.  */
  for (i = 2; i <= copies; i *= 2)
    {
      HOST_WIDE_INT small_val;
      bitsize /= 2;
      small_val = splat_val >> bitsize;
      mask >>= bitsize;
      if (splat_val != ((small_val << bitsize) | (small_val & mask)))
	return false;
      splat_val = small_val;
    }

  /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw].  */
  if (EASY_VECTOR_15 (splat_val))
    ;

  /* Also check if we can splat, and then add the result to itself.  Do so if
     the value is positive, of if the splat instruction is using OP's mode;
     for splat_val < 0, the splat and the add should use the same mode.  */
  else if (EASY_VECTOR_15_ADD_SELF (splat_val)
	   && (splat_val >= 0 || (step == 1 && copies == 1)))
    ;

  else
    return false;

  /* Check if VAL is present in every STEP-th element, and the
     other elements are filled with its most significant bit.  */
  for (i = 0; i < nunits - 1; ++i)
    {
      HOST_WIDE_INT desired_val;
      if (((i + 1) & (step - 1)) == 0)
	desired_val = val;
      else
	desired_val = msb_val;

      if (desired_val != const_vector_elt_as_int (op, i))
	return false;
    }

  return true;
}
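
/* An illustrative case: the V8HImode vector {-4,-4,-4,-4,-4,-4,-4,-4}
   (step == 1, copies == 1) has splat_val == -4, which passes
   EASY_VECTOR_15, and every element matches val, so it can be generated
   with a single vspltish of -4.  */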
/* Return true if OP is of the given MODE and can be synthesized
   with a vspltisb, vspltish or vspltisw.  */

bool
easy_altivec_constant (rtx op, enum machine_mode mode)
{
  unsigned step, copies;

  if (mode == VOIDmode)
    mode = GET_MODE (op);
  else if (mode != GET_MODE (op))
    return false;

  /* Start with a vspltisw.  */
  step = GET_MODE_NUNITS (mode) / 4;
  copies = 1;

  if (vspltis_constant (op, step, copies))
    return true;

  /* Then try with a vspltish.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return true;

  /* And finally a vspltisb.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return true;

  return false;
}
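
/* To illustrate the step/copies walk above: for a V16QImode constant the
   vspltisw attempt uses step = 16/4 = 4 and copies = 1, the vspltish
   attempt halves step to 2, and the vspltisb attempt reaches step = 1.
   For V4SImode the walk instead starts at step = 1 and doubles copies on
   each retry.  */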
/* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
   result is OP.  Abort if it is not possible.  */

rtx
gen_easy_altivec_constant (rtx op)
{
  enum machine_mode mode = GET_MODE (op);
  int nunits = GET_MODE_NUNITS (mode);
  rtx last = CONST_VECTOR_ELT (op, nunits - 1);
  unsigned step = nunits / 4;
  unsigned copies = 1;

  /* Start with a vspltisw.  */
  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, last));

  /* Then try with a vspltish.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, last));

  /* And finally a vspltisb.  */
  if (step == 1)
    copies <<= 1;
  else
    step >>= 1;

  if (vspltis_constant (op, step, copies))
    return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, last));

  gcc_unreachable ();
}
const char *
output_vec_const_move (rtx *operands)
{
  int cst, cst2;
  enum machine_mode mode;
  rtx dest, vec;

  dest = operands[0];
  vec = operands[1];
  mode = GET_MODE (dest);

  if (TARGET_ALTIVEC)
    {
      rtx splat_vec;
      if (zero_constant (vec, mode))
	return "vxor %0,%0,%0";

      splat_vec = gen_easy_altivec_constant (vec);
      gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
      operands[1] = XEXP (splat_vec, 0);
      if (!EASY_VECTOR_15 (INTVAL (operands[1])))
	return "#";

      switch (GET_MODE (splat_vec))
	{
	case V4SImode:
	  return "vspltisw %0,%1";

	case V8HImode:
	  return "vspltish %0,%1";

	case V16QImode:
	  return "vspltisb %0,%1";

	default:
	  gcc_unreachable ();
	}
    }

  gcc_assert (TARGET_SPE);

  /* Vector constant 0 is handled as a splitter of V2SI, and in the
     pattern of V1DI, V4HI, and V2SF.

     FIXME: We should probably return # and add post reload
     splitters for these, but this way is so easy ;-).  */
  cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
  cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
  operands[1] = CONST_VECTOR_ELT (vec, 0);
  operands[2] = CONST_VECTOR_ELT (vec, 1);
  if (cst == cst2)
    return "li %0,%1\n\tevmergelo %0,%0,%0";
  else
    return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
}
/* Initialize TARGET of vector PAIRED to VALS.  */

void
paired_expand_vector_init (rtx target, rtx vals)
{
  enum machine_mode mode = GET_MODE (target);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0;
  rtx x, new_rtx, tmp, constant_op, op1, op2;
  int i;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!CONSTANT_P (x))
	++n_var;
    }
  if (n_var == 0)
    {
      /* Load from constant pool.  */
      emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
      return;
    }

  if (n_var == 2)
    {
      /* The vector is initialized only with non-constants.  */
      new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
				    XVECEXP (vals, 0, 1));

      emit_move_insn (target, new_rtx);
      return;
    }

  /* One field is non-constant and the other one is a constant.  Load the
     constant from the constant pool and use ps_merge instruction to
     construct the whole vector.  */
  op1 = XVECEXP (vals, 0, 0);
  op2 = XVECEXP (vals, 0, 1);

  constant_op = (CONSTANT_P (op1)) ? op1 : op2;

  tmp = gen_reg_rtx (GET_MODE (constant_op));
  emit_move_insn (tmp, constant_op);

  if (CONSTANT_P (op1))
    new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
  else
    new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);

  emit_move_insn (target, new_rtx);
}

void
paired_expand_vector_move (rtx operands[])
{
  rtx op0 = operands[0], op1 = operands[1];

  emit_move_insn (op0, op1);
}
/* Emit vector compare for code RCODE.  DEST is destination, OP1 and
   OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
   operands for the relation operation COND.  This is a recursive
   function.  */

static void
paired_emit_vector_compare (enum rtx_code rcode,
			    rtx dest, rtx op0, rtx op1,
			    rtx cc_op0, rtx cc_op1)
{
  rtx tmp = gen_reg_rtx (V2SFmode);
  rtx tmp1, max, min, equal_zero;

  gcc_assert (TARGET_PAIRED_FLOAT);
  gcc_assert (GET_MODE (op0) == GET_MODE (op1));

  switch (rcode)
    {
    case LT:
    case LTU:
      paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case GE:
    case GEU:
      emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
      emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
      return;
    case LE:
    case LEU:
      paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
      return;
    case GT:
      paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case EQ:
      tmp1 = gen_reg_rtx (V2SFmode);
      max = gen_reg_rtx (V2SFmode);
      min = gen_reg_rtx (V2SFmode);
      equal_zero = gen_reg_rtx (V2SFmode);

      emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
      emit_insn (gen_selv2sf4
		 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
      emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
      emit_insn (gen_selv2sf4
		 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
      emit_insn (gen_subv2sf3 (tmp1, min, max));
      emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
      return;
    case NE:
      paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNLE:
      paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNLT:
      paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNGE:
      paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
      return;
    case UNGT:
      paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
      return;
    default:
      gcc_unreachable ();
    }

  return;
}
/* Emit vector conditional expression.
   DEST is destination.  OP1 and OP2 are two VEC_COND_EXPR operands.
   CC_OP0 and CC_OP1 are the two operands for the relation operation COND.  */

int
paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
			      rtx cond, rtx cc_op0, rtx cc_op1)
{
  enum rtx_code rcode = GET_CODE (cond);

  if (!TARGET_PAIRED_FLOAT)
    return 0;

  paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);

  return 1;
}
/* Initialize vector TARGET to VALS.  */

void
rs6000_expand_vector_init (rtx target, rtx vals)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  int n_elts = GET_MODE_NUNITS (mode);
  int n_var = 0, one_var = -1;
  bool all_same = true, all_const_zero = true;
  rtx x, mem;
  int i;

  for (i = 0; i < n_elts; ++i)
    {
      x = XVECEXP (vals, 0, i);
      if (!CONSTANT_P (x))
	++n_var, one_var = i;
      else if (x != CONST0_RTX (inner_mode))
	all_const_zero = false;

      if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
	all_same = false;
    }

  if (n_var == 0)
    {
      rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
      if (mode != V4SFmode && all_const_zero)
	{
	  /* Zero register.  */
	  emit_insn (gen_rtx_SET (VOIDmode, target,
				  gen_rtx_XOR (mode, target, target)));
	  return;
	}
      else if (mode != V4SFmode && easy_vector_constant (const_vec, mode))
	{
	  /* Splat immediate.  */
	  emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
	  return;
	}
      else if (all_same)
	;	/* Splat vector element.  */
      else
	{
	  /* Load from constant pool.  */
	  emit_move_insn (target, const_vec);
	  return;
	}
    }

  /* Store value to stack temp.  Load vector element.  Splat.  */
  if (all_same)
    {
      mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
      emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
		      XVECEXP (vals, 0, 0));
      x = gen_rtx_UNSPEC (VOIDmode,
			  gen_rtvec (1, const0_rtx), UNSPEC_LVE);
      emit_insn (gen_rtx_PARALLEL (VOIDmode,
				   gen_rtvec (2,
					      gen_rtx_SET (VOIDmode,
							   target, mem),
					      x)));
      x = gen_rtx_VEC_SELECT (inner_mode, target,
			      gen_rtx_PARALLEL (VOIDmode,
						gen_rtvec (1, const0_rtx)));
      emit_insn (gen_rtx_SET (VOIDmode, target,
			      gen_rtx_VEC_DUPLICATE (mode, x)));
      return;
    }

  /* One field is non-constant.  Load constant then overwrite
     varying field.  */
  if (n_var == 1)
    {
      rtx copy = copy_rtx (vals);

      /* Load constant part of vector, substitute neighboring value for
	 varying element.  */
      XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
      rs6000_expand_vector_init (target, copy);

      /* Insert variable.  */
      rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
      return;
    }

  /* Construct the vector in memory one field at a time
     and load the whole vector.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);
  for (i = 0; i < n_elts; i++)
    emit_move_insn (adjust_address_nv (mem, inner_mode,
				       i * GET_MODE_SIZE (inner_mode)),
		    XVECEXP (vals, 0, i));
  emit_move_insn (target, mem);
}
/* Set field ELT of TARGET to VAL.  */

void
rs6000_expand_vector_set (rtx target, rtx val, int elt)
{
  enum machine_mode mode = GET_MODE (target);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx reg = gen_reg_rtx (mode);
  rtx mask, mem, x;
  int width = GET_MODE_SIZE (inner_mode);
  int i;

  /* Load single variable value.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode), 0);
  emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
  x = gen_rtx_UNSPEC (VOIDmode,
		      gen_rtvec (1, const0_rtx), UNSPEC_LVE);
  emit_insn (gen_rtx_PARALLEL (VOIDmode,
			       gen_rtvec (2,
					  gen_rtx_SET (VOIDmode,
						       reg, mem),
					  x)));

  /* Linear sequence.  */
  mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
  for (i = 0; i < 16; ++i)
    XVECEXP (mask, 0, i) = GEN_INT (i);

  /* Set permute mask to insert element into target.  */
  for (i = 0; i < width; ++i)
    XVECEXP (mask, 0, elt*width + i)
      = GEN_INT (i + 0x10);
  x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
  x = gen_rtx_UNSPEC (mode,
		      gen_rtvec (3, target, reg,
				 force_reg (V16QImode, x)),
		      UNSPEC_VPERM);
  emit_insn (gen_rtx_SET (VOIDmode, target, x));
}
/* Extract field ELT from VEC into TARGET.  */

void
rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
{
  enum machine_mode mode = GET_MODE (vec);
  enum machine_mode inner_mode = GET_MODE_INNER (mode);
  rtx mem, x;

  /* Allocate mode-sized buffer.  */
  mem = assign_stack_temp (mode, GET_MODE_SIZE (mode), 0);

  /* Add offset to field within buffer matching vector element.  */
  mem = adjust_address_nv (mem, mode, elt * GET_MODE_SIZE (inner_mode));

  /* Store single field into mode-sized buffer.  */
  x = gen_rtx_UNSPEC (VOIDmode,
		      gen_rtvec (1, const0_rtx), UNSPEC_STVE);
  emit_insn (gen_rtx_PARALLEL (VOIDmode,
			       gen_rtvec (2,
					  gen_rtx_SET (VOIDmode,
						       mem, vec),
					  x)));
  emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
}
/* Generates shifts and masks for a pair of rldicl or rldicr insns to
   implement ANDing by the mask IN.  */
void
build_mask64_2_operands (rtx in, rtx *out)
{
#if HOST_BITS_PER_WIDE_INT >= 64
  unsigned HOST_WIDE_INT c, lsb, m1, m2;
  int shift;

  gcc_assert (GET_CODE (in) == CONST_INT);

  c = INTVAL (in);
  if (c & 1)
    {
      /* Assume c initially something like 0x00fff000000fffff.  The idea
	 is to rotate the word so that the middle ^^^^^^ group of zeros
	 is at the MS end and can be cleared with an rldicl mask.  We then
	 rotate back and clear off the MS    ^^ group of zeros with a
	 second rldicl.  */
      c = ~c;			/*   c == 0xff000ffffff00000 */
      lsb = c & -c;		/* lsb == 0x0000000000100000 */
      m1 = -lsb;		/*  m1 == 0xfffffffffff00000 */
      c = ~c;			/*   c == 0x00fff000000fffff */
      c &= -lsb;		/*   c == 0x00fff00000000000 */
      lsb = c & -c;		/* lsb == 0x0000100000000000 */
      c = ~c;			/*   c == 0xff000fffffffffff */
      c &= -lsb;		/*   c == 0xff00000000000000 */
      shift = 0;
      while ((lsb >>= 1) != 0)
	shift++;		/* shift == 44 on exit from loop */
      m1 <<= 64 - shift;	/*  m1 == 0xffffff0000000000 */
      m1 = ~m1;			/*  m1 == 0x000000ffffffffff */
      m2 = ~c;			/*  m2 == 0x00ffffffffffffff */
    }
  else
    {
      /* Assume c initially something like 0xff000f0000000000.  The idea
	 is to rotate the word so that the     ^^^  middle group of zeros
	 is at the LS end and can be cleared with an rldicr mask.  We then
	 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
	 a second rldicr.  */
      lsb = c & -c;		/* lsb == 0x0000010000000000 */
      m2 = -lsb;		/*  m2 == 0xffffff0000000000 */
      c = ~c;			/*   c == 0x00fff0ffffffffff */
      c &= -lsb;		/*   c == 0x00fff00000000000 */
      lsb = c & -c;		/* lsb == 0x0000100000000000 */
      c = ~c;			/*   c == 0xff000fffffffffff */
      c &= -lsb;		/*   c == 0xff00000000000000 */
      shift = 0;
      while ((lsb >>= 1) != 0)
	shift++;		/* shift == 44 on exit from loop */
      m1 = ~c;			/*  m1 == 0x00ffffffffffffff */
      m1 >>= shift;		/*  m1 == 0x0000000000000fff */
      m1 = ~m1;			/*  m1 == 0xfffffffffffff000 */
    }

  /* Note that when we only have two 0->1 and 1->0 transitions, one of the
     masks will be all 1's.  We are guaranteed more than one transition.  */
  out[0] = GEN_INT (64 - shift);
  out[1] = GEN_INT (m1);
  out[2] = GEN_INT (shift);
  out[3] = GEN_INT (m2);
#else
  (void)in;
  (void)out;
  gcc_unreachable ();
#endif
}
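
/* In effect the two insns built from OUT[] compute, for the first case
   above,

     t = (x rotl (64 - shift)) & m1
     r = (t rotl shift) & m2

   i.e. a rotate-and-mask pair.  This sketch just restates the comments
   above; the exact insn forms come from the rs6000.md patterns whose
   splitters call this function.  */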
/* Return TRUE if OP is an invalid SUBREG operation on the e500.  */

bool
invalid_e500_subreg (rtx op, enum machine_mode mode)
{
  if (TARGET_E500_DOUBLE)
    {
      /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
	 subreg:TI and reg:TF.  Decimal float modes are like integer
	 modes (only low part of each register used) for this
	 purpose.  */
      if (GET_CODE (op) == SUBREG
	  && (mode == SImode || mode == DImode || mode == TImode
	      || mode == DDmode || mode == TDmode)
	  && REG_P (SUBREG_REG (op))
	  && (GET_MODE (SUBREG_REG (op)) == DFmode
	      || GET_MODE (SUBREG_REG (op)) == TFmode))
	return true;

      /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
	 reg:TI.  */
      if (GET_CODE (op) == SUBREG
	  && (mode == DFmode || mode == TFmode)
	  && REG_P (SUBREG_REG (op))
	  && (GET_MODE (SUBREG_REG (op)) == DImode
	      || GET_MODE (SUBREG_REG (op)) == TImode
	      || GET_MODE (SUBREG_REG (op)) == DDmode
	      || GET_MODE (SUBREG_REG (op)) == TDmode))
	return true;
    }

  if (TARGET_SPE
      && GET_CODE (op) == SUBREG
      && mode == SImode
      && REG_P (SUBREG_REG (op))
      && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
    return true;

  return false;
}
/* AIX increases natural record alignment to doubleword if the first
   field is an FP double while the FP fields remain word aligned.  */

unsigned int
rs6000_special_round_type_align (tree type, unsigned int computed,
				 unsigned int specified)
{
  unsigned int align = MAX (computed, specified);
  tree field = TYPE_FIELDS (type);

  /* Skip all non field decls */
  while (field != NULL && TREE_CODE (field) != FIELD_DECL)
    field = TREE_CHAIN (field);

  if (field != NULL && field != type)
    {
      type = TREE_TYPE (field);
      while (TREE_CODE (type) == ARRAY_TYPE)
	type = TREE_TYPE (type);

      if (type != error_mark_node && TYPE_MODE (type) == DFmode)
	align = MAX (align, 64);
    }

  return align;
}
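
/* For example, under this rule a record such as

     struct { double d; int i; };

   is raised to 64-bit (doubleword) alignment because its first field is
   a DFmode double, while a record whose first field is an int keeps its
   computed alignment.  */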
/* Darwin increases record alignment to the natural alignment of
   the first field.  */

unsigned int
darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
					unsigned int specified)
{
  unsigned int align = MAX (computed, specified);

  if (TYPE_PACKED (type))
    return align;

  /* Find the first field, looking down into aggregates.  */
  do {
    tree field = TYPE_FIELDS (type);
    /* Skip all non field decls */
    while (field != NULL && TREE_CODE (field) != FIELD_DECL)
      field = TREE_CHAIN (field);
    if (! field)
      break;
    type = TREE_TYPE (field);
    while (TREE_CODE (type) == ARRAY_TYPE)
      type = TREE_TYPE (type);
  } while (AGGREGATE_TYPE_P (type));

  if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
    align = MAX (align, TYPE_ALIGN (type));

  return align;
}
/* Return 1 for an operand in small memory on V.4/eabi.  */

int
small_data_operand (rtx op ATTRIBUTE_UNUSED,
		    enum machine_mode mode ATTRIBUTE_UNUSED)
{
#if TARGET_ELF
  rtx sym_ref;

  if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
    return 0;

  if (DEFAULT_ABI != ABI_V4)
    return 0;

  /* Vector and float memory instructions have a limited offset on the
     SPE, so using a vector or float variable directly as an operand is
     not useful.  */
  if (TARGET_SPE
      && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
    return 0;

  if (GET_CODE (op) == SYMBOL_REF)
    sym_ref = op;

  else if (GET_CODE (op) != CONST
	   || GET_CODE (XEXP (op, 0)) != PLUS
	   || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
	   || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
    return 0;

  else
    {
      rtx sum = XEXP (op, 0);
      HOST_WIDE_INT summand;

      /* We have to be careful here, because it is the referenced address
	 that must be 32k from _SDA_BASE_, not just the symbol.  */
      summand = INTVAL (XEXP (sum, 1));
      if (summand < 0 || (unsigned HOST_WIDE_INT) summand > g_switch_value)
	return 0;

      sym_ref = XEXP (sum, 0);
    }

  return SYMBOL_REF_SMALL_P (sym_ref);
#else
  return 0;
#endif
}
/* Return true if either operand is a general purpose register.  */

bool
gpr_or_gpr_p (rtx op0, rtx op1)
{
  return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
	  || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
}
/* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p.  */

static bool
constant_pool_expr_p (rtx op)
{
  rtx base, offset;

  split_const (op, &base, &offset);
  return (GET_CODE (base) == SYMBOL_REF
	  && CONSTANT_POOL_ADDRESS_P (base)
	  && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
}

static bool
toc_relative_expr_p (rtx op)
{
  rtx base, offset;

  if (GET_CODE (op) != CONST)
    return false;

  split_const (op, &base, &offset);
  return (GET_CODE (base) == UNSPEC
	  && XINT (base, 1) == UNSPEC_TOCREL);
}

bool
legitimate_constant_pool_address_p (rtx x)
{
  return (TARGET_TOC
	  && GET_CODE (x) == PLUS
	  && GET_CODE (XEXP (x, 0)) == REG
	  && (TARGET_MINIMAL_TOC || REGNO (XEXP (x, 0)) == TOC_REGISTER)
	  && toc_relative_expr_p (XEXP (x, 1)));
}

static bool
legitimate_small_data_p (enum machine_mode mode, rtx x)
{
  return (DEFAULT_ABI == ABI_V4
	  && !flag_pic && !TARGET_TOC
	  && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
	  && small_data_operand (x, mode));
}
/* SPE offset addressing is limited to 5-bits worth of double words.  */
#define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)

bool
rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x, int strict)
{
  unsigned HOST_WIDE_INT offset, extra;

  if (GET_CODE (x) != PLUS)
    return false;
  if (GET_CODE (XEXP (x, 0)) != REG)
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
    return false;
  if (legitimate_constant_pool_address_p (x))
    return true;
  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return false;

  offset = INTVAL (XEXP (x, 1));
  extra = 0;
  switch (mode)
    {
    case V16QImode:
    case V8HImode:
    case V4SFmode:
    case V4SImode:
      /* AltiVec vector modes.  Only reg+reg addressing is valid and
	 constant offset zero should not occur due to canonicalization.  */
      return false;

    case V4HImode:
    case V2SImode:
    case V1DImode:
    case V2SFmode:
      /* Paired vector modes.  Only reg+reg addressing is valid and
	 constant offset zero should not occur due to canonicalization.  */
      if (TARGET_PAIRED_FLOAT)
	return false;
      /* SPE vector modes.  */
      return SPE_CONST_OFFSET_OK (offset);

    case DFmode:
      if (TARGET_E500_DOUBLE)
	return SPE_CONST_OFFSET_OK (offset);

    case DDmode:
    case DImode:
      /* On e500v2, we may have:

	   (subreg:DF (mem:DI (plus (reg) (const_int))) 0).

	 Which gets addressed with evldd instructions.  */
      if (TARGET_E500_DOUBLE)
	return SPE_CONST_OFFSET_OK (offset);

      if (mode == DFmode || mode == DDmode || !TARGET_POWERPC64)
	extra = 4;
      else if (offset & 3)
	return false;
      break;

    case TFmode:
      if (TARGET_E500_DOUBLE)
	return (SPE_CONST_OFFSET_OK (offset)
		&& SPE_CONST_OFFSET_OK (offset + 8));

    case TDmode:
    case TImode:
      if (mode == TFmode || mode == TDmode || !TARGET_POWERPC64)
	extra = 12;
      else if (offset & 3)
	return false;
      else
	extra = 8;
      break;

    default:
      break;
    }

  offset += 0x8000;
  return (offset < 0x10000) && (offset + extra < 0x10000);
}
bool
legitimate_indexed_address_p (rtx x, int strict)
{
  rtx op0, op1;

  if (GET_CODE (x) != PLUS)
    return false;

  op0 = XEXP (x, 0);
  op1 = XEXP (x, 1);

  /* Recognize the rtl generated by reload which we know will later be
     replaced with proper base and index regs.  */
  if (!strict
      && reload_in_progress
      && (REG_P (op0) || GET_CODE (op0) == PLUS)
      && REG_P (op1))
    return true;

  return (REG_P (op0) && REG_P (op1)
	  && ((INT_REG_OK_FOR_BASE_P (op0, strict)
	       && INT_REG_OK_FOR_INDEX_P (op1, strict))
	      || (INT_REG_OK_FOR_BASE_P (op1, strict)
		  && INT_REG_OK_FOR_INDEX_P (op0, strict))));
}
bool
avoiding_indexed_address_p (enum machine_mode mode)
{
  /* Avoid indexed addressing for modes that have non-indexed
     load/store instruction forms.  */
  return TARGET_AVOID_XFORM && !ALTIVEC_VECTOR_MODE (mode);
}

inline bool
legitimate_indirect_address_p (rtx x, int strict)
{
  return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
}
bool
macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
{
  if (!TARGET_MACHO || !flag_pic
      || mode != SImode || GET_CODE (x) != MEM)
    return false;
  x = XEXP (x, 0);

  if (GET_CODE (x) != LO_SUM)
    return false;
  if (GET_CODE (XEXP (x, 0)) != REG)
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
    return false;
  x = XEXP (x, 1);

  return CONSTANT_P (x);
}
static bool
legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
{
  if (GET_CODE (x) != LO_SUM)
    return false;
  if (GET_CODE (XEXP (x, 0)) != REG)
    return false;
  if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
    return false;
  /* Restrict addressing for DI because of our SUBREG hackery.  */
  if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
			     || mode == DDmode || mode == TDmode
			     || mode == DImode))
    return false;
  x = XEXP (x, 1);

  if (TARGET_ELF || TARGET_MACHO)
    {
      if (DEFAULT_ABI != ABI_AIX && DEFAULT_ABI != ABI_DARWIN && flag_pic)
	return false;
      if (TARGET_TOC)
	return false;
      if (GET_MODE_NUNITS (mode) != 1)
	return false;
      if (GET_MODE_BITSIZE (mode) > 64
	  || (GET_MODE_BITSIZE (mode) > 32 && !TARGET_POWERPC64
	      && !(TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
		   && (mode == DFmode || mode == DDmode))))
	return false;

      return CONSTANT_P (x);
    }

  return false;
}
/* Try machine-dependent ways of modifying an illegitimate address
   to be legitimate.  If we find one, return the new, valid address.
   This is used from only one place: `memory_address' in explow.c.

   OLDX is the address as it was before break_out_memory_refs was
   called.  In some cases it is useful to look at this to decide what
   needs to be done.

   It is always safe for this function to do nothing.  It exists to
   recognize opportunities to optimize the output.

   On RS/6000, first check for the sum of a register with a constant
   integer that is out of range.  If so, generate code to add the
   constant with the low-order 16 bits masked to the register and force
   this result into another register (this can be done with `cau').
   Then generate an address of REG+(CONST&0xffff), allowing for the
   possibility of bit 16 being a one.

   Then check for the sum of a register and something not constant, try to
   load the other things into a register and return the sum.  */

rtx
rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
			   enum machine_mode mode)
{
  if (GET_CODE (x) == SYMBOL_REF)
    {
      enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
      if (model != 0)
	return rs6000_legitimize_tls_address (x, model);
    }

  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && (unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000) >= 0x10000
      && !((TARGET_POWERPC64
	    && (mode == DImode || mode == TImode)
	    && (INTVAL (XEXP (x, 1)) & 3) != 0)
	   || SPE_VECTOR_MODE (mode)
	   || ALTIVEC_VECTOR_MODE (mode)
	   || (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
				      || mode == DImode || mode == DDmode
				      || mode == TDmode))))
    {
      HOST_WIDE_INT high_int, low_int;
      rtx sum;
      low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
      high_int = INTVAL (XEXP (x, 1)) - low_int;
      sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
					 GEN_INT (high_int)), 0);
      return gen_rtx_PLUS (Pmode, sum, GEN_INT (low_int));
    }
  else if (GET_CODE (x) == PLUS
	   && GET_CODE (XEXP (x, 0)) == REG
	   && GET_CODE (XEXP (x, 1)) != CONST_INT
	   && GET_MODE_NUNITS (mode) == 1
	   && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
	       || TARGET_POWERPC64
	       || ((mode != DImode && mode != DFmode && mode != DDmode)
		   || (TARGET_E500_DOUBLE && mode != DDmode)))
	   && (TARGET_POWERPC64 || mode != DImode)
	   && !avoiding_indexed_address_p (mode)
	   && mode != TImode
	   && mode != TFmode
	   && mode != TDmode)
    return gen_rtx_PLUS (Pmode, XEXP (x, 0),
			 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
  else if (ALTIVEC_VECTOR_MODE (mode))
    {
      rtx reg;

      /* Make sure both operands are registers.  */
      if (GET_CODE (x) == PLUS)
	return gen_rtx_PLUS (Pmode, force_reg (Pmode, XEXP (x, 0)),
			     force_reg (Pmode, XEXP (x, 1)));

      reg = force_reg (Pmode, x);
      return reg;
    }
  else if (SPE_VECTOR_MODE (mode)
	   || (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
				      || mode == DDmode || mode == TDmode
				      || mode == DImode)))
    {
      if (mode == TImode)
	return NULL_RTX;

      /* We accept [reg + reg] and [reg + OFFSET].  */

      if (GET_CODE (x) == PLUS)
	{
	  rtx op1 = XEXP (x, 0);
	  rtx op2 = XEXP (x, 1);
	  rtx y;

	  op1 = force_reg (Pmode, op1);

	  if (GET_CODE (op2) != REG
	      && (GET_CODE (op2) != CONST_INT
		  || !SPE_CONST_OFFSET_OK (INTVAL (op2))
		  || (GET_MODE_SIZE (mode) > 8
		      && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
	    op2 = force_reg (Pmode, op2);

	  /* We can't always do [reg + reg] for these, because [reg +
	     reg + offset] is not a legitimate addressing mode.  */
	  y = gen_rtx_PLUS (Pmode, op1, op2);

	  if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
	    return force_reg (Pmode, y);
	  else
	    return y;
	}

      return force_reg (Pmode, x);
    }
  else if (TARGET_ELF
	   && TARGET_32BIT
	   && TARGET_NO_TOC
	   && ! flag_pic
	   && GET_CODE (x) != CONST_INT
	   && GET_CODE (x) != CONST_DOUBLE
	   && CONSTANT_P (x)
	   && GET_MODE_NUNITS (mode) == 1
	   && (GET_MODE_BITSIZE (mode) <= 32
	       || ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
		   && (mode == DFmode || mode == DDmode))))
    {
      rtx reg = gen_reg_rtx (Pmode);
      emit_insn (gen_elf_high (reg, x));
      return gen_rtx_LO_SUM (Pmode, reg, x);
    }
  else if (TARGET_MACHO && TARGET_32BIT && TARGET_NO_TOC
	   && ! flag_pic
#if TARGET_MACHO
	   && ! MACHO_DYNAMIC_NO_PIC_P
#endif
	   && GET_CODE (x) != CONST_INT
	   && GET_CODE (x) != CONST_DOUBLE
	   && CONSTANT_P (x)
	   && GET_MODE_NUNITS (mode) == 1
	   && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
	       || (mode != DFmode && mode != DDmode))
	   && mode != DImode
	   && mode != TImode)
    {
      rtx reg = gen_reg_rtx (Pmode);
      emit_insn (gen_macho_high (reg, x));
      return gen_rtx_LO_SUM (Pmode, reg, x);
    }
  else if (TARGET_TOC
	   && GET_CODE (x) == SYMBOL_REF
	   && constant_pool_expr_p (x)
	   && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
    {
      return create_TOC_reference (x);
    }
  else
    return NULL_RTX;
}
/* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
   We need to emit DTP-relative relocations.  */

static void
rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
{
  switch (size)
    {
    case 4:
      fputs ("\t.long\t", file);
      break;
    case 8:
      fputs (DOUBLE_INT_ASM_OP, file);
      break;
    default:
      gcc_unreachable ();
    }
  output_addr_const (file, x);
  fputs ("@dtprel+0x8000", file);
}
/* Construct the SYMBOL_REF for the tls_get_addr function.  */

static GTY(()) rtx rs6000_tls_symbol;
static rtx
rs6000_tls_get_addr (void)
{
  if (!rs6000_tls_symbol)
    rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");

  return rs6000_tls_symbol;
}

/* Construct the SYMBOL_REF for TLS GOT references.  */

static GTY(()) rtx rs6000_got_symbol;
static rtx
rs6000_got_sym (void)
{
  if (!rs6000_got_symbol)
    {
      rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
      SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
    }

  return rs6000_got_symbol;
}
/* ADDR contains a thread-local SYMBOL_REF.  Generate code to compute
   this (thread-local) address.  */

static rtx
rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
{
  rtx dest, insn;

  dest = gen_reg_rtx (Pmode);
  if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
    {
      rtx tlsreg;

      if (TARGET_64BIT)
	{
	  tlsreg = gen_rtx_REG (Pmode, 13);
	  insn = gen_tls_tprel_64 (dest, tlsreg, addr);
	}
      else
	{
	  tlsreg = gen_rtx_REG (Pmode, 2);
	  insn = gen_tls_tprel_32 (dest, tlsreg, addr);
	}
      emit_insn (insn);
    }
  else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
    {
      rtx tlsreg, tmp;

      tmp = gen_reg_rtx (Pmode);
      if (TARGET_64BIT)
	{
	  tlsreg = gen_rtx_REG (Pmode, 13);
	  insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
	}
      else
	{
	  tlsreg = gen_rtx_REG (Pmode, 2);
	  insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
	}
      emit_insn (insn);
      if (TARGET_64BIT)
	insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
      else
	insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
      emit_insn (insn);
    }
  else
    {
      rtx r3, got, tga, tmp1, tmp2, eqv;

      /* We currently use relocations like @got@tlsgd for tls, which
	 means the linker will handle allocation of tls entries, placing
	 them in the .got section.  So use a pointer to the .got section,
	 not one to secondary TOC sections used by 64-bit -mminimal-toc,
	 or to secondary GOT sections used by 32-bit -fPIC.  */
      if (TARGET_64BIT)
	got = gen_rtx_REG (Pmode, 2);
      else
	{
	  if (flag_pic == 1)
	    got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
	  else
	    {
	      rtx gsym = rs6000_got_sym ();
	      got = gen_reg_rtx (Pmode);
	      if (flag_pic == 0)
		rs6000_emit_move (got, gsym, Pmode);
	      else
		{
		  rtx tmp3, mem;
		  rtx first, last;

		  tmp1 = gen_reg_rtx (Pmode);
		  tmp2 = gen_reg_rtx (Pmode);
		  tmp3 = gen_reg_rtx (Pmode);
		  mem = gen_const_mem (Pmode, tmp1);

		  first = emit_insn (gen_load_toc_v4_PIC_1b (gsym));
		  emit_move_insn (tmp1,
				  gen_rtx_REG (Pmode, LR_REGNO));
		  emit_move_insn (tmp2, mem);
		  emit_insn (gen_addsi3 (tmp3, tmp1, tmp2));
		  last = emit_move_insn (got, tmp3);
		  set_unique_reg_note (last, REG_EQUAL, gsym);
		}
	    }
	}

      if (model == TLS_MODEL_GLOBAL_DYNAMIC)
	{
	  r3 = gen_rtx_REG (Pmode, 3);
	  tga = rs6000_tls_get_addr ();

	  if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
	    insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
	  else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
	    insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
	  else if (DEFAULT_ABI == ABI_V4)
	    insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
	  else
	    gcc_unreachable ();

	  start_sequence ();
	  insn = emit_call_insn (insn);
	  RTL_CONST_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r3);
	  if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
	  insn = get_insns ();
	  end_sequence ();
	  emit_libcall_block (insn, dest, r3, addr);
	}
      else if (model == TLS_MODEL_LOCAL_DYNAMIC)
	{
	  r3 = gen_rtx_REG (Pmode, 3);
	  tga = rs6000_tls_get_addr ();

	  if (DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
	    insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
	  else if (DEFAULT_ABI == ABI_AIX && !TARGET_64BIT)
	    insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
	  else if (DEFAULT_ABI == ABI_V4)
	    insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
	  else
	    gcc_unreachable ();

	  start_sequence ();
	  insn = emit_call_insn (insn);
	  RTL_CONST_CALL_P (insn) = 1;
	  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), r3);
	  if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
	  insn = get_insns ();
	  end_sequence ();
	  tmp1 = gen_reg_rtx (Pmode);
	  eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
				UNSPEC_TLSLD);
	  emit_libcall_block (insn, tmp1, r3, eqv);
	  if (rs6000_tls_size == 16)
	    {
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_64 (dest, tmp1, addr);
	      else
		insn = gen_tls_dtprel_32 (dest, tmp1, addr);
	    }
	  else if (rs6000_tls_size == 32)
	    {
	      tmp2 = gen_reg_rtx (Pmode);
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
	      else
		insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
	      emit_insn (insn);
	      if (TARGET_64BIT)
		insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
	      else
		insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
	    }
	  else
	    {
	      tmp2 = gen_reg_rtx (Pmode);
	      if (TARGET_64BIT)
		insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
	      else
		insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
	      insn = gen_rtx_SET (Pmode, dest,
				  gen_rtx_PLUS (Pmode, tmp2, tmp1));
	    }
	  emit_insn (insn);
	}
      else
	{
	  /* IE, or 64-bit offset LE.  */
	  tmp2 = gen_reg_rtx (Pmode);
	  if (TARGET_64BIT)
	    insn = gen_tls_got_tprel_64 (tmp2, got, addr);
	  else
	    insn = gen_tls_got_tprel_32 (tmp2, got, addr);
	  emit_insn (insn);

	  if (TARGET_64BIT)
	    insn = gen_tls_tls_64 (dest, tmp2, addr);
	  else
	    insn = gen_tls_tls_32 (dest, tmp2, addr);
	  emit_insn (insn);
	}
    }

  return dest;
}
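/* Commentary (added; a summary of the cases above, not new behavior):
   local-exec computes a tprel offset directly from the thread pointer
   (r13 on 64-bit, r2 on 32-bit SVR4); global-dynamic and local-dynamic
   call __tls_get_addr and wrap the call in a libcall block so later
   passes can share the result; initial-exec loads the tprel offset from
   the GOT and adds it to the thread pointer.  */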
/* Return 1 if X contains a thread-local symbol.  */

bool
rs6000_tls_referenced_p (rtx x)
{
  if (! TARGET_HAVE_TLS)
    return false;

  return for_each_rtx (&x, &rs6000_tls_symbol_ref_1, 0);
}

/* Return 1 if *X is a thread-local symbol.  This is the same as
   rs6000_tls_symbol_ref except for the type of the unused argument.  */

static int
rs6000_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
{
  return RS6000_SYMBOL_REF_TLS_P (*x);
}
/* Our implementation of LEGITIMIZE_RELOAD_ADDRESS.  Returns a value to
   replace the input X, or the original X if no replacement is called for.
   The output parameter *WIN is 1 if the calling macro should goto WIN,
   0 if it should not.

   For RS/6000, we wish to handle large displacements off a base
   register by splitting the addend across an addi/addis and the mem insn.
   This cuts the number of extra insns needed from 3 to 1.

   On Darwin, we use this to generate code for floating point constants.
   A movsf_low is generated so we wind up with 2 instructions rather than 3.
   The Darwin code is inside #if TARGET_MACHO because only then are the
   machopic_* functions defined.  */
static rtx
rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
				  int opnum, int type,
				  int ind_levels ATTRIBUTE_UNUSED, int *win)
{
  /* We must recognize output that we have already generated ourselves.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

#if TARGET_MACHO
  if (DEFAULT_ABI == ABI_DARWIN && flag_pic
      && GET_CODE (x) == LO_SUM
      && GET_CODE (XEXP (x, 0)) == PLUS
      && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
      && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
      && machopic_operand_p (XEXP (x, 1)))
    {
      /* Result of previous invocation of this function on Darwin
	 floating point constant.  */
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }
#endif

  /* Force ld/std non-word aligned offset into base register by wrapping
     in offset 0.  */
  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) < 32
      && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && (INTVAL (XEXP (x, 1)) & 3) != 0
      && !ALTIVEC_VECTOR_MODE (mode)
      && GET_MODE_SIZE (mode) >= UNITS_PER_WORD
      && TARGET_POWERPC64)
    {
      x = gen_rtx_PLUS (GET_MODE (x), x, GEN_INT (0));
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  if (GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
      && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && !SPE_VECTOR_MODE (mode)
      && !(TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
				  || mode == DDmode || mode == TDmode))
      && !ALTIVEC_VECTOR_MODE (mode))
    {
      HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
      HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
      HOST_WIDE_INT high
	= (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;

      /* Check for 32-bit overflow.  */
      if (high + low != val)
	{
	  *win = 0;
	  return x;
	}

      /* Reload the high part into a base reg; leave the low part
	 in the mem directly.  */

      x = gen_rtx_PLUS (GET_MODE (x),
			gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
				      GEN_INT (high)),
			GEN_INT (low));

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }
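  /* Worked example (added commentary, not from the original source): for
     val = 0x7fffffff the split above gives low = -1 and high =
     ((0x80000000 & 0xffffffff) ^ 0x80000000) - 0x80000000 = -0x80000000,
     so high + low = -0x80000001 != val and the overflow check rejects
     the address instead of emitting a wrong reload.  */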
  if (GET_CODE (x) == SYMBOL_REF
      && !ALTIVEC_VECTOR_MODE (mode)
      && !SPE_VECTOR_MODE (mode)
#if TARGET_MACHO
      && DEFAULT_ABI == ABI_DARWIN
      && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
#else
      && DEFAULT_ABI == ABI_V4
      && !flag_pic
#endif
      /* Don't do this for TFmode or TDmode, since the result isn't
	 offsettable.  The same goes for DImode without 64-bit gprs and
	 DFmode and DDmode without fprs.  */
      && mode != TFmode
      && mode != TDmode
      && (mode != DImode || TARGET_POWERPC64)
      && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
	  || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
    {
#if TARGET_MACHO
      if (flag_pic)
	{
	  rtx offset = machopic_gen_offset (x);
	  x = gen_rtx_LO_SUM (GET_MODE (x),
			      gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
					    gen_rtx_HIGH (Pmode, offset)),
			      offset);
	}
      else
#endif
	x = gen_rtx_LO_SUM (GET_MODE (x),
			    gen_rtx_HIGH (Pmode, x), x);

      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
		   BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
		   opnum, (enum reload_type) type);
      *win = 1;
      return x;
    }

  /* Reload an offset address wrapped by an AND that represents the
     masking of the lower bits.  Strip the outer AND and let reload
     convert the offset address into an indirect address.  */
  if (TARGET_ALTIVEC
      && ALTIVEC_VECTOR_MODE (mode)
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 0)) == PLUS
      && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
      && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -16)
    {
      x = XEXP (x, 0);
      *win = 1;
      return x;
    }

  if (TARGET_TOC
      && GET_CODE (x) == SYMBOL_REF
      && constant_pool_expr_p (x)
      && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), mode))
    {
      x = create_TOC_reference (x);
      *win = 1;
      return x;
    }

  *win = 0;
  return x;
}
/* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
   that is a valid memory address for an instruction.
   The MODE argument is the machine mode for the MEM expression
   that wants to use this address.

   On the RS/6000, there are four valid addresses: a SYMBOL_REF that
   refers to a constant pool entry of an address (or the sum of it
   plus a constant), a short (16-bit signed) constant plus a register,
   the sum of two registers, or a register indirect, possibly with an
   auto-increment.  For DFmode, DDmode and DImode with a constant plus
   register, we must ensure that both words are addressable or PowerPC64
   with offset word aligned.

   For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
   32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
   because adjacent memory cells are accessed by adding word-sized offsets
   during assembly output.  */

static bool
rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
{
  /* If this is an unaligned stvx/ldvx type address, discard the outer AND.  */
  if (TARGET_ALTIVEC
      && ALTIVEC_VECTOR_MODE (mode)
      && GET_CODE (x) == AND
      && GET_CODE (XEXP (x, 1)) == CONST_INT
      && INTVAL (XEXP (x, 1)) == -16)
    x = XEXP (x, 0);

  if (RS6000_SYMBOL_REF_TLS_P (x))
    return 0;
  if (legitimate_indirect_address_p (x, reg_ok_strict))
    return 1;
  if ((GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
      && !ALTIVEC_VECTOR_MODE (mode)
      && !SPE_VECTOR_MODE (mode)
      && mode != TFmode
      && mode != TDmode
      /* Restrict addressing for DI because of our SUBREG hackery.  */
      && !(TARGET_E500_DOUBLE
	   && (mode == DFmode || mode == DDmode || mode == DImode))
      && TARGET_UPDATE
      && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
    return 1;
  if (legitimate_small_data_p (mode, x))
    return 1;
  if (legitimate_constant_pool_address_p (x))
    return 1;
  /* If not REG_OK_STRICT (before reload) let pass any stack offset.  */
  if (! reg_ok_strict
      && GET_CODE (x) == PLUS
      && GET_CODE (XEXP (x, 0)) == REG
      && (XEXP (x, 0) == virtual_stack_vars_rtx
	  || XEXP (x, 0) == arg_pointer_rtx)
      && GET_CODE (XEXP (x, 1)) == CONST_INT)
    return 1;
  if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict))
    return 1;
  if (mode != TImode
      && mode != TFmode
      && mode != TDmode
      && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
	  || TARGET_POWERPC64
	  || (mode != DFmode && mode != DDmode)
	  || (TARGET_E500_DOUBLE && mode != DDmode))
      && (TARGET_POWERPC64 || mode != DImode)
      && !avoiding_indexed_address_p (mode)
      && legitimate_indexed_address_p (x, reg_ok_strict))
    return 1;
  if (GET_CODE (x) == PRE_MODIFY
      && mode != TImode
      && mode != TFmode
      && mode != TDmode
      && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
	  || TARGET_POWERPC64
	  || ((mode != DFmode && mode != DDmode) || TARGET_E500_DOUBLE))
      && (TARGET_POWERPC64 || mode != DImode)
      && !ALTIVEC_VECTOR_MODE (mode)
      && !SPE_VECTOR_MODE (mode)
      /* Restrict addressing for DI because of our SUBREG hackery.  */
      && !(TARGET_E500_DOUBLE
	   && (mode == DFmode || mode == DDmode || mode == DImode))
      && TARGET_UPDATE
      && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
      && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1), reg_ok_strict)
	  || (!avoiding_indexed_address_p (mode)
	      && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
      && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
    return 1;
  if (legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
    return 1;
  return 0;
}
/* Go to LABEL if ADDR (a legitimate address expression)
   has an effect that depends on the machine mode it is used for.

   On the RS/6000 this is true of all integral offsets (since AltiVec
   modes don't allow them) or is a pre-increment or decrement.

   ??? Except that due to conceptual problems in offsettable_address_p
   we can't really report the problems of integral offsets.  So leave
   this assuming that the adjustable offset must be valid for the
   sub-words of a TFmode operand, which is what we had before.  */

bool
rs6000_mode_dependent_address (rtx addr)
{
  switch (GET_CODE (addr))
    {
    case PLUS:
      if (GET_CODE (XEXP (addr, 1)) == CONST_INT)
	{
	  unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
	  return val + 12 + 0x8000 >= 0x10000;
	}
      break;

    case LO_SUM:
      return true;

      /* Auto-increment cases are now treated generically in recog.c.  */
    case PRE_MODIFY:
      return TARGET_UPDATE;

    default:
      break;
    }

  return false;
}
/* Implement FIND_BASE_TERM.  */

rtx
rs6000_find_base_term (rtx op)
{
  rtx base, offset;

  split_const (op, &base, &offset);
  if (GET_CODE (base) == UNSPEC)
    switch (XINT (base, 1))
      {
      case UNSPEC_TOCREL:
      case UNSPEC_MACHOPIC_OFFSET:
	/* OP represents SYM [+ OFFSET] - ANCHOR.  SYM is the base term
	   for aliasing purposes.  */
	return XVECEXP (base, 0, 0);
      }

  return op;
}
/* More elaborate version of recog's offsettable_memref_p predicate
   that works around the ??? note of rs6000_mode_dependent_address.
   In particular it accepts

     (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))

   in 32-bit mode, which the recog predicate rejects.  */

bool
rs6000_offsettable_memref_p (rtx op)
{
  if (!MEM_P (op))
    return false;

  /* First mimic offsettable_memref_p.  */
  if (offsettable_address_p (1, GET_MODE (op), XEXP (op, 0)))
    return true;

  /* offsettable_address_p invokes rs6000_mode_dependent_address, but
     the latter predicate knows nothing about the mode of the memory
     reference and, therefore, assumes that it is the largest supported
     mode (TFmode).  As a consequence, legitimate offsettable memory
     references are rejected.  rs6000_legitimate_offset_address_p contains
     the correct logic for the PLUS case of rs6000_mode_dependent_address.  */
  return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0), 1);
}
/* Return number of consecutive hard regs needed starting at reg REGNO
   to hold something of mode MODE.
   This is ordinarily the length in words of a value of mode MODE
   but can be less for certain modes in special long registers.

   For the SPE, GPRs are 64 bits but only 32 bits are visible in
   scalar instructions.  The upper 32 bits are only available to the
   SIMD instructions.

   POWER and PowerPC GPRs hold 32 bits worth;
   PowerPC64 GPRs and FPRs hold 64 bits worth.  */

int
rs6000_hard_regno_nregs (int regno, enum machine_mode mode)
{
  if (FP_REGNO_P (regno))
    return (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;

  if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
    return (GET_MODE_SIZE (mode) + UNITS_PER_SPE_WORD - 1) / UNITS_PER_SPE_WORD;

  if (ALTIVEC_REGNO_P (regno))
    return
      (GET_MODE_SIZE (mode) + UNITS_PER_ALTIVEC_WORD - 1)
      / UNITS_PER_ALTIVEC_WORD;

  /* The value returned for SCmode in the E500 double case is 2 for
     ABI compatibility; storing an SCmode value in a single register
     would require function_arg and rs6000_spe_function_arg to handle
     SCmode so as to pass the value correctly in a pair of
     registers.  */
  if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
      && !DECIMAL_FLOAT_MODE_P (mode))
    return (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;

  return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
}
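/* Examples (added commentary): with 32-bit GPRs a DFmode value (8 bytes)
   needs (8 + 4 - 1) / 4 = 2 consecutive GPRs but only
   (8 + 8 - 1) / 8 = 1 FPR, while a V4SImode value occupies a single
   AltiVec register since an AltiVec word is 16 bytes wide.  */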
/* Change register usage conditional on target flags.  */
void
rs6000_conditional_register_usage (void)
{
  int i;

  /* Set MQ register fixed (already call_used) if not POWER
     architecture (RIOS1, RIOS2, RSC, and PPC601) so that it will not
     be allocated.  */
  if (! TARGET_POWER)
    fixed_regs[64] = 1;

  /* 64-bit AIX and Linux reserve GPR13 for thread-private data.  */
  if (TARGET_64BIT)
    fixed_regs[13] = call_used_regs[13]
      = call_really_used_regs[13] = 1;

  /* Conditionally disable FPRs.  */
  if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
    for (i = 32; i < 64; i++)
      fixed_regs[i] = call_used_regs[i]
	= call_really_used_regs[i] = 1;

  /* The TOC register is not killed across calls in a way that is
     visible to the compiler.  */
  if (DEFAULT_ABI == ABI_AIX)
    call_really_used_regs[2] = 0;

  if (DEFAULT_ABI == ABI_V4
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
      && flag_pic == 2)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (DEFAULT_ABI == ABI_V4
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
      && flag_pic == 1)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (DEFAULT_ABI == ABI_DARWIN
      && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (TARGET_TOC && TARGET_MINIMAL_TOC)
    fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
      = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;

  if (TARGET_SPE)
    {
      global_regs[SPEFSCR_REGNO] = 1;
      /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
	 registers in prologues and epilogues.  We no longer use r14
	 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
	 pool for link-compatibility with older versions of GCC.  Once
	 "old" code has died out, we can return r14 to the allocation
	 pool.  */
      fixed_regs[14]
	= call_used_regs[14]
	= call_really_used_regs[14] = 1;
    }

  if (!TARGET_ALTIVEC)
    {
      for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
	fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
      call_really_used_regs[VRSAVE_REGNO] = 1;
    }

  if (TARGET_ALTIVEC)
    global_regs[VSCR_REGNO] = 1;

  if (TARGET_ALTIVEC_ABI)
    {
      for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
	call_used_regs[i] = call_really_used_regs[i] = 1;

      /* AIX reserves VR20:31 in non-extended ABI mode.  */
      if (TARGET_XCOFF)
	for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
	  fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
    }
}
/* Try to output insns to set TARGET equal to the constant C if it can
   be done in less than N insns.  Do all computations in MODE.
   Returns the place where the output has been placed if it can be
   done and the insns have been emitted.  If it would take more than N
   insns, zero is returned and no insns are emitted.  */

rtx
rs6000_emit_set_const (rtx dest, enum machine_mode mode,
		       rtx source, int n ATTRIBUTE_UNUSED)
{
  rtx result, insn, set;
  HOST_WIDE_INT c0, c1;

  switch (mode)
    {
    case QImode:
    case HImode:
      if (dest == NULL)
	dest = gen_reg_rtx (mode);
      emit_insn (gen_rtx_SET (VOIDmode, dest, source));
      return dest;

    case SImode:
      result = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);

      emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (result),
			      GEN_INT (INTVAL (source)
				       & (~ (HOST_WIDE_INT) 0xffff))));
      emit_insn (gen_rtx_SET (VOIDmode, dest,
			      gen_rtx_IOR (SImode, copy_rtx (result),
					   GEN_INT (INTVAL (source) & 0xffff))));
      result = dest;
      break;

    case DImode:
      switch (GET_CODE (source))
	{
	case CONST_INT:
	  c0 = INTVAL (source);
	  c1 = -(c0 < 0);
	  break;

	case CONST_DOUBLE:
#if HOST_BITS_PER_WIDE_INT >= 64
	  c0 = CONST_DOUBLE_LOW (source);
	  c1 = -(c0 < 0);
#else
	  c0 = CONST_DOUBLE_LOW (source);
	  c1 = CONST_DOUBLE_HIGH (source);
#endif
	  break;

	default:
	  gcc_unreachable ();
	}

      result = rs6000_emit_set_long_const (dest, c0, c1);
      break;

    default:
      gcc_unreachable ();
    }

  insn = get_last_insn ();
  set = single_set (insn);
  if (! CONSTANT_P (SET_SRC (set)))
    set_unique_reg_note (insn, REG_EQUAL, source);

  return result;
}
/* Having failed to find a 3 insn sequence in rs6000_emit_set_const,
   fall back to a straightforward decomposition.  We do this to avoid
   exponential run times encountered when looking for longer sequences
   with rs6000_emit_set_const.  */
static rtx
rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c1, HOST_WIDE_INT c2)
{
  if (!TARGET_POWERPC64)
    {
      rtx operand1, operand2;

      operand1 = operand_subword_force (dest, WORDS_BIG_ENDIAN == 0,
					DImode);
      operand2 = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN != 0,
					DImode);
      emit_move_insn (operand1, GEN_INT (c1));
      emit_move_insn (operand2, GEN_INT (c2));
    }
  else
    {
      HOST_WIDE_INT ud1, ud2, ud3, ud4;

      ud1 = c1 & 0xffff;
      ud2 = (c1 & 0xffff0000) >> 16;
#if HOST_BITS_PER_WIDE_INT >= 64
      c2 = c1 >> 32;
#endif
      ud3 = c2 & 0xffff;
      ud4 = (c2 & 0xffff0000) >> 16;

      if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
	  || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
	{
	  if (ud1 & 0x8000)
	    emit_move_insn (dest, GEN_INT (((ud1 ^ 0x8000) - 0x8000)));
	  else
	    emit_move_insn (dest, GEN_INT (ud1));
	}
      else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
	       || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
	{
	  if (ud2 & 0x8000)
	    emit_move_insn (dest, GEN_INT (((ud2 << 16) ^ 0x80000000)
					   - 0x80000000));
	  else
	    emit_move_insn (dest, GEN_INT (ud2 << 16));
	  if (ud1 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud1)));
	}
      else if ((ud4 == 0xffff && (ud3 & 0x8000))
	       || (ud4 == 0 && ! (ud3 & 0x8000)))
	{
	  if (ud3 & 0x8000)
	    emit_move_insn (dest, GEN_INT (((ud3 << 16) ^ 0x80000000)
					   - 0x80000000));
	  else
	    emit_move_insn (dest, GEN_INT (ud3 << 16));

	  if (ud2 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud2)));
	  emit_move_insn (copy_rtx (dest),
			  gen_rtx_ASHIFT (DImode, copy_rtx (dest),
					  GEN_INT (16)));
	  if (ud1 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud1)));
	}
      else
	{
	  if (ud4 & 0x8000)
	    emit_move_insn (dest, GEN_INT (((ud4 << 16) ^ 0x80000000)
					   - 0x80000000));
	  else
	    emit_move_insn (dest, GEN_INT (ud4 << 16));

	  if (ud3 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud3)));

	  emit_move_insn (copy_rtx (dest),
			  gen_rtx_ASHIFT (DImode, copy_rtx (dest),
					  GEN_INT (32)));
	  if (ud2 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud2 << 16)));
	  if (ud1 != 0)
	    emit_move_insn (copy_rtx (dest),
			    gen_rtx_IOR (DImode, copy_rtx (dest),
					 GEN_INT (ud1)));
	}
    }
  return dest;
}
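/* Commentary (added): in the worst case the TARGET_POWERPC64 path above
   expands to the classic five-instruction sequence, e.g. for the
   constant 0x123456789abcdef0:
	lis   rD,0x1234		(ud4 << 16, sign-adjusted)
	ori   rD,rD,0x5678	(| ud3)
	sldi  rD,rD,32		(shift into the high half)
	oris  rD,rD,0x9abc	(| ud2 << 16)
	ori   rD,rD,0xdef0	(| ud1)
   Shorter forms are emitted when the upper halfwords are all zeros or
   all ones, as the earlier arms of the if-chain show.  */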
/* Helper for the following.  Get rid of [r+r] memory refs
   in cases where it won't work (TImode, TFmode, TDmode).  */

static void
rs6000_eliminate_indexed_memrefs (rtx operands[2])
{
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (XEXP (operands[0], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0))
      && ! reload_in_progress)
    operands[0]
      = replace_equiv_address (operands[0],
			       copy_addr_to_reg (XEXP (operands[0], 0)));

  if (GET_CODE (operands[1]) == MEM
      && GET_CODE (XEXP (operands[1], 0)) != REG
      && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0))
      && ! reload_in_progress)
    operands[1]
      = replace_equiv_address (operands[1],
			       copy_addr_to_reg (XEXP (operands[1], 0)));
}
/* Emit a move from SOURCE to DEST in mode MODE.  */
void
rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
{
  rtx operands[2];
  operands[0] = dest;
  operands[1] = source;

  /* Sanity checks.  Check that we get CONST_DOUBLE only when we should.  */
  if (GET_CODE (operands[1]) == CONST_DOUBLE
      && ! FLOAT_MODE_P (mode)
      && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
    {
      /* FIXME.  This should never happen.  */
      /* Since it seems that it does, do the safe thing and convert
	 to a CONST_INT.  */
      operands[1] = gen_int_mode (CONST_DOUBLE_LOW (operands[1]), mode);
    }
  gcc_assert (GET_CODE (operands[1]) != CONST_DOUBLE
	      || FLOAT_MODE_P (mode)
	      || ((CONST_DOUBLE_HIGH (operands[1]) != 0
		   || CONST_DOUBLE_LOW (operands[1]) < 0)
		  && (CONST_DOUBLE_HIGH (operands[1]) != -1
		      || CONST_DOUBLE_LOW (operands[1]) >= 0)));

  /* Check if GCC is setting up a block move that will end up using FP
     registers as temporaries.  We must make sure this is acceptable.  */
  if (GET_CODE (operands[0]) == MEM
      && GET_CODE (operands[1]) == MEM
      && mode == DImode
      && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
	  || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
      && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
					    ? 32 : MEM_ALIGN (operands[0])))
	    || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
					       ? 32
					       : MEM_ALIGN (operands[1]))))
      && ! MEM_VOLATILE_P (operands[0])
      && ! MEM_VOLATILE_P (operands[1]))
    {
      emit_move_insn (adjust_address (operands[0], SImode, 0),
		      adjust_address (operands[1], SImode, 0));
      emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
		      adjust_address (copy_rtx (operands[1]), SImode, 4));
      return;
    }

  if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
      && !gpc_reg_operand (operands[1], mode))
    operands[1] = force_reg (mode, operands[1]);

  if (mode == SFmode && ! TARGET_POWERPC
      && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
      && GET_CODE (operands[0]) == MEM)
    {
      int regnum;

      if (reload_in_progress || reload_completed)
	regnum = true_regnum (operands[1]);
      else if (GET_CODE (operands[1]) == REG)
	regnum = REGNO (operands[1]);
      else
	regnum = -1;

      /* If operands[1] is a register, on POWER it may have
	 double-precision data in it, so truncate it to single
	 precision.  */
      if (FP_REGNO_P (regnum) || regnum >= FIRST_PSEUDO_REGISTER)
	{
	  rtx newreg;
	  newreg = (!can_create_pseudo_p () ? copy_rtx (operands[1])
		    : gen_reg_rtx (mode));
	  emit_insn (gen_aux_truncdfsf2 (newreg, operands[1]));
	  operands[1] = newreg;
	}
    }

  /* Recognize the case where operand[1] is a reference to thread-local
     data and load its address to a register.  */
  if (rs6000_tls_referenced_p (operands[1]))
    {
      enum tls_model model;
      rtx tmp = operands[1];
      rtx addend = NULL;

      if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
	{
	  addend = XEXP (XEXP (tmp, 0), 1);
	  tmp = XEXP (XEXP (tmp, 0), 0);
	}

      gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
      model = SYMBOL_REF_TLS_MODEL (tmp);
      gcc_assert (model != 0);

      tmp = rs6000_legitimize_tls_address (tmp, model);
      if (addend)
	{
	  tmp = gen_rtx_PLUS (mode, tmp, addend);
	  tmp = force_operand (tmp, operands[0]);
	}
      operands[1] = tmp;
    }

  /* Handle the case where reload calls us with an invalid address.  */
  if (reload_in_progress && mode == Pmode
      && (! general_operand (operands[1], mode)
	  || ! nonimmediate_operand (operands[0], mode)))
    goto emit_set;

  /* 128-bit constant floating-point values on Darwin should really be
     loaded as two parts.  */
  if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
      && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
    {
      /* DImode is used, not DFmode, because simplify_gen_subreg doesn't
	 know how to get a DFmode SUBREG of a TFmode.  */
      enum machine_mode imode = (TARGET_E500_DOUBLE ? DFmode : DImode);
      rs6000_emit_move (simplify_gen_subreg (imode, operands[0], mode, 0),
			simplify_gen_subreg (imode, operands[1], mode, 0),
			imode);
      rs6000_emit_move (simplify_gen_subreg (imode, operands[0], mode,
					     GET_MODE_SIZE (imode)),
			simplify_gen_subreg (imode, operands[1], mode,
					     GET_MODE_SIZE (imode)),
			imode);
      return;
    }

  if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
    cfun->machine->sdmode_stack_slot =
      eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);

  if (reload_in_progress
      && mode == SDmode
      && MEM_P (operands[0])
      && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
      && REG_P (operands[1]))
    {
      if (FP_REGNO_P (REGNO (operands[1])))
	{
	  rtx mem = adjust_address_nv (operands[0], DDmode, 0);
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  emit_insn (gen_movsd_store (mem, operands[1]));
	}
      else if (INT_REGNO_P (REGNO (operands[1])))
	{
	  rtx mem = adjust_address_nv (operands[0], mode, 4);
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  emit_insn (gen_movsd_hardfloat (mem, operands[1]));
	}
      else
	gcc_unreachable ();
      return;
    }
  if (reload_in_progress
      && mode == SDmode
      && REG_P (operands[0])
      && MEM_P (operands[1])
      && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
    {
      if (FP_REGNO_P (REGNO (operands[0])))
	{
	  rtx mem = adjust_address_nv (operands[1], DDmode, 0);
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  emit_insn (gen_movsd_load (operands[0], mem));
	}
      else if (INT_REGNO_P (REGNO (operands[0])))
	{
	  rtx mem = adjust_address_nv (operands[1], mode, 4);
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  emit_insn (gen_movsd_hardfloat (operands[0], mem));
	}
      else
	gcc_unreachable ();
      return;
    }

  /* FIXME:  In the long term, this switch statement should go away
     and be replaced by a sequence of tests based on things like
     mode == Pmode.  */
  switch (mode)
    {
    case HImode:
    case QImode:
      if (CONSTANT_P (operands[1])
	  && GET_CODE (operands[1]) != CONST_INT)
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case TFmode:
    case TDmode:
      rs6000_eliminate_indexed_memrefs (operands);
      /* fall through */

    case DFmode:
    case DDmode:
    case SFmode:
    case SDmode:
      if (CONSTANT_P (operands[1])
	  && ! easy_fp_constant (operands[1], mode))
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case V16QImode:
    case V8HImode:
    case V4SFmode:
    case V4SImode:
    case V4HImode:
    case V2SFmode:
    case V2SImode:
    case V1DImode:
      if (CONSTANT_P (operands[1])
	  && !easy_vector_constant (operands[1], mode))
	operands[1] = force_const_mem (mode, operands[1]);
      break;

    case SImode:
    case DImode:
      /* Use default pattern for address of ELF small data */
      if (TARGET_ELF
	  && mode == Pmode
	  && DEFAULT_ABI == ABI_V4
	  && (GET_CODE (operands[1]) == SYMBOL_REF
	      || GET_CODE (operands[1]) == CONST)
	  && small_data_operand (operands[1], mode))
	{
	  emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
	  return;
	}

      if (DEFAULT_ABI == ABI_V4
	  && mode == Pmode && mode == SImode
	  && flag_pic == 1 && got_operand (operands[1], mode))
	{
	  emit_insn (gen_movsi_got (operands[0], operands[1]));
	  return;
	}

      if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
	  && TARGET_NO_TOC
	  && ! flag_pic
	  && mode == Pmode
	  && CONSTANT_P (operands[1])
	  && GET_CODE (operands[1]) != HIGH
	  && GET_CODE (operands[1]) != CONST_INT)
	{
	  rtx target = (!can_create_pseudo_p ()
			? operands[0]
			: gen_reg_rtx (mode));

	  /* If this is a function address on -mcall-aixdesc,
	     convert it to the address of the descriptor.  */
	  if (DEFAULT_ABI == ABI_AIX
	      && GET_CODE (operands[1]) == SYMBOL_REF
	      && XSTR (operands[1], 0)[0] == '.')
	    {
	      const char *name = XSTR (operands[1], 0);
	      rtx new_ref;
	      while (*name == '.')
		name++;
	      new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
	      CONSTANT_POOL_ADDRESS_P (new_ref)
		= CONSTANT_POOL_ADDRESS_P (operands[1]);
	      SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
	      SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
	      SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
	      operands[1] = new_ref;
	    }

	  if (DEFAULT_ABI == ABI_DARWIN)
	    {
#if TARGET_MACHO
	      if (MACHO_DYNAMIC_NO_PIC_P)
		{
		  /* Take care of any required data indirection.  */
		  operands[1] = rs6000_machopic_legitimize_pic_address (
				  operands[1], mode, operands[0]);
		  if (operands[0] != operands[1])
		    emit_insn (gen_rtx_SET (VOIDmode,
					    operands[0], operands[1]));
		  return;
		}
#endif
	      emit_insn (gen_macho_high (target, operands[1]));
	      emit_insn (gen_macho_low (operands[0], target, operands[1]));
	      return;
	    }

	  emit_insn (gen_elf_high (target, operands[1]));
	  emit_insn (gen_elf_low (operands[0], target, operands[1]));
	  return;
	}

      /* If this is a SYMBOL_REF that refers to a constant pool entry,
	 and we have put it in the TOC, we just need to make a TOC-relative
	 reference to it.  */
      if (TARGET_TOC
	  && GET_CODE (operands[1]) == SYMBOL_REF
	  && constant_pool_expr_p (operands[1])
	  && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (operands[1]),
					      get_pool_mode (operands[1])))
	{
	  operands[1] = create_TOC_reference (operands[1]);
	}
      else if (mode == Pmode
	       && CONSTANT_P (operands[1])
	       && ((GET_CODE (operands[1]) != CONST_INT
		    && ! easy_fp_constant (operands[1], mode))
		   || (GET_CODE (operands[1]) == CONST_INT
		       && num_insns_constant (operands[1], mode) > 2)
		   || (GET_CODE (operands[0]) == REG
		       && FP_REGNO_P (REGNO (operands[0]))))
	       && GET_CODE (operands[1]) != HIGH
	       && ! legitimate_constant_pool_address_p (operands[1])
	       && ! toc_relative_expr_p (operands[1]))
	{
#if TARGET_MACHO
	  /* Darwin uses a special PIC legitimizer.  */
	  if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
	    {
	      operands[1] =
		rs6000_machopic_legitimize_pic_address (operands[1], mode,
							operands[0]);
	      if (operands[0] != operands[1])
		emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
	      return;
	    }
#endif

	  /* If we are to limit the number of things we put in the TOC and
	     this is a symbol plus a constant we can add in one insn,
	     just put the symbol in the TOC and add the constant.  Don't do
	     this if reload is in progress.  */
	  if (GET_CODE (operands[1]) == CONST
	      && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
	      && GET_CODE (XEXP (operands[1], 0)) == PLUS
	      && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
	      && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
		  || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
	      && ! side_effects_p (operands[0]))
	    {
	      rtx sym =
		force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
	      rtx other = XEXP (XEXP (operands[1], 0), 1);

	      sym = force_reg (mode, sym);
	      if (mode == SImode)
		emit_insn (gen_addsi3 (operands[0], sym, other));
	      else
		emit_insn (gen_adddi3 (operands[0], sym, other));
	      return;
	    }

	  operands[1] = force_const_mem (mode, operands[1]);

	  if (TARGET_TOC
	      && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
	      && constant_pool_expr_p (XEXP (operands[1], 0))
	      && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
			get_pool_constant (XEXP (operands[1], 0)),
			get_pool_mode (XEXP (operands[1], 0))))
	    {
	      operands[1]
		= gen_const_mem (mode,
				 create_TOC_reference (XEXP (operands[1], 0)));
	      set_mem_alias_set (operands[1], get_TOC_alias_set ());
	    }
	}
      break;

    case TImode:
      rs6000_eliminate_indexed_memrefs (operands);

      if (TARGET_POWER)
	{
	  emit_insn (gen_rtx_PARALLEL (VOIDmode,
				       gen_rtvec (2,
						  gen_rtx_SET (VOIDmode,
							       operands[0],
							       operands[1]),
						  gen_rtx_CLOBBER (VOIDmode,
								   gen_rtx_SCRATCH (SImode)))));
	  return;
	}
      break;

    default:
      gcc_unreachable ();
    }

  /* Above, we may have called force_const_mem which may have returned
     an invalid address.  If we can, fix this up; otherwise, reload will
     have to deal with it.  */
  if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
    operands[1] = validize_mem (operands[1]);

 emit_set:
  emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
}
/* Nonzero if we can use a floating-point register to pass this arg.  */
#define USE_FP_FOR_ARG_P(CUM,MODE,TYPE)		\
  (SCALAR_FLOAT_MODE_P (MODE)			\
   && (CUM)->fregno <= FP_ARG_MAX_REG		\
   && TARGET_HARD_FLOAT && TARGET_FPRS)

/* Nonzero if we can use an AltiVec register to pass this arg.  */
#define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,TYPE,NAMED)	\
  (ALTIVEC_VECTOR_MODE (MODE)				\
   && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG		\
   && TARGET_ALTIVEC_ABI				\
   && (NAMED))
/* Return a nonzero value to say to return the function value in
   memory, just as large structures are always returned.  TYPE will be
   the data type of the value, and FNTYPE will be the type of the
   function doing the returning, or @code{NULL} for libcalls.

   The AIX ABI for the RS/6000 specifies that all structures are
   returned in memory.  The Darwin ABI does the same.  The SVR4 ABI
   specifies that structures <= 8 bytes are returned in r3/r4, but a
   draft put them in memory, and GCC used to implement the draft
   instead of the final standard.  Therefore, aix_struct_return
   controls this instead of DEFAULT_ABI; V.4 targets needing backward
   compatibility can change DRAFT_V4_STRUCT_RET to override the
   default, and -m switches get the final word.  See
   rs6000_override_options for more details.

   The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
   long double support is enabled.  These values are returned in memory.

   int_size_in_bytes returns -1 for variable size objects, which go in
   memory always.  The cast to unsigned makes -1 > 8.  */

static bool
rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* In the darwin64 abi, try to use registers for larger structs
     if possible.  */
  if (rs6000_darwin64_abi
      && TREE_CODE (type) == RECORD_TYPE
      && int_size_in_bytes (type) > 0)
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed
	 as an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, type, 1, true);
      if (valret)
	return false;
      /* Otherwise fall through to more conventional ABI rules.  */
    }

  if (AGGREGATE_TYPE_P (type)
      && (aix_struct_return
	  || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
    return true;

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
    return false;

  /* Return synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_return_big_vectors = false;
      if (!warned_for_return_big_vectors)
	{
	  warning (0, "GCC vector returned by reference: "
		   "non-standard ABI extension with no compatibility guarantee");
	  warned_for_return_big_vectors = true;
	}
      return true;
    }

  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
    return true;

  return false;
}
/* Initialize a variable CUM of type CUMULATIVE_ARGS
   for a call to a function whose data type is FNTYPE.
   For a library call, FNTYPE is 0.

   For incoming args we set the number of arguments in the prototype large
   so we never return a PARALLEL.  */

void
init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
		      rtx libname ATTRIBUTE_UNUSED, int incoming,
		      int libcall, int n_named_args)
{
  static CUMULATIVE_ARGS zero_cumulative;

  *cum = zero_cumulative;
  cum->words = 0;
  cum->fregno = FP_ARG_MIN_REG;
  cum->vregno = ALTIVEC_ARG_MIN_REG;
  cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
  cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
		      ? CALL_LIBCALL : CALL_NORMAL);
  cum->sysv_gregno = GP_ARG_MIN_REG;
  cum->stdarg = fntype
    && (TYPE_ARG_TYPES (fntype) != 0
	&& (TREE_VALUE (tree_last (TYPE_ARG_TYPES (fntype)))
	    != void_type_node));

  cum->nargs_prototype = 0;
  if (incoming || cum->prototype)
    cum->nargs_prototype = n_named_args;

  /* Check for a longcall attribute.  */
  if ((!fntype && rs6000_default_long_calls)
      || (fntype
	  && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
	  && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
    cum->call_cookie |= CALL_LONG;

  if (TARGET_DEBUG_ARG)
    {
      fprintf (stderr, "\ninit_cumulative_args:");
      if (fntype)
	{
	  tree ret_type = TREE_TYPE (fntype);
	  fprintf (stderr, " ret code = %s,",
		   tree_code_name[(int) TREE_CODE (ret_type)]);
	}

      if (cum->call_cookie & CALL_LONG)
	fprintf (stderr, " longcall,");

      fprintf (stderr, " proto = %d, nargs = %d\n",
	       cum->prototype, cum->nargs_prototype);
    }

  if (fntype
      && !TARGET_ALTIVEC
      && TARGET_ALTIVEC_ABI
      && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
    {
      error ("cannot return value in vector register because"
	     " altivec instructions are disabled, use -maltivec"
	     " to enable them");
    }
}
/* Return true if TYPE must be passed on the stack and not in registers.  */

static bool
rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
{
  if (DEFAULT_ABI == ABI_AIX || TARGET_64BIT)
    return must_pass_in_stack_var_size (mode, type);
  else
    return must_pass_in_stack_var_size_or_pad (mode, type);
}
/* If defined, a C expression which determines whether, and in which
   direction, to pad out an argument with extra space.  The value
   should be of type `enum direction': either `upward' to pad above
   the argument, `downward' to pad below, or `none' to inhibit
   padding.

   For the AIX ABI structs are always stored left shifted in their
   argument slot.  */

enum direction
function_arg_padding (enum machine_mode mode, const_tree type)
{
#ifndef AGGREGATE_PADDING_FIXED
#define AGGREGATE_PADDING_FIXED 0
#endif
#ifndef AGGREGATES_PAD_UPWARD_ALWAYS
#define AGGREGATES_PAD_UPWARD_ALWAYS 0
#endif

  if (!AGGREGATE_PADDING_FIXED)
    {
      /* GCC used to pass structures of the same size as integer types as
	 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
	 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
	 passed padded downward, except that -mstrict-align further
	 muddied the water in that multi-component structures of 2 and 4
	 bytes in size were passed padded upward.

	 The following arranges for best compatibility with previous
	 versions of gcc, but removes the -mstrict-align dependency.  */
      if (BYTES_BIG_ENDIAN)
	{
	  HOST_WIDE_INT size = 0;

	  if (mode == BLKmode)
	    {
	      if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
		size = int_size_in_bytes (type);
	    }
	  else
	    size = GET_MODE_SIZE (mode);

	  if (size == 1 || size == 2 || size == 4)
	    return downward;
	}
      return upward;
    }

  if (AGGREGATES_PAD_UPWARD_ALWAYS)
    {
      if (type != 0 && AGGREGATE_TYPE_P (type))
	return upward;
    }

  /* Fall back to the default.  */
  return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
}
/* If defined, a C expression that gives the alignment boundary, in bits,
   of an argument with the specified mode and type.  If it is not defined,
   PARM_BOUNDARY is used for all arguments.

   V.4 wants long longs and doubles to be double word aligned.  Just
   testing the mode size is a boneheaded way to do this as it means
   that other types such as complex int are also double word aligned.
   However, we're stuck with this because changing the ABI might break
   existing library interfaces.

   Doubleword align SPE vectors.
   Quadword align Altivec vectors.
   Quadword align large synthetic vector types.   */

int
function_arg_boundary (enum machine_mode mode, tree type)
{
  if (DEFAULT_ABI == ABI_V4
      && (GET_MODE_SIZE (mode) == 8
	  || (TARGET_HARD_FLOAT
	      && TARGET_FPRS
	      && (mode == TFmode || mode == TDmode))))
    return 64;
  else if (SPE_VECTOR_MODE (mode)
	   || (type && TREE_CODE (type) == VECTOR_TYPE
	       && int_size_in_bytes (type) >= 8
	       && int_size_in_bytes (type) < 16))
    return 64;
  else if (ALTIVEC_VECTOR_MODE (mode)
	   || (type && TREE_CODE (type) == VECTOR_TYPE
	       && int_size_in_bytes (type) >= 16))
    return 128;
  else if (rs6000_darwin64_abi && mode == BLKmode
	   && type && TYPE_ALIGN (type) > 64)
    return 128;
  else
    return PARM_BOUNDARY;
}
/* For a function parm of MODE and TYPE, return the starting word in
   the parameter area.  NWORDS of the parameter area are already used.  */

static unsigned int
rs6000_parm_start (enum machine_mode mode, tree type, unsigned int nwords)
{
  unsigned int align;
  unsigned int parm_offset;

  align = function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
  parm_offset = DEFAULT_ABI == ABI_V4 ? 2 : 6;
  return nwords + (-(parm_offset + nwords) & align);
}
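/* Worked example (added commentary): on AIX (parm_offset 6, 32-bit
   words) a 16-byte-aligned vector arriving with nwords = 3 words
   already used gives align = 128/32 - 1 = 3 and
   start = 3 + (-(6 + 3) & 3) = 6, i.e. the argument is pushed up to
   the next 16-byte-aligned slot of the parameter save area.  */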
/* Compute the size (in words) of a function argument.  */

static unsigned long
rs6000_arg_size (enum machine_mode mode, tree type)
{
  unsigned long size;

  if (mode != BLKmode)
    size = GET_MODE_SIZE (mode);
  else
    size = int_size_in_bytes (type);

  if (TARGET_32BIT)
    return (size + 3) >> 2;
  else
    return (size + 7) >> 3;
}
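/* Example (added commentary): a 10-byte BLKmode struct occupies
   (10 + 3) >> 2 = 3 parameter words on 32-bit targets and
   (10 + 7) >> 3 = 2 words on 64-bit targets.  */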
/* Use this to flush pending int fields.  */

static void
rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
					  HOST_WIDE_INT bitpos)
{
  unsigned int startbit, endbit;
  int intregs, intoffset;
  enum machine_mode mode;

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;

  if (intoffset % BITS_PER_WORD != 0)
    {
      mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
			    MODE_INT, 0);
      if (mode == BLKmode)
	{
	  /* We couldn't find an appropriate mode, which happens,
	     e.g., in packed structs when there are 3 bytes to load.
	     Back intoffset back to the beginning of the word in this
	     case.  */
	  intoffset = intoffset & -BITS_PER_WORD;
	}
    }

  startbit = intoffset & -BITS_PER_WORD;
  endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
  intregs = (endbit - startbit) / BITS_PER_WORD;
  cum->words += intregs;
}
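/* Worked example (added commentary): with 64-bit words, pending int
   fields starting at cum->intoffset = 0 and flushed at bitpos = 96
   give startbit = 0 and endbit = (96 + 63) & -64 = 128, so intregs = 2
   and two GPR-sized parameter words are counted.  */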
/* The darwin64 ABI calls for us to recurse down through structs,
   looking for elements passed in registers.  Unfortunately, we have
   to track int register count here also because of misalignments
   in powerpc alignment mode.  */

static void
rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
					    const_tree type,
					    HOST_WIDE_INT startbitpos)
{
  tree f;

  for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
	HOST_WIDE_INT bitpos = startbitpos;
	tree ftype = TREE_TYPE (f);
	enum machine_mode mode;
	if (ftype == error_mark_node)
	  continue;
	mode = TYPE_MODE (ftype);

	if (DECL_SIZE (f) != 0
	    && host_integerp (bit_position (f), 1))
	  bitpos += int_bit_position (f);

	/* ??? FIXME: else assume zero offset.  */

	if (TREE_CODE (ftype) == RECORD_TYPE)
	  rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
	else if (USE_FP_FOR_ARG_P (cum, mode, ftype))
	  {
	    rs6000_darwin64_record_arg_advance_flush (cum, bitpos);
	    cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
	    cum->words += (GET_MODE_SIZE (mode) + 7) >> 3;
	  }
	else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, 1))
	  {
	    rs6000_darwin64_record_arg_advance_flush (cum, bitpos);
	    cum->vregno++;
	    cum->words += 2;
	  }
	else if (cum->intoffset == -1)
	  cum->intoffset = bitpos;
      }
}
/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

void
function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
		      tree type, int named, int depth)
{
  int size;

  /* Only tick off an argument if we're not recursing.  */
  if (depth == 0)
    cum->nargs_prototype--;

  if (TARGET_ALTIVEC_ABI
      && (ALTIVEC_VECTOR_MODE (mode)
	  || (type && TREE_CODE (type) == VECTOR_TYPE
	      && int_size_in_bytes (type) == 16)))
    {
      bool stack = false;

      if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
	{
	  cum->vregno++;
	  if (!TARGET_ALTIVEC)
	    error ("cannot pass argument in vector register because"
		   " altivec instructions are disabled, use -maltivec"
		   " to enable them");

	  /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
	     even if it is going to be passed in a vector register.
	     Darwin does the same for variable-argument functions.  */
	  if ((DEFAULT_ABI == ABI_AIX && TARGET_64BIT)
	      || (cum->stdarg && DEFAULT_ABI != ABI_V4))
	    stack = true;
	}
      else
	stack = true;

      if (stack)
	{
	  int align;

	  /* Vector parameters must be 16-byte aligned.  This places
	     them at 2 mod 4 in terms of words in 32-bit mode, since
	     the parameter save area starts at offset 24 from the
	     stack.  In 64-bit mode, they just have to start on an
	     even word, since the parameter save area is 16-byte
	     aligned.  Space for GPRs is reserved even if the argument
	     will be passed in memory.  */
	  if (TARGET_32BIT)
	    align = (2 - cum->words) & 3;
	  else
	    align = cum->words & 1;
	  cum->words += align + rs6000_arg_size (mode, type);

	  if (TARGET_DEBUG_ARG)
	    {
	      fprintf (stderr, "function_adv: words = %2d, align=%d, ",
		       cum->words, align);
	      fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
		       cum->nargs_prototype, cum->prototype,
		       GET_MODE_NAME (mode));
	    }
	}
    }
  else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
	   && !cum->stdarg
	   && cum->sysv_gregno <= GP_ARG_MAX_REG)
    cum->sysv_gregno++;

  else if (rs6000_darwin64_abi
	   && mode == BLKmode
	   && TREE_CODE (type) == RECORD_TYPE
	   && (size = int_size_in_bytes (type)) > 0)
    {
      /* Variable sized types have size == -1 and are
	 treated as if consisting entirely of ints.
	 Pad to 16 byte boundary if needed.  */
      if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
	  && (cum->words % 2) != 0)
	cum->words++;
      /* For varargs, we can just go up by the size of the struct. */
      if (!named)
	cum->words += (size + 7) / 8;
      else
	{
	  /* It is tempting to say int register count just goes up by
	     sizeof(type)/8, but this is wrong in a case such as
	     { int; double; int; } [powerpc alignment].  We have to
	     grovel through the fields for these too.  */
	  cum->intoffset = 0;
	  rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
	  rs6000_darwin64_record_arg_advance_flush (cum,
						    size * BITS_PER_UNIT);
	}
    }
  else if (DEFAULT_ABI == ABI_V4)
    {
      if (TARGET_HARD_FLOAT && TARGET_FPRS
	  && ((TARGET_SINGLE_FLOAT && mode == SFmode)
	      || (TARGET_DOUBLE_FLOAT && mode == DFmode)
	      || (mode == TFmode && !TARGET_IEEEQUAD)
	      || mode == SDmode || mode == DDmode || mode == TDmode))
	{
	  /* _Decimal128 must use an even/odd register pair.  This assumes
	     that the register number is odd when fregno is odd.  */
	  if (mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;

	  if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
	      <= FP_ARG_V4_MAX_REG)
	    cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
	  else
	    {
	      cum->fregno = FP_ARG_V4_MAX_REG + 1;
	      if (mode == DFmode || mode == TFmode
		  || mode == DDmode || mode == TDmode)
		cum->words += cum->words & 1;
	      cum->words += rs6000_arg_size (mode, type);
	    }
	}
      else
	{
	  int n_words = rs6000_arg_size (mode, type);
	  int gregno = cum->sysv_gregno;

	  /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
	     (r7,r8) or (r9,r10).  As does any other 2 word item such
	     as complex int due to a historical mistake.  */
	  if (n_words == 2)
	    gregno += (1 - gregno) & 1;

	  /* Multi-reg args are not split between registers and stack.  */
	  if (gregno + n_words - 1 > GP_ARG_MAX_REG)
	    {
	      /* Long long and SPE vectors are aligned on the stack.
		 So are other 2 word items such as complex int due to
		 a historical mistake.  */
	      if (n_words == 2)
		cum->words += cum->words & 1;
	      cum->words += n_words;
	    }

	  /* Note: continuing to accumulate gregno past the point where
	     we've started spilling to the stack tells
	     expand_builtin_saveregs that we have done so.  */
	  cum->sysv_gregno = gregno + n_words;
	}
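      /* Illustrative note (added commentary): the (1 - gregno) & 1
	 adjustment above bumps an even register number to the next odd
	 one, so a long long arriving with gregno == 4 is moved to the
	 (r5,r6) pair while gregno == 5 is left alone.  */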
      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
		   cum->words, cum->fregno);
	  fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
		   cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
	  fprintf (stderr, "mode = %4s, named = %d\n",
		   GET_MODE_NAME (mode), named);
	}
    }
  else
    {
      int n_words = rs6000_arg_size (mode, type);
      int start_words = cum->words;
      int align_words = rs6000_parm_start (mode, type, start_words);

      cum->words = align_words + n_words;

      if (SCALAR_FLOAT_MODE_P (mode)
	  && TARGET_HARD_FLOAT && TARGET_FPRS)
	{
	  /* _Decimal128 must be passed in an even/odd float register pair.
	     This assumes that the register number is odd when fregno is
	     odd.  */
	  if (mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;
	  cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
	}

      if (TARGET_DEBUG_ARG)
	{
	  fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
		   cum->words, cum->fregno);
	  fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
		   cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
	  fprintf (stderr, "named = %d, align = %d, depth = %d\n",
		   named, align_words - start_words, depth);
	}
    }
}
static rtx
spe_build_register_parallel (enum machine_mode mode, int gregno)
{
  rtx r1, r3, r5, r7;

  switch (mode)
    {
    case DFmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));

    case DCmode:
    case TFmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      r3 = gen_rtx_REG (DImode, gregno + 2);
      r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
      return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));

    case TCmode:
      r1 = gen_rtx_REG (DImode, gregno);
      r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
      r3 = gen_rtx_REG (DImode, gregno + 2);
      r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
      r5 = gen_rtx_REG (DImode, gregno + 4);
      r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
      r7 = gen_rtx_REG (DImode, gregno + 6);
      r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
      return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));

    default:
      gcc_unreachable ();
    }
}
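/* Commentary (added): for DFmode the function above yields RTL of
   roughly the shape
     (parallel:DF [(expr_list (reg:DI rN) (const_int 0))]),
   i.e. the value lives in one 64-bit GPR at byte offset 0 of the
   argument; TCmode spreads four DImode pieces at offsets 0, 8, 16
   and 24.  */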
/* Determine where to put a SIMD argument on the SPE.  */
static rtx
rs6000_spe_function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			 tree type)
{
  int gregno = cum->sysv_gregno;

  /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
     doubles are passed and returned in a pair of GPRs for ABI
     compatibility.  */
  if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
			     || mode == DCmode || mode == TCmode))
    {
      int n_words = rs6000_arg_size (mode, type);

      /* Doubles go in an odd/even register pair (r5/r6, etc).  */
      if (mode == DFmode)
	gregno += (1 - gregno) & 1;

      /* Multi-reg args are not split between registers and stack.  */
      if (gregno + n_words - 1 > GP_ARG_MAX_REG)
	return NULL_RTX;

      return spe_build_register_parallel (mode, gregno);
    }
  if (cum->stdarg)
    {
      int n_words = rs6000_arg_size (mode, type);

      /* SPE vectors are put in odd registers.  */
      if (n_words == 2 && (gregno & 1) == 0)
	gregno += 1;

      if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
	{
	  rtx r1, r2;
	  enum machine_mode m = SImode;

	  r1 = gen_rtx_REG (m, gregno);
	  r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
	  r2 = gen_rtx_REG (m, gregno + 1);
	  r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
	  return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
	}
      else
	return NULL_RTX;
    }
  else
    {
      if (gregno <= GP_ARG_MAX_REG)
	return gen_rtx_REG (mode, gregno);
      else
	return NULL_RTX;
    }
}
/* A subroutine of rs6000_darwin64_record_arg.  Assign the bits of the
   structure between cum->intoffset and bitpos to integer registers.  */

static void
rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
				  HOST_WIDE_INT bitpos, rtx rvec[], int *k)
{
  enum machine_mode mode;
  unsigned int regno;
  unsigned int startbit, endbit;
  int this_regno, intregs, intoffset;
  rtx reg;

  if (cum->intoffset == -1)
    return;

  intoffset = cum->intoffset;
  cum->intoffset = -1;

  /* If this is the trailing part of a word, try to only load that
     much into the register.  Otherwise load the whole register.  Note
     that in the latter case we may pick up unwanted bits.  It's not a
     problem at the moment but we may wish to revisit it.  */

  if (intoffset % BITS_PER_WORD != 0)
    {
      mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
			    MODE_INT, 0);
      if (mode == BLKmode)
	{
	  /* We couldn't find an appropriate mode, which happens,
	     e.g., in packed structs when there are 3 bytes to load.
	     Back intoffset back to the beginning of the word in this
	     case.  */
	  intoffset = intoffset & -BITS_PER_WORD;
	  mode = word_mode;
	}
    }
  else
    mode = word_mode;

  startbit = intoffset & -BITS_PER_WORD;
  endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
  intregs = (endbit - startbit) / BITS_PER_WORD;
  this_regno = cum->words + intoffset / BITS_PER_WORD;

  if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
    cum->use_stack = 1;

  intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
  if (intregs <= 0)
    return;

  intoffset /= BITS_PER_UNIT;
  do
    {
      regno = GP_ARG_MIN_REG + this_regno;
      reg = gen_rtx_REG (mode, regno);
      rvec[(*k)++] =
	gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));

      this_regno += 1;
      intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
      mode = word_mode;
      intregs -= 1;
    }
  while (intregs > 0);
}
/* Recursive workhorse for the following.  */

static void
rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
				    HOST_WIDE_INT startbitpos, rtx rvec[],
				    int *k)
{
  tree f;

  for (f = TYPE_FIELDS (type); f ; f = TREE_CHAIN (f))
    if (TREE_CODE (f) == FIELD_DECL)
      {
	HOST_WIDE_INT bitpos = startbitpos;
	tree ftype = TREE_TYPE (f);
	enum machine_mode mode;
	if (ftype == error_mark_node)
	  continue;
	mode = TYPE_MODE (ftype);

	if (DECL_SIZE (f) != 0
	    && host_integerp (bit_position (f), 1))
	  bitpos += int_bit_position (f);

	/* ??? FIXME: else assume zero offset.  */

	if (TREE_CODE (ftype) == RECORD_TYPE)
	  rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
	else if (cum->named && USE_FP_FOR_ARG_P (cum, mode, ftype))
	  {
#if 0
	    switch (mode)
	      {
	      case SCmode: mode = SFmode; break;
	      case DCmode: mode = DFmode; break;
	      case TCmode: mode = TFmode; break;
	      }
#endif
	    rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
	    rvec[(*k)++]
	      = gen_rtx_EXPR_LIST (VOIDmode,
				   gen_rtx_REG (mode, cum->fregno++),
				   GEN_INT (bitpos / BITS_PER_UNIT));
	    if (mode == TFmode || mode == TDmode)
	      cum->fregno++;
	  }
	else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, ftype, 1))
	  {
	    rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
	    rvec[(*k)++]
	      = gen_rtx_EXPR_LIST (VOIDmode,
				   gen_rtx_REG (mode, cum->vregno++),
				   GEN_INT (bitpos / BITS_PER_UNIT));
	  }
	else if (cum->intoffset == -1)
	  cum->intoffset = bitpos;
      }
}
/* For the darwin64 ABI, we want to construct a PARALLEL consisting of
   the register(s) to be used for each field and subfield of a struct
   being passed by value, along with the offset of where the
   register's value may be found in the block.  FP fields go in FP
   register, vector fields go in vector registers, and everything
   else goes in int registers, packed as in memory.

   This code is also used for function return values.  RETVAL indicates
   whether this is the case.

   Much of this is taken from the SPARC V9 port, which has a similar
   calling convention.  */

static rtx
rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
			    int named, bool retval)
{
  rtx rvec[FIRST_PSEUDO_REGISTER];
  int k = 1, kbase = 1;
  HOST_WIDE_INT typesize = int_size_in_bytes (type);
  /* This is a copy; modifications are not visible to our caller.  */
  CUMULATIVE_ARGS copy_cum = *orig_cum;
  CUMULATIVE_ARGS *cum = &copy_cum;

  /* Pad to 16 byte boundary if needed.  */
  if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
      && (cum->words % 2) != 0)
    cum->words++;

  cum->intoffset = 0;
  cum->use_stack = 0;
  cum->named = named;

  /* Put entries into rvec[] for individual FP and vector fields, and
     for the chunks of memory that go in int regs.  Note we start at
     element 1; 0 is reserved for an indication of using memory, and
     may or may not be filled in below. */
  rs6000_darwin64_record_arg_recurse (cum, type, 0, rvec, &k);
  rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);

  /* If any part of the struct went on the stack put all of it there.
     This hack is because the generic code for
     FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
     parts of the struct are not at the beginning.  */
  if (cum->use_stack)
    {
      if (retval)
	return NULL_RTX;    /* doesn't go in registers at all  */
      kbase = 0;
      rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
    }
  if (k > 1 || cum->use_stack)
    return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
  else
    return NULL_RTX;
}
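
/* A hypothetical illustration (an editor's sketch, not from the
   original source): under darwin64, 'struct { double d; int a, b; }'
   passed by value could yield a PARALLEL whose first element sends D
   to the next free FP register and whose second flushes the two ints,
   packed as in memory, into one GPR word:

     (parallel:BLK [(expr_list (reg:DF fN) (const_int 0))
                    (expr_list (reg:DI rM) (const_int 8))])

   where fN and rM stand for whatever registers CUM has reached.  */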
/* Determine where to place an argument in 64-bit mode with 32-bit ABI.  */

static rtx
rs6000_mixed_function_arg (enum machine_mode mode, tree type, int align_words)
{
  int n_units;
  int i, k;
  rtx rvec[GP_ARG_NUM_REG + 1];

  if (align_words >= GP_ARG_NUM_REG)
    return NULL_RTX;

  n_units = rs6000_arg_size (mode, type);

  /* Optimize the simple case where the arg fits in one gpr, except in
     the case of BLKmode due to assign_parms assuming that registers are
     BITS_PER_WORD wide.  */
  if (n_units == 0
      || (n_units == 1 && mode != BLKmode))
    return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);

  k = 0;
  if (align_words + n_units > GP_ARG_NUM_REG)
    /* Not all of the arg fits in gprs.  Say that it goes in memory too,
       using a magic NULL_RTX component.
       This is not strictly correct.  Only some of the arg belongs in
       memory, not all of it.  However, the normal scheme using
       function_arg_partial_nregs can result in unusual subregs, eg.
       (subreg:SI (reg:DF) 4), which are not handled well.  The code to
       store the whole arg to memory is often more efficient than code
       to store pieces, and we know that space is available in the right
       place for the whole arg.  */
    rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);

  i = 0;
  do
    {
      rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
      rtx off = GEN_INT (i++ * 4);
      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
    }
  while (++align_words < GP_ARG_NUM_REG && --n_units != 0);

  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
}
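
/* Sketch of the common case handled above (illustrative only, not from
   the original source): a DFmode argument in 32-bit mode with 64-bit
   registers is described piecewise in SImode, so assign_parms never
   sees an awkward (subreg:SI (reg:DF) 4):

     (parallel:DF [(expr_list (reg:SI r)   (const_int 0))
                   (expr_list (reg:SI r+1) (const_int 4))])  */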
/* Determine where to put an argument to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.  It is
    not modified in this routine.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On RS/6000 the first eight words of non-FP are normally in registers
   and the rest are pushed.  Under AIX, the first 13 FP args are in registers.
   Under V.4, the first 8 FP args are in registers.

   If this is floating-point and no prototype is specified, we use
   both an FP and integer register (or possibly FP reg and stack).  Library
   functions (when CALL_LIBCALL is set) always have the proper types for args,
   so we can pass the FP value just in one register.  emit_library_function
   doesn't support PARALLEL anyway.

   Note that for args passed by reference, function_arg will be called
   with MODE and TYPE set to that of the pointer to the arg, not the arg
   itself.  */

rtx
function_arg (CUMULATIVE_ARGS *cum, enum machine_mode mode,
	      tree type, int named)
{
  enum rs6000_abi abi = DEFAULT_ABI;

  /* Return a marker to indicate whether CR1 needs to set or clear the
     bit that V.4 uses to say fp args were passed in registers.
     Assume that we don't need the marker for software floating point,
     or compiler generated library calls.  */
  if (mode == VOIDmode)
    {
      if (abi == ABI_V4
	  && (cum->call_cookie & CALL_LIBCALL) == 0
	  && (cum->stdarg
	      || (cum->nargs_prototype < 0
		  && (cum->prototype || TARGET_NO_PROTOTYPE))))
	{
	  /* For the SPE, we need to crxor CR6 always.  */
	  if (TARGET_SPE_ABI)
	    return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
	  else if (TARGET_HARD_FLOAT && TARGET_FPRS)
	    return GEN_INT (cum->call_cookie
			    | ((cum->fregno == FP_ARG_MIN_REG)
			       ? CALL_V4_SET_FP_ARGS
			       : CALL_V4_CLEAR_FP_ARGS));
	}

      return GEN_INT (cum->call_cookie);
    }

  if (rs6000_darwin64_abi && mode == BLKmode
      && TREE_CODE (type) == RECORD_TYPE)
    {
      rtx rslt = rs6000_darwin64_record_arg (cum, type, named, false);
      if (rslt != NULL_RTX)
	return rslt;
      /* Else fall through to usual handling.  */
    }

  if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named))
    if (TARGET_64BIT && ! cum->prototype)
      {
	/* Vector parameters get passed in vector register
	   and also in GPRs or memory, in absence of prototype.  */
	int align_words;
	rtx slot;
	align_words = (cum->words + 1) & ~1;

	if (align_words >= GP_ARG_NUM_REG)
	  {
	    slot = NULL_RTX;
	  }
	else
	  {
	    slot = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
	  }
	return gen_rtx_PARALLEL (mode,
		 gen_rtvec (2,
			    gen_rtx_EXPR_LIST (VOIDmode,
					       slot, const0_rtx),
			    gen_rtx_EXPR_LIST (VOIDmode,
					       gen_rtx_REG (mode, cum->vregno),
					       const0_rtx)));
      }
    else
      return gen_rtx_REG (mode, cum->vregno);
  else if (TARGET_ALTIVEC_ABI
	   && (ALTIVEC_VECTOR_MODE (mode)
	       || (type && TREE_CODE (type) == VECTOR_TYPE
		   && int_size_in_bytes (type) == 16)))
    {
      if (named || abi == ABI_V4)
	return NULL_RTX;
      else
	{
	  /* Vector parameters to varargs functions under AIX or Darwin
	     get passed in memory and possibly also in GPRs.  */
	  int align, align_words, n_words;
	  enum machine_mode part_mode;

	  /* Vector parameters must be 16-byte aligned.  This places them at
	     2 mod 4 in terms of words in 32-bit mode, since the parameter
	     save area starts at offset 24 from the stack.  In 64-bit mode,
	     they just have to start on an even word, since the parameter
	     save area is 16-byte aligned.  */
	  if (TARGET_32BIT)
	    align = (2 - cum->words) & 3;
	  else
	    align = cum->words & 1;
	  align_words = cum->words + align;

	  /* Out of registers?  Memory, then.  */
	  if (align_words >= GP_ARG_NUM_REG)
	    return NULL_RTX;

	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type, align_words);

	  /* The vector value goes in GPRs.  Only the part of the
	     value in GPRs is reported here.  */
	  part_mode = mode;
	  n_words = rs6000_arg_size (mode, type);
	  if (align_words + n_words > GP_ARG_NUM_REG)
	    /* Fortunately, there are only two possibilities, the value
	       is either wholly in GPRs or half in GPRs and half not.  */
	    part_mode = DImode;

	  return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
	}
    }
  else if (TARGET_SPE_ABI && TARGET_SPE
	   && (SPE_VECTOR_MODE (mode)
	       || (TARGET_E500_DOUBLE && (mode == DFmode
					  || mode == DCmode
					  || mode == TFmode
					  || mode == TCmode))))
    return rs6000_spe_function_arg (cum, mode, type);

  else if (abi == ABI_V4)
    {
      if (TARGET_HARD_FLOAT && TARGET_FPRS
	  && ((TARGET_SINGLE_FLOAT && mode == SFmode)
	      || (TARGET_DOUBLE_FLOAT && mode == DFmode)
	      || (mode == TFmode && !TARGET_IEEEQUAD)
	      || mode == SDmode || mode == DDmode || mode == TDmode))
	{
	  /* _Decimal128 must use an even/odd register pair.  This assumes
	     that the register number is odd when fregno is odd.  */
	  if (mode == TDmode && (cum->fregno % 2) == 1)
	    cum->fregno++;

	  if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
	      <= FP_ARG_V4_MAX_REG)
	    return gen_rtx_REG (mode, cum->fregno);
	  else
	    return NULL_RTX;
	}
      else
	{
	  int n_words = rs6000_arg_size (mode, type);
	  int gregno = cum->sysv_gregno;

	  /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
	     (r7,r8) or (r9,r10).  As does any other 2 word item such
	     as complex int due to a historical mistake.  */
	  if (n_words == 2)
	    gregno += (1 - gregno) & 1;

	  /* Multi-reg args are not split between registers and stack.  */
	  if (gregno + n_words - 1 > GP_ARG_MAX_REG)
	    return NULL_RTX;

	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type,
					      gregno - GP_ARG_MIN_REG);
	  return gen_rtx_REG (mode, gregno);
	}
    }
  else
    {
      int align_words = rs6000_parm_start (mode, type, cum->words);

      /* _Decimal128 must be passed in an even/odd float register pair.
	 This assumes that the register number is odd when fregno is odd.  */
      if (mode == TDmode && (cum->fregno % 2) == 1)
	cum->fregno++;

      if (USE_FP_FOR_ARG_P (cum, mode, type))
	{
	  rtx rvec[GP_ARG_NUM_REG + 1];
	  rtx r;
	  int k;
	  bool needs_psave;
	  enum machine_mode fmode = mode;
	  unsigned long n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;

	  if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
	    {
	      /* Currently, we only ever need one reg here because complex
		 doubles are split.  */
	      gcc_assert (cum->fregno == FP_ARG_MAX_REG
			  && (fmode == TFmode || fmode == TDmode));

	      /* Long double or _Decimal128 split over regs and memory.  */
	      fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
	    }

	  /* Do we also need to pass this arg in the parameter save
	     area?  */
	  needs_psave = (type
			 && (cum->nargs_prototype <= 0
			     || (DEFAULT_ABI == ABI_AIX
				 && TARGET_XL_COMPAT
				 && align_words >= GP_ARG_NUM_REG)));

	  if (!needs_psave && mode == fmode)
	    return gen_rtx_REG (fmode, cum->fregno);

	  k = 0;
	  if (needs_psave)
	    {
	      /* Describe the part that goes in gprs or the stack.
		 This piece must come first, before the fprs.  */
	      if (align_words < GP_ARG_NUM_REG)
		{
		  unsigned long n_words = rs6000_arg_size (mode, type);

		  if (align_words + n_words > GP_ARG_NUM_REG
		      || (TARGET_32BIT && TARGET_POWERPC64))
		    {
		      /* If this is partially on the stack, then we only
			 include the portion actually in registers here.  */
		      enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
		      rtx off;
		      int i = 0;
		      if (align_words + n_words > GP_ARG_NUM_REG)
			/* Not all of the arg fits in gprs.  Say that it
			   goes in memory too, using a magic NULL_RTX
			   component.  Also see comment in
			   rs6000_mixed_function_arg for why the normal
			   function_arg_partial_nregs scheme doesn't work
			   in this case.  */
			rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX,
						       const0_rtx);
		      do
			{
			  r = gen_rtx_REG (rmode,
					   GP_ARG_MIN_REG + align_words);
			  off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
			  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
			}
		      while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
		    }
		  else
		    {
		      /* The whole arg fits in gprs.  */
		      r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
		      rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
		    }
		}
	      else
		/* It's entirely in memory.  */
		rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
	    }

	  /* Describe where this piece goes in the fprs.  */
	  r = gen_rtx_REG (fmode, cum->fregno);
	  rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);

	  return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
	}
      else if (align_words < GP_ARG_NUM_REG)
	{
	  if (TARGET_32BIT && TARGET_POWERPC64)
	    return rs6000_mixed_function_arg (mode, type, align_words);

	  if (mode == BLKmode)
	    mode = Pmode;

	  return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
	}
      else
	return NULL_RTX;
    }
}
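
/* A worked example under the AIX ABI (an editor's sketch, not part of
   the original source): in 'void f (int a, double b, int c)', A goes
   in r3 and B in f1, while C takes the GPR matching its word in the
   parameter save area: r6 in 32-bit mode (B shadows r4/r5) or r5 in
   64-bit mode (B shadows r4).  Without a prototype, B is instead
   described by a PARALLEL naming both f1 and its shadow GPRs, so the
   callee can find it in either place.  */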
/* For an arg passed partly in registers and partly in memory, this is
   the number of bytes passed in registers.  For args passed entirely in
   registers or entirely in memory, zero.  When an arg is described by a
   PARALLEL, perhaps using more than one register type, this function
   returns the number of bytes used by the first element of the PARALLEL.  */

static int
rs6000_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			  tree type, bool named)
{
  int ret = 0;
  int align_words;

  if (DEFAULT_ABI == ABI_V4)
    return 0;

  if (USE_ALTIVEC_FOR_ARG_P (cum, mode, type, named)
      && cum->nargs_prototype >= 0)
    return 0;

  /* In this complicated case we just disable the partial_nregs code.  */
  if (rs6000_darwin64_abi && mode == BLKmode
      && TREE_CODE (type) == RECORD_TYPE
      && int_size_in_bytes (type) > 0)
    return 0;

  align_words = rs6000_parm_start (mode, type, cum->words);

  if (USE_FP_FOR_ARG_P (cum, mode, type))
    {
      /* If we are passing this arg in the fixed parameter save area
	 (gprs or memory) as well as fprs, then this function should
	 return the number of partial bytes passed in the parameter
	 save area rather than partial bytes passed in fprs.  */
      if (type
	  && (cum->nargs_prototype <= 0
	      || (DEFAULT_ABI == ABI_AIX
		  && TARGET_XL_COMPAT
		  && align_words >= GP_ARG_NUM_REG)))
	return 0;
      else if (cum->fregno + ((GET_MODE_SIZE (mode) + 7) >> 3)
	       > FP_ARG_MAX_REG + 1)
	ret = (FP_ARG_MAX_REG + 1 - cum->fregno) * 8;
      else if (cum->nargs_prototype >= 0)
	return 0;
    }

  if (align_words < GP_ARG_NUM_REG
      && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
    ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);

  if (ret != 0 && TARGET_DEBUG_ARG)
    fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);

  return ret;
}
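
/* For instance (illustrative only, not from the original source): in
   32-bit AIX mode a 6-word BLKmode aggregate whose first word lands in
   r9 (ALIGN_WORDS == 6) gets (8 - 6) * 4 = 8 bytes in registers; the
   remaining 16 bytes live in the parameter save area.  */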
/* A C expression that indicates when an argument must be passed by
   reference.  If nonzero for an argument, a copy of that argument is
   made in memory and a pointer to the argument is passed instead of
   the argument itself.  The pointer is passed in whatever way is
   appropriate for passing a pointer to that type.

   Under V.4, aggregates and long double are passed by reference.

   As an extension to all 32-bit ABIs, AltiVec vectors are passed by
   reference unless the AltiVec vector extension ABI is in force.

   As an extension to all ABIs, variable sized types are passed by
   reference.  */

static bool
rs6000_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
			  enum machine_mode mode, const_tree type,
			  bool named ATTRIBUTE_UNUSED)
{
  if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
      return 1;
    }

  if (!type)
    return 0;

  if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
      return 1;
    }

  if (int_size_in_bytes (type) < 0)
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
      return 1;
    }

  /* Allow -maltivec -mabi=no-altivec without warning.  Altivec vector
     modes only exist for GCC vector types if -maltivec.  */
  if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
    {
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
      return 1;
    }

  /* Pass synthetic vectors in memory.  */
  if (TREE_CODE (type) == VECTOR_TYPE
      && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
    {
      static bool warned_for_pass_big_vectors = false;
      if (TARGET_DEBUG_ARG)
	fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
      if (!warned_for_pass_big_vectors)
	{
	  warning (0, "GCC vector passed by reference: "
		   "non-standard ABI extension with no compatibility guarantee");
	  warned_for_pass_big_vectors = true;
	}
      return 1;
    }

  return 0;
}
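
/* Concrete cases the predicate above accepts (editor's examples, not
   in the original source): under V.4, 'struct S { int x, y; }' and
   IEEE-quad 'long double' go by reference; on any ABI a C99
   variable-length array type does, since its size is not a constant;
   and a 32-byte generic vector such as
   '__attribute__((vector_size (32))) int' is passed by reference with
   the one-time warning above.  */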
static void
rs6000_move_block_from_reg (int regno, rtx x, int nregs)
{
  int i;
  enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;

  if (nregs == 0)
    return;

  for (i = 0; i < nregs; i++)
    {
      rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
      if (reload_completed)
	{
	  if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
	    tem = NULL_RTX;
	  else
	    tem = simplify_gen_subreg (reg_mode, x, BLKmode,
				       i * GET_MODE_SIZE (reg_mode));
	}
      else
	tem = replace_equiv_address (tem, XEXP (tem, 0));

      gcc_assert (tem);

      emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
    }
}
/* Perform any actions needed for a function that is receiving a
   variable number of arguments.

   CUM is as above.

   MODE and TYPE are the mode and type of the current parameter.

   PRETEND_SIZE is a variable that should be set to the amount of stack
   that must be pushed by the prolog to pretend that our caller pushed
   it.

   Normally, this macro will push all remaining incoming registers on the
   stack and set PRETEND_SIZE to the length of the registers pushed.  */

static void
setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			tree type, int *pretend_size ATTRIBUTE_UNUSED,
			int no_rtl)
{
  CUMULATIVE_ARGS next_cum;
  int reg_size = TARGET_32BIT ? 4 : 8;
  rtx save_area = NULL_RTX, mem;
  int first_reg_offset;
  alias_set_type set;

  /* Skip the last named argument.  */
  next_cum = *cum;
  function_arg_advance (&next_cum, mode, type, 1, 0);

  if (DEFAULT_ABI == ABI_V4)
    {
      first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;

      if (! no_rtl)
	{
	  int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
	  HOST_WIDE_INT offset = 0;

	  /* Try to optimize the size of the varargs save area.
	     The ABI requires that ap.reg_save_area is doubleword
	     aligned, but we don't need to allocate space for all
	     the bytes, only those to which we actually will save
	     anything.  */
	  if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
	    gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
	  if (TARGET_HARD_FLOAT && TARGET_FPRS
	      && next_cum.fregno <= FP_ARG_V4_MAX_REG
	      && cfun->va_list_fpr_size)
	    {
	      if (gpr_reg_num)
		fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
			   * UNITS_PER_FP_WORD;
	      if (cfun->va_list_fpr_size
		  < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
		fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
	      else
		fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
			    * UNITS_PER_FP_WORD;
	    }
	  if (gpr_reg_num)
	    {
	      offset = -((first_reg_offset * reg_size) & ~7);
	      if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
		{
		  gpr_reg_num = cfun->va_list_gpr_size;
		  if (reg_size == 4 && (first_reg_offset & 1))
		    gpr_reg_num++;
		}
	      gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
	    }
	  else if (fpr_size)
	    offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
		       * UNITS_PER_FP_WORD
		     - (int) (GP_ARG_NUM_REG * reg_size);

	  if (gpr_size + fpr_size)
	    {
	      rtx reg_save_area
		= assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
	      gcc_assert (GET_CODE (reg_save_area) == MEM);
	      reg_save_area = XEXP (reg_save_area, 0);
	      if (GET_CODE (reg_save_area) == PLUS)
		{
		  gcc_assert (XEXP (reg_save_area, 0)
			      == virtual_stack_vars_rtx);
		  gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
		  offset += INTVAL (XEXP (reg_save_area, 1));
		}
	      else
		gcc_assert (reg_save_area == virtual_stack_vars_rtx);
	    }

	  cfun->machine->varargs_save_offset = offset;
	  save_area = plus_constant (virtual_stack_vars_rtx, offset);
	}
    }
  else
    {
      first_reg_offset = next_cum.words;
      save_area = virtual_incoming_args_rtx;

      if (targetm.calls.must_pass_in_stack (mode, type))
	first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
    }

  set = get_varargs_alias_set ();
  if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
      && cfun->va_list_gpr_size)
    {
      int nregs = GP_ARG_NUM_REG - first_reg_offset;

      if (va_list_gpr_counter_field)
	{
	  /* V4 va_list_gpr_size counts number of registers needed.  */
	  if (nregs > cfun->va_list_gpr_size)
	    nregs = cfun->va_list_gpr_size;
	}
      else
	{
	  /* char * va_list instead counts number of bytes needed.  */
	  if (nregs > cfun->va_list_gpr_size / reg_size)
	    nregs = cfun->va_list_gpr_size / reg_size;
	}

      mem = gen_rtx_MEM (BLKmode,
			 plus_constant (save_area,
					first_reg_offset * reg_size));
      MEM_NOTRAP_P (mem) = 1;
      set_mem_alias_set (mem, set);
      set_mem_align (mem, BITS_PER_WORD);

      rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
				  nregs);
    }

  /* Save FP registers if needed.  */
  if (DEFAULT_ABI == ABI_V4
      && TARGET_HARD_FLOAT && TARGET_FPRS
      && ! no_rtl
      && next_cum.fregno <= FP_ARG_V4_MAX_REG
      && cfun->va_list_fpr_size)
    {
      int fregno = next_cum.fregno, nregs;
      rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
      rtx lab = gen_label_rtx ();
      int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
					       * UNITS_PER_FP_WORD);

      emit_jump_insn
	(gen_rtx_SET (VOIDmode,
		      pc_rtx,
		      gen_rtx_IF_THEN_ELSE (VOIDmode,
					    gen_rtx_NE (VOIDmode, cr1,
							const0_rtx),
					    gen_rtx_LABEL_REF (VOIDmode, lab),
					    pc_rtx)));

      for (nregs = 0;
	   fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
	   fregno++, off += UNITS_PER_FP_WORD, nregs++)
	{
	  mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			      ? DFmode : SFmode,
			     plus_constant (save_area, off));
	  MEM_NOTRAP_P (mem) = 1;
	  set_mem_alias_set (mem, set);
	  set_mem_align (mem, GET_MODE_ALIGNMENT (
			 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			  ? DFmode : SFmode));
	  emit_move_insn (mem, gen_rtx_REG (
			  (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			   ? DFmode : SFmode, fregno));
	}

      emit_label (lab);
    }
}
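
/* Worked example (an editor's sketch, not from the original source):
   for 'int f (int a, ...)' on V.4, FIRST_REG_OFFSET is 1 after the
   named A is skipped, so r4-r10 are dumped starting at offset 4 in the
   register save area.  The FPR loop is guarded by the conditional
   branch on CR1 emitted above: the V.4 ABI has callers record in that
   CR bit whether any FP values were passed in f1-f8 (see the
   CALL_V4_SET_FP_ARGS marker), so a float-free call skips the eight
   FPR stores entirely.  */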
/* Create the va_list data type.  */

static tree
rs6000_build_builtin_va_list (void)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;

  /* For AIX, prefer 'char *' because that's what the system
     header files like.  */
  if (DEFAULT_ABI != ABI_V4)
    return build_pointer_type (char_type_node);

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
			  get_identifier ("__va_list_tag"), record);

  f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
		      unsigned_char_type_node);
  f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
		      unsigned_char_type_node);
  /* Give the two bytes of padding a name, so that -Wpadded won't warn on
     every user file.  */
  f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("reserved"), short_unsigned_type_node);
  f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("overflow_arg_area"),
		      ptr_type_node);
  f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
		      get_identifier ("reg_save_area"),
		      ptr_type_node);

  va_list_gpr_counter_field = f_gpr;
  va_list_fpr_counter_field = f_fpr;

  DECL_FIELD_CONTEXT (f_gpr) = record;
  DECL_FIELD_CONTEXT (f_fpr) = record;
  DECL_FIELD_CONTEXT (f_res) = record;
  DECL_FIELD_CONTEXT (f_ovf) = record;
  DECL_FIELD_CONTEXT (f_sav) = record;

  TREE_CHAIN (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_gpr;
  TREE_CHAIN (f_gpr) = f_fpr;
  TREE_CHAIN (f_fpr) = f_res;
  TREE_CHAIN (f_res) = f_ovf;
  TREE_CHAIN (f_ovf) = f_sav;

  layout_type (record);

  /* The correct type is an array type of one element.  */
  return build_array_type (record, build_index_type (size_zero_node));
}
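
/* An illustrative C rendering of the record built above (an editor's
   sketch, not part of the original source):

     typedef struct __va_list_tag
     {
       unsigned char gpr;          -- GP argument registers consumed
       unsigned char fpr;          -- FP argument registers consumed
       unsigned short reserved;    -- named padding, see above
       void *overflow_arg_area;    -- arguments that went on the stack
       void *reg_save_area;        -- block where r3-r10/f1-f8 were dumped
     } __va_list_tag;
     typedef __va_list_tag __gnuc_va_list[1];

   The array-of-one-element trick gives va_list the usual
   pass-by-reference behavior when handed to other functions.  */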
/* Implement va_start.  */

static void
rs6000_va_start (tree valist, rtx nextarg)
{
  HOST_WIDE_INT words, n_gpr, n_fpr;
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, t;

  /* Only SVR4 needs something special.  */
  if (DEFAULT_ABI != ABI_V4)
    {
      std_expand_builtin_va_start (valist, nextarg);
      return;
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = TREE_CHAIN (f_gpr);
  f_res = TREE_CHAIN (f_fpr);
  f_ovf = TREE_CHAIN (f_res);
  f_sav = TREE_CHAIN (f_ovf);

  valist = build_va_arg_indirect_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
		f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
		f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
		f_sav, NULL_TREE);

  /* Count number of gp and fp argument registers used.  */
  words = crtl->args.info.words;
  n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
	       GP_ARG_NUM_REG);
  n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
	       FP_ARG_NUM_REG);

  if (TARGET_DEBUG_ARG)
    fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
	     HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
	     words, n_gpr, n_fpr);

  if (cfun->va_list_gpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
		  build_int_cst (NULL_TREE, n_gpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  if (cfun->va_list_fpr_size)
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
		  build_int_cst (NULL_TREE, n_fpr));
      TREE_SIDE_EFFECTS (t) = 1;
      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }

  /* Find the overflow area.  */
  t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
  if (words != 0)
    t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (ovf), t,
		size_int (words * UNITS_PER_WORD));
  t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  /* If there were no va_arg invocations, don't set up the register
     save area.  */
  if (!cfun->va_list_gpr_size
      && !cfun->va_list_fpr_size
      && n_gpr < GP_ARG_NUM_REG
      && n_fpr < FP_ARG_V4_MAX_REG)
    return;

  /* Find the register save area.  */
  t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
  if (cfun->machine->varargs_save_offset)
    t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (sav), t,
		size_int (cfun->machine->varargs_save_offset));
  t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
/* Implement va_arg.  */

static tree
rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
			gimple_seq *post_p)
{
  tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
  tree gpr, fpr, ovf, sav, reg, t, u;
  int size, rsize, n_reg, sav_ofs, sav_scale;
  tree lab_false, lab_over, addr;
  int align;
  tree ptrtype = build_pointer_type (type);
  int regalign = 0;
  gimple stmt;

  if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
    {
      t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
      return build_va_arg_indirect_ref (t);
    }

  if (DEFAULT_ABI != ABI_V4)
    {
      if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
	{
	  tree elem_type = TREE_TYPE (type);
	  enum machine_mode elem_mode = TYPE_MODE (elem_type);
	  int elem_size = GET_MODE_SIZE (elem_mode);

	  if (elem_size < UNITS_PER_WORD)
	    {
	      tree real_part, imag_part;
	      gimple_seq post = NULL;

	      real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
						  &post);
	      /* Copy the value into a temporary, lest the formal temporary
		 be reused out from under us.  */
	      real_part = get_initialized_tmp_var (real_part, pre_p, &post);
	      gimple_seq_add_seq (pre_p, post);

	      imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
						  post_p);

	      return build2 (COMPLEX_EXPR, type, real_part, imag_part);
	    }
	}

      return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
    }

  f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
  f_fpr = TREE_CHAIN (f_gpr);
  f_res = TREE_CHAIN (f_fpr);
  f_ovf = TREE_CHAIN (f_res);
  f_sav = TREE_CHAIN (f_ovf);

  valist = build_va_arg_indirect_ref (valist);
  gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
  fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
		f_fpr, NULL_TREE);
  ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
		f_ovf, NULL_TREE);
  sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
		f_sav, NULL_TREE);

  size = int_size_in_bytes (type);
  rsize = (size + 3) / 4;
  align = 1;

  if (TARGET_HARD_FLOAT && TARGET_FPRS
      && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
	  || (TARGET_DOUBLE_FLOAT
	      && (TYPE_MODE (type) == DFmode
		  || TYPE_MODE (type) == TFmode
		  || TYPE_MODE (type) == SDmode
		  || TYPE_MODE (type) == DDmode
		  || TYPE_MODE (type) == TDmode))))
    {
      /* FP args go in FP registers, if present.  */
      reg = fpr;
      n_reg = (size + 7) / 8;
      sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
      sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
      if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
	align = 8;
    }
  else
    {
      /* Otherwise into GP registers.  */
      reg = gpr;
      n_reg = rsize;
      sav_ofs = 0;
      sav_scale = 4;
      if (n_reg == 2)
	align = 8;
    }

  /* Pull the value out of the saved registers....  */

  lab_over = NULL;
  addr = create_tmp_var (ptr_type_node, "addr");
  DECL_POINTER_ALIAS_SET (addr) = get_varargs_alias_set ();

  /* AltiVec vectors never go in registers when -mabi=altivec.  */
  if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
    align = 16;
  else
    {
      lab_false = create_artificial_label (input_location);
      lab_over = create_artificial_label (input_location);

      /* Long long and SPE vectors are aligned in the registers.
	 As are any other 2 gpr item such as complex int due to a
	 historical mistake.  */
      u = reg;
      if (n_reg == 2 && reg == gpr)
	{
	  regalign = 1;
	  u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		      build_int_cst (TREE_TYPE (reg), n_reg - 1));
	  u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
		      unshare_expr (reg), u);
	}
      /* _Decimal128 is passed in even/odd fpr pairs; the stored
	 reg number is 0 for f1, so we want to make it odd.  */
      else if (reg == fpr && TYPE_MODE (type) == TDmode)
	{
	  t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		      build_int_cst (TREE_TYPE (reg), 1));
	  u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
	}

      t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
      t = build2 (GE_EXPR, boolean_type_node, u, t);
      u = build1 (GOTO_EXPR, void_type_node, lab_false);
      t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
      gimplify_and_add (t, pre_p);

      t = sav;
      if (sav_ofs)
	t = build2 (POINTER_PLUS_EXPR, ptr_type_node, sav, size_int (sav_ofs));

      u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
		  build_int_cst (TREE_TYPE (reg), n_reg));
      u = fold_convert (sizetype, u);
      u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
      t = build2 (POINTER_PLUS_EXPR, ptr_type_node, t, u);

      /* _Decimal32 varargs are located in the second word of the 64-bit
	 FP register for 32-bit binaries.  */
      if (!TARGET_POWERPC64
	  && TARGET_HARD_FLOAT && TARGET_FPRS
	  && TYPE_MODE (type) == SDmode)
	t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (size));

      gimplify_assign (addr, t, pre_p);

      gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));

      stmt = gimple_build_label (lab_false);
      gimple_seq_add_stmt (pre_p, stmt);

      if ((n_reg == 2 && !regalign) || n_reg > 2)
	{
	  /* Ensure that we don't find any more args in regs.
	     Alignment has been taken care of for the special cases.  */
	  gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
	}
    }

  /* ... otherwise out of the overflow area.  */

  /* Care for on-stack alignment if needed.  */
  t = ovf;
  if (align != 1)
    {
      t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (align - 1));
      t = fold_convert (sizetype, t);
      t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
		  size_int (-align));
      t = fold_convert (TREE_TYPE (ovf), t);
    }
  gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);

  gimplify_assign (unshare_expr (addr), t, pre_p);

  t = build2 (POINTER_PLUS_EXPR, TREE_TYPE (t), t, size_int (size));
  gimplify_assign (unshare_expr (ovf), t, pre_p);

  if (lab_over)
    {
      stmt = gimple_build_label (lab_over);
      gimple_seq_add_stmt (pre_p, stmt);
    }

  if (STRICT_ALIGNMENT
      && (TYPE_ALIGN (type)
	  > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
    {
      /* The value (of type complex double, for example) may not be
	 aligned in memory in the saved registers, so copy via a
	 temporary.  (This is the same code as used for SPARC.)  */
      tree tmp = create_tmp_var (type, "va_arg_tmp");
      tree dest_addr = build_fold_addr_expr (tmp);
      tree copy = build_call_expr (implicit_built_in_decls[BUILT_IN_MEMCPY],
				   3, dest_addr, addr, size_int (rsize * 4));

      gimplify_and_add (copy, pre_p);
      addr = dest_addr;
    }

  addr = fold_convert (ptrtype, addr);
  return build_va_arg_indirect_ref (addr);
}
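
/* A C-level sketch of the sequence gimplified above for a 'double'
   argument on V.4 with hard double-float (an editor's illustration;
   the names are informal and not part of the original source):

     if (ap->fpr >= 8)
       goto overflow;
     addr = ap->reg_save_area + 8 * 4    -- sav_ofs: skip 8 GPR words
	    + ap->fpr++ * 8;             -- sav_scale bytes per FPR
     goto done;
   overflow:
     ap->overflow_arg_area =
       (ap->overflow_arg_area + 7) & -8; -- align == 8 for DFmode
     addr = ap->overflow_arg_area;
     ap->overflow_arg_area += 8;
   done:
     result = *(double *) addr;  */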
static void
def_builtin (int mask, const char *name, tree type, int code)
{
  if ((mask & target_flags) || TARGET_PAIRED_FLOAT)
    {
      if (rs6000_builtin_decls[code])
	fatal_error ("internal error: builtin function %s already processed",
		     name);

      rs6000_builtin_decls[code] =
	add_builtin_function (name, type, code, BUILT_IN_MD,
			      NULL, NULL_TREE);
    }
}
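
/* Example use (illustrative only, not from the original source): the
   tables below feed each row to def_builtin during target
   initialization, roughly as

     def_builtin (MASK_ALTIVEC, "__builtin_altivec_vmaddfp",
		  v4sf_ftype_v4sf_v4sf_v4sf, ALTIVEC_BUILTIN_VMADDFP);

   so a builtin is registered only when its mask bit (here set by
   -maltivec) is in target_flags; CODE_FOR_nothing rows are the
   overloaded __builtin_vec_* names, resolved to a concrete insn
   pattern later.  */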
/* Simple ternary operations: VECd = foo (VECa, VECb, VECc).  */

static const struct builtin_description bdesc_3arg[] =
{
  { MASK_ALTIVEC, CODE_FOR_altivec_vmaddfp, "__builtin_altivec_vmaddfp", ALTIVEC_BUILTIN_VMADDFP },
  { MASK_ALTIVEC, CODE_FOR_altivec_vmhaddshs, "__builtin_altivec_vmhaddshs", ALTIVEC_BUILTIN_VMHADDSHS },
  { MASK_ALTIVEC, CODE_FOR_altivec_vmhraddshs, "__builtin_altivec_vmhraddshs", ALTIVEC_BUILTIN_VMHRADDSHS },
  { MASK_ALTIVEC, CODE_FOR_altivec_vmladduhm, "__builtin_altivec_vmladduhm", ALTIVEC_BUILTIN_VMLADDUHM },
  { MASK_ALTIVEC, CODE_FOR_altivec_vmsumubm, "__builtin_altivec_vmsumubm", ALTIVEC_BUILTIN_VMSUMUBM },
  { MASK_ALTIVEC, CODE_FOR_altivec_vmsummbm, "__builtin_altivec_vmsummbm", ALTIVEC_BUILTIN_VMSUMMBM },
  { MASK_ALTIVEC, CODE_FOR_altivec_vmsumuhm, "__builtin_altivec_vmsumuhm", ALTIVEC_BUILTIN_VMSUMUHM },
  { MASK_ALTIVEC, CODE_FOR_altivec_vmsumshm, "__builtin_altivec_vmsumshm", ALTIVEC_BUILTIN_VMSUMSHM },
  { MASK_ALTIVEC, CODE_FOR_altivec_vmsumuhs, "__builtin_altivec_vmsumuhs", ALTIVEC_BUILTIN_VMSUMUHS },
  { MASK_ALTIVEC, CODE_FOR_altivec_vmsumshs, "__builtin_altivec_vmsumshs", ALTIVEC_BUILTIN_VMSUMSHS },
  { MASK_ALTIVEC, CODE_FOR_altivec_vnmsubfp, "__builtin_altivec_vnmsubfp", ALTIVEC_BUILTIN_VNMSUBFP },
  { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v4sf, "__builtin_altivec_vperm_4sf", ALTIVEC_BUILTIN_VPERM_4SF },
  { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v4si, "__builtin_altivec_vperm_4si", ALTIVEC_BUILTIN_VPERM_4SI },
  { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v8hi, "__builtin_altivec_vperm_8hi", ALTIVEC_BUILTIN_VPERM_8HI },
  { MASK_ALTIVEC, CODE_FOR_altivec_vperm_v16qi, "__builtin_altivec_vperm_16qi", ALTIVEC_BUILTIN_VPERM_16QI },
  { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v4sf, "__builtin_altivec_vsel_4sf", ALTIVEC_BUILTIN_VSEL_4SF },
  { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v4si, "__builtin_altivec_vsel_4si", ALTIVEC_BUILTIN_VSEL_4SI },
  { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v8hi, "__builtin_altivec_vsel_8hi", ALTIVEC_BUILTIN_VSEL_8HI },
  { MASK_ALTIVEC, CODE_FOR_altivec_vsel_v16qi, "__builtin_altivec_vsel_16qi", ALTIVEC_BUILTIN_VSEL_16QI },
  { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v16qi, "__builtin_altivec_vsldoi_16qi", ALTIVEC_BUILTIN_VSLDOI_16QI },
  { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v8hi, "__builtin_altivec_vsldoi_8hi", ALTIVEC_BUILTIN_VSLDOI_8HI },
  { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v4si, "__builtin_altivec_vsldoi_4si", ALTIVEC_BUILTIN_VSLDOI_4SI },
  { MASK_ALTIVEC, CODE_FOR_altivec_vsldoi_v4sf, "__builtin_altivec_vsldoi_4sf", ALTIVEC_BUILTIN_VSLDOI_4SF },

  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_madd", ALTIVEC_BUILTIN_VEC_MADD },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_madds", ALTIVEC_BUILTIN_VEC_MADDS },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mladd", ALTIVEC_BUILTIN_VEC_MLADD },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mradds", ALTIVEC_BUILTIN_VEC_MRADDS },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_msum", ALTIVEC_BUILTIN_VEC_MSUM },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumshm", ALTIVEC_BUILTIN_VEC_VMSUMSHM },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumuhm", ALTIVEC_BUILTIN_VEC_VMSUMUHM },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsummbm", ALTIVEC_BUILTIN_VEC_VMSUMMBM },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumubm", ALTIVEC_BUILTIN_VEC_VMSUMUBM },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_msums", ALTIVEC_BUILTIN_VEC_MSUMS },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumshs", ALTIVEC_BUILTIN_VEC_VMSUMSHS },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vmsumuhs", ALTIVEC_BUILTIN_VEC_VMSUMUHS },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_nmsub", ALTIVEC_BUILTIN_VEC_NMSUB },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_perm", ALTIVEC_BUILTIN_VEC_PERM },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_sel", ALTIVEC_BUILTIN_VEC_SEL },

  { 0, CODE_FOR_paired_msub, "__builtin_paired_msub", PAIRED_BUILTIN_MSUB },
  { 0, CODE_FOR_paired_madd, "__builtin_paired_madd", PAIRED_BUILTIN_MADD },
  { 0, CODE_FOR_paired_madds0, "__builtin_paired_madds0", PAIRED_BUILTIN_MADDS0 },
  { 0, CODE_FOR_paired_madds1, "__builtin_paired_madds1", PAIRED_BUILTIN_MADDS1 },
  { 0, CODE_FOR_paired_nmsub, "__builtin_paired_nmsub", PAIRED_BUILTIN_NMSUB },
  { 0, CODE_FOR_paired_nmadd, "__builtin_paired_nmadd", PAIRED_BUILTIN_NMADD },
  { 0, CODE_FOR_paired_sum0, "__builtin_paired_sum0", PAIRED_BUILTIN_SUM0 },
  { 0, CODE_FOR_paired_sum1, "__builtin_paired_sum1", PAIRED_BUILTIN_SUM1 },
  { 0, CODE_FOR_selv2sf4, "__builtin_paired_selv2sf4", PAIRED_BUILTIN_SELV2SF4 },
};

/* DST operations: void foo (void *, const int, const char).  */

static const struct builtin_description bdesc_dst[] =
{
  { MASK_ALTIVEC, CODE_FOR_altivec_dst, "__builtin_altivec_dst", ALTIVEC_BUILTIN_DST },
  { MASK_ALTIVEC, CODE_FOR_altivec_dstt, "__builtin_altivec_dstt", ALTIVEC_BUILTIN_DSTT },
  { MASK_ALTIVEC, CODE_FOR_altivec_dstst, "__builtin_altivec_dstst", ALTIVEC_BUILTIN_DSTST },
  { MASK_ALTIVEC, CODE_FOR_altivec_dststt, "__builtin_altivec_dststt", ALTIVEC_BUILTIN_DSTSTT },

  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dst", ALTIVEC_BUILTIN_VEC_DST },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dstt", ALTIVEC_BUILTIN_VEC_DSTT },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dstst", ALTIVEC_BUILTIN_VEC_DSTST },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_dststt", ALTIVEC_BUILTIN_VEC_DSTSTT }
};
7260 static struct builtin_description bdesc_2arg
[] =
7262 { MASK_ALTIVEC
, CODE_FOR_addv16qi3
, "__builtin_altivec_vaddubm", ALTIVEC_BUILTIN_VADDUBM
},
7263 { MASK_ALTIVEC
, CODE_FOR_addv8hi3
, "__builtin_altivec_vadduhm", ALTIVEC_BUILTIN_VADDUHM
},
7264 { MASK_ALTIVEC
, CODE_FOR_addv4si3
, "__builtin_altivec_vadduwm", ALTIVEC_BUILTIN_VADDUWM
},
7265 { MASK_ALTIVEC
, CODE_FOR_addv4sf3
, "__builtin_altivec_vaddfp", ALTIVEC_BUILTIN_VADDFP
},
7266 { MASK_ALTIVEC
, CODE_FOR_altivec_vaddcuw
, "__builtin_altivec_vaddcuw", ALTIVEC_BUILTIN_VADDCUW
},
7267 { MASK_ALTIVEC
, CODE_FOR_altivec_vaddubs
, "__builtin_altivec_vaddubs", ALTIVEC_BUILTIN_VADDUBS
},
7268 { MASK_ALTIVEC
, CODE_FOR_altivec_vaddsbs
, "__builtin_altivec_vaddsbs", ALTIVEC_BUILTIN_VADDSBS
},
7269 { MASK_ALTIVEC
, CODE_FOR_altivec_vadduhs
, "__builtin_altivec_vadduhs", ALTIVEC_BUILTIN_VADDUHS
},
7270 { MASK_ALTIVEC
, CODE_FOR_altivec_vaddshs
, "__builtin_altivec_vaddshs", ALTIVEC_BUILTIN_VADDSHS
},
7271 { MASK_ALTIVEC
, CODE_FOR_altivec_vadduws
, "__builtin_altivec_vadduws", ALTIVEC_BUILTIN_VADDUWS
},
7272 { MASK_ALTIVEC
, CODE_FOR_altivec_vaddsws
, "__builtin_altivec_vaddsws", ALTIVEC_BUILTIN_VADDSWS
},
7273 { MASK_ALTIVEC
, CODE_FOR_andv4si3
, "__builtin_altivec_vand", ALTIVEC_BUILTIN_VAND
},
7274 { MASK_ALTIVEC
, CODE_FOR_andcv4si3
, "__builtin_altivec_vandc", ALTIVEC_BUILTIN_VANDC
},
7275 { MASK_ALTIVEC
, CODE_FOR_altivec_vavgub
, "__builtin_altivec_vavgub", ALTIVEC_BUILTIN_VAVGUB
},
7276 { MASK_ALTIVEC
, CODE_FOR_altivec_vavgsb
, "__builtin_altivec_vavgsb", ALTIVEC_BUILTIN_VAVGSB
},
7277 { MASK_ALTIVEC
, CODE_FOR_altivec_vavguh
, "__builtin_altivec_vavguh", ALTIVEC_BUILTIN_VAVGUH
},
7278 { MASK_ALTIVEC
, CODE_FOR_altivec_vavgsh
, "__builtin_altivec_vavgsh", ALTIVEC_BUILTIN_VAVGSH
},
7279 { MASK_ALTIVEC
, CODE_FOR_altivec_vavguw
, "__builtin_altivec_vavguw", ALTIVEC_BUILTIN_VAVGUW
},
7280 { MASK_ALTIVEC
, CODE_FOR_altivec_vavgsw
, "__builtin_altivec_vavgsw", ALTIVEC_BUILTIN_VAVGSW
},
7281 { MASK_ALTIVEC
, CODE_FOR_altivec_vcfux
, "__builtin_altivec_vcfux", ALTIVEC_BUILTIN_VCFUX
},
7282 { MASK_ALTIVEC
, CODE_FOR_altivec_vcfsx
, "__builtin_altivec_vcfsx", ALTIVEC_BUILTIN_VCFSX
},
7283 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpbfp
, "__builtin_altivec_vcmpbfp", ALTIVEC_BUILTIN_VCMPBFP
},
7284 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpequb
, "__builtin_altivec_vcmpequb", ALTIVEC_BUILTIN_VCMPEQUB
},
7285 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpequh
, "__builtin_altivec_vcmpequh", ALTIVEC_BUILTIN_VCMPEQUH
},
7286 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpequw
, "__builtin_altivec_vcmpequw", ALTIVEC_BUILTIN_VCMPEQUW
},
7287 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpeqfp
, "__builtin_altivec_vcmpeqfp", ALTIVEC_BUILTIN_VCMPEQFP
},
7288 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpgefp
, "__builtin_altivec_vcmpgefp", ALTIVEC_BUILTIN_VCMPGEFP
},
7289 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpgtub
, "__builtin_altivec_vcmpgtub", ALTIVEC_BUILTIN_VCMPGTUB
},
7290 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpgtsb
, "__builtin_altivec_vcmpgtsb", ALTIVEC_BUILTIN_VCMPGTSB
},
7291 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpgtuh
, "__builtin_altivec_vcmpgtuh", ALTIVEC_BUILTIN_VCMPGTUH
},
7292 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpgtsh
, "__builtin_altivec_vcmpgtsh", ALTIVEC_BUILTIN_VCMPGTSH
},
7293 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpgtuw
, "__builtin_altivec_vcmpgtuw", ALTIVEC_BUILTIN_VCMPGTUW
},
7294 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpgtsw
, "__builtin_altivec_vcmpgtsw", ALTIVEC_BUILTIN_VCMPGTSW
},
7295 { MASK_ALTIVEC
, CODE_FOR_altivec_vcmpgtfp
, "__builtin_altivec_vcmpgtfp", ALTIVEC_BUILTIN_VCMPGTFP
},
7296 { MASK_ALTIVEC
, CODE_FOR_altivec_vctsxs
, "__builtin_altivec_vctsxs", ALTIVEC_BUILTIN_VCTSXS
},
7297 { MASK_ALTIVEC
, CODE_FOR_altivec_vctuxs
, "__builtin_altivec_vctuxs", ALTIVEC_BUILTIN_VCTUXS
},
7298 { MASK_ALTIVEC
, CODE_FOR_umaxv16qi3
, "__builtin_altivec_vmaxub", ALTIVEC_BUILTIN_VMAXUB
},
7299 { MASK_ALTIVEC
, CODE_FOR_smaxv16qi3
, "__builtin_altivec_vmaxsb", ALTIVEC_BUILTIN_VMAXSB
},
7300 { MASK_ALTIVEC
, CODE_FOR_umaxv8hi3
, "__builtin_altivec_vmaxuh", ALTIVEC_BUILTIN_VMAXUH
},
7301 { MASK_ALTIVEC
, CODE_FOR_smaxv8hi3
, "__builtin_altivec_vmaxsh", ALTIVEC_BUILTIN_VMAXSH
},
7302 { MASK_ALTIVEC
, CODE_FOR_umaxv4si3
, "__builtin_altivec_vmaxuw", ALTIVEC_BUILTIN_VMAXUW
},
7303 { MASK_ALTIVEC
, CODE_FOR_smaxv4si3
, "__builtin_altivec_vmaxsw", ALTIVEC_BUILTIN_VMAXSW
},
7304 { MASK_ALTIVEC
, CODE_FOR_smaxv4sf3
, "__builtin_altivec_vmaxfp", ALTIVEC_BUILTIN_VMAXFP
},
7305 { MASK_ALTIVEC
, CODE_FOR_altivec_vmrghb
, "__builtin_altivec_vmrghb", ALTIVEC_BUILTIN_VMRGHB
},
7306 { MASK_ALTIVEC
, CODE_FOR_altivec_vmrghh
, "__builtin_altivec_vmrghh", ALTIVEC_BUILTIN_VMRGHH
},
7307 { MASK_ALTIVEC
, CODE_FOR_altivec_vmrghw
, "__builtin_altivec_vmrghw", ALTIVEC_BUILTIN_VMRGHW
},
7308 { MASK_ALTIVEC
, CODE_FOR_altivec_vmrglb
, "__builtin_altivec_vmrglb", ALTIVEC_BUILTIN_VMRGLB
},
7309 { MASK_ALTIVEC
, CODE_FOR_altivec_vmrglh
, "__builtin_altivec_vmrglh", ALTIVEC_BUILTIN_VMRGLH
},
7310 { MASK_ALTIVEC
, CODE_FOR_altivec_vmrglw
, "__builtin_altivec_vmrglw", ALTIVEC_BUILTIN_VMRGLW
},
7311 { MASK_ALTIVEC
, CODE_FOR_uminv16qi3
, "__builtin_altivec_vminub", ALTIVEC_BUILTIN_VMINUB
},
7312 { MASK_ALTIVEC
, CODE_FOR_sminv16qi3
, "__builtin_altivec_vminsb", ALTIVEC_BUILTIN_VMINSB
},
7313 { MASK_ALTIVEC
, CODE_FOR_uminv8hi3
, "__builtin_altivec_vminuh", ALTIVEC_BUILTIN_VMINUH
},
7314 { MASK_ALTIVEC
, CODE_FOR_sminv8hi3
, "__builtin_altivec_vminsh", ALTIVEC_BUILTIN_VMINSH
},
7315 { MASK_ALTIVEC
, CODE_FOR_uminv4si3
, "__builtin_altivec_vminuw", ALTIVEC_BUILTIN_VMINUW
},
7316 { MASK_ALTIVEC
, CODE_FOR_sminv4si3
, "__builtin_altivec_vminsw", ALTIVEC_BUILTIN_VMINSW
},
7317 { MASK_ALTIVEC
, CODE_FOR_sminv4sf3
, "__builtin_altivec_vminfp", ALTIVEC_BUILTIN_VMINFP
},
7318 { MASK_ALTIVEC
, CODE_FOR_altivec_vmuleub
, "__builtin_altivec_vmuleub", ALTIVEC_BUILTIN_VMULEUB
},
7319 { MASK_ALTIVEC
, CODE_FOR_altivec_vmulesb
, "__builtin_altivec_vmulesb", ALTIVEC_BUILTIN_VMULESB
},
7320 { MASK_ALTIVEC
, CODE_FOR_altivec_vmuleuh
, "__builtin_altivec_vmuleuh", ALTIVEC_BUILTIN_VMULEUH
},
7321 { MASK_ALTIVEC
, CODE_FOR_altivec_vmulesh
, "__builtin_altivec_vmulesh", ALTIVEC_BUILTIN_VMULESH
},
7322 { MASK_ALTIVEC
, CODE_FOR_altivec_vmuloub
, "__builtin_altivec_vmuloub", ALTIVEC_BUILTIN_VMULOUB
},
7323 { MASK_ALTIVEC
, CODE_FOR_altivec_vmulosb
, "__builtin_altivec_vmulosb", ALTIVEC_BUILTIN_VMULOSB
},
7324 { MASK_ALTIVEC
, CODE_FOR_altivec_vmulouh
, "__builtin_altivec_vmulouh", ALTIVEC_BUILTIN_VMULOUH
},
7325 { MASK_ALTIVEC
, CODE_FOR_altivec_vmulosh
, "__builtin_altivec_vmulosh", ALTIVEC_BUILTIN_VMULOSH
},
7326 { MASK_ALTIVEC
, CODE_FOR_altivec_norv4si3
, "__builtin_altivec_vnor", ALTIVEC_BUILTIN_VNOR
},
7327 { MASK_ALTIVEC
, CODE_FOR_iorv4si3
, "__builtin_altivec_vor", ALTIVEC_BUILTIN_VOR
},
7328 { MASK_ALTIVEC
, CODE_FOR_altivec_vpkuhum
, "__builtin_altivec_vpkuhum", ALTIVEC_BUILTIN_VPKUHUM
},
7329 { MASK_ALTIVEC
, CODE_FOR_altivec_vpkuwum
, "__builtin_altivec_vpkuwum", ALTIVEC_BUILTIN_VPKUWUM
},
7330 { MASK_ALTIVEC
, CODE_FOR_altivec_vpkpx
, "__builtin_altivec_vpkpx", ALTIVEC_BUILTIN_VPKPX
},
7331 { MASK_ALTIVEC
, CODE_FOR_altivec_vpkshss
, "__builtin_altivec_vpkshss", ALTIVEC_BUILTIN_VPKSHSS
},
7332 { MASK_ALTIVEC
, CODE_FOR_altivec_vpkswss
, "__builtin_altivec_vpkswss", ALTIVEC_BUILTIN_VPKSWSS
},
7333 { MASK_ALTIVEC
, CODE_FOR_altivec_vpkuhus
, "__builtin_altivec_vpkuhus", ALTIVEC_BUILTIN_VPKUHUS
},
7334 { MASK_ALTIVEC
, CODE_FOR_altivec_vpkshus
, "__builtin_altivec_vpkshus", ALTIVEC_BUILTIN_VPKSHUS
},
7335 { MASK_ALTIVEC
, CODE_FOR_altivec_vpkuwus
, "__builtin_altivec_vpkuwus", ALTIVEC_BUILTIN_VPKUWUS
},
7336 { MASK_ALTIVEC
, CODE_FOR_altivec_vpkswus
, "__builtin_altivec_vpkswus", ALTIVEC_BUILTIN_VPKSWUS
},
7337 { MASK_ALTIVEC
, CODE_FOR_altivec_vrlb
, "__builtin_altivec_vrlb", ALTIVEC_BUILTIN_VRLB
},
7338 { MASK_ALTIVEC
, CODE_FOR_altivec_vrlh
, "__builtin_altivec_vrlh", ALTIVEC_BUILTIN_VRLH
},
7339 { MASK_ALTIVEC
, CODE_FOR_altivec_vrlw
, "__builtin_altivec_vrlw", ALTIVEC_BUILTIN_VRLW
},
7340 { MASK_ALTIVEC
, CODE_FOR_vashlv16qi3
, "__builtin_altivec_vslb", ALTIVEC_BUILTIN_VSLB
},
7341 { MASK_ALTIVEC
, CODE_FOR_vashlv8hi3
, "__builtin_altivec_vslh", ALTIVEC_BUILTIN_VSLH
},
7342 { MASK_ALTIVEC
, CODE_FOR_vashlv4si3
, "__builtin_altivec_vslw", ALTIVEC_BUILTIN_VSLW
},
7343 { MASK_ALTIVEC
, CODE_FOR_altivec_vsl
, "__builtin_altivec_vsl", ALTIVEC_BUILTIN_VSL
},
7344 { MASK_ALTIVEC
, CODE_FOR_altivec_vslo
, "__builtin_altivec_vslo", ALTIVEC_BUILTIN_VSLO
},
7345 { MASK_ALTIVEC
, CODE_FOR_altivec_vspltb
, "__builtin_altivec_vspltb", ALTIVEC_BUILTIN_VSPLTB
},
7346 { MASK_ALTIVEC
, CODE_FOR_altivec_vsplth
, "__builtin_altivec_vsplth", ALTIVEC_BUILTIN_VSPLTH
},
7347 { MASK_ALTIVEC
, CODE_FOR_altivec_vspltw
, "__builtin_altivec_vspltw", ALTIVEC_BUILTIN_VSPLTW
},
7348 { MASK_ALTIVEC
, CODE_FOR_vlshrv16qi3
, "__builtin_altivec_vsrb", ALTIVEC_BUILTIN_VSRB
},
7349 { MASK_ALTIVEC
, CODE_FOR_vlshrv8hi3
, "__builtin_altivec_vsrh", ALTIVEC_BUILTIN_VSRH
},
7350 { MASK_ALTIVEC
, CODE_FOR_vlshrv4si3
, "__builtin_altivec_vsrw", ALTIVEC_BUILTIN_VSRW
},
7351 { MASK_ALTIVEC
, CODE_FOR_vashrv16qi3
, "__builtin_altivec_vsrab", ALTIVEC_BUILTIN_VSRAB
},
7352 { MASK_ALTIVEC
, CODE_FOR_vashrv8hi3
, "__builtin_altivec_vsrah", ALTIVEC_BUILTIN_VSRAH
},
7353 { MASK_ALTIVEC
, CODE_FOR_vashrv4si3
, "__builtin_altivec_vsraw", ALTIVEC_BUILTIN_VSRAW
},
7354 { MASK_ALTIVEC
, CODE_FOR_altivec_vsr
, "__builtin_altivec_vsr", ALTIVEC_BUILTIN_VSR
},
7355 { MASK_ALTIVEC
, CODE_FOR_altivec_vsro
, "__builtin_altivec_vsro", ALTIVEC_BUILTIN_VSRO
},
7356 { MASK_ALTIVEC
, CODE_FOR_subv16qi3
, "__builtin_altivec_vsububm", ALTIVEC_BUILTIN_VSUBUBM
},
7357 { MASK_ALTIVEC
, CODE_FOR_subv8hi3
, "__builtin_altivec_vsubuhm", ALTIVEC_BUILTIN_VSUBUHM
},
7358 { MASK_ALTIVEC
, CODE_FOR_subv4si3
, "__builtin_altivec_vsubuwm", ALTIVEC_BUILTIN_VSUBUWM
},
7359 { MASK_ALTIVEC
, CODE_FOR_subv4sf3
, "__builtin_altivec_vsubfp", ALTIVEC_BUILTIN_VSUBFP
},
7360 { MASK_ALTIVEC
, CODE_FOR_altivec_vsubcuw
, "__builtin_altivec_vsubcuw", ALTIVEC_BUILTIN_VSUBCUW
},
7361 { MASK_ALTIVEC
, CODE_FOR_altivec_vsububs
, "__builtin_altivec_vsububs", ALTIVEC_BUILTIN_VSUBUBS
},
7362 { MASK_ALTIVEC
, CODE_FOR_altivec_vsubsbs
, "__builtin_altivec_vsubsbs", ALTIVEC_BUILTIN_VSUBSBS
},
7363 { MASK_ALTIVEC
, CODE_FOR_altivec_vsubuhs
, "__builtin_altivec_vsubuhs", ALTIVEC_BUILTIN_VSUBUHS
},
7364 { MASK_ALTIVEC
, CODE_FOR_altivec_vsubshs
, "__builtin_altivec_vsubshs", ALTIVEC_BUILTIN_VSUBSHS
},
7365 { MASK_ALTIVEC
, CODE_FOR_altivec_vsubuws
, "__builtin_altivec_vsubuws", ALTIVEC_BUILTIN_VSUBUWS
},
7366 { MASK_ALTIVEC
, CODE_FOR_altivec_vsubsws
, "__builtin_altivec_vsubsws", ALTIVEC_BUILTIN_VSUBSWS
},
7367 { MASK_ALTIVEC
, CODE_FOR_altivec_vsum4ubs
, "__builtin_altivec_vsum4ubs", ALTIVEC_BUILTIN_VSUM4UBS
},
7368 { MASK_ALTIVEC
, CODE_FOR_altivec_vsum4sbs
, "__builtin_altivec_vsum4sbs", ALTIVEC_BUILTIN_VSUM4SBS
},
7369 { MASK_ALTIVEC
, CODE_FOR_altivec_vsum4shs
, "__builtin_altivec_vsum4shs", ALTIVEC_BUILTIN_VSUM4SHS
},
7370 { MASK_ALTIVEC
, CODE_FOR_altivec_vsum2sws
, "__builtin_altivec_vsum2sws", ALTIVEC_BUILTIN_VSUM2SWS
},
7371 { MASK_ALTIVEC
, CODE_FOR_altivec_vsumsws
, "__builtin_altivec_vsumsws", ALTIVEC_BUILTIN_VSUMSWS
},
7372 { MASK_ALTIVEC
, CODE_FOR_xorv4si3
, "__builtin_altivec_vxor", ALTIVEC_BUILTIN_VXOR
},
7374 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_add", ALTIVEC_BUILTIN_VEC_ADD
},
7375 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vaddfp", ALTIVEC_BUILTIN_VEC_VADDFP
},
7376 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vadduwm", ALTIVEC_BUILTIN_VEC_VADDUWM
},
7377 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vadduhm", ALTIVEC_BUILTIN_VEC_VADDUHM
},
7378 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vaddubm", ALTIVEC_BUILTIN_VEC_VADDUBM
},
7379 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_addc", ALTIVEC_BUILTIN_VEC_ADDC
},
7380 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_adds", ALTIVEC_BUILTIN_VEC_ADDS
},
7381 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vaddsws", ALTIVEC_BUILTIN_VEC_VADDSWS
},
7382 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vadduws", ALTIVEC_BUILTIN_VEC_VADDUWS
},
7383 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vaddshs", ALTIVEC_BUILTIN_VEC_VADDSHS
},
7384 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vadduhs", ALTIVEC_BUILTIN_VEC_VADDUHS
},
7385 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vaddsbs", ALTIVEC_BUILTIN_VEC_VADDSBS
},
7386 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vaddubs", ALTIVEC_BUILTIN_VEC_VADDUBS
},
7387 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_and", ALTIVEC_BUILTIN_VEC_AND
},
7388 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_andc", ALTIVEC_BUILTIN_VEC_ANDC
},
7389 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_avg", ALTIVEC_BUILTIN_VEC_AVG
},
7390 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vavgsw", ALTIVEC_BUILTIN_VEC_VAVGSW
},
7391 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vavguw", ALTIVEC_BUILTIN_VEC_VAVGUW
},
7392 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vavgsh", ALTIVEC_BUILTIN_VEC_VAVGSH
},
7393 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vavguh", ALTIVEC_BUILTIN_VEC_VAVGUH
},
7394 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vavgsb", ALTIVEC_BUILTIN_VEC_VAVGSB
},
7395 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vavgub", ALTIVEC_BUILTIN_VEC_VAVGUB
},
7396 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_cmpb", ALTIVEC_BUILTIN_VEC_CMPB
},
7397 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_cmpeq", ALTIVEC_BUILTIN_VEC_CMPEQ
},
7398 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpeqfp", ALTIVEC_BUILTIN_VEC_VCMPEQFP
},
7399 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpequw", ALTIVEC_BUILTIN_VEC_VCMPEQUW
},
7400 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpequh", ALTIVEC_BUILTIN_VEC_VCMPEQUH
},
7401 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpequb", ALTIVEC_BUILTIN_VEC_VCMPEQUB
},
7402 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_cmpge", ALTIVEC_BUILTIN_VEC_CMPGE
},
7403 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_cmpgt", ALTIVEC_BUILTIN_VEC_CMPGT
},
7404 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpgtfp", ALTIVEC_BUILTIN_VEC_VCMPGTFP
},
7405 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpgtsw", ALTIVEC_BUILTIN_VEC_VCMPGTSW
},
7406 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpgtuw", ALTIVEC_BUILTIN_VEC_VCMPGTUW
},
7407 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpgtsh", ALTIVEC_BUILTIN_VEC_VCMPGTSH
},
7408 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpgtuh", ALTIVEC_BUILTIN_VEC_VCMPGTUH
},
7409 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpgtsb", ALTIVEC_BUILTIN_VEC_VCMPGTSB
},
7410 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vcmpgtub", ALTIVEC_BUILTIN_VEC_VCMPGTUB
},
7411 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_cmple", ALTIVEC_BUILTIN_VEC_CMPLE
},
7412 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_cmplt", ALTIVEC_BUILTIN_VEC_CMPLT
},
7413 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_max", ALTIVEC_BUILTIN_VEC_MAX
},
7414 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmaxfp", ALTIVEC_BUILTIN_VEC_VMAXFP
},
7415 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmaxsw", ALTIVEC_BUILTIN_VEC_VMAXSW
},
7416 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmaxuw", ALTIVEC_BUILTIN_VEC_VMAXUW
},
7417 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmaxsh", ALTIVEC_BUILTIN_VEC_VMAXSH
},
7418 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmaxuh", ALTIVEC_BUILTIN_VEC_VMAXUH
},
7419 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmaxsb", ALTIVEC_BUILTIN_VEC_VMAXSB
},
7420 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmaxub", ALTIVEC_BUILTIN_VEC_VMAXUB
},
7421 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_mergeh", ALTIVEC_BUILTIN_VEC_MERGEH
},
7422 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmrghw", ALTIVEC_BUILTIN_VEC_VMRGHW
},
7423 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmrghh", ALTIVEC_BUILTIN_VEC_VMRGHH
},
7424 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmrghb", ALTIVEC_BUILTIN_VEC_VMRGHB
},
7425 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_mergel", ALTIVEC_BUILTIN_VEC_MERGEL
},
7426 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmrglw", ALTIVEC_BUILTIN_VEC_VMRGLW
},
7427 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmrglh", ALTIVEC_BUILTIN_VEC_VMRGLH
},
7428 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmrglb", ALTIVEC_BUILTIN_VEC_VMRGLB
},
7429 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_min", ALTIVEC_BUILTIN_VEC_MIN
},
7430 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vminfp", ALTIVEC_BUILTIN_VEC_VMINFP
},
7431 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vminsw", ALTIVEC_BUILTIN_VEC_VMINSW
},
7432 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vminuw", ALTIVEC_BUILTIN_VEC_VMINUW
},
7433 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vminsh", ALTIVEC_BUILTIN_VEC_VMINSH
},
7434 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vminuh", ALTIVEC_BUILTIN_VEC_VMINUH
},
7435 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vminsb", ALTIVEC_BUILTIN_VEC_VMINSB
},
7436 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vminub", ALTIVEC_BUILTIN_VEC_VMINUB
},
7437 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_mule", ALTIVEC_BUILTIN_VEC_MULE
},
7438 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmuleub", ALTIVEC_BUILTIN_VEC_VMULEUB
},
7439 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmulesb", ALTIVEC_BUILTIN_VEC_VMULESB
},
7440 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmuleuh", ALTIVEC_BUILTIN_VEC_VMULEUH
},
7441 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmulesh", ALTIVEC_BUILTIN_VEC_VMULESH
},
7442 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_mulo", ALTIVEC_BUILTIN_VEC_MULO
},
7443 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmulosh", ALTIVEC_BUILTIN_VEC_VMULOSH
},
7444 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmulouh", ALTIVEC_BUILTIN_VEC_VMULOUH
},
7445 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmulosb", ALTIVEC_BUILTIN_VEC_VMULOSB
},
7446 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vmuloub", ALTIVEC_BUILTIN_VEC_VMULOUB
},
7447 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_nor", ALTIVEC_BUILTIN_VEC_NOR
},
7448 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_or", ALTIVEC_BUILTIN_VEC_OR
},
7449 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_pack", ALTIVEC_BUILTIN_VEC_PACK
},
7450 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vpkuwum", ALTIVEC_BUILTIN_VEC_VPKUWUM
},
7451 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vpkuhum", ALTIVEC_BUILTIN_VEC_VPKUHUM
},
7452 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_packpx", ALTIVEC_BUILTIN_VEC_PACKPX
},
7453 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_packs", ALTIVEC_BUILTIN_VEC_PACKS
},
7454 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vpkswss", ALTIVEC_BUILTIN_VEC_VPKSWSS
},
7455 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vpkuwus", ALTIVEC_BUILTIN_VEC_VPKUWUS
},
7456 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vpkshss", ALTIVEC_BUILTIN_VEC_VPKSHSS
},
7457 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vpkuhus", ALTIVEC_BUILTIN_VEC_VPKUHUS
},
7458 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_packsu", ALTIVEC_BUILTIN_VEC_PACKSU
},
7459 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vpkswus", ALTIVEC_BUILTIN_VEC_VPKSWUS
},
7460 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vpkshus", ALTIVEC_BUILTIN_VEC_VPKSHUS
},
7461 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_rl", ALTIVEC_BUILTIN_VEC_RL
},
7462 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vrlw", ALTIVEC_BUILTIN_VEC_VRLW
},
7463 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vrlh", ALTIVEC_BUILTIN_VEC_VRLH
},
7464 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vrlb", ALTIVEC_BUILTIN_VEC_VRLB
},
7465 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_sl", ALTIVEC_BUILTIN_VEC_SL
},
7466 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vslw", ALTIVEC_BUILTIN_VEC_VSLW
},
7467 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vslh", ALTIVEC_BUILTIN_VEC_VSLH
},
7468 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vslb", ALTIVEC_BUILTIN_VEC_VSLB
},
7469 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_sll", ALTIVEC_BUILTIN_VEC_SLL
},
7470 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_slo", ALTIVEC_BUILTIN_VEC_SLO
},
7471 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_sr", ALTIVEC_BUILTIN_VEC_SR
},
7472 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsrw", ALTIVEC_BUILTIN_VEC_VSRW
},
7473 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsrh", ALTIVEC_BUILTIN_VEC_VSRH
},
7474 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsrb", ALTIVEC_BUILTIN_VEC_VSRB
},
7475 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_sra", ALTIVEC_BUILTIN_VEC_SRA
},
7476 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsraw", ALTIVEC_BUILTIN_VEC_VSRAW
},
7477 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsrah", ALTIVEC_BUILTIN_VEC_VSRAH
},
7478 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsrab", ALTIVEC_BUILTIN_VEC_VSRAB
},
7479 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_srl", ALTIVEC_BUILTIN_VEC_SRL
},
7480 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_sro", ALTIVEC_BUILTIN_VEC_SRO
},
7481 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_sub", ALTIVEC_BUILTIN_VEC_SUB
},
7482 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsubfp", ALTIVEC_BUILTIN_VEC_VSUBFP
},
7483 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsubuwm", ALTIVEC_BUILTIN_VEC_VSUBUWM
},
7484 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsubuhm", ALTIVEC_BUILTIN_VEC_VSUBUHM
},
7485 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsububm", ALTIVEC_BUILTIN_VEC_VSUBUBM
},
7486 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_subc", ALTIVEC_BUILTIN_VEC_SUBC
},
7487 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_subs", ALTIVEC_BUILTIN_VEC_SUBS
},
7488 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsubsws", ALTIVEC_BUILTIN_VEC_VSUBSWS
},
7489 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsubuws", ALTIVEC_BUILTIN_VEC_VSUBUWS
},
7490 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsubshs", ALTIVEC_BUILTIN_VEC_VSUBSHS
},
7491 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsubuhs", ALTIVEC_BUILTIN_VEC_VSUBUHS
},
7492 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsubsbs", ALTIVEC_BUILTIN_VEC_VSUBSBS
},
7493 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsububs", ALTIVEC_BUILTIN_VEC_VSUBUBS
},
7494 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_sum4s", ALTIVEC_BUILTIN_VEC_SUM4S
},
7495 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsum4shs", ALTIVEC_BUILTIN_VEC_VSUM4SHS
},
7496 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsum4sbs", ALTIVEC_BUILTIN_VEC_VSUM4SBS
},
7497 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_vsum4ubs", ALTIVEC_BUILTIN_VEC_VSUM4UBS
},
7498 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_sum2s", ALTIVEC_BUILTIN_VEC_SUM2S
},
7499 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_sums", ALTIVEC_BUILTIN_VEC_SUMS
},
7500 { MASK_ALTIVEC
, CODE_FOR_nothing
, "__builtin_vec_xor", ALTIVEC_BUILTIN_VEC_XOR
},
  { 0, CODE_FOR_divv2sf3, "__builtin_paired_divv2sf3", PAIRED_BUILTIN_DIVV2SF3 },
  { 0, CODE_FOR_addv2sf3, "__builtin_paired_addv2sf3", PAIRED_BUILTIN_ADDV2SF3 },
  { 0, CODE_FOR_subv2sf3, "__builtin_paired_subv2sf3", PAIRED_BUILTIN_SUBV2SF3 },
  { 0, CODE_FOR_mulv2sf3, "__builtin_paired_mulv2sf3", PAIRED_BUILTIN_MULV2SF3 },
  { 0, CODE_FOR_paired_muls0, "__builtin_paired_muls0", PAIRED_BUILTIN_MULS0 },
  { 0, CODE_FOR_paired_muls1, "__builtin_paired_muls1", PAIRED_BUILTIN_MULS1 },
  { 0, CODE_FOR_paired_merge00, "__builtin_paired_merge00", PAIRED_BUILTIN_MERGE00 },
  { 0, CODE_FOR_paired_merge01, "__builtin_paired_merge01", PAIRED_BUILTIN_MERGE01 },
  { 0, CODE_FOR_paired_merge10, "__builtin_paired_merge10", PAIRED_BUILTIN_MERGE10 },
  { 0, CODE_FOR_paired_merge11, "__builtin_paired_merge11", PAIRED_BUILTIN_MERGE11 },
  /* Place holder, leave as first spe builtin.  */
  { 0, CODE_FOR_spe_evaddw, "__builtin_spe_evaddw", SPE_BUILTIN_EVADDW },
  { 0, CODE_FOR_spe_evand, "__builtin_spe_evand", SPE_BUILTIN_EVAND },
  { 0, CODE_FOR_spe_evandc, "__builtin_spe_evandc", SPE_BUILTIN_EVANDC },
  { 0, CODE_FOR_spe_evdivws, "__builtin_spe_evdivws", SPE_BUILTIN_EVDIVWS },
  { 0, CODE_FOR_spe_evdivwu, "__builtin_spe_evdivwu", SPE_BUILTIN_EVDIVWU },
  { 0, CODE_FOR_spe_eveqv, "__builtin_spe_eveqv", SPE_BUILTIN_EVEQV },
  { 0, CODE_FOR_spe_evfsadd, "__builtin_spe_evfsadd", SPE_BUILTIN_EVFSADD },
  { 0, CODE_FOR_spe_evfsdiv, "__builtin_spe_evfsdiv", SPE_BUILTIN_EVFSDIV },
  { 0, CODE_FOR_spe_evfsmul, "__builtin_spe_evfsmul", SPE_BUILTIN_EVFSMUL },
  { 0, CODE_FOR_spe_evfssub, "__builtin_spe_evfssub", SPE_BUILTIN_EVFSSUB },
  { 0, CODE_FOR_spe_evmergehi, "__builtin_spe_evmergehi", SPE_BUILTIN_EVMERGEHI },
  { 0, CODE_FOR_spe_evmergehilo, "__builtin_spe_evmergehilo", SPE_BUILTIN_EVMERGEHILO },
  { 0, CODE_FOR_spe_evmergelo, "__builtin_spe_evmergelo", SPE_BUILTIN_EVMERGELO },
  { 0, CODE_FOR_spe_evmergelohi, "__builtin_spe_evmergelohi", SPE_BUILTIN_EVMERGELOHI },
  { 0, CODE_FOR_spe_evmhegsmfaa, "__builtin_spe_evmhegsmfaa", SPE_BUILTIN_EVMHEGSMFAA },
  { 0, CODE_FOR_spe_evmhegsmfan, "__builtin_spe_evmhegsmfan", SPE_BUILTIN_EVMHEGSMFAN },
  { 0, CODE_FOR_spe_evmhegsmiaa, "__builtin_spe_evmhegsmiaa", SPE_BUILTIN_EVMHEGSMIAA },
  { 0, CODE_FOR_spe_evmhegsmian, "__builtin_spe_evmhegsmian", SPE_BUILTIN_EVMHEGSMIAN },
  { 0, CODE_FOR_spe_evmhegumiaa, "__builtin_spe_evmhegumiaa", SPE_BUILTIN_EVMHEGUMIAA },
  { 0, CODE_FOR_spe_evmhegumian, "__builtin_spe_evmhegumian", SPE_BUILTIN_EVMHEGUMIAN },
  { 0, CODE_FOR_spe_evmhesmf, "__builtin_spe_evmhesmf", SPE_BUILTIN_EVMHESMF },
  { 0, CODE_FOR_spe_evmhesmfa, "__builtin_spe_evmhesmfa", SPE_BUILTIN_EVMHESMFA },
  { 0, CODE_FOR_spe_evmhesmfaaw, "__builtin_spe_evmhesmfaaw", SPE_BUILTIN_EVMHESMFAAW },
  { 0, CODE_FOR_spe_evmhesmfanw, "__builtin_spe_evmhesmfanw", SPE_BUILTIN_EVMHESMFANW },
  { 0, CODE_FOR_spe_evmhesmi, "__builtin_spe_evmhesmi", SPE_BUILTIN_EVMHESMI },
  { 0, CODE_FOR_spe_evmhesmia, "__builtin_spe_evmhesmia", SPE_BUILTIN_EVMHESMIA },
  { 0, CODE_FOR_spe_evmhesmiaaw, "__builtin_spe_evmhesmiaaw", SPE_BUILTIN_EVMHESMIAAW },
  { 0, CODE_FOR_spe_evmhesmianw, "__builtin_spe_evmhesmianw", SPE_BUILTIN_EVMHESMIANW },
  { 0, CODE_FOR_spe_evmhessf, "__builtin_spe_evmhessf", SPE_BUILTIN_EVMHESSF },
  { 0, CODE_FOR_spe_evmhessfa, "__builtin_spe_evmhessfa", SPE_BUILTIN_EVMHESSFA },
  { 0, CODE_FOR_spe_evmhessfaaw, "__builtin_spe_evmhessfaaw", SPE_BUILTIN_EVMHESSFAAW },
  { 0, CODE_FOR_spe_evmhessfanw, "__builtin_spe_evmhessfanw", SPE_BUILTIN_EVMHESSFANW },
  { 0, CODE_FOR_spe_evmhessiaaw, "__builtin_spe_evmhessiaaw", SPE_BUILTIN_EVMHESSIAAW },
  { 0, CODE_FOR_spe_evmhessianw, "__builtin_spe_evmhessianw", SPE_BUILTIN_EVMHESSIANW },
  { 0, CODE_FOR_spe_evmheumi, "__builtin_spe_evmheumi", SPE_BUILTIN_EVMHEUMI },
  { 0, CODE_FOR_spe_evmheumia, "__builtin_spe_evmheumia", SPE_BUILTIN_EVMHEUMIA },
  { 0, CODE_FOR_spe_evmheumiaaw, "__builtin_spe_evmheumiaaw", SPE_BUILTIN_EVMHEUMIAAW },
  { 0, CODE_FOR_spe_evmheumianw, "__builtin_spe_evmheumianw", SPE_BUILTIN_EVMHEUMIANW },
  { 0, CODE_FOR_spe_evmheusiaaw, "__builtin_spe_evmheusiaaw", SPE_BUILTIN_EVMHEUSIAAW },
  { 0, CODE_FOR_spe_evmheusianw, "__builtin_spe_evmheusianw", SPE_BUILTIN_EVMHEUSIANW },
  { 0, CODE_FOR_spe_evmhogsmfaa, "__builtin_spe_evmhogsmfaa", SPE_BUILTIN_EVMHOGSMFAA },
  { 0, CODE_FOR_spe_evmhogsmfan, "__builtin_spe_evmhogsmfan", SPE_BUILTIN_EVMHOGSMFAN },
  { 0, CODE_FOR_spe_evmhogsmiaa, "__builtin_spe_evmhogsmiaa", SPE_BUILTIN_EVMHOGSMIAA },
  { 0, CODE_FOR_spe_evmhogsmian, "__builtin_spe_evmhogsmian", SPE_BUILTIN_EVMHOGSMIAN },
  { 0, CODE_FOR_spe_evmhogumiaa, "__builtin_spe_evmhogumiaa", SPE_BUILTIN_EVMHOGUMIAA },
  { 0, CODE_FOR_spe_evmhogumian, "__builtin_spe_evmhogumian", SPE_BUILTIN_EVMHOGUMIAN },
  { 0, CODE_FOR_spe_evmhosmf, "__builtin_spe_evmhosmf", SPE_BUILTIN_EVMHOSMF },
  { 0, CODE_FOR_spe_evmhosmfa, "__builtin_spe_evmhosmfa", SPE_BUILTIN_EVMHOSMFA },
  { 0, CODE_FOR_spe_evmhosmfaaw, "__builtin_spe_evmhosmfaaw", SPE_BUILTIN_EVMHOSMFAAW },
  { 0, CODE_FOR_spe_evmhosmfanw, "__builtin_spe_evmhosmfanw", SPE_BUILTIN_EVMHOSMFANW },
  { 0, CODE_FOR_spe_evmhosmi, "__builtin_spe_evmhosmi", SPE_BUILTIN_EVMHOSMI },
  { 0, CODE_FOR_spe_evmhosmia, "__builtin_spe_evmhosmia", SPE_BUILTIN_EVMHOSMIA },
  { 0, CODE_FOR_spe_evmhosmiaaw, "__builtin_spe_evmhosmiaaw", SPE_BUILTIN_EVMHOSMIAAW },
  { 0, CODE_FOR_spe_evmhosmianw, "__builtin_spe_evmhosmianw", SPE_BUILTIN_EVMHOSMIANW },
  { 0, CODE_FOR_spe_evmhossf, "__builtin_spe_evmhossf", SPE_BUILTIN_EVMHOSSF },
  { 0, CODE_FOR_spe_evmhossfa, "__builtin_spe_evmhossfa", SPE_BUILTIN_EVMHOSSFA },
  { 0, CODE_FOR_spe_evmhossfaaw, "__builtin_spe_evmhossfaaw", SPE_BUILTIN_EVMHOSSFAAW },
  { 0, CODE_FOR_spe_evmhossfanw, "__builtin_spe_evmhossfanw", SPE_BUILTIN_EVMHOSSFANW },
  { 0, CODE_FOR_spe_evmhossiaaw, "__builtin_spe_evmhossiaaw", SPE_BUILTIN_EVMHOSSIAAW },
  { 0, CODE_FOR_spe_evmhossianw, "__builtin_spe_evmhossianw", SPE_BUILTIN_EVMHOSSIANW },
  { 0, CODE_FOR_spe_evmhoumi, "__builtin_spe_evmhoumi", SPE_BUILTIN_EVMHOUMI },
  { 0, CODE_FOR_spe_evmhoumia, "__builtin_spe_evmhoumia", SPE_BUILTIN_EVMHOUMIA },
  { 0, CODE_FOR_spe_evmhoumiaaw, "__builtin_spe_evmhoumiaaw", SPE_BUILTIN_EVMHOUMIAAW },
  { 0, CODE_FOR_spe_evmhoumianw, "__builtin_spe_evmhoumianw", SPE_BUILTIN_EVMHOUMIANW },
  { 0, CODE_FOR_spe_evmhousiaaw, "__builtin_spe_evmhousiaaw", SPE_BUILTIN_EVMHOUSIAAW },
  { 0, CODE_FOR_spe_evmhousianw, "__builtin_spe_evmhousianw", SPE_BUILTIN_EVMHOUSIANW },
  { 0, CODE_FOR_spe_evmwhsmf, "__builtin_spe_evmwhsmf", SPE_BUILTIN_EVMWHSMF },
  { 0, CODE_FOR_spe_evmwhsmfa, "__builtin_spe_evmwhsmfa", SPE_BUILTIN_EVMWHSMFA },
  { 0, CODE_FOR_spe_evmwhsmi, "__builtin_spe_evmwhsmi", SPE_BUILTIN_EVMWHSMI },
  { 0, CODE_FOR_spe_evmwhsmia, "__builtin_spe_evmwhsmia", SPE_BUILTIN_EVMWHSMIA },
  { 0, CODE_FOR_spe_evmwhssf, "__builtin_spe_evmwhssf", SPE_BUILTIN_EVMWHSSF },
  { 0, CODE_FOR_spe_evmwhssfa, "__builtin_spe_evmwhssfa", SPE_BUILTIN_EVMWHSSFA },
  { 0, CODE_FOR_spe_evmwhumi, "__builtin_spe_evmwhumi", SPE_BUILTIN_EVMWHUMI },
  { 0, CODE_FOR_spe_evmwhumia, "__builtin_spe_evmwhumia", SPE_BUILTIN_EVMWHUMIA },
  { 0, CODE_FOR_spe_evmwlsmiaaw, "__builtin_spe_evmwlsmiaaw", SPE_BUILTIN_EVMWLSMIAAW },
  { 0, CODE_FOR_spe_evmwlsmianw, "__builtin_spe_evmwlsmianw", SPE_BUILTIN_EVMWLSMIANW },
  { 0, CODE_FOR_spe_evmwlssiaaw, "__builtin_spe_evmwlssiaaw", SPE_BUILTIN_EVMWLSSIAAW },
  { 0, CODE_FOR_spe_evmwlssianw, "__builtin_spe_evmwlssianw", SPE_BUILTIN_EVMWLSSIANW },
  { 0, CODE_FOR_spe_evmwlumi, "__builtin_spe_evmwlumi", SPE_BUILTIN_EVMWLUMI },
  { 0, CODE_FOR_spe_evmwlumia, "__builtin_spe_evmwlumia", SPE_BUILTIN_EVMWLUMIA },
  { 0, CODE_FOR_spe_evmwlumiaaw, "__builtin_spe_evmwlumiaaw", SPE_BUILTIN_EVMWLUMIAAW },
  { 0, CODE_FOR_spe_evmwlumianw, "__builtin_spe_evmwlumianw", SPE_BUILTIN_EVMWLUMIANW },
  { 0, CODE_FOR_spe_evmwlusiaaw, "__builtin_spe_evmwlusiaaw", SPE_BUILTIN_EVMWLUSIAAW },
  { 0, CODE_FOR_spe_evmwlusianw, "__builtin_spe_evmwlusianw", SPE_BUILTIN_EVMWLUSIANW },
  { 0, CODE_FOR_spe_evmwsmf, "__builtin_spe_evmwsmf", SPE_BUILTIN_EVMWSMF },
  { 0, CODE_FOR_spe_evmwsmfa, "__builtin_spe_evmwsmfa", SPE_BUILTIN_EVMWSMFA },
  { 0, CODE_FOR_spe_evmwsmfaa, "__builtin_spe_evmwsmfaa", SPE_BUILTIN_EVMWSMFAA },
  { 0, CODE_FOR_spe_evmwsmfan, "__builtin_spe_evmwsmfan", SPE_BUILTIN_EVMWSMFAN },
  { 0, CODE_FOR_spe_evmwsmi, "__builtin_spe_evmwsmi", SPE_BUILTIN_EVMWSMI },
  { 0, CODE_FOR_spe_evmwsmia, "__builtin_spe_evmwsmia", SPE_BUILTIN_EVMWSMIA },
  { 0, CODE_FOR_spe_evmwsmiaa, "__builtin_spe_evmwsmiaa", SPE_BUILTIN_EVMWSMIAA },
  { 0, CODE_FOR_spe_evmwsmian, "__builtin_spe_evmwsmian", SPE_BUILTIN_EVMWSMIAN },
  { 0, CODE_FOR_spe_evmwssf, "__builtin_spe_evmwssf", SPE_BUILTIN_EVMWSSF },
  { 0, CODE_FOR_spe_evmwssfa, "__builtin_spe_evmwssfa", SPE_BUILTIN_EVMWSSFA },
  { 0, CODE_FOR_spe_evmwssfaa, "__builtin_spe_evmwssfaa", SPE_BUILTIN_EVMWSSFAA },
  { 0, CODE_FOR_spe_evmwssfan, "__builtin_spe_evmwssfan", SPE_BUILTIN_EVMWSSFAN },
  { 0, CODE_FOR_spe_evmwumi, "__builtin_spe_evmwumi", SPE_BUILTIN_EVMWUMI },
  { 0, CODE_FOR_spe_evmwumia, "__builtin_spe_evmwumia", SPE_BUILTIN_EVMWUMIA },
  { 0, CODE_FOR_spe_evmwumiaa, "__builtin_spe_evmwumiaa", SPE_BUILTIN_EVMWUMIAA },
  { 0, CODE_FOR_spe_evmwumian, "__builtin_spe_evmwumian", SPE_BUILTIN_EVMWUMIAN },
  { 0, CODE_FOR_spe_evnand, "__builtin_spe_evnand", SPE_BUILTIN_EVNAND },
  { 0, CODE_FOR_spe_evnor, "__builtin_spe_evnor", SPE_BUILTIN_EVNOR },
  { 0, CODE_FOR_spe_evor, "__builtin_spe_evor", SPE_BUILTIN_EVOR },
  { 0, CODE_FOR_spe_evorc, "__builtin_spe_evorc", SPE_BUILTIN_EVORC },
  { 0, CODE_FOR_spe_evrlw, "__builtin_spe_evrlw", SPE_BUILTIN_EVRLW },
  { 0, CODE_FOR_spe_evslw, "__builtin_spe_evslw", SPE_BUILTIN_EVSLW },
  { 0, CODE_FOR_spe_evsrws, "__builtin_spe_evsrws", SPE_BUILTIN_EVSRWS },
  { 0, CODE_FOR_spe_evsrwu, "__builtin_spe_evsrwu", SPE_BUILTIN_EVSRWU },
  { 0, CODE_FOR_spe_evsubfw, "__builtin_spe_evsubfw", SPE_BUILTIN_EVSUBFW },

  /* SPE binary operations expecting a 5-bit unsigned literal.  */
  { 0, CODE_FOR_spe_evaddiw, "__builtin_spe_evaddiw", SPE_BUILTIN_EVADDIW },

  { 0, CODE_FOR_spe_evrlwi, "__builtin_spe_evrlwi", SPE_BUILTIN_EVRLWI },
  { 0, CODE_FOR_spe_evslwi, "__builtin_spe_evslwi", SPE_BUILTIN_EVSLWI },
  { 0, CODE_FOR_spe_evsrwis, "__builtin_spe_evsrwis", SPE_BUILTIN_EVSRWIS },
  { 0, CODE_FOR_spe_evsrwiu, "__builtin_spe_evsrwiu", SPE_BUILTIN_EVSRWIU },
  { 0, CODE_FOR_spe_evsubifw, "__builtin_spe_evsubifw", SPE_BUILTIN_EVSUBIFW },
  { 0, CODE_FOR_spe_evmwhssfaa, "__builtin_spe_evmwhssfaa", SPE_BUILTIN_EVMWHSSFAA },
  { 0, CODE_FOR_spe_evmwhssmaa, "__builtin_spe_evmwhssmaa", SPE_BUILTIN_EVMWHSSMAA },
  { 0, CODE_FOR_spe_evmwhsmfaa, "__builtin_spe_evmwhsmfaa", SPE_BUILTIN_EVMWHSMFAA },
  { 0, CODE_FOR_spe_evmwhsmiaa, "__builtin_spe_evmwhsmiaa", SPE_BUILTIN_EVMWHSMIAA },
  { 0, CODE_FOR_spe_evmwhusiaa, "__builtin_spe_evmwhusiaa", SPE_BUILTIN_EVMWHUSIAA },
  { 0, CODE_FOR_spe_evmwhumiaa, "__builtin_spe_evmwhumiaa", SPE_BUILTIN_EVMWHUMIAA },
  { 0, CODE_FOR_spe_evmwhssfan, "__builtin_spe_evmwhssfan", SPE_BUILTIN_EVMWHSSFAN },
  { 0, CODE_FOR_spe_evmwhssian, "__builtin_spe_evmwhssian", SPE_BUILTIN_EVMWHSSIAN },
  { 0, CODE_FOR_spe_evmwhsmfan, "__builtin_spe_evmwhsmfan", SPE_BUILTIN_EVMWHSMFAN },
  { 0, CODE_FOR_spe_evmwhsmian, "__builtin_spe_evmwhsmian", SPE_BUILTIN_EVMWHSMIAN },
  { 0, CODE_FOR_spe_evmwhusian, "__builtin_spe_evmwhusian", SPE_BUILTIN_EVMWHUSIAN },
  { 0, CODE_FOR_spe_evmwhumian, "__builtin_spe_evmwhumian", SPE_BUILTIN_EVMWHUMIAN },
  { 0, CODE_FOR_spe_evmwhgssfaa, "__builtin_spe_evmwhgssfaa", SPE_BUILTIN_EVMWHGSSFAA },
  { 0, CODE_FOR_spe_evmwhgsmfaa, "__builtin_spe_evmwhgsmfaa", SPE_BUILTIN_EVMWHGSMFAA },
  { 0, CODE_FOR_spe_evmwhgsmiaa, "__builtin_spe_evmwhgsmiaa", SPE_BUILTIN_EVMWHGSMIAA },
  { 0, CODE_FOR_spe_evmwhgumiaa, "__builtin_spe_evmwhgumiaa", SPE_BUILTIN_EVMWHGUMIAA },
  { 0, CODE_FOR_spe_evmwhgssfan, "__builtin_spe_evmwhgssfan", SPE_BUILTIN_EVMWHGSSFAN },
  { 0, CODE_FOR_spe_evmwhgsmfan, "__builtin_spe_evmwhgsmfan", SPE_BUILTIN_EVMWHGSMFAN },
  { 0, CODE_FOR_spe_evmwhgsmian, "__builtin_spe_evmwhgsmian", SPE_BUILTIN_EVMWHGSMIAN },
  { 0, CODE_FOR_spe_evmwhgumian, "__builtin_spe_evmwhgumian", SPE_BUILTIN_EVMWHGUMIAN },
  { 0, CODE_FOR_spe_brinc, "__builtin_spe_brinc", SPE_BUILTIN_BRINC },

  /* Place-holder.  Leave as last binary SPE builtin.  */
  { 0, CODE_FOR_xorv2si3, "__builtin_spe_evxor", SPE_BUILTIN_EVXOR }
};
/* AltiVec predicates.  */

struct builtin_description_predicates
{
  const unsigned int mask;
  const enum insn_code icode;
  const char *opcode;
  const char *const name;
  const enum rs6000_builtins code;
};

static const struct builtin_description_predicates bdesc_altivec_preds[] =
{
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpbfp.", "__builtin_altivec_vcmpbfp_p", ALTIVEC_BUILTIN_VCMPBFP_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpeqfp.", "__builtin_altivec_vcmpeqfp_p", ALTIVEC_BUILTIN_VCMPEQFP_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpgefp.", "__builtin_altivec_vcmpgefp_p", ALTIVEC_BUILTIN_VCMPGEFP_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4sf, "*vcmpgtfp.", "__builtin_altivec_vcmpgtfp_p", ALTIVEC_BUILTIN_VCMPGTFP_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4si, "*vcmpequw.", "__builtin_altivec_vcmpequw_p", ALTIVEC_BUILTIN_VCMPEQUW_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4si, "*vcmpgtsw.", "__builtin_altivec_vcmpgtsw_p", ALTIVEC_BUILTIN_VCMPGTSW_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v4si, "*vcmpgtuw.", "__builtin_altivec_vcmpgtuw_p", ALTIVEC_BUILTIN_VCMPGTUW_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v8hi, "*vcmpgtuh.", "__builtin_altivec_vcmpgtuh_p", ALTIVEC_BUILTIN_VCMPGTUH_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v8hi, "*vcmpgtsh.", "__builtin_altivec_vcmpgtsh_p", ALTIVEC_BUILTIN_VCMPGTSH_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v8hi, "*vcmpequh.", "__builtin_altivec_vcmpequh_p", ALTIVEC_BUILTIN_VCMPEQUH_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpequb.", "__builtin_altivec_vcmpequb_p", ALTIVEC_BUILTIN_VCMPEQUB_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpgtsb.", "__builtin_altivec_vcmpgtsb_p", ALTIVEC_BUILTIN_VCMPGTSB_P },
  { MASK_ALTIVEC, CODE_FOR_altivec_predicate_v16qi, "*vcmpgtub.", "__builtin_altivec_vcmpgtub_p", ALTIVEC_BUILTIN_VCMPGTUB_P },

  { MASK_ALTIVEC, CODE_FOR_nothing, NULL, "__builtin_vec_vcmpeq_p", ALTIVEC_BUILTIN_VCMPEQ_P },
  { MASK_ALTIVEC, CODE_FOR_nothing, NULL, "__builtin_vec_vcmpgt_p", ALTIVEC_BUILTIN_VCMPGT_P },
  { MASK_ALTIVEC, CODE_FOR_nothing, NULL, "__builtin_vec_vcmpge_p", ALTIVEC_BUILTIN_VCMPGE_P }
};
/* SPE predicates.  */
static struct builtin_description bdesc_spe_predicates[] =
{
  /* Place-holder.  Leave as first.  */
  { 0, CODE_FOR_spe_evcmpeq, "__builtin_spe_evcmpeq", SPE_BUILTIN_EVCMPEQ },
  { 0, CODE_FOR_spe_evcmpgts, "__builtin_spe_evcmpgts", SPE_BUILTIN_EVCMPGTS },
  { 0, CODE_FOR_spe_evcmpgtu, "__builtin_spe_evcmpgtu", SPE_BUILTIN_EVCMPGTU },
  { 0, CODE_FOR_spe_evcmplts, "__builtin_spe_evcmplts", SPE_BUILTIN_EVCMPLTS },
  { 0, CODE_FOR_spe_evcmpltu, "__builtin_spe_evcmpltu", SPE_BUILTIN_EVCMPLTU },
  { 0, CODE_FOR_spe_evfscmpeq, "__builtin_spe_evfscmpeq", SPE_BUILTIN_EVFSCMPEQ },
  { 0, CODE_FOR_spe_evfscmpgt, "__builtin_spe_evfscmpgt", SPE_BUILTIN_EVFSCMPGT },
  { 0, CODE_FOR_spe_evfscmplt, "__builtin_spe_evfscmplt", SPE_BUILTIN_EVFSCMPLT },
  { 0, CODE_FOR_spe_evfststeq, "__builtin_spe_evfststeq", SPE_BUILTIN_EVFSTSTEQ },
  { 0, CODE_FOR_spe_evfststgt, "__builtin_spe_evfststgt", SPE_BUILTIN_EVFSTSTGT },
  /* Place-holder.  Leave as last.  */
  { 0, CODE_FOR_spe_evfststlt, "__builtin_spe_evfststlt", SPE_BUILTIN_EVFSTSTLT },
};
/* SPE evsel predicates.  */
static struct builtin_description bdesc_spe_evsel[] =
{
  /* Place-holder.  Leave as first.  */
  { 0, CODE_FOR_spe_evcmpgts, "__builtin_spe_evsel_gts", SPE_BUILTIN_EVSEL_CMPGTS },
  { 0, CODE_FOR_spe_evcmpgtu, "__builtin_spe_evsel_gtu", SPE_BUILTIN_EVSEL_CMPGTU },
  { 0, CODE_FOR_spe_evcmplts, "__builtin_spe_evsel_lts", SPE_BUILTIN_EVSEL_CMPLTS },
  { 0, CODE_FOR_spe_evcmpltu, "__builtin_spe_evsel_ltu", SPE_BUILTIN_EVSEL_CMPLTU },
  { 0, CODE_FOR_spe_evcmpeq, "__builtin_spe_evsel_eq", SPE_BUILTIN_EVSEL_CMPEQ },
  { 0, CODE_FOR_spe_evfscmpgt, "__builtin_spe_evsel_fsgt", SPE_BUILTIN_EVSEL_FSCMPGT },
  { 0, CODE_FOR_spe_evfscmplt, "__builtin_spe_evsel_fslt", SPE_BUILTIN_EVSEL_FSCMPLT },
  { 0, CODE_FOR_spe_evfscmpeq, "__builtin_spe_evsel_fseq", SPE_BUILTIN_EVSEL_FSCMPEQ },
  { 0, CODE_FOR_spe_evfststgt, "__builtin_spe_evsel_fststgt", SPE_BUILTIN_EVSEL_FSTSTGT },
  { 0, CODE_FOR_spe_evfststlt, "__builtin_spe_evsel_fststlt", SPE_BUILTIN_EVSEL_FSTSTLT },
  /* Place-holder.  Leave as last.  */
  { 0, CODE_FOR_spe_evfststeq, "__builtin_spe_evsel_fststeq", SPE_BUILTIN_EVSEL_FSTSTEQ },
};
/* PAIRED predicates.  */
static const struct builtin_description bdesc_paired_preds[] =
{
  /* Place-holder.  Leave as first.  */
  { 0, CODE_FOR_paired_cmpu0, "__builtin_paired_cmpu0", PAIRED_BUILTIN_CMPU0 },
  /* Place-holder.  Leave as last.  */
  { 0, CODE_FOR_paired_cmpu1, "__builtin_paired_cmpu1", PAIRED_BUILTIN_CMPU1 },
};
/* ABS* operations.  */

static const struct builtin_description bdesc_abs[] =
{
  { MASK_ALTIVEC, CODE_FOR_absv4si2, "__builtin_altivec_abs_v4si", ALTIVEC_BUILTIN_ABS_V4SI },
  { MASK_ALTIVEC, CODE_FOR_absv8hi2, "__builtin_altivec_abs_v8hi", ALTIVEC_BUILTIN_ABS_V8HI },
  { MASK_ALTIVEC, CODE_FOR_absv4sf2, "__builtin_altivec_abs_v4sf", ALTIVEC_BUILTIN_ABS_V4SF },
  { MASK_ALTIVEC, CODE_FOR_absv16qi2, "__builtin_altivec_abs_v16qi", ALTIVEC_BUILTIN_ABS_V16QI },
  { MASK_ALTIVEC, CODE_FOR_altivec_abss_v4si, "__builtin_altivec_abss_v4si", ALTIVEC_BUILTIN_ABSS_V4SI },
  { MASK_ALTIVEC, CODE_FOR_altivec_abss_v8hi, "__builtin_altivec_abss_v8hi", ALTIVEC_BUILTIN_ABSS_V8HI },
  { MASK_ALTIVEC, CODE_FOR_altivec_abss_v16qi, "__builtin_altivec_abss_v16qi", ALTIVEC_BUILTIN_ABSS_V16QI }
};
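
/* Illustrative sketch, not part of this file: how a bdesc_abs entry
   above surfaces at the source level.  It assumes the usual <altivec.h>
   interface and is guarded out, since this file is the compiler itself
   and cannot include user headers; compile the body separately with
   -maltivec to try it.  */
#if 0
#include <altivec.h>

vector signed int
abs_example (vector signed int v)
{
  /* Maps to __builtin_altivec_abs_v4si.  AltiVec has no single abs
     instruction, so altivec_expand_abs_builtin below synthesizes it
     with two scratch registers (negate, then take the signed max).  */
  return vec_abs (v);
}
#endif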
/* Simple unary operations: VECb = foo (unsigned literal) or VECb =
   foo (VECa).  */

static struct builtin_description bdesc_1arg[] =
{
  { MASK_ALTIVEC, CODE_FOR_altivec_vexptefp, "__builtin_altivec_vexptefp", ALTIVEC_BUILTIN_VEXPTEFP },
  { MASK_ALTIVEC, CODE_FOR_altivec_vlogefp, "__builtin_altivec_vlogefp", ALTIVEC_BUILTIN_VLOGEFP },
  { MASK_ALTIVEC, CODE_FOR_altivec_vrefp, "__builtin_altivec_vrefp", ALTIVEC_BUILTIN_VREFP },
  { MASK_ALTIVEC, CODE_FOR_altivec_vrfim, "__builtin_altivec_vrfim", ALTIVEC_BUILTIN_VRFIM },
  { MASK_ALTIVEC, CODE_FOR_altivec_vrfin, "__builtin_altivec_vrfin", ALTIVEC_BUILTIN_VRFIN },
  { MASK_ALTIVEC, CODE_FOR_altivec_vrfip, "__builtin_altivec_vrfip", ALTIVEC_BUILTIN_VRFIP },
  { MASK_ALTIVEC, CODE_FOR_ftruncv4sf2, "__builtin_altivec_vrfiz", ALTIVEC_BUILTIN_VRFIZ },
  { MASK_ALTIVEC, CODE_FOR_altivec_vrsqrtefp, "__builtin_altivec_vrsqrtefp", ALTIVEC_BUILTIN_VRSQRTEFP },
  { MASK_ALTIVEC, CODE_FOR_altivec_vspltisb, "__builtin_altivec_vspltisb", ALTIVEC_BUILTIN_VSPLTISB },
  { MASK_ALTIVEC, CODE_FOR_altivec_vspltish, "__builtin_altivec_vspltish", ALTIVEC_BUILTIN_VSPLTISH },
  { MASK_ALTIVEC, CODE_FOR_altivec_vspltisw, "__builtin_altivec_vspltisw", ALTIVEC_BUILTIN_VSPLTISW },
  { MASK_ALTIVEC, CODE_FOR_altivec_vupkhsb, "__builtin_altivec_vupkhsb", ALTIVEC_BUILTIN_VUPKHSB },
  { MASK_ALTIVEC, CODE_FOR_altivec_vupkhpx, "__builtin_altivec_vupkhpx", ALTIVEC_BUILTIN_VUPKHPX },
  { MASK_ALTIVEC, CODE_FOR_altivec_vupkhsh, "__builtin_altivec_vupkhsh", ALTIVEC_BUILTIN_VUPKHSH },
  { MASK_ALTIVEC, CODE_FOR_altivec_vupklsb, "__builtin_altivec_vupklsb", ALTIVEC_BUILTIN_VUPKLSB },
  { MASK_ALTIVEC, CODE_FOR_altivec_vupklpx, "__builtin_altivec_vupklpx", ALTIVEC_BUILTIN_VUPKLPX },
  { MASK_ALTIVEC, CODE_FOR_altivec_vupklsh, "__builtin_altivec_vupklsh", ALTIVEC_BUILTIN_VUPKLSH },

  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_abs", ALTIVEC_BUILTIN_VEC_ABS },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_abss", ALTIVEC_BUILTIN_VEC_ABSS },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_ceil", ALTIVEC_BUILTIN_VEC_CEIL },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_expte", ALTIVEC_BUILTIN_VEC_EXPTE },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_floor", ALTIVEC_BUILTIN_VEC_FLOOR },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_loge", ALTIVEC_BUILTIN_VEC_LOGE },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_mtvscr", ALTIVEC_BUILTIN_VEC_MTVSCR },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_re", ALTIVEC_BUILTIN_VEC_RE },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_round", ALTIVEC_BUILTIN_VEC_ROUND },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_rsqrte", ALTIVEC_BUILTIN_VEC_RSQRTE },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_trunc", ALTIVEC_BUILTIN_VEC_TRUNC },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_unpackh", ALTIVEC_BUILTIN_VEC_UNPACKH },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhsh", ALTIVEC_BUILTIN_VEC_VUPKHSH },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhpx", ALTIVEC_BUILTIN_VEC_VUPKHPX },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupkhsb", ALTIVEC_BUILTIN_VEC_VUPKHSB },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_unpackl", ALTIVEC_BUILTIN_VEC_UNPACKL },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklpx", ALTIVEC_BUILTIN_VEC_VUPKLPX },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklsh", ALTIVEC_BUILTIN_VEC_VUPKLSH },
  { MASK_ALTIVEC, CODE_FOR_nothing, "__builtin_vec_vupklsb", ALTIVEC_BUILTIN_VEC_VUPKLSB },

  /* The SPE unary builtins must start with SPE_BUILTIN_EVABS and
     end with SPE_BUILTIN_EVSUBFUSIAAW.  */
  { 0, CODE_FOR_spe_evabs, "__builtin_spe_evabs", SPE_BUILTIN_EVABS },
  { 0, CODE_FOR_spe_evaddsmiaaw, "__builtin_spe_evaddsmiaaw", SPE_BUILTIN_EVADDSMIAAW },
  { 0, CODE_FOR_spe_evaddssiaaw, "__builtin_spe_evaddssiaaw", SPE_BUILTIN_EVADDSSIAAW },
  { 0, CODE_FOR_spe_evaddumiaaw, "__builtin_spe_evaddumiaaw", SPE_BUILTIN_EVADDUMIAAW },
  { 0, CODE_FOR_spe_evaddusiaaw, "__builtin_spe_evaddusiaaw", SPE_BUILTIN_EVADDUSIAAW },
  { 0, CODE_FOR_spe_evcntlsw, "__builtin_spe_evcntlsw", SPE_BUILTIN_EVCNTLSW },
  { 0, CODE_FOR_spe_evcntlzw, "__builtin_spe_evcntlzw", SPE_BUILTIN_EVCNTLZW },
  { 0, CODE_FOR_spe_evextsb, "__builtin_spe_evextsb", SPE_BUILTIN_EVEXTSB },
  { 0, CODE_FOR_spe_evextsh, "__builtin_spe_evextsh", SPE_BUILTIN_EVEXTSH },
  { 0, CODE_FOR_spe_evfsabs, "__builtin_spe_evfsabs", SPE_BUILTIN_EVFSABS },
  { 0, CODE_FOR_spe_evfscfsf, "__builtin_spe_evfscfsf", SPE_BUILTIN_EVFSCFSF },
  { 0, CODE_FOR_spe_evfscfsi, "__builtin_spe_evfscfsi", SPE_BUILTIN_EVFSCFSI },
  { 0, CODE_FOR_spe_evfscfuf, "__builtin_spe_evfscfuf", SPE_BUILTIN_EVFSCFUF },
  { 0, CODE_FOR_spe_evfscfui, "__builtin_spe_evfscfui", SPE_BUILTIN_EVFSCFUI },
  { 0, CODE_FOR_spe_evfsctsf, "__builtin_spe_evfsctsf", SPE_BUILTIN_EVFSCTSF },
  { 0, CODE_FOR_spe_evfsctsi, "__builtin_spe_evfsctsi", SPE_BUILTIN_EVFSCTSI },
  { 0, CODE_FOR_spe_evfsctsiz, "__builtin_spe_evfsctsiz", SPE_BUILTIN_EVFSCTSIZ },
  { 0, CODE_FOR_spe_evfsctuf, "__builtin_spe_evfsctuf", SPE_BUILTIN_EVFSCTUF },
  { 0, CODE_FOR_spe_evfsctui, "__builtin_spe_evfsctui", SPE_BUILTIN_EVFSCTUI },
  { 0, CODE_FOR_spe_evfsctuiz, "__builtin_spe_evfsctuiz", SPE_BUILTIN_EVFSCTUIZ },
  { 0, CODE_FOR_spe_evfsnabs, "__builtin_spe_evfsnabs", SPE_BUILTIN_EVFSNABS },
  { 0, CODE_FOR_spe_evfsneg, "__builtin_spe_evfsneg", SPE_BUILTIN_EVFSNEG },
  { 0, CODE_FOR_spe_evmra, "__builtin_spe_evmra", SPE_BUILTIN_EVMRA },
  { 0, CODE_FOR_negv2si2, "__builtin_spe_evneg", SPE_BUILTIN_EVNEG },
  { 0, CODE_FOR_spe_evrndw, "__builtin_spe_evrndw", SPE_BUILTIN_EVRNDW },
  { 0, CODE_FOR_spe_evsubfsmiaaw, "__builtin_spe_evsubfsmiaaw", SPE_BUILTIN_EVSUBFSMIAAW },
  { 0, CODE_FOR_spe_evsubfssiaaw, "__builtin_spe_evsubfssiaaw", SPE_BUILTIN_EVSUBFSSIAAW },
  { 0, CODE_FOR_spe_evsubfumiaaw, "__builtin_spe_evsubfumiaaw", SPE_BUILTIN_EVSUBFUMIAAW },

  /* Place-holder.  Leave as last unary SPE builtin.  */
  { 0, CODE_FOR_spe_evsubfusiaaw, "__builtin_spe_evsubfusiaaw", SPE_BUILTIN_EVSUBFUSIAAW },

  { 0, CODE_FOR_absv2sf2, "__builtin_paired_absv2sf2", PAIRED_BUILTIN_ABSV2SF2 },
  { 0, CODE_FOR_nabsv2sf2, "__builtin_paired_nabsv2sf2", PAIRED_BUILTIN_NABSV2SF2 },
  { 0, CODE_FOR_negv2sf2, "__builtin_paired_negv2sf2", PAIRED_BUILTIN_NEGV2SF2 },
  { 0, CODE_FOR_sqrtv2sf2, "__builtin_paired_sqrtv2sf2", PAIRED_BUILTIN_SQRTV2SF2 },
  { 0, CODE_FOR_resv2sf2, "__builtin_paired_resv2sf2", PAIRED_BUILTIN_RESV2SF2 }
};
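
/* Illustrative sketch, not part of this file: entries above with
   CODE_FOR_nothing name the *overloaded* forms (__builtin_vec_*).  They
   carry no insn pattern because the front end resolves them to a
   type-specific builtin before expansion; if one survives unresolved,
   altivec_expand_builtin below reports "unresolved overload".  Assumed
   user-level view, guarded out; compile separately with -maltivec.  */
#if 0
#include <altivec.h>

vector float
round_example (vector float v)
{
  /* vec_round is the overloaded __builtin_vec_round; for a V4SF operand
     it resolves to __builtin_altivec_vrfin (round to nearest).  */
  return vec_round (v);
}
#endif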
static rtx
rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vspltisb
      || icode == CODE_FOR_altivec_vspltish
      || icode == CODE_FOR_altivec_vspltisw
      || icode == CODE_FOR_spe_evsplatfi
      || icode == CODE_FOR_spe_evsplati)
    {
      /* Only allow 5-bit *signed* literals.  */
      if (GET_CODE (op0) != CONST_INT
	  || INTVAL (op0) > 15
	  || INTVAL (op0) < -16)
	{
	  error ("argument 1 must be a 5-bit signed literal");
	  return const0_rtx;
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
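
/* Illustrative sketch, not part of this file: the 5-bit signed-literal
   check above at the source level (assumed <altivec.h> interface;
   guarded out, compile separately with -maltivec).  */
#if 0
#include <altivec.h>

vector signed char
splat_example (void)
{
  vector signed char ok = vec_splat_s8 (15);  /* accepted: within -16..15 */
  /* vec_splat_s8 (16) would be rejected at compile time with
     "argument 1 must be a 5-bit signed literal", because vspltisb
     encodes only a 5-bit immediate.  */
  return ok;
}
#endif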
static rtx
altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch1, scratch2;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  rtx op0 = expand_normal (arg0);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);

  scratch1 = gen_reg_rtx (mode0);
  scratch2 = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vcfux
      || icode == CODE_FOR_altivec_vcfsx
      || icode == CODE_FOR_altivec_vctsxs
      || icode == CODE_FOR_altivec_vctuxs
      || icode == CODE_FOR_altivec_vspltb
      || icode == CODE_FOR_altivec_vsplth
      || icode == CODE_FOR_altivec_vspltw
      || icode == CODE_FOR_spe_evaddiw
      || icode == CODE_FOR_spe_evldd
      || icode == CODE_FOR_spe_evldh
      || icode == CODE_FOR_spe_evldw
      || icode == CODE_FOR_spe_evlhhesplat
      || icode == CODE_FOR_spe_evlhhossplat
      || icode == CODE_FOR_spe_evlhhousplat
      || icode == CODE_FOR_spe_evlwhe
      || icode == CODE_FOR_spe_evlwhos
      || icode == CODE_FOR_spe_evlwhou
      || icode == CODE_FOR_spe_evlwhsplat
      || icode == CODE_FOR_spe_evlwwsplat
      || icode == CODE_FOR_spe_evrlwi
      || icode == CODE_FOR_spe_evslwi
      || icode == CODE_FOR_spe_evsrwis
      || icode == CODE_FOR_spe_evsubifw
      || icode == CODE_FOR_spe_evsrwiu)
    {
      /* Only allow 5-bit unsigned literals.  */
      STRIP_NOPS (arg1);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~0x1f)
	{
	  error ("argument 2 must be a 5-bit unsigned literal");
	  return const0_rtx;
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (target, op0, op1);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
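
/* Illustrative sketch, not part of this file: the 5-bit unsigned-literal
   check above at the source level (assumed <altivec.h> interface;
   guarded out, compile separately with -maltivec).  */
#if 0
#include <altivec.h>

vector float
ctf_example (vector signed int v)
{
  /* The scale factor of vcfsx is a 5-bit immediate, so only literals
     0..31 pass the check above; a variable or out-of-range value draws
     "argument 2 must be a 5-bit unsigned literal".  */
  return vec_ctf (v, 1);  /* converts v scaled by 2**-1 */
}
#endif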
static rtx
altivec_expand_predicate_builtin (enum insn_code icode, const char *opcode,
				  tree exp, rtx target)
{
  rtx pat, scratch;
  tree cr6_form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode tmode = SImode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int cr6_form_int;

  if (TREE_CODE (cr6_form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_altivec_predicate must be a constant");
      return const0_rtx;
    }
  else
    cr6_form_int = TREE_INT_CST_LOW (cr6_form);

  gcc_assert (mode0 == mode1);

  /* If we have invalid arguments, bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (mode0);

  pat = GEN_FCN (icode) (scratch, op0, op1,
			 gen_rtx_SYMBOL_REF (Pmode, opcode));
  if (! pat)
    return 0;
  emit_insn (pat);

  /* The vec_any* and vec_all* predicates use the same opcodes for two
     different operations, but the bits in CR6 will be different
     depending on what information we want.  So we have to play tricks
     with CR6 to get the right bits out.

     If you think this is disgusting, look at the specs for the
     AltiVec predicates.  */

  switch (cr6_form_int)
    {
    case 0:
      emit_insn (gen_cr6_test_for_zero (target));
      break;
    case 1:
      emit_insn (gen_cr6_test_for_zero_reverse (target));
      break;
    case 2:
      emit_insn (gen_cr6_test_for_lt (target));
      break;
    case 3:
      emit_insn (gen_cr6_test_for_lt_reverse (target));
      break;
    default:
      error ("argument 1 of __builtin_altivec_predicate is out of range");
      break;
    }

  return target;
}
static rtx
paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = Pmode;
  enum machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  if (op0 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op1);
    }
  else
    {
      op0 = copy_to_mode_reg (mode0, op0);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
    }

  pat = GEN_FCN (icode) (target, addr);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
{
  rtx pat, addr;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = Pmode;
  enum machine_mode mode1 = Pmode;
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  op1 = copy_to_mode_reg (mode1, op1);

  if (op0 == const0_rtx)
    {
      addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
    }
  else
    {
      op0 = copy_to_mode_reg (mode0, op0);
      addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
    }

  pat = GEN_FCN (icode) (target, addr);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
static rtx
spe_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat;
  enum machine_mode mode0 = insn_data[icode].operand[0].mode;
  enum machine_mode mode1 = insn_data[icode].operand[1].mode;
  enum machine_mode mode2 = insn_data[icode].operand[2].mode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
    op0 = copy_to_mode_reg (mode2, op0);
  if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
    op1 = copy_to_mode_reg (mode0, op1);
  if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
    op2 = copy_to_mode_reg (mode1, op2);

  pat = GEN_FCN (icode) (op1, op2, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
static rtx
paired_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat, addr;
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode1 = Pmode;
  enum machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
    op0 = copy_to_mode_reg (tmode, op0);

  op2 = copy_to_mode_reg (mode2, op2);

  if (op1 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op2);
    }
  else
    {
      op1 = copy_to_mode_reg (mode1, op1);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
    }

  pat = GEN_FCN (icode) (addr, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
static rtx
altivec_expand_stv_builtin (enum insn_code icode, tree exp)
{
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx pat, addr;
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode1 = Pmode;
  enum machine_mode mode2 = Pmode;

  /* Invalid arguments.  Bail before doing anything stoopid!  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
    op0 = copy_to_mode_reg (tmode, op0);

  op2 = copy_to_mode_reg (mode2, op2);

  if (op1 == const0_rtx)
    {
      addr = gen_rtx_MEM (tmode, op2);
    }
  else
    {
      op1 = copy_to_mode_reg (mode1, op1);
      addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
    }

  pat = GEN_FCN (icode) (addr, op0);
  if (pat)
    emit_insn (pat);
  return NULL_RTX;
}
static rtx
rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  enum machine_mode tmode = insn_data[icode].operand[0].mode;
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  enum machine_mode mode2 = insn_data[icode].operand[3].mode;

  if (icode == CODE_FOR_nothing)
    /* Builtin not supported on this processor.  */
    return 0;

  /* If we got invalid arguments bail out before generating bad rtl.  */
  if (arg0 == error_mark_node
      || arg1 == error_mark_node
      || arg2 == error_mark_node)
    return const0_rtx;

  if (icode == CODE_FOR_altivec_vsldoi_v4sf
      || icode == CODE_FOR_altivec_vsldoi_v4si
      || icode == CODE_FOR_altivec_vsldoi_v8hi
      || icode == CODE_FOR_altivec_vsldoi_v16qi)
    {
      /* Only allow 4-bit unsigned literals.  */
      STRIP_NOPS (arg2);
      if (TREE_CODE (arg2) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg2) & ~0xf)
	{
	  error ("argument 3 must be a 4-bit unsigned literal");
	  return const0_rtx;
	}
    }

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);
  if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
    op2 = copy_to_mode_reg (mode2, op2);

  if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
    pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
  else
    pat = GEN_FCN (icode) (target, op0, op1, op2);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
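
/* Illustrative sketch, not part of this file: the 4-bit literal check
   above at the source level (assumed <altivec.h> interface; guarded
   out, compile separately with -maltivec).  */
#if 0
#include <altivec.h>

vector unsigned char
shift_example (vector unsigned char a, vector unsigned char b)
{
  /* vsldoi encodes its byte count in 4 bits, so only literals 0..15
     pass; anything else draws "argument 3 must be a 4-bit unsigned
     literal".  */
  return vec_sld (a, b, 3);
}
#endif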
/* Expand the lvx builtins.  */
static rtx
altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0;
  enum machine_mode tmode, mode0;
  rtx pat, op0;
  enum insn_code icode;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
      icode = CODE_FOR_altivec_lvx_v16qi;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
      icode = CODE_FOR_altivec_lvx_v8hi;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
      icode = CODE_FOR_altivec_lvx_v4si;
      break;
    case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
      icode = CODE_FOR_altivec_lvx_v4sf;
      break;
    default:
      *expandedp = false;
      return NULL_RTX;
    }

  *expandedp = true;

  arg0 = CALL_EXPR_ARG (exp, 0);
  op0 = expand_normal (arg0);
  tmode = insn_data[icode].operand[0].mode;
  mode0 = insn_data[icode].operand[1].mode;

  if (target == 0
      || GET_MODE (target) != tmode
      || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
    target = gen_reg_rtx (tmode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));

  pat = GEN_FCN (icode) (target, op0);
  if (! pat)
    return 0;
  emit_insn (pat);

  return target;
}
/* Expand the stvx builtins.  */
static rtx
altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
			   bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1;
  enum machine_mode mode0, mode1;
  rtx pat, op0, op1;
  enum insn_code icode;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
      icode = CODE_FOR_altivec_stvx_v16qi;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
      icode = CODE_FOR_altivec_stvx_v8hi;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
      icode = CODE_FOR_altivec_stvx_v4si;
      break;
    case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
      icode = CODE_FOR_altivec_stvx_v4sf;
      break;
    default:
      *expandedp = false;
      return NULL_RTX;
    }

  *expandedp = true;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  op0 = expand_normal (arg0);
  op1 = expand_normal (arg1);
  mode0 = insn_data[icode].operand[0].mode;
  mode1 = insn_data[icode].operand[1].mode;

  if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
    op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  pat = GEN_FCN (icode) (op0, op1);
  if (pat)
    emit_insn (pat);

  return NULL_RTX;
}
/* Expand the dst builtins.  */
static rtx
altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
			    bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  tree arg0, arg1, arg2;
  enum machine_mode mode0, mode1, mode2;
  rtx pat, op0, op1, op2;
  const struct builtin_description *d;
  size_t i;

  *expandedp = false;

  /* Handle DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    if (d->code == fcode)
      {
	arg0 = CALL_EXPR_ARG (exp, 0);
	arg1 = CALL_EXPR_ARG (exp, 1);
	arg2 = CALL_EXPR_ARG (exp, 2);
	op0 = expand_normal (arg0);
	op1 = expand_normal (arg1);
	op2 = expand_normal (arg2);
	mode0 = insn_data[d->icode].operand[0].mode;
	mode1 = insn_data[d->icode].operand[1].mode;
	mode2 = insn_data[d->icode].operand[2].mode;

	/* Invalid arguments, bail out before generating bad rtl.  */
	if (arg0 == error_mark_node
	    || arg1 == error_mark_node
	    || arg2 == error_mark_node)
	  return const0_rtx;

	*expandedp = true;
	STRIP_NOPS (arg2);
	if (TREE_CODE (arg2) != INTEGER_CST
	    || TREE_INT_CST_LOW (arg2) & ~0x3)
	  {
	    error ("argument to %qs must be a 2-bit unsigned literal", d->name);
	    return const0_rtx;
	  }

	if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
	  op0 = copy_to_mode_reg (Pmode, op0);
	if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
	  op1 = copy_to_mode_reg (mode1, op1);

	pat = GEN_FCN (d->icode) (op0, op1, op2);
	if (pat != 0)
	  emit_insn (pat);

	return NULL_RTX;
      }

  return NULL_RTX;
}
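
/* Illustrative sketch, not part of this file: the 2-bit tag check above
   at the source level.  The control-word value is an arbitrary example;
   assumed <altivec.h> interface, guarded out, compile separately with
   -maltivec.  */
#if 0
#include <altivec.h>

void
prefetch_example (const int *p)
{
  /* The last argument selects one of four data streams (tags 0..3) and
     must be a 2-bit literal, per the check above; the middle argument
     is the stream control word.  */
  vec_dst (p, 0x10010100, 0);
}
#endif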
/* Expand vec_init builtin.  */
static rtx
altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
{
  enum machine_mode tmode = TYPE_MODE (type);
  enum machine_mode inner_mode = GET_MODE_INNER (tmode);
  int i, n_elt = GET_MODE_NUNITS (tmode);
  rtvec v = rtvec_alloc (n_elt);

  gcc_assert (VECTOR_MODE_P (tmode));
  gcc_assert (n_elt == call_expr_nargs (exp));

  for (i = 0; i < n_elt; ++i)
    {
      rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
      RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
    }

  if (!target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
  return target;
}
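
/* Illustrative sketch, not part of this file: one source-level form of
   vector construction.  That constructors of this shape route through
   the VEC_INIT builtins and reach the function above is an assumption
   about the front end; guarded out, compile separately with
   -maltivec.  */
#if 0
#include <altivec.h>

vector int
init_example (int a, int b, int c, int d)
{
  /* One scalar per element; each element is loaded into the vector
     register, as done by rs6000_expand_vector_init.  */
  return (vector int){ a, b, c, d };
}
#endif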
/* Return the integer constant in ARG.  Constrain it to be in the range
   of the subparts of VEC_TYPE; issue an error if not.  */

static int
get_element_number (tree vec_type, tree arg)
{
  unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;

  if (!host_integerp (arg, 1)
      || (elt = tree_low_cst (arg, 1), elt > max))
    {
      error ("selector must be an integer constant in the range 0..%wi", max);
      return 0;
    }

  return elt;
}
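
/* Illustrative sketch, not part of this file: the selector check above
   at the source level.  The user-facing spelling of the element-extract
   builtin is an assumption; guarded out, compile separately with
   -maltivec.  */
#if 0
#include <altivec.h>

int
extract_example (vector int v)
{
  /* For V4SI the selector must be a constant in 0..3; anything else
     draws "selector must be an integer constant in the range 0..3".  */
  return __builtin_vec_ext_v4si (v, 2);
}
#endif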
/* Expand vec_set builtin.  */
static rtx
altivec_expand_vec_set_builtin (tree exp)
{
  enum machine_mode tmode, mode1;
  tree arg0, arg1, arg2;
  int elt;
  rtx op0, op1;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);
  arg2 = CALL_EXPR_ARG (exp, 2);

  tmode = TYPE_MODE (TREE_TYPE (arg0));
  mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  gcc_assert (VECTOR_MODE_P (tmode));

  op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
  op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
  elt = get_element_number (TREE_TYPE (arg0), arg2);

  if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
    op1 = convert_modes (mode1, GET_MODE (op1), op1, true);

  op0 = force_reg (tmode, op0);
  op1 = force_reg (mode1, op1);

  rs6000_expand_vector_set (op0, op1, elt);

  return op0;
}
/* Expand vec_ext builtin.  */
static rtx
altivec_expand_vec_ext_builtin (tree exp, rtx target)
{
  enum machine_mode tmode, mode0;
  tree arg0, arg1;
  int elt;
  rtx op0;

  arg0 = CALL_EXPR_ARG (exp, 0);
  arg1 = CALL_EXPR_ARG (exp, 1);

  op0 = expand_normal (arg0);
  elt = get_element_number (TREE_TYPE (arg0), arg1);

  tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
  mode0 = TYPE_MODE (TREE_TYPE (arg0));
  gcc_assert (VECTOR_MODE_P (mode0));

  op0 = force_reg (mode0, op0);

  if (optimize || !target || !register_operand (target, tmode))
    target = gen_reg_rtx (tmode);

  rs6000_expand_vector_extract (target, op0, elt);

  return target;
}
8600 /* Expand the builtin in EXP and store the result in TARGET. Store
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  const struct builtin_description *d;
  const struct builtin_description_predicates *dp;
  size_t i;
  enum insn_code icode;
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg0;
  rtx op0, pat;
  enum machine_mode tmode, mode0;
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);

  if (fcode >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
      && fcode <= ALTIVEC_BUILTIN_OVERLOADED_LAST)
    {
      *expandedp = true;
      error ("unresolved overload for Altivec builtin %qF", fndecl);
      return const0_rtx;
    }

  target = altivec_expand_ld_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  target = altivec_expand_st_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  target = altivec_expand_dst_builtin (exp, target, expandedp);
  if (*expandedp)
    return target;

  *expandedp = true;

  switch (fcode)
    {
    case ALTIVEC_BUILTIN_STVX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx, exp);
    case ALTIVEC_BUILTIN_STVEBX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
    case ALTIVEC_BUILTIN_STVEHX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
    case ALTIVEC_BUILTIN_STVEWX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
    case ALTIVEC_BUILTIN_STVXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl, exp);

    case ALTIVEC_BUILTIN_STVLX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
    case ALTIVEC_BUILTIN_STVLXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
    case ALTIVEC_BUILTIN_STVRX:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
    case ALTIVEC_BUILTIN_STVRXL:
      return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);

    case ALTIVEC_BUILTIN_MFVSCR:
      icode = CODE_FOR_altivec_mfvscr;
      tmode = insn_data[icode].operand[0].mode;

      if (target == 0
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);

      pat = GEN_FCN (icode) (target);
      if (! pat)
	return 0;
      emit_insn (pat);
      return target;

    case ALTIVEC_BUILTIN_MTVSCR:
      icode = CODE_FOR_altivec_mtvscr;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      /* If we got invalid arguments bail out before generating bad rtl.  */
      if (arg0 == error_mark_node)
	return const0_rtx;

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      pat = GEN_FCN (icode) (op0);
      if (pat)
	emit_insn (pat);
      return NULL_RTX;

    case ALTIVEC_BUILTIN_DSSALL:
      emit_insn (gen_altivec_dssall ());
      return NULL_RTX;

    case ALTIVEC_BUILTIN_DSS:
      icode = CODE_FOR_altivec_dss;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      /* If we got invalid arguments bail out before generating bad rtl.  */
      if (arg0 == error_mark_node)
	return const0_rtx;

      if (TREE_CODE (arg0) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg0) & ~0x3)
	{
	  error ("argument to dss must be a 2-bit unsigned literal");
	  return const0_rtx;
	}

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      emit_insn (gen_altivec_dss (op0));
      return NULL_RTX;

    case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
    case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
    case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
    case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
      return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);

    case ALTIVEC_BUILTIN_VEC_SET_V4SI:
    case ALTIVEC_BUILTIN_VEC_SET_V8HI:
    case ALTIVEC_BUILTIN_VEC_SET_V16QI:
    case ALTIVEC_BUILTIN_VEC_SET_V4SF:
      return altivec_expand_vec_set_builtin (exp);

    case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
    case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
    case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
    case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
      return altivec_expand_vec_ext_builtin (exp, target);

    default:
      break;
    }

  /* Expand abs* operations.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    if (d->code == fcode)
      return altivec_expand_abs_builtin (d->icode, exp, target);

  /* Expand the AltiVec predicates.  */
  dp = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, dp++)
    if (dp->code == fcode)
      return altivec_expand_predicate_builtin (dp->icode, dp->opcode,
					       exp, target);

  /* LV* are funky.  We initialized them differently.  */
  switch (fcode)
    {
    case ALTIVEC_BUILTIN_LVSL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVSR:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEBX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEHX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVEWX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx,
					exp, target, false);
    case ALTIVEC_BUILTIN_LVLX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVLXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVRX:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
					exp, target, true);
    case ALTIVEC_BUILTIN_LVRXL:
      return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
					exp, target, true);
    default:
      break;
    }

  *expandedp = false;
  return NULL_RTX;
}
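/* A usage sketch for the expander above (user-level view): the DSS
   case checks its operand before any rtl is generated, so

       __builtin_altivec_dss (3);    => expands to a dss insn
       __builtin_altivec_dss (4);    => "argument to dss must be a
					2-bit unsigned literal"

   and the check at the top rejects any vec_* overload that is still
   unresolved at expansion time with "unresolved overload for Altivec
   builtin".  */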
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.  */
static rtx
paired_expand_builtin (tree exp, rtx target, bool * expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;

  *expandedp = true;

  switch (fcode)
    {
    case PAIRED_BUILTIN_STX:
      return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
    case PAIRED_BUILTIN_LX:
      return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
    default:
      break;
    }

  /* Expand the paired predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
    if (d->code == fcode)
      return paired_expand_predicate_builtin (d->icode, exp, target);

  *expandedp = false;
  return NULL_RTX;
}
/* Binops that need to be initialized manually, but can be expanded
   automagically by rs6000_expand_binop_builtin.  */
static struct builtin_description bdesc_2arg_spe[] =
{
  { 0, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
  { 0, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
  { 0, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
  { 0, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
  { 0, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
  { 0, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
  { 0, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
  { 0, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
  { 0, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
  { 0, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
  { 0, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
  { 0, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
  { 0, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
  { 0, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
  { 0, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
  { 0, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
  { 0, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
  { 0, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
  { 0, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
  { 0, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
  { 0, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
  { 0, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
};
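/* A sketch of how these entries are consumed (see spe_expand_builtin
   below): a source-level call such as

       __ev64_opaque__ v = __builtin_spe_evlddx (p, off);

   matches SPE_BUILTIN_EVLDDX in the bdesc_2arg_spe scan and is
   expanded through rs6000_expand_binop_builtin with
   CODE_FOR_spe_evlddx; the table supplies only the mapping from
   builtin code to insn code.  */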
/* Expand the builtin in EXP and store the result in TARGET.  Store
   true in *EXPANDEDP if we found a builtin to expand.

   This expands the SPE builtins that are not simple unary and binary
   operations.  */
static rtx
spe_expand_builtin (tree exp, rtx target, bool *expandedp)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  tree arg1, arg0;
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  enum insn_code icode;
  enum machine_mode tmode, mode0;
  rtx pat, op0;
  struct builtin_description *d;
  size_t i;

  *expandedp = true;

  /* Syntax check for a 5-bit unsigned immediate.  */
  switch (fcode)
    {
    case SPE_BUILTIN_EVSTDD:
    case SPE_BUILTIN_EVSTDH:
    case SPE_BUILTIN_EVSTDW:
    case SPE_BUILTIN_EVSTWHE:
    case SPE_BUILTIN_EVSTWHO:
    case SPE_BUILTIN_EVSTWWE:
    case SPE_BUILTIN_EVSTWWO:
      arg1 = CALL_EXPR_ARG (exp, 2);
      if (TREE_CODE (arg1) != INTEGER_CST
	  || TREE_INT_CST_LOW (arg1) & ~0x1f)
	{
	  error ("argument 2 must be a 5-bit unsigned literal");
	  return const0_rtx;
	}
      break;
    default:
      break;
    }

  /* The evsplat*i instructions are not quite generic.  */
  switch (fcode)
    {
    case SPE_BUILTIN_EVSPLATFI:
      return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
					 exp, target);
    case SPE_BUILTIN_EVSPLATI:
      return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
					 exp, target);
    default:
      break;
    }

  d = (struct builtin_description *) bdesc_2arg_spe;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (d->icode, exp, target);

  d = (struct builtin_description *) bdesc_spe_predicates;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
    if (d->code == fcode)
      return spe_expand_predicate_builtin (d->icode, exp, target);

  d = (struct builtin_description *) bdesc_spe_evsel;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
    if (d->code == fcode)
      return spe_expand_evsel_builtin (d->icode, exp, target);

  switch (fcode)
    {
    case SPE_BUILTIN_EVSTDDX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
    case SPE_BUILTIN_EVSTDHX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
    case SPE_BUILTIN_EVSTDWX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
    case SPE_BUILTIN_EVSTWHEX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
    case SPE_BUILTIN_EVSTWHOX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
    case SPE_BUILTIN_EVSTWWEX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
    case SPE_BUILTIN_EVSTWWOX:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
    case SPE_BUILTIN_EVSTDD:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
    case SPE_BUILTIN_EVSTDH:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
    case SPE_BUILTIN_EVSTDW:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
    case SPE_BUILTIN_EVSTWHE:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
    case SPE_BUILTIN_EVSTWHO:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
    case SPE_BUILTIN_EVSTWWE:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
    case SPE_BUILTIN_EVSTWWO:
      return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);

    case SPE_BUILTIN_MFSPEFSCR:
      icode = CODE_FOR_spe_mfspefscr;
      tmode = insn_data[icode].operand[0].mode;

      if (target == 0
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);

      pat = GEN_FCN (icode) (target);
      if (! pat)
	return 0;
      emit_insn (pat);
      return target;

    case SPE_BUILTIN_MTSPEFSCR:
      icode = CODE_FOR_spe_mtspefscr;
      arg0 = CALL_EXPR_ARG (exp, 0);
      op0 = expand_normal (arg0);
      mode0 = insn_data[icode].operand[0].mode;

      if (arg0 == error_mark_node)
	return const0_rtx;

      if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
	op0 = copy_to_mode_reg (mode0, op0);

      pat = GEN_FCN (icode) (op0);
      if (pat)
	emit_insn (pat);
      return NULL_RTX;
    default:
      break;
    }

  *expandedp = false;
  return NULL_RTX;
}
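/* A usage sketch for the 5-bit immediate check above: the third
   operand of the non-indexed store builtins is an offset that must
   fit in 5 bits, so

       __builtin_spe_evstdd (v, p, 31);   => accepted
       __builtin_spe_evstdd (v, p, 32);   => "argument 2 must be a
					     5-bit unsigned literal"

   (the check reads CALL_EXPR_ARG (exp, 2), i.e. the third operand).  */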
static rtx
paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_paired_predicate must be a constant");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || !(*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);
  if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCFPmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (!pat)
    return const0_rtx;

  emit_insn (pat);

  switch (form_int)
    {
      /* LT bit.  */
    case 0:
      code = LT;
      break;
      /* GT bit.  */
    case 1:
      code = GT;
      break;
      /* EQ bit.  */
    case 2:
      code = EQ;
      break;
      /* UN bit.  */
    case 3:
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
    default:
      error ("argument 1 of __builtin_paired_predicate is out of range");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);
  return target;
}
static rtx
spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch, tmp;
  tree form = CALL_EXPR_ARG (exp, 0);
  tree arg0 = CALL_EXPR_ARG (exp, 1);
  tree arg1 = CALL_EXPR_ARG (exp, 2);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;
  int form_int;
  enum rtx_code code;

  if (TREE_CODE (form) != INTEGER_CST)
    {
      error ("argument 1 of __builtin_spe_predicate must be a constant");
      return const0_rtx;
    }
  else
    form_int = TREE_INT_CST_LOW (form);

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != SImode
      || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
    target = gen_reg_rtx (SImode);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode1, op1);

  scratch = gen_reg_rtx (CCmode);

  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  /* There are 4 variants for each predicate: _any_, _all_, _upper_,
     _lower_.  We use one compare, but look in different bits of the
     CR for each variant.

     There are 2 elements in each SPE simd type (upper/lower).  The CR
     bits are set as follows:

     BIT0  | BIT 1  | BIT 2   | BIT 3
     U     |   L    | (U | L) | (U & L)

     So, for an "all" relationship, BIT 3 would be set.
     For an "any" relationship, BIT 2 would be set.  Etc.

     Following traditional nomenclature, these bits map to:

     BIT0  | BIT 1  | BIT 2   | BIT 3
     LT    | GT     | EQ      | OV

     Later, we will generate rtl to look in the LT/EQ/EQ/OV bits.
  */

  switch (form_int)
    {
      /* All variant.  OV bit.  */
    case 0:
      /* We need to get to the OV bit, which is the ORDERED bit.  We
	 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
	 that's ugly and will make validate_condition_mode die.
	 So let's just use another pattern.  */
      emit_insn (gen_move_from_CR_ov_bit (target, scratch));
      return target;
      /* Any variant.  EQ bit.  */
    case 1:
      code = EQ;
      break;
      /* Upper variant.  LT bit.  */
    case 2:
      code = LT;
      break;
      /* Lower variant.  GT bit.  */
    case 3:
      code = GT;
      break;
    default:
      error ("argument 1 of __builtin_spe_predicate is out of range");
      return const0_rtx;
    }

  tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
  emit_move_insn (target, tmp);

  return target;
}
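/* A usage sketch for the form operand decoded above: with the
   mapping 0 = all (OV), 1 = any (EQ), 2 = upper (LT), 3 = lower (GT),
   a predicate builtin registered from bdesc_spe_predicates, e.g.
   __builtin_spe_evcmpgts, reads as

       int all_gt = __builtin_spe_evcmpgts (0, a, b);
       int any_gt = __builtin_spe_evcmpgts (1, a, b);

   Both calls emit the same compare; only the CR bit copied into the
   integer result differs.  */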
/* The evsel builtins look like this:

     e = __builtin_spe_evsel_OP (a, b, c, d);

   and work like this:

     e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
     e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
*/

static rtx
spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
{
  rtx pat, scratch;
  tree arg0 = CALL_EXPR_ARG (exp, 0);
  tree arg1 = CALL_EXPR_ARG (exp, 1);
  tree arg2 = CALL_EXPR_ARG (exp, 2);
  tree arg3 = CALL_EXPR_ARG (exp, 3);
  rtx op0 = expand_normal (arg0);
  rtx op1 = expand_normal (arg1);
  rtx op2 = expand_normal (arg2);
  rtx op3 = expand_normal (arg3);
  enum machine_mode mode0 = insn_data[icode].operand[1].mode;
  enum machine_mode mode1 = insn_data[icode].operand[2].mode;

  gcc_assert (mode0 == mode1);

  if (arg0 == error_mark_node || arg1 == error_mark_node
      || arg2 == error_mark_node || arg3 == error_mark_node)
    return const0_rtx;

  if (target == 0
      || GET_MODE (target) != mode0
      || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
    target = gen_reg_rtx (mode0);

  if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
    op0 = copy_to_mode_reg (mode0, op0);
  if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
    op1 = copy_to_mode_reg (mode0, op1);
  if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
    op2 = copy_to_mode_reg (mode0, op2);
  if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
    op3 = copy_to_mode_reg (mode0, op3);

  /* Generate the compare.  */
  scratch = gen_reg_rtx (CCmode);
  pat = GEN_FCN (icode) (scratch, op0, op1);
  if (! pat)
    return const0_rtx;
  emit_insn (pat);

  if (mode0 == V2SImode)
    emit_insn (gen_spe_evsel (target, op2, op3, scratch));
  else
    emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));

  return target;
}
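/* A concrete instance of the evsel pattern documented above, using a
   signed greater-than variant (a name along the lines of
   __builtin_spe_evsel_gts, per the entries in bdesc_spe_evsel):

       __ev64_opaque__ e = __builtin_spe_evsel_gts (a, b, c, d);

   This emits one compare into a CC scratch register followed by one
   evsel (or evsel_fs for the float variants) selecting between c and
   d element-wise.  */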
/* Expand an expression EXP that calls a built-in function,
   with result going to TARGET if that's convenient
   (and in mode MODE if that's convenient).
   SUBTARGET may be used as the target for computing one of EXP's operands.
   IGNORE is nonzero if the value is to be ignored.  */

static rtx
rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
		       enum machine_mode mode ATTRIBUTE_UNUSED,
		       int ignore ATTRIBUTE_UNUSED)
{
  tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
  unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
  const struct builtin_description *d;
  size_t i;
  rtx ret;
  bool success;

  if (fcode == RS6000_BUILTIN_RECIP)
    return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);

  if (fcode == RS6000_BUILTIN_RECIPF)
    return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);

  if (fcode == RS6000_BUILTIN_RSQRTF)
    return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);

  if (fcode == ALTIVEC_BUILTIN_MASK_FOR_LOAD
      || fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
    {
      int icode = (int) CODE_FOR_altivec_lvsr;
      enum machine_mode tmode = insn_data[icode].operand[0].mode;
      enum machine_mode mode = insn_data[icode].operand[1].mode;
      tree arg;
      rtx op, addr, pat;

      gcc_assert (TARGET_ALTIVEC);

      arg = CALL_EXPR_ARG (exp, 0);
      gcc_assert (TREE_CODE (TREE_TYPE (arg)) == POINTER_TYPE);
      op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
      addr = memory_address (mode, op);
      if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
	op = addr;
      else
	{
	  /* For the load case need to negate the address.  */
	  op = gen_reg_rtx (GET_MODE (addr));
	  emit_insn (gen_rtx_SET (VOIDmode, op,
				  gen_rtx_NEG (GET_MODE (addr), addr)));
	}
      op = gen_rtx_MEM (mode, op);

      if (target == 0
	  || GET_MODE (target) != tmode
	  || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
	target = gen_reg_rtx (tmode);

      /*pat = gen_altivec_lvsr (target, op);*/
      pat = GEN_FCN (icode) (target, op);
      if (!pat)
	return 0;
      emit_insn (pat);

      return target;
    }

  /* FIXME: There's got to be a nicer way to handle this case than
     constructing a new CALL_EXPR.  */
  if (fcode == ALTIVEC_BUILTIN_VCFUX
      || fcode == ALTIVEC_BUILTIN_VCFSX
      || fcode == ALTIVEC_BUILTIN_VCTUXS
      || fcode == ALTIVEC_BUILTIN_VCTSXS)
    {
      if (call_expr_nargs (exp) == 1)
	exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
			       2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
    }

  if (TARGET_ALTIVEC)
    {
      ret = altivec_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }
  if (TARGET_SPE)
    {
      ret = spe_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }
  if (TARGET_PAIRED_FLOAT)
    {
      ret = paired_expand_builtin (exp, target, &success);

      if (success)
	return ret;
    }

  gcc_assert (TARGET_ALTIVEC || TARGET_SPE || TARGET_PAIRED_FLOAT);

  /* Handle simple unary operations.  */
  d = (struct builtin_description *) bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_unop_builtin (d->icode, exp, target);

  /* Handle simple binary operations.  */
  d = (struct builtin_description *) bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_binop_builtin (d->icode, exp, target);

  /* Handle simple ternary operations.  */
  d = (struct builtin_description *) bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    if (d->code == fcode)
      return rs6000_expand_ternop_builtin (d->icode, exp, target);

  gcc_unreachable ();
}
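/* A usage sketch for the VCFUX/VCFSX/VCTUXS/VCTSXS fixup above: those
   builtins take a vector and a scale, but the scale may be omitted at
   the source level, so a call such as

       __builtin_altivec_vcfsx (v);

   is rewritten here into the equivalent of

       __builtin_altivec_vcfsx (v, 0);

   by building a new two-argument CALL_EXPR before the normal
   expansion paths run.  */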
static void
rs6000_init_builtins (void)
{
  tree tdecl;

  V2SI_type_node = build_vector_type (intSI_type_node, 2);
  V2SF_type_node = build_vector_type (float_type_node, 2);
  V4HI_type_node = build_vector_type (intHI_type_node, 4);
  V4SI_type_node = build_vector_type (intSI_type_node, 4);
  V4SF_type_node = build_vector_type (float_type_node, 4);
  V8HI_type_node = build_vector_type (intHI_type_node, 8);
  V16QI_type_node = build_vector_type (intQI_type_node, 16);

  unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
  unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
  unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);

  opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
  opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
  opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
  opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);

  /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
     types, especially in C++ land.  Similarly, 'vector pixel' is distinct from
     'vector unsigned short'.  */
  bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
  bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
  bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
  pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);

  long_integer_type_internal_node = long_integer_type_node;
  long_unsigned_type_internal_node = long_unsigned_type_node;
  intQI_type_internal_node = intQI_type_node;
  uintQI_type_internal_node = unsigned_intQI_type_node;
  intHI_type_internal_node = intHI_type_node;
  uintHI_type_internal_node = unsigned_intHI_type_node;
  intSI_type_internal_node = intSI_type_node;
  uintSI_type_internal_node = unsigned_intSI_type_node;
  float_type_internal_node = float_type_node;
  void_type_internal_node = void_type_node;

  tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
		      get_identifier ("__bool char"),
		      bool_char_type_node);
  TYPE_NAME (bool_char_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
		      get_identifier ("__bool short"),
		      bool_short_type_node);
  TYPE_NAME (bool_short_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
		      get_identifier ("__bool int"),
		      bool_int_type_node);
  TYPE_NAME (bool_int_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL, get_identifier ("__pixel"),
		      pixel_type_node);
  TYPE_NAME (pixel_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);

  bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
  bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
  bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
  pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);

  tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
		      get_identifier ("__vector unsigned char"),
		      unsigned_V16QI_type_node);
  TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  tdecl = build_decl (BUILTINS_LOCATION,
		      TYPE_DECL, get_identifier ("__vector signed char"),
		      V16QI_type_node);
  TYPE_NAME (V16QI_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  tdecl = build_decl (BUILTINS_LOCATION,
		      TYPE_DECL, get_identifier ("__vector __bool char"),
		      bool_V16QI_type_node);
  TYPE_NAME (bool_V16QI_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);

  tdecl = build_decl (BUILTINS_LOCATION,
		      TYPE_DECL, get_identifier ("__vector unsigned short"),
		      unsigned_V8HI_type_node);
  TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  tdecl = build_decl (BUILTINS_LOCATION,
		      TYPE_DECL, get_identifier ("__vector signed short"),
		      V8HI_type_node);
  TYPE_NAME (V8HI_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
		      get_identifier ("__vector __bool short"),
		      bool_V8HI_type_node);
  TYPE_NAME (bool_V8HI_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);

  tdecl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
		      get_identifier ("__vector unsigned int"),
		      unsigned_V4SI_type_node);
  TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  tdecl = build_decl (BUILTINS_LOCATION,
		      TYPE_DECL, get_identifier ("__vector signed int"),
		      V4SI_type_node);
  TYPE_NAME (V4SI_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  tdecl = build_decl (BUILTINS_LOCATION,
		      TYPE_DECL, get_identifier ("__vector __bool int"),
		      bool_V4SI_type_node);
  TYPE_NAME (bool_V4SI_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);

  tdecl = build_decl (BUILTINS_LOCATION,
		      TYPE_DECL, get_identifier ("__vector float"),
		      V4SF_type_node);
  TYPE_NAME (V4SF_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);
  tdecl = build_decl (BUILTINS_LOCATION,
		      TYPE_DECL, get_identifier ("__vector __pixel"),
		      pixel_V8HI_type_node);
  TYPE_NAME (pixel_V8HI_type_node) = tdecl;
  (*lang_hooks.decls.pushdecl) (tdecl);

  if (TARGET_PAIRED_FLOAT)
    paired_init_builtins ();
  if (TARGET_SPE)
    spe_init_builtins ();
  if (TARGET_ALTIVEC)
    altivec_init_builtins ();
  if (TARGET_ALTIVEC || TARGET_SPE || TARGET_PAIRED_FLOAT)
    rs6000_common_init_builtins ();
  if (TARGET_PPC_GFXOPT)
    {
      tree ftype = build_function_type_list (float_type_node,
					     float_type_node,
					     float_type_node,
					     NULL_TREE);
      def_builtin (MASK_PPC_GFXOPT, "__builtin_recipdivf", ftype,
		   RS6000_BUILTIN_RECIPF);

      ftype = build_function_type_list (float_type_node,
					float_type_node,
					NULL_TREE);
      def_builtin (MASK_PPC_GFXOPT, "__builtin_rsqrtf", ftype,
		   RS6000_BUILTIN_RSQRTF);
    }
  if (TARGET_POPCNTB)
    {
      tree ftype = build_function_type_list (double_type_node,
					     double_type_node,
					     double_type_node,
					     NULL_TREE);
      def_builtin (MASK_POPCNTB, "__builtin_recipdiv", ftype,
		   RS6000_BUILTIN_RECIP);
    }

#if TARGET_XCOFF
  /* AIX libm provides clog as __clog.  */
  if (built_in_decls [BUILT_IN_CLOG])
    set_user_assembler_name (built_in_decls [BUILT_IN_CLOG], "__clog");
#endif

#ifdef SUBTARGET_INIT_BUILTINS
  SUBTARGET_INIT_BUILTINS;
#endif
}
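/* Effect of the type registrations above, at the source level: the
   pushdecl'd names let AltiVec code spell the GCC-internal vector
   types directly, e.g.

       __vector unsigned char perm;   => unsigned_V16QI_type_node
       __vector __bool int m;         => bool_V4SI_type_node
       __vector __pixel px;           => pixel_V8HI_type_node

   and because the __bool and __pixel nodes are distinct type copies,
   C++ overload resolution keeps them apart from the plain unsigned
   vector types.  */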
/* Search through a set of builtins and enable the mask bits.
   DESC is an array of builtins.
   SIZE is the total number of builtins.
   START is the builtin enum at which to start.
   END is the builtin enum at which to end.  */
static void
enable_mask_for_builtins (struct builtin_description *desc, int size,
			  enum rs6000_builtins start,
			  enum rs6000_builtins end)
{
  int i;

  for (i = 0; i < size; ++i)
    if (desc[i].code == start)
      break;

  if (i == size)
    return;

  for (; i < size; ++i)
    {
      /* Flip all the bits on.  */
      desc[i].mask = target_flags;
      if (desc[i].code == end)
	break;
    }
}
static void
spe_init_builtins (void)
{
  tree endlink = void_list_node;
  tree puint_type_node = build_pointer_type (unsigned_type_node);
  tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
  struct builtin_description *d;
  size_t i;

  tree v2si_ftype_4_v2si
    = build_function_type
    (opaque_V2SI_type_node,
     tree_cons (NULL_TREE, opaque_V2SI_type_node,
		tree_cons (NULL_TREE, opaque_V2SI_type_node,
			   tree_cons (NULL_TREE, opaque_V2SI_type_node,
				      tree_cons (NULL_TREE, opaque_V2SI_type_node,
						 endlink)))));

  tree v2sf_ftype_4_v2sf
    = build_function_type
    (opaque_V2SF_type_node,
     tree_cons (NULL_TREE, opaque_V2SF_type_node,
		tree_cons (NULL_TREE, opaque_V2SF_type_node,
			   tree_cons (NULL_TREE, opaque_V2SF_type_node,
				      tree_cons (NULL_TREE, opaque_V2SF_type_node,
						 endlink)))));

  tree int_ftype_int_v2si_v2si
    = build_function_type
    (integer_type_node,
     tree_cons (NULL_TREE, integer_type_node,
		tree_cons (NULL_TREE, opaque_V2SI_type_node,
			   tree_cons (NULL_TREE, opaque_V2SI_type_node,
				      endlink))));

  tree int_ftype_int_v2sf_v2sf
    = build_function_type
    (integer_type_node,
     tree_cons (NULL_TREE, integer_type_node,
		tree_cons (NULL_TREE, opaque_V2SF_type_node,
			   tree_cons (NULL_TREE, opaque_V2SF_type_node,
				      endlink))));

  tree void_ftype_v2si_puint_int
    = build_function_type (void_type_node,
			   tree_cons (NULL_TREE, opaque_V2SI_type_node,
				      tree_cons (NULL_TREE, puint_type_node,
						 tree_cons (NULL_TREE,
							    integer_type_node,
							    endlink))));

  tree void_ftype_v2si_puint_char
    = build_function_type (void_type_node,
			   tree_cons (NULL_TREE, opaque_V2SI_type_node,
				      tree_cons (NULL_TREE, puint_type_node,
						 tree_cons (NULL_TREE,
							    char_type_node,
							    endlink))));

  tree void_ftype_v2si_pv2si_int
    = build_function_type (void_type_node,
			   tree_cons (NULL_TREE, opaque_V2SI_type_node,
				      tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
						 tree_cons (NULL_TREE,
							    integer_type_node,
							    endlink))));

  tree void_ftype_v2si_pv2si_char
    = build_function_type (void_type_node,
			   tree_cons (NULL_TREE, opaque_V2SI_type_node,
				      tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
						 tree_cons (NULL_TREE,
							    char_type_node,
							    endlink))));

  tree void_ftype_int
    = build_function_type (void_type_node,
			   tree_cons (NULL_TREE, integer_type_node, endlink));

  tree int_ftype_void
    = build_function_type (integer_type_node, endlink);

  tree v2si_ftype_pv2si_int
    = build_function_type (opaque_V2SI_type_node,
			   tree_cons (NULL_TREE, opaque_p_V2SI_type_node,
				      tree_cons (NULL_TREE, integer_type_node,
						 endlink)));

  tree v2si_ftype_puint_int
    = build_function_type (opaque_V2SI_type_node,
			   tree_cons (NULL_TREE, puint_type_node,
				      tree_cons (NULL_TREE, integer_type_node,
						 endlink)));

  tree v2si_ftype_pushort_int
    = build_function_type (opaque_V2SI_type_node,
			   tree_cons (NULL_TREE, pushort_type_node,
				      tree_cons (NULL_TREE, integer_type_node,
						 endlink)));

  tree v2si_ftype_signed_char
    = build_function_type (opaque_V2SI_type_node,
			   tree_cons (NULL_TREE, signed_char_type_node,
				      endlink));

  /* The initialization of the simple binary and unary builtins is
     done in rs6000_common_init_builtins, but we have to enable the
     mask bits here manually because we have run out of `target_flags'
     bits.  We really need to redesign this mask business.  */

  enable_mask_for_builtins ((struct builtin_description *) bdesc_2arg,
			    ARRAY_SIZE (bdesc_2arg),
			    SPE_BUILTIN_EVADDW,
			    SPE_BUILTIN_EVXOR);
  enable_mask_for_builtins ((struct builtin_description *) bdesc_1arg,
			    ARRAY_SIZE (bdesc_1arg),
			    SPE_BUILTIN_EVABS,
			    SPE_BUILTIN_EVSUBFUSIAAW);
  enable_mask_for_builtins ((struct builtin_description *) bdesc_spe_predicates,
			    ARRAY_SIZE (bdesc_spe_predicates),
			    SPE_BUILTIN_EVCMPEQ,
			    SPE_BUILTIN_EVFSTSTLT);
  enable_mask_for_builtins ((struct builtin_description *) bdesc_spe_evsel,
			    ARRAY_SIZE (bdesc_spe_evsel),
			    SPE_BUILTIN_EVSEL_CMPGTS,
			    SPE_BUILTIN_EVSEL_FSTSTEQ);

  (*lang_hooks.decls.pushdecl)
    (build_decl (BUILTINS_LOCATION, TYPE_DECL,
		 get_identifier ("__ev64_opaque__"),
		 opaque_V2SI_type_node));

  /* Initialize irregular SPE builtins.  */
  def_builtin (target_flags, "__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
  def_builtin (target_flags, "__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
  def_builtin (target_flags, "__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
  def_builtin (target_flags, "__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
  def_builtin (target_flags, "__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
  def_builtin (target_flags, "__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
  def_builtin (target_flags, "__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
  def_builtin (target_flags, "__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
  def_builtin (target_flags, "__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
  def_builtin (target_flags, "__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
  def_builtin (target_flags, "__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
  def_builtin (target_flags, "__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
  def_builtin (target_flags, "__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
  def_builtin (target_flags, "__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
  def_builtin (target_flags, "__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
  def_builtin (target_flags, "__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
  def_builtin (target_flags, "__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
  def_builtin (target_flags, "__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);

  /* Loads.  */
  def_builtin (target_flags, "__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
  def_builtin (target_flags, "__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
  def_builtin (target_flags, "__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
  def_builtin (target_flags, "__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
  def_builtin (target_flags, "__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
  def_builtin (target_flags, "__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
  def_builtin (target_flags, "__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
  def_builtin (target_flags, "__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
  def_builtin (target_flags, "__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
  def_builtin (target_flags, "__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
  def_builtin (target_flags, "__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
  def_builtin (target_flags, "__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
  def_builtin (target_flags, "__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
  def_builtin (target_flags, "__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
  def_builtin (target_flags, "__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
  def_builtin (target_flags, "__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
  def_builtin (target_flags, "__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
  def_builtin (target_flags, "__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
  def_builtin (target_flags, "__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
  def_builtin (target_flags, "__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
  def_builtin (target_flags, "__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
  def_builtin (target_flags, "__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);

  /* Predicates.  */
  d = (struct builtin_description *) bdesc_spe_predicates;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
    {
      tree type;

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SImode:
	  type = int_ftype_int_v2si_v2si;
	  break;
	case V2SFmode:
	  type = int_ftype_int_v2sf_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->mask, d->name, type, d->code);
    }

  /* Evsel predicates.  */
  d = (struct builtin_description *) bdesc_spe_evsel;
  for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
    {
      tree type;

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SImode:
	  type = v2si_ftype_4_v2si;
	  break;
	case V2SFmode:
	  type = v2sf_ftype_4_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->mask, d->name, type, d->code);
    }
}
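/* The tree_cons chains above are hand-built prototypes; for example,
   v2si_ftype_pv2si_int corresponds to

       __ev64_opaque__ f (__ev64_opaque__ *, int);

   so __builtin_spe_evlddx, defined with that type, is usable as

       __ev64_opaque__ v = __builtin_spe_evlddx (p, off);

   where __ev64_opaque__ is the TYPE_DECL pushed for
   opaque_V2SI_type_node a few lines earlier.  */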
static void
paired_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;
  tree endlink = void_list_node;

  tree int_ftype_int_v2sf_v2sf
    = build_function_type
    (integer_type_node,
     tree_cons (NULL_TREE, integer_type_node,
		tree_cons (NULL_TREE, V2SF_type_node,
			   tree_cons (NULL_TREE, V2SF_type_node,
				      endlink))));
  tree pcfloat_type_node =
    build_pointer_type (build_qualified_type
			(float_type_node, TYPE_QUAL_CONST));

  tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
							   long_integer_type_node,
							   pcfloat_type_node,
							   NULL_TREE);
  tree void_ftype_v2sf_long_pcfloat =
    build_function_type_list (void_type_node,
			      V2SF_type_node,
			      long_integer_type_node,
			      pcfloat_type_node,
			      NULL_TREE);

  def_builtin (0, "__builtin_paired_lx", v2sf_ftype_long_pcfloat,
	       PAIRED_BUILTIN_LX);

  def_builtin (0, "__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
	       PAIRED_BUILTIN_STX);

  /* Predicates.  */
  d = bdesc_paired_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
    {
      tree type;

      switch (insn_data[d->icode].operand[1].mode)
	{
	case V2SFmode:
	  type = int_ftype_int_v2sf_v2sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->mask, d->name, type, d->code);
    }
}
static void
altivec_init_builtins (void)
{
  const struct builtin_description *d;
  const struct builtin_description_predicates *dp;
  size_t i;
  tree ftype;

  tree pfloat_type_node = build_pointer_type (float_type_node);
  tree pint_type_node = build_pointer_type (integer_type_node);
  tree pshort_type_node = build_pointer_type (short_integer_type_node);
  tree pchar_type_node = build_pointer_type (char_type_node);

  tree pvoid_type_node = build_pointer_type (void_type_node);

  tree pcfloat_type_node = build_pointer_type (build_qualified_type (float_type_node, TYPE_QUAL_CONST));
  tree pcint_type_node = build_pointer_type (build_qualified_type (integer_type_node, TYPE_QUAL_CONST));
  tree pcshort_type_node = build_pointer_type (build_qualified_type (short_integer_type_node, TYPE_QUAL_CONST));
  tree pcchar_type_node = build_pointer_type (build_qualified_type (char_type_node, TYPE_QUAL_CONST));

  tree pcvoid_type_node = build_pointer_type (build_qualified_type (void_type_node, TYPE_QUAL_CONST));

  tree int_ftype_opaque
    = build_function_type_list (integer_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree opaque_ftype_opaque
    = build_function_type (integer_type_node,
			   NULL_TREE);
  tree opaque_ftype_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, integer_type_node, NULL_TREE);
  tree opaque_ftype_opaque_opaque_int
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, opaque_V4SI_type_node,
				integer_type_node, NULL_TREE);
  tree int_ftype_int_opaque_opaque
    = build_function_type_list (integer_type_node,
				integer_type_node, opaque_V4SI_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree int_ftype_int_v4si_v4si
    = build_function_type_list (integer_type_node,
				integer_type_node, V4SI_type_node,
				V4SI_type_node, NULL_TREE);
  tree v4sf_ftype_pcfloat
    = build_function_type_list (V4SF_type_node, pcfloat_type_node, NULL_TREE);
  tree void_ftype_pfloat_v4sf
    = build_function_type_list (void_type_node,
				pfloat_type_node, V4SF_type_node, NULL_TREE);
  tree v4si_ftype_pcint
    = build_function_type_list (V4SI_type_node, pcint_type_node, NULL_TREE);
  tree void_ftype_pint_v4si
    = build_function_type_list (void_type_node,
				pint_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_pcshort
    = build_function_type_list (V8HI_type_node, pcshort_type_node, NULL_TREE);
  tree void_ftype_pshort_v8hi
    = build_function_type_list (void_type_node,
				pshort_type_node, V8HI_type_node, NULL_TREE);
  tree v16qi_ftype_pcchar
    = build_function_type_list (V16QI_type_node, pcchar_type_node, NULL_TREE);
  tree void_ftype_pchar_v16qi
    = build_function_type_list (void_type_node,
				pchar_type_node, V16QI_type_node, NULL_TREE);
  tree void_ftype_v4si
    = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_void
    = build_function_type (V8HI_type_node, void_list_node);
  tree void_ftype_void
    = build_function_type (void_type_node, void_list_node);
  tree void_ftype_int
    = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);

  tree opaque_ftype_long_pcvoid
    = build_function_type_list (opaque_V4SI_type_node,
				long_integer_type_node, pcvoid_type_node, NULL_TREE);
  tree v16qi_ftype_long_pcvoid
    = build_function_type_list (V16QI_type_node,
				long_integer_type_node, pcvoid_type_node, NULL_TREE);
  tree v8hi_ftype_long_pcvoid
    = build_function_type_list (V8HI_type_node,
				long_integer_type_node, pcvoid_type_node, NULL_TREE);
  tree v4si_ftype_long_pcvoid
    = build_function_type_list (V4SI_type_node,
				long_integer_type_node, pcvoid_type_node, NULL_TREE);

  tree void_ftype_opaque_long_pvoid
    = build_function_type_list (void_type_node,
				opaque_V4SI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v4si_long_pvoid
    = build_function_type_list (void_type_node,
				V4SI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v16qi_long_pvoid
    = build_function_type_list (void_type_node,
				V16QI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree void_ftype_v8hi_long_pvoid
    = build_function_type_list (void_type_node,
				V8HI_type_node, long_integer_type_node,
				pvoid_type_node, NULL_TREE);
  tree int_ftype_int_v8hi_v8hi
    = build_function_type_list (integer_type_node,
				integer_type_node, V8HI_type_node,
				V8HI_type_node, NULL_TREE);
  tree int_ftype_int_v16qi_v16qi
    = build_function_type_list (integer_type_node,
				integer_type_node, V16QI_type_node,
				V16QI_type_node, NULL_TREE);
  tree int_ftype_int_v4sf_v4sf
    = build_function_type_list (integer_type_node,
				integer_type_node, V4SF_type_node,
				V4SF_type_node, NULL_TREE);
  tree v4si_ftype_v4si
    = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi
    = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi
    = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf
    = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree void_ftype_pcvoid_int_int
    = build_function_type_list (void_type_node,
				pcvoid_type_node, integer_type_node,
				integer_type_node, NULL_TREE);

  def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_4sf", v4sf_ftype_pcfloat,
	       ALTIVEC_BUILTIN_LD_INTERNAL_4sf);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_4sf", void_ftype_pfloat_v4sf,
	       ALTIVEC_BUILTIN_ST_INTERNAL_4sf);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_4si", v4si_ftype_pcint,
	       ALTIVEC_BUILTIN_LD_INTERNAL_4si);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_4si", void_ftype_pint_v4si,
	       ALTIVEC_BUILTIN_ST_INTERNAL_4si);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_8hi", v8hi_ftype_pcshort,
	       ALTIVEC_BUILTIN_LD_INTERNAL_8hi);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_8hi", void_ftype_pshort_v8hi,
	       ALTIVEC_BUILTIN_ST_INTERNAL_8hi);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_ld_internal_16qi", v16qi_ftype_pcchar,
	       ALTIVEC_BUILTIN_LD_INTERNAL_16qi);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_st_internal_16qi", void_ftype_pchar_v16qi,
	       ALTIVEC_BUILTIN_ST_INTERNAL_16qi);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
  def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);

  if (rs6000_cpu == PROCESSOR_CELL)
    {
      def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
      def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
      def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
      def_builtin (MASK_ALTIVEC, "__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);

      def_builtin (MASK_ALTIVEC, "__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
      def_builtin (MASK_ALTIVEC, "__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
      def_builtin (MASK_ALTIVEC, "__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
      def_builtin (MASK_ALTIVEC, "__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);

      def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
      def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
      def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
      def_builtin (MASK_ALTIVEC, "__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);

      def_builtin (MASK_ALTIVEC, "__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
      def_builtin (MASK_ALTIVEC, "__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
      def_builtin (MASK_ALTIVEC, "__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
      def_builtin (MASK_ALTIVEC, "__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
    }
  def_builtin (MASK_ALTIVEC, "__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);

  def_builtin (MASK_ALTIVEC, "__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);

  /* Add the DST variants.  */
  d = bdesc_dst;
  for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
    def_builtin (d->mask, d->name, void_ftype_pcvoid_int_int, d->code);

  /* Initialize the predicates.  */
  dp = bdesc_altivec_preds;
  for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, dp++)
    {
      enum machine_mode mode1;
      tree type;
      bool is_overloaded = dp->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
			   && dp->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;

      if (is_overloaded)
	mode1 = VOIDmode;
      else
	mode1 = insn_data[dp->icode].operand[1].mode;

      switch (mode1)
	{
	case VOIDmode:
	  type = int_ftype_int_opaque_opaque;
	  break;
	case V4SImode:
	  type = int_ftype_int_v4si_v4si;
	  break;
	case V8HImode:
	  type = int_ftype_int_v8hi_v8hi;
	  break;
	case V16QImode:
	  type = int_ftype_int_v16qi_v16qi;
	  break;
	case V4SFmode:
	  type = int_ftype_int_v4sf_v4sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (dp->mask, dp->name, type, dp->code);
    }

  /* Initialize the abs* operators.  */
  d = bdesc_abs;
  for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
    {
      enum machine_mode mode0;
      tree type;

      mode0 = insn_data[d->icode].operand[0].mode;

      switch (mode0)
	{
	case V4SImode:
	  type = v4si_ftype_v4si;
	  break;
	case V8HImode:
	  type = v8hi_ftype_v8hi;
	  break;
	case V16QImode:
	  type = v16qi_ftype_v16qi;
	  break;
	case V4SFmode:
	  type = v4sf_ftype_v4sf;
	  break;
	default:
	  gcc_unreachable ();
	}

      def_builtin (d->mask, d->name, type, d->code);
    }

  if (TARGET_ALTIVEC)
    {
      tree decl;

      /* Initialize target builtin that implements
	 targetm.vectorize.builtin_mask_for_load.  */

      decl = add_builtin_function ("__builtin_altivec_mask_for_load",
				   v16qi_ftype_long_pcvoid,
				   ALTIVEC_BUILTIN_MASK_FOR_LOAD,
				   BUILT_IN_MD, NULL, NULL_TREE);
      TREE_READONLY (decl) = 1;
      /* Record the decl.  Will be used by rs6000_builtin_mask_for_load.  */
      altivec_builtin_mask_for_load = decl;
    }

  /* Access to the vec_init patterns.  */
  ftype = build_function_type_list (V4SI_type_node, integer_type_node,
				    integer_type_node, integer_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v4si", ftype,
	       ALTIVEC_BUILTIN_VEC_INIT_V4SI);

  ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node,
				    short_integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v8hi", ftype,
	       ALTIVEC_BUILTIN_VEC_INIT_V8HI);

  ftype = build_function_type_list (V16QI_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, char_type_node,
				    char_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v16qi", ftype,
	       ALTIVEC_BUILTIN_VEC_INIT_V16QI);

  ftype = build_function_type_list (V4SF_type_node, float_type_node,
				    float_type_node, float_type_node,
				    float_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_init_v4sf", ftype,
	       ALTIVEC_BUILTIN_VEC_INIT_V4SF);

  /* Access to the vec_set patterns.  */
  ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
				    intSI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v4si", ftype,
	       ALTIVEC_BUILTIN_VEC_SET_V4SI);

  ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
				    intHI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v8hi", ftype,
	       ALTIVEC_BUILTIN_VEC_SET_V8HI);

  ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
				    intQI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v16qi", ftype,
	       ALTIVEC_BUILTIN_VEC_SET_V16QI);

  ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
				    float_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_set_v4sf", ftype,
	       ALTIVEC_BUILTIN_VEC_SET_V4SF);

  /* Access to the vec_extract patterns.  */
  ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v4si", ftype,
	       ALTIVEC_BUILTIN_VEC_EXT_V4SI);

  ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v8hi", ftype,
	       ALTIVEC_BUILTIN_VEC_EXT_V8HI);

  ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v16qi", ftype,
	       ALTIVEC_BUILTIN_VEC_EXT_V16QI);

  ftype = build_function_type_list (float_type_node, V4SF_type_node,
				    integer_type_node, NULL_TREE);
  def_builtin (MASK_ALTIVEC, "__builtin_vec_ext_v4sf", ftype,
	       ALTIVEC_BUILTIN_VEC_EXT_V4SF);
}
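/* A usage sketch for the vec_init/vec_set/vec_ext entry points
   defined above:

       __vector signed int v = __builtin_vec_init_v4si (1, 2, 3, 4);
       v = __builtin_vec_set_v4si (v, 99, 0);   => element 0 := 99
       int e = __builtin_vec_ext_v4si (v, 3);   => read element 3

   Each of these is routed by altivec_expand_builtin to the
   corresponding vec_init/vec_set/vec_extract expander.  */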
static void
rs6000_common_init_builtins (void)
{
  const struct builtin_description *d;
  size_t i;

  tree v2sf_ftype_v2sf_v2sf_v2sf
    = build_function_type_list (V2SF_type_node,
				V2SF_type_node, V2SF_type_node,
				V2SF_type_node, NULL_TREE);

  tree v4sf_ftype_v4sf_v4sf_v16qi
    = build_function_type_list (V4SF_type_node,
				V4SF_type_node, V4SF_type_node,
				V16QI_type_node, NULL_TREE);
  tree v4si_ftype_v4si_v4si_v16qi
    = build_function_type_list (V4SI_type_node,
				V4SI_type_node, V4SI_type_node,
				V16QI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi_v8hi_v16qi
    = build_function_type_list (V8HI_type_node,
				V8HI_type_node, V8HI_type_node,
				V16QI_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi_v16qi_v16qi
    = build_function_type_list (V16QI_type_node,
				V16QI_type_node, V16QI_type_node,
				V16QI_type_node, NULL_TREE);
  tree v4si_ftype_int
    = build_function_type_list (V4SI_type_node, integer_type_node, NULL_TREE);
  tree v8hi_ftype_int
    = build_function_type_list (V8HI_type_node, integer_type_node, NULL_TREE);
  tree v16qi_ftype_int
    = build_function_type_list (V16QI_type_node, integer_type_node, NULL_TREE);
  tree v8hi_ftype_v16qi
    = build_function_type_list (V8HI_type_node, V16QI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf
    = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);

  tree v2si_ftype_v2si_v2si
    = build_function_type_list (opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				opaque_V2SI_type_node, NULL_TREE);

  tree v2sf_ftype_v2sf_v2sf_spe
    = build_function_type_list (opaque_V2SF_type_node,
				opaque_V2SF_type_node,
				opaque_V2SF_type_node, NULL_TREE);

  tree v2sf_ftype_v2sf_v2sf
    = build_function_type_list (V2SF_type_node,
				V2SF_type_node,
				V2SF_type_node, NULL_TREE);

  tree v2si_ftype_int_int
    = build_function_type_list (opaque_V2SI_type_node,
				integer_type_node, integer_type_node,
				NULL_TREE);

  tree opaque_ftype_opaque
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, NULL_TREE);

  tree v2si_ftype_v2si
    = build_function_type_list (opaque_V2SI_type_node,
				opaque_V2SI_type_node, NULL_TREE);

  tree v2sf_ftype_v2sf_spe
    = build_function_type_list (opaque_V2SF_type_node,
				opaque_V2SF_type_node, NULL_TREE);

  tree v2sf_ftype_v2sf
    = build_function_type_list (V2SF_type_node,
				V2SF_type_node, NULL_TREE);

  tree v2sf_ftype_v2si
    = build_function_type_list (opaque_V2SF_type_node,
				opaque_V2SI_type_node, NULL_TREE);

  tree v2si_ftype_v2sf
    = build_function_type_list (opaque_V2SI_type_node,
				opaque_V2SF_type_node, NULL_TREE);

  tree v2si_ftype_v2si_char
    = build_function_type_list (opaque_V2SI_type_node,
				opaque_V2SI_type_node,
				char_type_node, NULL_TREE);

  tree v2si_ftype_int_char
    = build_function_type_list (opaque_V2SI_type_node,
				integer_type_node, char_type_node, NULL_TREE);

  tree v2si_ftype_char
    = build_function_type_list (opaque_V2SI_type_node,
				char_type_node, NULL_TREE);

  tree int_ftype_int_int
    = build_function_type_list (integer_type_node,
				integer_type_node, integer_type_node,
				NULL_TREE);

  tree opaque_ftype_opaque_opaque
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, opaque_V4SI_type_node,
				NULL_TREE);
  tree v4si_ftype_v4si_v4si
    = build_function_type_list (V4SI_type_node,
				V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree v4sf_ftype_v4si_int
    = build_function_type_list (V4SF_type_node,
				V4SI_type_node, integer_type_node, NULL_TREE);
  tree v4si_ftype_v4sf_int
    = build_function_type_list (V4SI_type_node,
				V4SF_type_node, integer_type_node, NULL_TREE);
  tree v4si_ftype_v4si_int
    = build_function_type_list (V4SI_type_node,
				V4SI_type_node, integer_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi_int
    = build_function_type_list (V8HI_type_node,
				V8HI_type_node, integer_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi_int
    = build_function_type_list (V16QI_type_node,
				V16QI_type_node, integer_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi_v16qi_int
    = build_function_type_list (V16QI_type_node,
				V16QI_type_node, V16QI_type_node,
				integer_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi_v8hi_int
    = build_function_type_list (V8HI_type_node,
				V8HI_type_node, V8HI_type_node,
				integer_type_node, NULL_TREE);
  tree v4si_ftype_v4si_v4si_int
    = build_function_type_list (V4SI_type_node,
				V4SI_type_node, V4SI_type_node,
				integer_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf_v4sf_int
    = build_function_type_list (V4SF_type_node,
				V4SF_type_node, V4SF_type_node,
				integer_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf_v4sf
    = build_function_type_list (V4SF_type_node,
				V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree opaque_ftype_opaque_opaque_opaque
    = build_function_type_list (opaque_V4SI_type_node,
				opaque_V4SI_type_node, opaque_V4SI_type_node,
				opaque_V4SI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf_v4sf_v4si
    = build_function_type_list (V4SF_type_node,
				V4SF_type_node, V4SF_type_node,
				V4SI_type_node, NULL_TREE);
  tree v4sf_ftype_v4sf_v4sf_v4sf
    = build_function_type_list (V4SF_type_node,
				V4SF_type_node, V4SF_type_node,
				V4SF_type_node, NULL_TREE);
  tree v4si_ftype_v4si_v4si_v4si
    = build_function_type_list (V4SI_type_node,
				V4SI_type_node, V4SI_type_node,
				V4SI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi_v8hi
    = build_function_type_list (V8HI_type_node,
				V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v8hi_ftype_v8hi_v8hi_v8hi
    = build_function_type_list (V8HI_type_node,
				V8HI_type_node, V8HI_type_node,
				V8HI_type_node, NULL_TREE);
  tree v4si_ftype_v8hi_v8hi_v4si
    = build_function_type_list (V4SI_type_node,
				V8HI_type_node, V8HI_type_node,
				V4SI_type_node, NULL_TREE);
  tree v4si_ftype_v16qi_v16qi_v4si
    = build_function_type_list (V4SI_type_node,
				V16QI_type_node, V16QI_type_node,
				V4SI_type_node, NULL_TREE);
  tree v16qi_ftype_v16qi_v16qi
    = build_function_type_list (V16QI_type_node,
				V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v4si_ftype_v4sf_v4sf
    = build_function_type_list (V4SI_type_node,
				V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree v8hi_ftype_v16qi_v16qi
    = build_function_type_list (V8HI_type_node,
				V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v4si_ftype_v8hi_v8hi
    = build_function_type_list (V4SI_type_node,
				V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v8hi_ftype_v4si_v4si
    = build_function_type_list (V8HI_type_node,
				V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree v16qi_ftype_v8hi_v8hi
    = build_function_type_list (V16QI_type_node,
				V8HI_type_node, V8HI_type_node, NULL_TREE);
  tree v4si_ftype_v16qi_v4si
    = build_function_type_list (V4SI_type_node,
				V16QI_type_node, V4SI_type_node, NULL_TREE);
  tree v4si_ftype_v16qi_v16qi
    = build_function_type_list (V4SI_type_node,
				V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree v4si_ftype_v8hi_v4si
    = build_function_type_list (V4SI_type_node,
				V8HI_type_node, V4SI_type_node, NULL_TREE);
  tree v4si_ftype_v8hi
    = build_function_type_list (V4SI_type_node, V8HI_type_node, NULL_TREE);
  tree int_ftype_v4si_v4si
    = build_function_type_list (integer_type_node,
				V4SI_type_node, V4SI_type_node, NULL_TREE);
  tree int_ftype_v4sf_v4sf
    = build_function_type_list (integer_type_node,
				V4SF_type_node, V4SF_type_node, NULL_TREE);
  tree int_ftype_v16qi_v16qi
    = build_function_type_list (integer_type_node,
				V16QI_type_node, V16QI_type_node, NULL_TREE);
  tree int_ftype_v8hi_v8hi
    = build_function_type_list (integer_type_node,
				V8HI_type_node, V8HI_type_node, NULL_TREE);
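
  /* For illustration (not part of the original sources): the local names
     above follow the pattern "<return>_ftype_<arguments>".  E.g.
     v4sf_ftype_v4sf_v4sf, built as
       build_function_type_list (V4SF_type_node, V4SF_type_node,
				 V4SF_type_node, NULL_TREE),
     describes a builtin with the C type
     "vector float (vector float, vector float)"; the trailing NULL_TREE
     terminates the argument list.  */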
  /* Add the simple ternary operators.  */
  d = bdesc_3arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
    {
      enum machine_mode mode0, mode1, mode2, mode3;
      tree type;
      bool is_overloaded = d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
			   && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;

      if (is_overloaded)
	{
	  mode0 = VOIDmode;
	  mode1 = VOIDmode;
	  mode2 = VOIDmode;
	  mode3 = VOIDmode;
	}
      else
	{
	  if (d->name == 0 || d->icode == CODE_FOR_nothing)
	    continue;

	  mode0 = insn_data[d->icode].operand[0].mode;
	  mode1 = insn_data[d->icode].operand[1].mode;
	  mode2 = insn_data[d->icode].operand[2].mode;
	  mode3 = insn_data[d->icode].operand[3].mode;
	}

      /* When all four are of the same mode.  */
      if (mode0 == mode1 && mode1 == mode2 && mode2 == mode3)
	{
	  switch (mode0)
	    {
	    case VOIDmode:
	      type = opaque_ftype_opaque_opaque_opaque;
	      break;
	    case V4SImode:
	      type = v4si_ftype_v4si_v4si_v4si;
	      break;
	    case V4SFmode:
	      type = v4sf_ftype_v4sf_v4sf_v4sf;
	      break;
	    case V8HImode:
	      type = v8hi_ftype_v8hi_v8hi_v8hi;
	      break;
	    case V16QImode:
	      type = v16qi_ftype_v16qi_v16qi_v16qi;
	      break;
	    case V2SFmode:
	      type = v2sf_ftype_v2sf_v2sf_v2sf;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}
      else if (mode0 == mode1 && mode1 == mode2 && mode3 == V16QImode)
	{
	  switch (mode0)
	    {
	    case V4SImode:
	      type = v4si_ftype_v4si_v4si_v16qi;
	      break;
	    case V4SFmode:
	      type = v4sf_ftype_v4sf_v4sf_v16qi;
	      break;
	    case V8HImode:
	      type = v8hi_ftype_v8hi_v8hi_v16qi;
	      break;
	    case V16QImode:
	      type = v16qi_ftype_v16qi_v16qi_v16qi;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}
      else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V16QImode
	       && mode3 == V4SImode)
	type = v4si_ftype_v16qi_v16qi_v4si;
      else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V8HImode
	       && mode3 == V4SImode)
	type = v4si_ftype_v8hi_v8hi_v4si;
      else if (mode0 == V4SFmode && mode1 == V4SFmode && mode2 == V4SFmode
	       && mode3 == V4SImode)
	type = v4sf_ftype_v4sf_v4sf_v4si;

      /* vchar, vchar, vchar, 4-bit literal.  */
      else if (mode0 == V16QImode && mode1 == mode0 && mode2 == mode0
	       && mode3 == QImode)
	type = v16qi_ftype_v16qi_v16qi_int;

      /* vshort, vshort, vshort, 4-bit literal.  */
      else if (mode0 == V8HImode && mode1 == mode0 && mode2 == mode0
	       && mode3 == QImode)
	type = v8hi_ftype_v8hi_v8hi_int;

      /* vint, vint, vint, 4-bit literal.  */
      else if (mode0 == V4SImode && mode1 == mode0 && mode2 == mode0
	       && mode3 == QImode)
	type = v4si_ftype_v4si_v4si_int;

      /* vfloat, vfloat, vfloat, 4-bit literal.  */
      else if (mode0 == V4SFmode && mode1 == mode0 && mode2 == mode0
	       && mode3 == QImode)
	type = v4sf_ftype_v4sf_v4sf_int;

      else
	gcc_unreachable ();

      def_builtin (d->mask, d->name, type, d->code);
    }
  /* Add the simple binary operators.  */
  d = (struct builtin_description *) bdesc_2arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
    {
      enum machine_mode mode0, mode1, mode2;
      tree type;
      bool is_overloaded = d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
			   && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;

      if (is_overloaded)
	{
	  mode0 = VOIDmode;
	  mode1 = VOIDmode;
	  mode2 = VOIDmode;
	}
      else
	{
	  if (d->name == 0 || d->icode == CODE_FOR_nothing)
	    continue;

	  mode0 = insn_data[d->icode].operand[0].mode;
	  mode1 = insn_data[d->icode].operand[1].mode;
	  mode2 = insn_data[d->icode].operand[2].mode;
	}

      /* When all three operands are of the same mode.  */
      if (mode0 == mode1 && mode1 == mode2)
	{
	  switch (mode0)
	    {
	    case VOIDmode:
	      type = opaque_ftype_opaque_opaque;
	      break;
	    case V4SFmode:
	      type = v4sf_ftype_v4sf_v4sf;
	      break;
	    case V4SImode:
	      type = v4si_ftype_v4si_v4si;
	      break;
	    case V16QImode:
	      type = v16qi_ftype_v16qi_v16qi;
	      break;
	    case V8HImode:
	      type = v8hi_ftype_v8hi_v8hi;
	      break;
	    case V2SImode:
	      type = v2si_ftype_v2si_v2si;
	      break;
	    case V2SFmode:
	      if (TARGET_PAIRED_FLOAT)
		type = v2sf_ftype_v2sf_v2sf;
	      else
		type = v2sf_ftype_v2sf_v2sf_spe;
	      break;
	    case SImode:
	      type = int_ftype_int_int;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}

      /* A few other combos we really don't want to do manually.  */

      /* vint, vfloat, vfloat.  */
      else if (mode0 == V4SImode && mode1 == V4SFmode && mode2 == V4SFmode)
	type = v4si_ftype_v4sf_v4sf;

      /* vshort, vchar, vchar.  */
      else if (mode0 == V8HImode && mode1 == V16QImode && mode2 == V16QImode)
	type = v8hi_ftype_v16qi_v16qi;

      /* vint, vshort, vshort.  */
      else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V8HImode)
	type = v4si_ftype_v8hi_v8hi;

      /* vshort, vint, vint.  */
      else if (mode0 == V8HImode && mode1 == V4SImode && mode2 == V4SImode)
	type = v8hi_ftype_v4si_v4si;

      /* vchar, vshort, vshort.  */
      else if (mode0 == V16QImode && mode1 == V8HImode && mode2 == V8HImode)
	type = v16qi_ftype_v8hi_v8hi;

      /* vint, vchar, vint.  */
      else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V4SImode)
	type = v4si_ftype_v16qi_v4si;

      /* vint, vchar, vchar.  */
      else if (mode0 == V4SImode && mode1 == V16QImode && mode2 == V16QImode)
	type = v4si_ftype_v16qi_v16qi;

      /* vint, vshort, vint.  */
      else if (mode0 == V4SImode && mode1 == V8HImode && mode2 == V4SImode)
	type = v4si_ftype_v8hi_v4si;

      /* vint, vint, 5-bit literal.  */
      else if (mode0 == V4SImode && mode1 == V4SImode && mode2 == QImode)
	type = v4si_ftype_v4si_int;

      /* vshort, vshort, 5-bit literal.  */
      else if (mode0 == V8HImode && mode1 == V8HImode && mode2 == QImode)
	type = v8hi_ftype_v8hi_int;

      /* vchar, vchar, 5-bit literal.  */
      else if (mode0 == V16QImode && mode1 == V16QImode && mode2 == QImode)
	type = v16qi_ftype_v16qi_int;

      /* vfloat, vint, 5-bit literal.  */
      else if (mode0 == V4SFmode && mode1 == V4SImode && mode2 == QImode)
	type = v4sf_ftype_v4si_int;

      /* vint, vfloat, 5-bit literal.  */
      else if (mode0 == V4SImode && mode1 == V4SFmode && mode2 == QImode)
	type = v4si_ftype_v4sf_int;

      else if (mode0 == V2SImode && mode1 == SImode && mode2 == SImode)
	type = v2si_ftype_int_int;

      else if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
	type = v2si_ftype_v2si_char;

      else if (mode0 == V2SImode && mode1 == SImode && mode2 == QImode)
	type = v2si_ftype_int_char;

      else
	{
	  /* int, x, x.  */
	  gcc_assert (mode0 == SImode);
	  switch (mode1)
	    {
	    case V4SImode:
	      type = int_ftype_v4si_v4si;
	      break;
	    case V4SFmode:
	      type = int_ftype_v4sf_v4sf;
	      break;
	    case V16QImode:
	      type = int_ftype_v16qi_v16qi;
	      break;
	    case V8HImode:
	      type = int_ftype_v8hi_v8hi;
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}

      def_builtin (d->mask, d->name, type, d->code);
    }
  /* Add the simple unary operators.  */
  d = (struct builtin_description *) bdesc_1arg;
  for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
    {
      enum machine_mode mode0, mode1;
      tree type;
      bool is_overloaded = d->code >= ALTIVEC_BUILTIN_OVERLOADED_FIRST
			   && d->code <= ALTIVEC_BUILTIN_OVERLOADED_LAST;

      if (is_overloaded)
	{
	  mode0 = VOIDmode;
	  mode1 = VOIDmode;
	}
      else
	{
	  if (d->name == 0 || d->icode == CODE_FOR_nothing)
	    continue;

	  mode0 = insn_data[d->icode].operand[0].mode;
	  mode1 = insn_data[d->icode].operand[1].mode;
	}

      if (mode0 == V4SImode && mode1 == QImode)
	type = v4si_ftype_int;
      else if (mode0 == V8HImode && mode1 == QImode)
	type = v8hi_ftype_int;
      else if (mode0 == V16QImode && mode1 == QImode)
	type = v16qi_ftype_int;
      else if (mode0 == VOIDmode && mode1 == VOIDmode)
	type = opaque_ftype_opaque;
      else if (mode0 == V4SFmode && mode1 == V4SFmode)
	type = v4sf_ftype_v4sf;
      else if (mode0 == V8HImode && mode1 == V16QImode)
	type = v8hi_ftype_v16qi;
      else if (mode0 == V4SImode && mode1 == V8HImode)
	type = v4si_ftype_v8hi;
      else if (mode0 == V2SImode && mode1 == V2SImode)
	type = v2si_ftype_v2si;
      else if (mode0 == V2SFmode && mode1 == V2SFmode)
	{
	  if (TARGET_PAIRED_FLOAT)
	    type = v2sf_ftype_v2sf;
	  else
	    type = v2sf_ftype_v2sf_spe;
	}
      else if (mode0 == V2SFmode && mode1 == V2SImode)
	type = v2sf_ftype_v2si;
      else if (mode0 == V2SImode && mode1 == V2SFmode)
	type = v2si_ftype_v2sf;
      else if (mode0 == V2SImode && mode1 == QImode)
	type = v2si_ftype_char;
      else
	gcc_unreachable ();

      def_builtin (d->mask, d->name, type, d->code);
    }
}
static void
rs6000_init_libfuncs (void)
{
  if (DEFAULT_ABI != ABI_V4 && TARGET_XCOFF
      && !TARGET_POWER2 && !TARGET_POWERPC)
    {
      /* AIX library routines for float->int conversion.  */
      set_conv_libfunc (sfix_optab, SImode, DFmode, "__itrunc");
      set_conv_libfunc (ufix_optab, SImode, DFmode, "__uitrunc");
      set_conv_libfunc (sfix_optab, SImode, TFmode, "_qitrunc");
      set_conv_libfunc (ufix_optab, SImode, TFmode, "_quitrunc");
    }

  if (!TARGET_IEEEQUAD)
      /* AIX/Darwin/64-bit Linux quad floating point routines.  */
    if (!TARGET_XL_COMPAT)
      {
	set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
	set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
	set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
	set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");

	if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
	  {
	    set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
	    set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
	    set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
	    set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
	    set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
	    set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
	    set_optab_libfunc (le_optab, TFmode, "__gcc_qle");

	    set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
	    set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
	    set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
	    set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
	    set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
	    set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
	    set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
	    set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
	  }

	if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
	  set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
      }
    else
      {
	set_optab_libfunc (add_optab, TFmode, "_xlqadd");
	set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
	set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
	set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
      }
  else
    {
      /* 32-bit SVR4 quad floating point routines.  */

      set_optab_libfunc (add_optab, TFmode, "_q_add");
      set_optab_libfunc (sub_optab, TFmode, "_q_sub");
      set_optab_libfunc (neg_optab, TFmode, "_q_neg");
      set_optab_libfunc (smul_optab, TFmode, "_q_mul");
      set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
      if (TARGET_PPC_GPOPT || TARGET_POWER2)
	set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");

      set_optab_libfunc (eq_optab, TFmode, "_q_feq");
      set_optab_libfunc (ne_optab, TFmode, "_q_fne");
      set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
      set_optab_libfunc (ge_optab, TFmode, "_q_fge");
      set_optab_libfunc (lt_optab, TFmode, "_q_flt");
      set_optab_libfunc (le_optab, TFmode, "_q_fle");

      set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
      set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
      set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
      set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
      set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
      set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
      set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
      set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
    }
}
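
/* For illustration (not part of the original sources): after
   set_optab_libfunc (add_optab, TFmode, "__gcc_qadd") above, a long-double
   addition "a + b" that cannot be open-coded is emitted as a call to
   __gcc_qadd instead of the default libgcc name, and set_conv_libfunc
   likewise renames the conversion helpers, e.g. the signed TFmode->SImode
   truncation becomes a call to __gcc_qtoi.  */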
/* Expand a block clear operation, and return 1 if successful.  Return 0
   if we should let the compiler generate normal code.

   operands[0] is the destination
   operands[1] is the length
   operands[3] is the alignment */

int
expand_block_clear (rtx operands[])
{
  rtx orig_dest = operands[0];
  rtx bytes_rtx = operands[1];
  rtx align_rtx = operands[3];
  bool constp	= (GET_CODE (bytes_rtx) == CONST_INT);
  HOST_WIDE_INT align;
  HOST_WIDE_INT bytes;
  int offset;
  int clear_bytes;
  int clear_step;

  /* If this is not a fixed size clear, just call memset.  */
  if (! constp)
    return 0;

  /* This must be a fixed size alignment  */
  gcc_assert (GET_CODE (align_rtx) == CONST_INT);
  align = INTVAL (align_rtx) * BITS_PER_UNIT;

  /* Anything to clear? */
  bytes = INTVAL (bytes_rtx);
  if (bytes <= 0)
    return 1;

  /* Use the builtin memset after a point, to avoid huge code bloat.
     When optimize_size, avoid any significant code bloat; calling
     memset is about 4 instructions, so allow for one instruction to
     load zero and three to do clearing.  */
  if (TARGET_ALTIVEC && align >= 128)
    clear_step = 16;
  else if (TARGET_POWERPC64 && align >= 32)
    clear_step = 8;
  else if (TARGET_SPE && align >= 64)
    clear_step = 8;
  else
    clear_step = 4;

  if (optimize_size && bytes > 3 * clear_step)
    return 0;
  if (! optimize_size && bytes > 8 * clear_step)
    return 0;

  for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
    {
      enum machine_mode mode = BLKmode;
      rtx dest;

      if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
	{
	  clear_bytes = 16;
	  mode = V4SImode;
	}
      else if (bytes >= 8 && TARGET_SPE && align >= 64)
	{
	  clear_bytes = 8;
	  mode = V2SImode;
	}
      else if (bytes >= 8 && TARGET_POWERPC64
	       /* 64-bit loads and stores require word-aligned
		  displacements.  */
	       && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
	{
	  clear_bytes = 8;
	  mode = DImode;
	}
      else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
	{			/* move 4 bytes */
	  clear_bytes = 4;
	  mode = SImode;
	}
      else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
	{			/* move 2 bytes */
	  clear_bytes = 2;
	  mode = HImode;
	}
      else /* move 1 byte at a time */
	{
	  clear_bytes = 1;
	  mode = QImode;
	}

      dest = adjust_address (orig_dest, mode, offset);

      emit_move_insn (dest, CONST0_RTX (mode));
    }

  return 1;
}
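
/* Worked example (hypothetical operands): clearing 22 bytes with
   TARGET_ALTIVEC and 128-bit alignment proceeds 16+4+2 -- one V4SImode
   store of CONST0_RTX, then an SImode store, then an HImode store, via
   adjust_address at offsets 0, 16 and 20.  A 200-byte clear would exceed
   8 * clear_step above, return 0, and fall back to calling memset.  */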
/* Expand a block move operation, and return 1 if successful.  Return 0
   if we should let the compiler generate normal code.

   operands[0] is the destination
   operands[1] is the source
   operands[2] is the length
   operands[3] is the alignment */

#define MAX_MOVE_REG 4

int
expand_block_move (rtx operands[])
{
  rtx orig_dest = operands[0];
  rtx orig_src	= operands[1];
  rtx bytes_rtx	= operands[2];
  rtx align_rtx = operands[3];
  int constp	= (GET_CODE (bytes_rtx) == CONST_INT);
  int align;
  int bytes;
  int offset;
  int move_bytes;
  rtx stores[MAX_MOVE_REG];
  int num_reg = 0;

  /* If this is not a fixed size move, just call memcpy */
  if (! constp)
    return 0;

  /* This must be a fixed size alignment */
  gcc_assert (GET_CODE (align_rtx) == CONST_INT);
  align = INTVAL (align_rtx) * BITS_PER_UNIT;

  /* Anything to move? */
  bytes = INTVAL (bytes_rtx);
  if (bytes <= 0)
    return 1;

  /* store_one_arg depends on expand_block_move to handle at least the size of
     reg_parm_stack_space.  */
  if (bytes > (TARGET_POWERPC64 ? 64 : 32))
    return 0;

  for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
    {
      union {
	rtx (*movmemsi) (rtx, rtx, rtx, rtx);
	rtx (*mov) (rtx, rtx);
      } gen_func;
      enum machine_mode mode = BLKmode;
      rtx src, dest;

      /* Altivec first, since it will be faster than a string move
	 when it applies, and usually not significantly larger.  */
      if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
	{
	  move_bytes = 16;
	  mode = V4SImode;
	  gen_func.mov = gen_movv4si;
	}
      else if (TARGET_SPE && bytes >= 8 && align >= 64)
	{
	  move_bytes = 8;
	  mode = V2SImode;
	  gen_func.mov = gen_movv2si;
	}
      else if (TARGET_STRING
	       && bytes > 24	/* move up to 32 bytes at a time */
	       && ! fixed_regs[5]
	       && ! fixed_regs[6]
	       && ! fixed_regs[7]
	       && ! fixed_regs[8]
	       && ! fixed_regs[9]
	       && ! fixed_regs[10]
	       && ! fixed_regs[11]
	       && ! fixed_regs[12])
	{
	  move_bytes = (bytes > 32) ? 32 : bytes;
	  gen_func.movmemsi = gen_movmemsi_8reg;
	}
      else if (TARGET_STRING
	       && bytes > 16	/* move up to 24 bytes at a time */
	       && ! fixed_regs[5]
	       && ! fixed_regs[6]
	       && ! fixed_regs[7]
	       && ! fixed_regs[8]
	       && ! fixed_regs[9]
	       && ! fixed_regs[10])
	{
	  move_bytes = (bytes > 24) ? 24 : bytes;
	  gen_func.movmemsi = gen_movmemsi_6reg;
	}
      else if (TARGET_STRING
	       && bytes > 8	/* move up to 16 bytes at a time */
	       && ! fixed_regs[5]
	       && ! fixed_regs[6]
	       && ! fixed_regs[7]
	       && ! fixed_regs[8])
	{
	  move_bytes = (bytes > 16) ? 16 : bytes;
	  gen_func.movmemsi = gen_movmemsi_4reg;
	}
      else if (bytes >= 8 && TARGET_POWERPC64
	       /* 64-bit loads and stores require word-aligned
		  displacements.  */
	       && (align >= 64 || (!STRICT_ALIGNMENT && align >= 32)))
	{
	  move_bytes = 8;
	  mode = DImode;
	  gen_func.mov = gen_movdi;
	}
      else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
	{			/* move up to 8 bytes at a time */
	  move_bytes = (bytes > 8) ? 8 : bytes;
	  gen_func.movmemsi = gen_movmemsi_2reg;
	}
      else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
	{			/* move 4 bytes */
	  move_bytes = 4;
	  mode = SImode;
	  gen_func.mov = gen_movsi;
	}
      else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
	{			/* move 2 bytes */
	  move_bytes = 2;
	  mode = HImode;
	  gen_func.mov = gen_movhi;
	}
      else if (TARGET_STRING && bytes > 1)
	{			/* move up to 4 bytes at a time */
	  move_bytes = (bytes > 4) ? 4 : bytes;
	  gen_func.movmemsi = gen_movmemsi_1reg;
	}
      else /* move 1 byte at a time */
	{
	  move_bytes = 1;
	  mode = QImode;
	  gen_func.mov = gen_movqi;
	}

      src = adjust_address (orig_src, mode, offset);
      dest = adjust_address (orig_dest, mode, offset);

      if (mode != BLKmode)
	{
	  rtx tmp_reg = gen_reg_rtx (mode);

	  emit_insn ((*gen_func.mov) (tmp_reg, src));
	  stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
	}

      if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
	{
	  int i;
	  for (i = 0; i < num_reg; i++)
	    emit_insn (stores[i]);
	  num_reg = 0;
	}

      if (mode == BLKmode)
	{
	  /* Move the address into scratch registers.  The movmemsi
	     patterns require zero offset.  */
	  if (!REG_P (XEXP (src, 0)))
	    {
	      rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
	      src = replace_equiv_address (src, src_reg);
	    }
	  set_mem_size (src, GEN_INT (move_bytes));

	  if (!REG_P (XEXP (dest, 0)))
	    {
	      rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
	      dest = replace_equiv_address (dest, dest_reg);
	    }
	  set_mem_size (dest, GEN_INT (move_bytes));

	  emit_insn ((*gen_func.movmemsi) (dest, src,
					   GEN_INT (move_bytes & 31),
					   align_rtx));
	}
    }

  return 1;
}
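
/* Worked example (hypothetical operands): a 25-byte move with
   TARGET_STRING and r5..r12 available takes the first string branch
   above -- move_bytes = 25 and gen_movmemsi_8reg emits one load/store
   string pair.  Without TARGET_STRING, a sufficiently aligned 25-byte
   move on a 64-bit target decomposes into 8+8+8+1 register moves,
   buffered in stores[] so the loads are issued ahead of the stores.  */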
/* Return a string to perform a load_multiple operation.
   operands[0] is the vector.
   operands[1] is the source address.
   operands[2] is the first destination register.  */

const char *
rs6000_output_load_multiple (rtx operands[3])
{
  /* We have to handle the case where the pseudo used to contain the address
     is assigned to one of the output registers.  */
  int i, j;
  int words = XVECLEN (operands[0], 0);
  rtx xop[10];

  if (XVECLEN (operands[0], 0) == 1)
    return "{l|lwz} %2,0(%1)";

  for (i = 0; i < words; i++)
    if (refers_to_regno_p (REGNO (operands[2]) + i,
			   REGNO (operands[2]) + i + 1, operands[1], 0))
      {
	if (i == words-1)
	  {
	    xop[0] = GEN_INT (4 * (words-1));
	    xop[1] = operands[1];
	    xop[2] = operands[2];
	    output_asm_insn ("{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,%0(%1)", xop);
	    return "";
	  }
	else if (i == 0)
	  {
	    xop[0] = GEN_INT (4 * (words-1));
	    xop[1] = operands[1];
	    xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
	    output_asm_insn ("{cal %1,4(%1)|addi %1,%1,4}\n\t{lsi|lswi} %2,%1,%0\n\t{l|lwz} %1,-4(%1)", xop);
	    return "";
	  }
	else
	  {
	    for (j = 0; j < words; j++)
	      if (j != i)
		{
		  xop[0] = GEN_INT (j * 4);
		  xop[1] = operands[1];
		  xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
		  output_asm_insn ("{l|lwz} %2,%0(%1)", xop);
		}
	    xop[0] = GEN_INT (i * 4);
	    xop[1] = operands[1];
	    output_asm_insn ("{l|lwz} %1,%0(%1)", xop);
	    return "";
	  }
      }

  return "{lsi|lswi} %2,%1,%N0";
}
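
/* For illustration (not part of the original sources): with a
   three-word vector and no overlap between the address register and the
   destinations, the fall-through return emits a single
   "{lsi|lswi} %2,%1,%N0" load-string of 12 bytes; the special cases
   above only trigger when operands[1] is also one of the outputs.  */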
/* A validation routine: say whether CODE, a condition code, and MODE
   match.  The other alternatives either don't make sense or should
   never be generated.  */

void
validate_condition_mode (enum rtx_code code, enum machine_mode mode)
{
  gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
	       || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
	      && GET_MODE_CLASS (mode) == MODE_CC);

  /* These don't make sense.  */
  gcc_assert ((code != GT && code != LT && code != GE && code != LE)
	      || mode != CCUNSmode);

  gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
	      || mode == CCUNSmode);

  gcc_assert (mode == CCFPmode
	      || (code != ORDERED && code != UNORDERED
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT
		  && code != UNGE && code != UNLE));

  /* These should never be generated except for
     flag_finite_math_only.  */
  gcc_assert (mode != CCFPmode
	      || flag_finite_math_only
	      || (code != LE && code != GE
		  && code != UNEQ && code != LTGT
		  && code != UNGT && code != UNLT));

  /* These are invalid; the information is not there.  */
  gcc_assert (mode != CCEQmode || code == EQ || code == NE);
}
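
/* For illustration (not part of the original sources): a signed
   comparison such as (gt (reg:CCUNS ...) ...) trips the CCUNSmode
   assertion above -- unsigned CC modes pair only with GTU/LTU/GEU/LEU --
   while (gtu (reg:CCUNS ...) ...) is accepted.  */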
/* Return 1 if ANDOP is a mask that has no bits on that are not in the
   mask required to convert the result of a rotate insn into a shift
   left insn of SHIFTOP bits.  Both are known to be SImode CONST_INT.  */

int
includes_lshift_p (rtx shiftop, rtx andop)
{
  unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;

  shift_mask <<= INTVAL (shiftop);

  return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
}

/* Similar, but for right shift.  */

int
includes_rshift_p (rtx shiftop, rtx andop)
{
  unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;

  shift_mask >>= INTVAL (shiftop);

  return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
}
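
/* Worked example: with SHIFTOP = 4 and ANDOP = 0xfffffff0,
   includes_lshift_p computes shift_mask = 0xfffffff0 and returns 1,
   since the AND keeps no bits that a left shift by 4 would have
   cleared, so rotate-then-AND can be emitted as a plain shift.  With
   ANDOP = 0xffffffff it returns 0, because bits 0-3 survive the AND
   but not the shift.  */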
/* Return 1 if ANDOP is a mask suitable for use with an rldic insn
   to perform a left shift.  It must have exactly SHIFTOP least
   significant 0's, then one or more 1's, then zero or more 0's.  */

int
includes_rldic_lshift_p (rtx shiftop, rtx andop)
{
  if (GET_CODE (andop) == CONST_INT)
    {
      HOST_WIDE_INT c, lsb, shift_mask;

      c = INTVAL (andop);
      if (c == 0 || c == ~0)
	return 0;

      shift_mask = ~0;
      shift_mask <<= INTVAL (shiftop);

      /* Find the least significant one bit.  */
      lsb = c & -c;

      /* It must coincide with the LSB of the shift mask.  */
      if (-lsb != shift_mask)
	return 0;

      /* Invert to look for the next transition (if any).  */
      c = ~c;

      /* Remove the low group of ones (originally low group of zeros).  */
      c &= -lsb;

      /* Again find the lsb, and check we have all 1's above.  */
      lsb = c & -c;
      return c == -lsb;
    }
  else if (GET_CODE (andop) == CONST_DOUBLE
	   && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
    {
      HOST_WIDE_INT low, high, lsb;
      HOST_WIDE_INT shift_mask_low, shift_mask_high;

      low = CONST_DOUBLE_LOW (andop);
      if (HOST_BITS_PER_WIDE_INT < 64)
	high = CONST_DOUBLE_HIGH (andop);

      if ((low == 0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == 0))
	  || (low == ~0 && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0)))
	return 0;

      if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
	{
	  shift_mask_high = ~0;
	  if (INTVAL (shiftop) > 32)
	    shift_mask_high <<= INTVAL (shiftop) - 32;

	  lsb = high & -high;

	  if (-lsb != shift_mask_high || INTVAL (shiftop) < 32)
	    return 0;

	  high = ~high;
	  high &= -lsb;

	  lsb = high & -high;
	  return high == -lsb;
	}

      shift_mask_low = ~0;
      shift_mask_low <<= INTVAL (shiftop);

      lsb = low & -low;

      if (-lsb != shift_mask_low)
	return 0;

      if (HOST_BITS_PER_WIDE_INT < 64)
	high = ~high;
      low = ~low;
      low &= -lsb;

      if (HOST_BITS_PER_WIDE_INT < 64 && low == 0)
	{
	  lsb = high & -high;
	  return high == -lsb;
	}

      lsb = low & -low;
      return low == -lsb && (HOST_BITS_PER_WIDE_INT >= 64 || high == ~0);
    }
  else
    return 0;
}
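
/* Worked example: ANDOP = 0x0000ff00 with SHIFTOP = 8 qualifies --
   exactly eight low 0's, then eight 1's, then 0's: lsb = 0x100 matches
   -shift_mask, and after inverting and stripping the low run the
   remaining 1's are again contiguous.  ANDOP = 0x0000ff80 fails the
   first test because its lsb does not coincide with 1 << 8.  */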
/* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
   to perform a left shift.  It must have SHIFTOP or more least
   significant 0's, with the remainder of the word 1's.  */

int
includes_rldicr_lshift_p (rtx shiftop, rtx andop)
{
  if (GET_CODE (andop) == CONST_INT)
    {
      HOST_WIDE_INT c, lsb, shift_mask;

      shift_mask = ~0;
      shift_mask <<= INTVAL (shiftop);
      c = INTVAL (andop);

      /* Find the least significant one bit.  */
      lsb = c & -c;

      /* It must be covered by the shift mask.
	 This test also rejects c == 0.  */
      if ((lsb & shift_mask) == 0)
	return 0;

      /* Check we have all 1's above the transition, and reject all 1's.  */
      return c == -lsb && lsb != 1;
    }
  else if (GET_CODE (andop) == CONST_DOUBLE
	   && (GET_MODE (andop) == VOIDmode || GET_MODE (andop) == DImode))
    {
      HOST_WIDE_INT low, lsb, shift_mask_low;

      low = CONST_DOUBLE_LOW (andop);

      if (HOST_BITS_PER_WIDE_INT < 64)
	{
	  HOST_WIDE_INT high, shift_mask_high;

	  high = CONST_DOUBLE_HIGH (andop);

	  if (low == 0)
	    {
	      shift_mask_high = ~0;
	      if (INTVAL (shiftop) > 32)
		shift_mask_high <<= INTVAL (shiftop) - 32;

	      lsb = high & -high;

	      if ((lsb & shift_mask_high) == 0)
		return 0;

	      return high == -lsb;
	    }
	  if (high != ~0)
	    return 0;
	}

      shift_mask_low = ~0;
      shift_mask_low <<= INTVAL (shiftop);

      lsb = low & -low;

      if ((lsb & shift_mask_low) == 0)
	return 0;

      return low == -lsb && lsb != 1;
    }
  else
    return 0;
}
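
/* Worked example: ANDOP = ~0xff (all 1's above eight low 0's) with
   SHIFTOP <= 8 qualifies: lsb = 0x100 lies inside the shift mask and
   c == -lsb.  A mask such as 0x0000ff00 fails the c == -lsb test
   because its 1's do not extend to the top of the word.  */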
/* Return 1 if operands will generate a valid arguments to rlwimi
   instruction for insert with right shift in 64-bit mode.  The mask may
   not start on the first bit or stop on the last bit because wrap-around
   effects of instruction do not correspond to semantics of RTL insn.  */

int
insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
{
  if (INTVAL (startop) > 32
      && INTVAL (startop) < 64
      && INTVAL (sizeop) > 1
      && INTVAL (sizeop) + INTVAL (startop) < 64
      && INTVAL (shiftop) > 0
      && INTVAL (sizeop) + INTVAL (shiftop) < 32
      && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
    return 1;

  return 0;
}
/* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
   for lfq and stfq insns iff the registers are hard registers.   */

int
registers_ok_for_quad_peep (rtx reg1, rtx reg2)
{
  /* We might have been passed a SUBREG.  */
  if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
    return 0;

  /* We might have been passed non floating point registers.  */
  if (!FP_REGNO_P (REGNO (reg1))
      || !FP_REGNO_P (REGNO (reg2)))
    return 0;

  return (REGNO (reg1) == REGNO (reg2) - 1);
}
/* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
   addr1 and addr2 must be in consecutive memory locations
   (addr2 == addr1 + 8).  */

int
mems_ok_for_quad_peep (rtx mem1, rtx mem2)
{
  rtx addr1, addr2;
  unsigned int reg1, reg2;
  int offset1, offset2;

  /* The mems cannot be volatile.  */
  if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
    return 0;

  addr1 = XEXP (mem1, 0);
  addr2 = XEXP (mem2, 0);

  /* Extract an offset (if used) from the first addr.  */
  if (GET_CODE (addr1) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr1, 0)) != REG)
	return 0;
      else
	{
	  reg1 = REGNO (XEXP (addr1, 0));
	  /* The offset must be constant!  */
	  if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
	    return 0;
	  offset1 = INTVAL (XEXP (addr1, 1));
	}
    }
  else if (GET_CODE (addr1) != REG)
    return 0;
  else
    {
      reg1 = REGNO (addr1);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset1 = 0;
    }

  /* And now for the second addr.  */
  if (GET_CODE (addr2) == PLUS)
    {
      /* If not a REG, return zero.  */
      if (GET_CODE (XEXP (addr2, 0)) != REG)
	return 0;
      else
	{
	  reg2 = REGNO (XEXP (addr2, 0));
	  /* The offset must be constant.  */
	  if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
	    return 0;
	  offset2 = INTVAL (XEXP (addr2, 1));
	}
    }
  else if (GET_CODE (addr2) != REG)
    return 0;
  else
    {
      reg2 = REGNO (addr2);
      /* This was a simple (mem (reg)) expression.  Offset is 0.  */
      offset2 = 0;
    }

  /* Both of these must have the same base register.  */
  if (reg1 != reg2)
    return 0;

  /* The offset for the second addr must be 8 more than the first addr.  */
  if (offset2 != offset1 + 8)
    return 0;

  /* All the tests passed.  addr1 and addr2 are valid for lfq or stfq
     instructions.  */
  return 1;
}
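
/* For illustration (not part of the original sources):
   (mem:DF (plus r3 8)) paired with (mem:DF (plus r3 16)) passes -- same
   base register, offsets differing by exactly 8 -- and can be combined
   into one lfq/stfq; pairing (plus r3 8) with (plus r4 16), or with
   (plus r3 12), is rejected above.  */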
rtx
rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
{
  static bool eliminated = false;
  if (mode != SDmode)
    return assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
  else
    {
      rtx mem = cfun->machine->sdmode_stack_slot;
      gcc_assert (mem != NULL_RTX);

      if (!eliminated)
	{
	  mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
	  cfun->machine->sdmode_stack_slot = mem;
	  eliminated = true;
	}
      return mem;
    }
}
static tree
rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
{
  /* Don't walk into types.  */
  if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
    {
      *walk_subtrees = 0;
      return NULL_TREE;
    }

  switch (TREE_CODE (*tp))
    {
    case VAR_DECL:
    case PARM_DECL:
    case FIELD_DECL:
    case RESULT_DECL:
    case REAL_CST:
    case INDIRECT_REF:
    case ALIGN_INDIRECT_REF:
    case MISALIGNED_INDIRECT_REF:
    case VIEW_CONVERT_EXPR:
      if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
	return *tp;
      break;
    default:
      break;
    }

  return NULL_TREE;
}
/* Allocate a 64-bit stack slot to be used for copying SDmode
   values through if this function has any SDmode references.  */

static void
rs6000_alloc_sdmode_stack_slot (void)
{
  tree t;
  basic_block bb;
  gimple_stmt_iterator gsi;

  gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);

  FOR_EACH_BB (bb)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
	if (ret)
	  {
	    rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
	    cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
								  SDmode, 0);
	    return;
	  }
      }

  /* Check for any SDmode parameters of the function.  */
  for (t = DECL_ARGUMENTS (cfun->decl); t; t = TREE_CHAIN (t))
    {
      if (TREE_TYPE (t) == error_mark_node)
	continue;

      if (TYPE_MODE (TREE_TYPE (t)) == SDmode
	  || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
	{
	  rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
	  cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
								SDmode, 0);
	  return;
	}
    }
}
static void
rs6000_instantiate_decls (void)
{
  if (cfun->machine->sdmode_stack_slot != NULL_RTX)
    instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
}
/* Return the register class of a scratch register needed to copy IN into
   or out of a register in RCLASS in MODE.  If it can be done directly,
   NO_REGS is returned.  */

enum reg_class
rs6000_secondary_reload_class (enum reg_class rclass,
			       enum machine_mode mode ATTRIBUTE_UNUSED,
			       rtx in)
{
  int regno;

  if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
#if TARGET_MACHO
		     && MACHOPIC_INDIRECT
#endif
		     ))
    {
      /* We cannot copy a symbolic operand directly into anything
	 other than BASE_REGS for TARGET_ELF.  So indicate that a
	 register from BASE_REGS is needed as an intermediate
	 register.

	 On Darwin, pic addresses require a load from memory, which
	 needs a base register.  */
      if (rclass != BASE_REGS
	  && (GET_CODE (in) == SYMBOL_REF
	      || GET_CODE (in) == HIGH
	      || GET_CODE (in) == LABEL_REF
	      || GET_CODE (in) == CONST))
	return BASE_REGS;
    }

  if (GET_CODE (in) == REG)
    {
      regno = REGNO (in);
      if (regno >= FIRST_PSEUDO_REGISTER)
	{
	  regno = true_regnum (in);
	  if (regno >= FIRST_PSEUDO_REGISTER)
	    regno = -1;
	}
    }
  else if (GET_CODE (in) == SUBREG)
    {
      regno = true_regnum (in);
      if (regno >= FIRST_PSEUDO_REGISTER)
	regno = -1;
    }
  else
    regno = -1;

  /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
     into anything.  */
  if (rclass == GENERAL_REGS || rclass == BASE_REGS
      || (regno >= 0 && INT_REGNO_P (regno)))
    return NO_REGS;

  /* Constants, memory, and FP registers can go into FP registers.  */
  if ((regno == -1 || FP_REGNO_P (regno))
      && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
    return (mode != SDmode) ? NO_REGS : GENERAL_REGS;

  /* Memory, and AltiVec registers can go into AltiVec registers.  */
  if ((regno == -1 || ALTIVEC_REGNO_P (regno))
      && rclass == ALTIVEC_REGS)
    return NO_REGS;

  /* We can copy among the CR registers.  */
  if ((rclass == CR_REGS || rclass == CR0_REGS)
      && regno >= 0 && CR_REGNO_P (regno))
    return NO_REGS;

  /* Otherwise, we need GENERAL_REGS.  */
  return GENERAL_REGS;
}
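
/* For illustration (not part of the original sources): on TARGET_ELF,
   copying (symbol_ref "x") into FLOAT_REGS reports BASE_REGS, since the
   address must first be formed in a base register; copying between two
   FPRs, or a MEM into ALTIVEC_REGS, reports NO_REGS (no scratch
   register is needed).  */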
/* Given a comparison operation, return the bit number in CCR to test.  We
   know this is a valid comparison.

   SCC_P is 1 if this is for an scc.  That means that %D will have been
   used instead of %C, so the bits will be in different places.

   Return -1 if OP isn't a valid comparison for some reason.  */

int
ccr_bit (rtx op, int scc_p)
{
  enum rtx_code code = GET_CODE (op);
  enum machine_mode cc_mode;
  int cc_regnum;
  int base_bit;
  rtx reg;

  if (!COMPARISON_P (op))
    return -1;

  reg = XEXP (op, 0);

  gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));

  cc_mode = GET_MODE (reg);
  cc_regnum = REGNO (reg);
  base_bit = 4 * (cc_regnum - CR0_REGNO);

  validate_condition_mode (code, cc_mode);

  /* When generating a sCOND operation, only positive conditions are
     allowed.  */
  gcc_assert (!scc_p
	      || code == EQ || code == GT || code == LT || code == UNORDERED
	      || code == GTU || code == LTU);

  switch (code)
    {
    case NE:
      return scc_p ? base_bit + 3 : base_bit + 2;
    case EQ:
      return base_bit + 2;
    case GT:  case GTU:  case UNLE:
      return base_bit + 1;
    case LT:  case LTU:  case UNGE:
      return base_bit;
    case ORDERED:  case UNORDERED:
      return base_bit + 3;

    case GE:  case GEU:
      /* If scc, we will have done a cror to put the bit in the
	 unordered position.  So test that bit.  For integer, this is ! LT
	 unless this is an scc insn.  */
      return scc_p ? base_bit + 3 : base_bit;

    case LE:  case LEU:
      return scc_p ? base_bit + 3 : base_bit + 1;

    default:
      gcc_unreachable ();
    }
}
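
/* For illustration (assuming the usual CR register numbering): a GT
   comparison against CR0 yields base_bit 0 and bit number 1; the same
   comparison against CR1 yields 5.  With scc_p set, GE/LE/NE instead
   test the "unordered" position base_bit + 3 that a preceding cror has
   set up.  */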
/* Return the GOT register.  */

rtx
rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
{
  /* The second flow pass currently (June 1999) can't update
     regs_ever_live without disturbing other parts of the compiler, so
     update it here to make the prolog/epilogue code happy.  */
  if (!can_create_pseudo_p ()
      && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
    df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);

  crtl->uses_pic_offset_table = 1;

  return pic_offset_table_rtx;
}
/* Function to init struct machine_function.
   This will be called, via a pointer variable,
   from push_function_context.  */

static struct machine_function *
rs6000_init_machine_status (void)
{
  return GGC_CNEW (machine_function);
}
/* These macros test for integers and extract the low-order bits.  */
#define INT_P(X)  \
((GET_CODE (X) == CONST_INT || GET_CODE (X) == CONST_DOUBLE)	\
 && GET_MODE (X) == VOIDmode)

#define INT_LOWPART(X) \
  (GET_CODE (X) == CONST_INT ? INTVAL (X) : CONST_DOUBLE_LOW (X))
int
extract_MB (rtx op)
{
  int i;
  unsigned long val = INT_LOWPART (op);

  /* If the high bit is zero, the value is the first 1 bit we find
     from the left.  */
  if ((val & 0x80000000) == 0)
    {
      gcc_assert (val & 0xffffffff);

      i = 1;
      while (((val <<= 1) & 0x80000000) == 0)
	++i;
      return i;
    }

  /* If the high bit is set and the low bit is not, or the mask is all
     1's, the value is zero.  */
  if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
    return 0;

  /* Otherwise we have a wrap-around mask.  Look for the first 0 bit
     from the right.  */
  i = 31;
  while (((val >>= 1) & 1) != 0)
    --i;

  return i;
}
int
extract_ME (rtx op)
{
  int i;
  unsigned long val = INT_LOWPART (op);

  /* If the low bit is zero, the value is the first 1 bit we find from
     the right.  */
  if ((val & 1) == 0)
    {
      gcc_assert (val & 0xffffffff);

      i = 30;
      while (((val >>= 1) & 1) == 0)
	--i;

      return i;
    }

  /* If the low bit is set and the high bit is not, or the mask is all
     1's, the value is 31.  */
  if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
    return 31;

  /* Otherwise we have a wrap-around mask.  Look for the first 0 bit
     from the left.  */
  i = 0;
  while (((val <<= 1) & 0x80000000) != 0)
    ++i;

  return i;
}
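
/* Worked example: for the mask 0x0ff00000, extract_MB returns 4 and
   extract_ME returns 11 -- in the IBM bit numbering used by rlwinm,
   bit 0 is the most significant, so the run of 1's spans bits 4..11.
   For a wrap-around mask such as 0xf000000f, MB = 28 and ME = 3.  */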
/* Locate some local-dynamic symbol still in use by this function
   so that we can print its name in some tls_ld pattern.  */

static const char *
rs6000_get_some_local_dynamic_name (void)
{
  rtx insn;

  if (cfun->machine->some_ld_name)
    return cfun->machine->some_ld_name;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    if (INSN_P (insn)
	&& for_each_rtx (&PATTERN (insn),
			 rs6000_get_some_local_dynamic_name_1, 0))
      return cfun->machine->some_ld_name;

  gcc_unreachable ();
}
/* Helper function for rs6000_get_some_local_dynamic_name.  */

static int
rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
{
  rtx x = *px;

  if (GET_CODE (x) == SYMBOL_REF)
    {
      const char *str = XSTR (x, 0);
      if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
	{
	  cfun->machine->some_ld_name = str;
	  return 1;
	}
    }

  return 0;
}
/* Write out a function code label.  */

void
rs6000_output_function_entry (FILE *file, const char *fname)
{
  if (fname[0] != '.')
    {
      switch (DEFAULT_ABI)
	{
	default:
	  gcc_unreachable ();

	case ABI_AIX:
	  if (DOT_SYMBOLS)
	    putc ('.', file);
	  else
	    ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
	  break;

	case ABI_V4:
	case ABI_DARWIN:
	  break;
	}
    }
  if (TARGET_AIX)
    RS6000_OUTPUT_BASENAME (file, fname);
  else
    assemble_name (file, fname);
}
/* Print an operand.  Recognize special options, documented below.  */

#if TARGET_ELF
#define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
#define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
#else
#define SMALL_DATA_RELOC "sda21"
#define SMALL_DATA_REG 0
#endif

void
print_operand (FILE *file, rtx x, int code)
{
  int i;
  HOST_WIDE_INT val;
  unsigned HOST_WIDE_INT uval;

  switch (code)
    {
    case '.':
      /* Write out an instruction after the call which may be replaced
	 with glue code by the loader.  This depends on the AIX version.  */
      asm_fprintf (file, RS6000_CALL_GLUE);
      return;

      /* %a is output_address.  */

    case 'A':
      /* If X is a constant integer whose low-order 5 bits are zero,
	 write 'l'.  Otherwise, write 'r'.  This is a kludge to fix a bug
	 in the AIX assembler where "sri" with a zero shift count
	 writes a trash instruction.  */
      if (GET_CODE (x) == CONST_INT && (INTVAL (x) & 31) == 0)
	putc ('l', file);
      else
	putc ('r', file);
      return;

    case 'b':
      /* If constant, low-order 16 bits of constant, unsigned.
	 Otherwise, write normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 0xffff);
      else
	print_operand (file, x, 0);
      return;

    case 'B':
      /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
	 for 64-bit mask direction.  */
      putc (((INT_LOWPART (x) & 1) == 0 ? 'r' : 'l'), file);
      return;

      /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
	 output_operand.  */

    case 'c':
      /* X is a CR register.  Print the number of the GT bit of the CR.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%E value");
      else
	fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 1);
      return;

    case 'D':
      /* Like 'J' but get to the GT bit only.  */
      gcc_assert (GET_CODE (x) == REG);

      /* Bit 1 is GT bit.  */
      i = 4 * (REGNO (x) - CR0_REGNO) + 1;

      /* Add one for shift count in rlinm for scc.  */
      fprintf (file, "%d", i + 1);
      return;

    case 'E':
      /* X is a CR register.  Print the number of the EQ bit of the CR */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%E value");
      else
	fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
      return;

    case 'f':
      /* X is a CR register.  Print the shift count needed to move it
	 to the high-order four bits.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%f value");
      else
	fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
      return;

    case 'F':
      /* Similar, but print the count for the rotate in the opposite
	 direction.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%F value");
      else
	fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
      return;

    case 'G':
      /* X is a constant integer.  If it is negative, print "m",
	 otherwise print "z".  This is to make an aze or ame insn.  */
      if (GET_CODE (x) != CONST_INT)
	output_operand_lossage ("invalid %%G value");
      else if (INTVAL (x) >= 0)
	putc ('z', file);
      else
	putc ('m', file);
      return;

    case 'h':
      /* If constant, output low-order five bits.  Otherwise, write
	 normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 31);
      else
	print_operand (file, x, 0);
      return;

    case 'H':
      /* If constant, output low-order six bits.  Otherwise, write
	 normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, INT_LOWPART (x) & 63);
      else
	print_operand (file, x, 0);
      return;

    case 'i':
      /* Print `i' if this is a constant, else nothing.  */
      if (INT_P (x))
	putc ('i', file);
      return;

    case 'j':
      /* Write the bit number in CCR for jump.  */
      i = ccr_bit (x, 0);
      if (i == -1)
	output_operand_lossage ("invalid %%j code");
      else
	fprintf (file, "%d", i);
      return;

    case 'J':
      /* Similar, but add one for shift count in rlinm for scc and pass
	 scc flag to `ccr_bit'.  */
      i = ccr_bit (x, 1);
      if (i == -1)
	output_operand_lossage ("invalid %%J code");
      else
	/* If we want bit 31, write a shift count of zero, not 32.  */
	fprintf (file, "%d", i == 31 ? 0 : i + 1);
      return;

    case 'k':
      /* X must be a constant.  Write the 1's complement of the
	 constant.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%k value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INT_LOWPART (x));
      return;

    case 'K':
      /* X must be a symbolic constant on ELF.  Write an
	 expression suitable for an 'addi' that adds in the low 16
	 bits of the MEM.  */
      if (GET_CODE (x) != CONST)
	{
	  print_operand_address (file, x);
	  fputs ("@l", file);
	}
      else
	{
	  if (GET_CODE (XEXP (x, 0)) != PLUS
	      || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
		  && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
	      || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
	    output_operand_lossage ("invalid %%K value");
	  print_operand_address (file, XEXP (XEXP (x, 0), 0));
	  fputs ("@l", file);
	  /* For GNU as, there must be a non-alphanumeric character
	     between 'l' and the number.  The '-' is added by
	     print_operand() already.  */
	  if (INTVAL (XEXP (XEXP (x, 0), 1)) >= 0)
	    fputs ("+", file);
	  print_operand (file, XEXP (XEXP (x, 0), 1), 0);
	}
      return;

      /* %l is output_asm_label.  */

    case 'L':
      /* Write second word of DImode or DFmode reference.  Works on register
	 or non-indexed memory only.  */
      if (GET_CODE (x) == REG)
	fputs (reg_names[REGNO (x) + 1], file);
      else if (GET_CODE (x) == MEM)
	{
	  /* Handle possible auto-increment.  Since it is pre-increment and
	     we have already done it, we can just use an offset of word.  */
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (plus_constant (XEXP (XEXP (x, 0), 0),
					   UNITS_PER_WORD));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (plus_constant (XEXP (XEXP (x, 0), 0),
					   UNITS_PER_WORD));
	  else
	    output_address (XEXP (adjust_address_nv (x, SImode,
						     UNITS_PER_WORD),
				  0));

	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;

    case 'm':
      /* MB value for a mask operand.  */
      if (! mask_operand (x, SImode))
	output_operand_lossage ("invalid %%m value");
      else
	fprintf (file, "%d", extract_MB (x));
      return;

    case 'M':
      /* ME value for a mask operand.  */
      if (! mask_operand (x, SImode))
	output_operand_lossage ("invalid %%M value");
      else
	fprintf (file, "%d", extract_ME (x));
      return;

      /* %n outputs the negative of its operand.  */

    case 'N':
      /* Write the number of elements in the vector times 4.  */
      if (GET_CODE (x) != PARALLEL)
	output_operand_lossage ("invalid %%N value");
      else
	fprintf (file, "%d", XVECLEN (x, 0) * 4);
      return;

    case 'O':
      /* Similar, but subtract 1 first.  */
      if (GET_CODE (x) != PARALLEL)
	output_operand_lossage ("invalid %%O value");
      else
	fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
      return;

    case 'p':
      /* X is a CONST_INT that is a power of two.  Output the logarithm.  */
      if (! INT_P (x)
	  || INT_LOWPART (x) < 0
	  || (i = exact_log2 (INT_LOWPART (x))) < 0)
	output_operand_lossage ("invalid %%p value");
      else
	fprintf (file, "%d", i);
      return;

    case 'P':
      /* The operand must be an indirect memory reference.  The result
	 is the register name.  */
      if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
	  || REGNO (XEXP (x, 0)) >= 32)
	output_operand_lossage ("invalid %%P value");
      else
	fputs (reg_names[REGNO (XEXP (x, 0))], file);
      return;

    case 'q':
      /* This outputs the logical code corresponding to a boolean
	 expression.  The expression may have one or both operands
	 negated (if one, only the first one).  For condition register
	 logical operations, it will also treat the negated
	 CR codes as NOTs, but not handle NOTs of them.  */
      {
	const char *const *t = 0;
	const char *s;
	enum rtx_code code = GET_CODE (x);
	static const char * const tbl[3][3] = {
	  { "and", "andc", "nor" },
	  { "or", "orc", "nand" },
	  { "xor", "eqv", "xor" } };

	if (code == AND)
	  t = tbl[0];
	else if (code == IOR)
	  t = tbl[1];
	else if (code == XOR)
	  t = tbl[2];
	else
	  output_operand_lossage ("invalid %%q value");

	if (GET_CODE (XEXP (x, 0)) != NOT)
	  s = t[0];
	else
	  {
	    if (GET_CODE (XEXP (x, 1)) == NOT)
	      s = t[2];
	    else
	      s = t[1];
	  }

	fputs (s, file);
      }
      return;

    case 'R':
      /* X is a CR register.  Print the mask for `mtcrf'.  */
      if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
	output_operand_lossage ("invalid %%R value");
      else
	fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
      return;

    case 's':
      /* Low 5 bits of 32 - value */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%s value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INT_LOWPART (x)) & 31);
      return;

    case 'S':
      /* PowerPC64 mask position.  All 0's is excluded.
	 CONST_INT 32-bit mask is considered sign-extended so any
	 transition must occur within the CONST_INT, not on the boundary.  */
      if (! mask64_operand (x, DImode))
	output_operand_lossage ("invalid %%S value");

      uval = INT_LOWPART (x);

      if (uval & 1)	/* Clear Left */
	{
#if HOST_BITS_PER_WIDE_INT > 64
	  uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
#endif
	  i = 64;
	}
      else		/* Clear Right */
	{
	  uval = ~uval;
#if HOST_BITS_PER_WIDE_INT > 64
	  uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
#endif
	  i = 63;
	}
      while (uval != 0)
	--i, uval >>= 1;
      gcc_assert (i >= 0);
      fprintf (file, "%d", i);
      return;

    case 't':
      /* Like 'J' but get to the OVERFLOW/UNORDERED bit.  */
      gcc_assert (GET_CODE (x) == REG && GET_MODE (x) == CCmode);

      /* Bit 3 is OV bit.  */
      i = 4 * (REGNO (x) - CR0_REGNO) + 3;

      /* If we want bit 31, write a shift count of zero, not 32.  */
      fprintf (file, "%d", i == 31 ? 0 : i + 1);
      return;

    case 'T':
      /* Print the symbolic name of a branch target register.  */
      if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
				  && REGNO (x) != CTR_REGNO))
	output_operand_lossage ("invalid %%T value");
      else if (REGNO (x) == LR_REGNO)
	fputs (TARGET_NEW_MNEMONICS ? "lr" : "r", file);
      else
	fputs ("ctr", file);
      return;

    case 'u':
      /* High-order 16 bits of constant for use in unsigned operand.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%u value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_HEX,
		 (INT_LOWPART (x) >> 16) & 0xffff);
      return;

    case 'v':
      /* High-order 16 bits of constant for use in signed operand.  */
      if (! INT_P (x))
	output_operand_lossage ("invalid %%v value");
      else
	fprintf (file, HOST_WIDE_INT_PRINT_HEX,
		 (INT_LOWPART (x) >> 16) & 0xffff);
      return;

    case 'U':
      /* Print `u' if this has an auto-increment or auto-decrement.  */
      if (GET_CODE (x) == MEM
	  && (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC
	      || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
	putc ('u', file);
      return;

    case 'V':
      /* Print the trap code for this operand.  */
      switch (GET_CODE (x))
	{
	case EQ:
	  fputs ("eq", file);   /* 4 */
	  break;
	case NE:
	  fputs ("ne", file);   /* 24 */
	  break;
	case LT:
	  fputs ("lt", file);   /* 16 */
	  break;
	case LE:
	  fputs ("le", file);   /* 20 */
	  break;
	case GT:
	  fputs ("gt", file);   /* 8 */
	  break;
	case GE:
	  fputs ("ge", file);   /* 12 */
	  break;
	case LTU:
	  fputs ("llt", file);  /* 2 */
	  break;
	case LEU:
	  fputs ("lle", file);  /* 6 */
	  break;
	case GTU:
	  fputs ("lgt", file);  /* 1 */
	  break;
	case GEU:
	  fputs ("lge", file);  /* 5 */
	  break;
	default:
	  gcc_unreachable ();
	}
      break;

    case 'w':
      /* If constant, low-order 16 bits of constant, signed.  Otherwise, write
	 normally.  */
      if (INT_P (x))
	fprintf (file, HOST_WIDE_INT_PRINT_DEC,
		 ((INT_LOWPART (x) & 0xffff) ^ 0x8000) - 0x8000);
      else
	print_operand (file, x, 0);
      return;

    case 'W':
      /* MB value for a PowerPC64 rldic operand.  */
      val = (GET_CODE (x) == CONST_INT
	     ? INTVAL (x) : CONST_DOUBLE_HIGH (x));

      if (val < 0)
	i = -1;
      else
	for (i = 0; i < HOST_BITS_PER_WIDE_INT; i++)
	  if ((val <<= 1) < 0)
	    break;

#if HOST_BITS_PER_WIDE_INT == 32
      if (GET_CODE (x) == CONST_INT && i >= 0)
	i += 32;  /* zero-extend high-part was all 0's */
      else if (GET_CODE (x) == CONST_DOUBLE && i == 32)
	{
	  val = CONST_DOUBLE_LOW (x);

	  gcc_assert (val);
	  if (val < 0)
	    --i;
	  else
	    for ( ; i < 64; i++)
	      if ((val <<= 1) < 0)
		break;
	}
#endif

      fprintf (file, "%d", i + 1);
      return;

    case 'X':
      if (GET_CODE (x) == MEM
	  && (legitimate_indexed_address_p (XEXP (x, 0), 0)
	      || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
		  && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
	putc ('x', file);
      return;

    case 'Y':
      /* Like 'L', for third word of TImode  */
      if (GET_CODE (x) == REG)
	fputs (reg_names[REGNO (x) + 2], file);
      else if (GET_CODE (x) == MEM)
	{
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (plus_constant (XEXP (XEXP (x, 0), 0), 8));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (plus_constant (XEXP (XEXP (x, 0), 0), 8));
	  else
	    output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;

    case 'z':
      /* X is a SYMBOL_REF.  Write out the name preceded by a
	 period and without any trailing data in brackets.  Used for function
	 names.  If we are configured for System V (or the embedded ABI) on
	 the PowerPC, do not emit the period, since those systems do not use
	 TOCs and the like.  */
      gcc_assert (GET_CODE (x) == SYMBOL_REF);

      /* Mark the decl as referenced so that cgraph will output the
	 function.  */
      if (SYMBOL_REF_DECL (x))
	mark_decl_referenced (SYMBOL_REF_DECL (x));

      /* For macho, check to see if we need a stub.  */
      if (TARGET_MACHO)
	{
	  const char *name = XSTR (x, 0);
#if TARGET_MACHO
	  if (MACHOPIC_INDIRECT
	      && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
	    name = machopic_indirection_name (x, /*stub_p=*/true);
#endif
	  assemble_name (file, name);
	}
      else if (!DOT_SYMBOLS)
	assemble_name (file, XSTR (x, 0));
      else
	rs6000_output_function_entry (file, XSTR (x, 0));
      return;

    case 'Z':
      /* Like 'L', for last word of TImode.  */
      if (GET_CODE (x) == REG)
	fputs (reg_names[REGNO (x) + 3], file);
      else if (GET_CODE (x) == MEM)
	{
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC
	      || GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    output_address (plus_constant (XEXP (XEXP (x, 0), 0), 12));
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (plus_constant (XEXP (XEXP (x, 0), 0), 12));
	  else
	    output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
	  if (small_data_operand (x, GET_MODE (x)))
	    fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
		     reg_names[SMALL_DATA_REG]);
	}
      return;

      /* Print AltiVec or SPE memory operand.  */
    case 'y':
      {
	rtx tmp;

	gcc_assert (GET_CODE (x) == MEM);

	tmp = XEXP (x, 0);

	/* Ugly hack because %y is overloaded.  */
	if ((TARGET_SPE || TARGET_E500_DOUBLE)
	    && (GET_MODE_SIZE (GET_MODE (x)) == 8
		|| GET_MODE (x) == TFmode
		|| GET_MODE (x) == TImode))
	  {
	    /* Handle [reg].  */
	    if (GET_CODE (tmp) == REG)
	      {
		fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
		break;
	      }
	    /* Handle [reg+UIMM].  */
	    else if (GET_CODE (tmp) == PLUS &&
		     GET_CODE (XEXP (tmp, 1)) == CONST_INT)
	      {
		int x;

		gcc_assert (GET_CODE (XEXP (tmp, 0)) == REG);

		x = INTVAL (XEXP (tmp, 1));
		fprintf (file, "%d(%s)", x, reg_names[REGNO (XEXP (tmp, 0))]);
		break;
	      }

	    /* Fall through.  Must be [reg+reg].  */
	  }
	if (TARGET_ALTIVEC
	    && GET_CODE (tmp) == AND
	    && GET_CODE (XEXP (tmp, 1)) == CONST_INT
	    && INTVAL (XEXP (tmp, 1)) == -16)
	  tmp = XEXP (tmp, 0);
	if (GET_CODE (tmp) == REG)
	  fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
	else
	  {
	    if (GET_CODE (tmp) != PLUS
		|| !REG_P (XEXP (tmp, 0))
		|| !REG_P (XEXP (tmp, 1)))
	      {
		output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
		break;
	      }

	    if (REGNO (XEXP (tmp, 0)) == 0)
	      fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
		       reg_names[ REGNO (XEXP (tmp, 0)) ]);
	    else
	      fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
		       reg_names[ REGNO (XEXP (tmp, 1)) ]);
	  }
	break;
      }

    case 0:
      if (GET_CODE (x) == REG)
	fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (GET_CODE (x) == MEM)
	{
	  /* We need to handle PRE_INC and PRE_DEC here, since we need to
	     know the width from the mode.  */
	  if (GET_CODE (XEXP (x, 0)) == PRE_INC)
	    fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
		     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
	  else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
	    fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
		     reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
	  else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
	    output_address (XEXP (XEXP (x, 0), 1));
	  else
	    output_address (XEXP (x, 0));
	}
      else
	output_addr_const (file, x);
      return;

    case '&':
      assemble_name (file, rs6000_get_some_local_dynamic_name ());
      return;

    default:
      output_operand_lossage ("invalid %%xn code");
    }
}
/* Print the address of an operand.  */

void
print_operand_address (FILE *file, rtx x)
{
  if (GET_CODE (x) == REG)
    fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
           || GET_CODE (x) == LABEL_REF)
    {
      output_addr_const (file, x);
      if (small_data_operand (x, GET_MODE (x)))
        fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
                 reg_names[SMALL_DATA_REG]);
      else
        gcc_assert (!TARGET_TOC);
    }
  else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == REG)
    {
      gcc_assert (REG_P (XEXP (x, 0)));
      if (REGNO (XEXP (x, 0)) == 0)
        fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
                 reg_names[ REGNO (XEXP (x, 0)) ]);
      else
        fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
                 reg_names[ REGNO (XEXP (x, 1)) ]);
    }
  else if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
    fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
             INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
#if TARGET_ELF
  else if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == REG
           && CONSTANT_P (XEXP (x, 1)))
    {
      output_addr_const (file, XEXP (x, 1));
      fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
    }
#endif
#if TARGET_MACHO
  else if (GET_CODE (x) == LO_SUM && GET_CODE (XEXP (x, 0)) == REG
           && CONSTANT_P (XEXP (x, 1)))
    {
      fprintf (file, "lo16(");
      output_addr_const (file, XEXP (x, 1));
      fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
    }
#endif
  else if (legitimate_constant_pool_address_p (x))
    {
      output_addr_const (file, XEXP (x, 1));
      fprintf (file, "(%s)", reg_names[REGNO (XEXP (x, 0))]);
    }
  else
    gcc_unreachable ();
}
/* Implement OUTPUT_ADDR_CONST_EXTRA for address X.  */

static bool
rs6000_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    switch (XINT (x, 1))
      {
      case UNSPEC_TOCREL:
        x = XVECEXP (x, 0, 0);
        gcc_assert (GET_CODE (x) == SYMBOL_REF);
        output_addr_const (file, x);
        if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
          {
            putc ('-', file);
            assemble_name (file, toc_label_name);
          }
        else if (TARGET_ELF)
          fputs ("@toc", file);
        return true;

#if TARGET_MACHO
      case UNSPEC_MACHOPIC_OFFSET:
        output_addr_const (file, XVECEXP (x, 0, 0));
        putc ('-', file);
        machopic_output_function_base_name (file);
        return true;
#endif
      }
  return false;
}
/* Target hook for assembling integer objects.  The PowerPC version has
   to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
   is defined.  It also needs to handle DI-mode objects on 64-bit
   targets.  */

static bool
rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
{
#ifdef RELOCATABLE_NEEDS_FIXUP
  /* Special handling for SI values.  */
  if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
    {
      static int recurse = 0;

      /* For -mrelocatable, we mark all addresses that need to be fixed up
         in the .fixup section.  */
      if (TARGET_RELOCATABLE
          && in_section != toc_section
          && in_section != text_section
          && !unlikely_text_section_p (in_section)
          && !recurse
          && GET_CODE (x) != CONST_INT
          && GET_CODE (x) != CONST_DOUBLE
          && CONSTANT_P (x))
        {
          char buf[256];

          recurse = 1;
          ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
          fixuplabelno++;
          ASM_OUTPUT_LABEL (asm_out_file, buf);
          fprintf (asm_out_file, "\t.long\t(");
          output_addr_const (asm_out_file, x);
          fprintf (asm_out_file, ")@fixup\n");
          fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
          ASM_OUTPUT_ALIGN (asm_out_file, 2);
          fprintf (asm_out_file, "\t.long\t");
          assemble_name (asm_out_file, buf);
          fprintf (asm_out_file, "\n\t.previous\n");
          recurse = 0;
          return true;
        }
      /* Remove initial .'s to turn a -mcall-aixdesc function
         address into the address of the descriptor, not the function
         itself.  */
      else if (GET_CODE (x) == SYMBOL_REF
               && XSTR (x, 0)[0] == '.'
               && DEFAULT_ABI == ABI_AIX)
        {
          const char *name = XSTR (x, 0);
          while (*name == '.')
            name++;

          fprintf (asm_out_file, "\t.long\t%s\n", name);
          return true;
        }
    }
#endif /* RELOCATABLE_NEEDS_FIXUP */
  return default_assemble_integer (x, size, aligned_p);
}
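/* As a sketch of the output: for a relocatable SImode address constant FOO,
   the fixup path above prints roughly (assuming the generated internal
   label is .LCP0):

        .LCP0:
        .long   (FOO)@fixup
        .section ".fixup","aw"
        .align  2
        .long   .LCP0
        .previous

   i.e. the word's own address is recorded in the .fixup section so it can
   be relocated at load time.  */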
#ifdef HAVE_GAS_HIDDEN
/* Emit an assembler directive to set symbol visibility for DECL to
   VISIBILITY_TYPE.  */

static void
rs6000_assemble_visibility (tree decl, int vis)
{
  /* Functions need to have their entry point symbol visibility set as
     well as their descriptor symbol visibility.  */
  if (DEFAULT_ABI == ABI_AIX
      && DOT_SYMBOLS
      && TREE_CODE (decl) == FUNCTION_DECL)
    {
      static const char * const visibility_types[] = {
        NULL, "internal", "hidden", "protected"
      };

      const char *name, *type;

      name = ((* targetm.strip_name_encoding)
              (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
      type = visibility_types[vis];

      fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
      fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
    }
  else
    default_assemble_visibility (decl, vis);
}
#endif
enum rtx_code
rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
{
  /* Reversal of FP compares needs care -- an ordered compare
     becomes an unordered compare and vice versa.  */
  if (mode == CCFPmode
      && (!flag_finite_math_only
          || code == UNLT || code == UNLE || code == UNGT || code == UNGE
          || code == UNEQ || code == LTGT))
    return reverse_condition_maybe_unordered (code);
  else
    return reverse_condition (code);
}
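/* For example, with NaNs honored the inverse of (lt x y) is not (ge x y)
   but (unge x y): when either operand is a NaN, both LT and GE are false,
   so plain reverse_condition would be wrong for CCFPmode unless
   -ffinite-math-only is in effect.  */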
/* Generate a compare for CODE.  Return a brand-new rtx that
   represents the result of the compare.  */

static rtx
rs6000_generate_compare (rtx cmp, enum machine_mode mode)
{
  enum machine_mode comp_mode;
  rtx compare_result;
  enum rtx_code code = GET_CODE (cmp);
  rtx op0 = XEXP (cmp, 0);
  rtx op1 = XEXP (cmp, 1);

  if (FLOAT_MODE_P (mode))
    comp_mode = CCFPmode;
  else if (code == GTU || code == LTU
           || code == GEU || code == LEU)
    comp_mode = CCUNSmode;
  else if ((code == EQ || code == NE)
           && GET_CODE (op0) == SUBREG
           && GET_CODE (op1) == SUBREG
           && SUBREG_PROMOTED_UNSIGNED_P (op0)
           && SUBREG_PROMOTED_UNSIGNED_P (op1))
    /* These are unsigned values, perhaps there will be a later
       ordering compare that can be shared with this one.
       Unfortunately we cannot detect the signedness of the operands
       for non-subregs.  */
    comp_mode = CCUNSmode;
  else
    comp_mode = CCmode;

  /* First, the compare.  */
  compare_result = gen_reg_rtx (comp_mode);

  /* E500 FP compare instructions on the GPRs.  Yuck!  */
  if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
      && FLOAT_MODE_P (mode))
    {
      rtx cmp, or_result, compare_result2;
      enum machine_mode op_mode = GET_MODE (op0);

      if (op_mode == VOIDmode)
        op_mode = GET_MODE (op1);

      /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
         This explains the following mess.  */

      switch (code)
        {
        case EQ: case UNEQ: case NE: case LTGT:
          switch (op_mode)
            {
            case SFmode:
              cmp = (flag_finite_math_only && !flag_trapping_math)
                ? gen_tstsfeq_gpr (compare_result, op0, op1)
                : gen_cmpsfeq_gpr (compare_result, op0, op1);
              break;

            case DFmode:
              cmp = (flag_finite_math_only && !flag_trapping_math)
                ? gen_tstdfeq_gpr (compare_result, op0, op1)
                : gen_cmpdfeq_gpr (compare_result, op0, op1);
              break;

            case TFmode:
              cmp = (flag_finite_math_only && !flag_trapping_math)
                ? gen_tsttfeq_gpr (compare_result, op0, op1)
                : gen_cmptfeq_gpr (compare_result, op0, op1);
              break;

            default:
              gcc_unreachable ();
            }
          break;

        case GT: case GTU: case UNGT: case UNGE: case GE: case GEU:
          switch (op_mode)
            {
            case SFmode:
              cmp = (flag_finite_math_only && !flag_trapping_math)
                ? gen_tstsfgt_gpr (compare_result, op0, op1)
                : gen_cmpsfgt_gpr (compare_result, op0, op1);
              break;

            case DFmode:
              cmp = (flag_finite_math_only && !flag_trapping_math)
                ? gen_tstdfgt_gpr (compare_result, op0, op1)
                : gen_cmpdfgt_gpr (compare_result, op0, op1);
              break;

            case TFmode:
              cmp = (flag_finite_math_only && !flag_trapping_math)
                ? gen_tsttfgt_gpr (compare_result, op0, op1)
                : gen_cmptfgt_gpr (compare_result, op0, op1);
              break;

            default:
              gcc_unreachable ();
            }
          break;

        case LT: case LTU: case UNLT: case UNLE: case LE: case LEU:
          switch (op_mode)
            {
            case SFmode:
              cmp = (flag_finite_math_only && !flag_trapping_math)
                ? gen_tstsflt_gpr (compare_result, op0, op1)
                : gen_cmpsflt_gpr (compare_result, op0, op1);
              break;

            case DFmode:
              cmp = (flag_finite_math_only && !flag_trapping_math)
                ? gen_tstdflt_gpr (compare_result, op0, op1)
                : gen_cmpdflt_gpr (compare_result, op0, op1);
              break;

            case TFmode:
              cmp = (flag_finite_math_only && !flag_trapping_math)
                ? gen_tsttflt_gpr (compare_result, op0, op1)
                : gen_cmptflt_gpr (compare_result, op0, op1);
              break;

            default:
              gcc_unreachable ();
            }
          break;

        default:
          gcc_unreachable ();
        }

      /* Synthesize LE and GE from LT/GT || EQ.  */
      if (code == LE || code == GE || code == LEU || code == GEU)
        {
          emit_insn (cmp);

          switch (code)
            {
            case LE: code = LT; break;
            case GE: code = GT; break;
            case LEU: code = LT; break;
            case GEU: code = GT; break;
            default: gcc_unreachable ();
            }

          compare_result2 = gen_reg_rtx (CCFPmode);

          /* Do the EQ.  */
          switch (op_mode)
            {
            case SFmode:
              cmp = (flag_finite_math_only && !flag_trapping_math)
                ? gen_tstsfeq_gpr (compare_result2, op0, op1)
                : gen_cmpsfeq_gpr (compare_result2, op0, op1);
              break;

            case DFmode:
              cmp = (flag_finite_math_only && !flag_trapping_math)
                ? gen_tstdfeq_gpr (compare_result2, op0, op1)
                : gen_cmpdfeq_gpr (compare_result2, op0, op1);
              break;

            case TFmode:
              cmp = (flag_finite_math_only && !flag_trapping_math)
                ? gen_tsttfeq_gpr (compare_result2, op0, op1)
                : gen_cmptfeq_gpr (compare_result2, op0, op1);
              break;

            default:
              gcc_unreachable ();
            }
          emit_insn (cmp);

          /* OR them together.  */
          or_result = gen_reg_rtx (CCFPmode);
          cmp = gen_e500_cr_ior_compare (or_result, compare_result,
                                         compare_result2);
          compare_result = or_result;
          code = EQ;
        }
      else
        {
          if (code == NE || code == LTGT)
            code = NE;
          else
            code = EQ;
        }

      emit_insn (cmp);
    }
  else
    {
      /* Generate XLC-compatible TFmode compare as PARALLEL with extra
         CLOBBERs to match cmptf_internal2 pattern.  */
      if (comp_mode == CCFPmode && TARGET_XL_COMPAT
          && GET_MODE (op0) == TFmode
          && !TARGET_IEEEQUAD
          && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
        emit_insn (gen_rtx_PARALLEL (VOIDmode,
          gen_rtvec (9,
                     gen_rtx_SET (VOIDmode,
                                  compare_result,
                                  gen_rtx_COMPARE (comp_mode, op0, op1)),
                     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
                     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
                     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
                     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
                     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
                     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
                     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
                     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)))));
      else if (GET_CODE (op1) == UNSPEC
               && XINT (op1, 1) == UNSPEC_SP_TEST)
        {
          rtx op1b = XVECEXP (op1, 0, 0);
          comp_mode = CCEQmode;
          compare_result = gen_reg_rtx (CCEQmode);
          if (TARGET_64BIT)
            emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
          else
            emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
        }
      else
        emit_insn (gen_rtx_SET (VOIDmode, compare_result,
                                gen_rtx_COMPARE (comp_mode, op0, op1)));
    }

  /* Some kinds of FP comparisons need an OR operation;
     under flag_finite_math_only we don't bother.  */
  if (FLOAT_MODE_P (mode)
      && !flag_finite_math_only
      && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
      && (code == LE || code == GE
          || code == UNEQ || code == LTGT
          || code == UNGT || code == UNLT))
    {
      enum rtx_code or1, or2;
      rtx or1_rtx, or2_rtx, compare2_rtx;
      rtx or_result = gen_reg_rtx (CCEQmode);

      switch (code)
        {
        case LE: or1 = LT; or2 = EQ; break;
        case GE: or1 = GT; or2 = EQ; break;
        case UNEQ: or1 = UNORDERED; or2 = EQ; break;
        case LTGT: or1 = LT; or2 = GT; break;
        case UNGT: or1 = UNORDERED; or2 = GT; break;
        case UNLT: or1 = UNORDERED; or2 = LT; break;
        default: gcc_unreachable ();
        }
      validate_condition_mode (or1, comp_mode);
      validate_condition_mode (or2, comp_mode);
      or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
      or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
      compare2_rtx = gen_rtx_COMPARE (CCEQmode,
                                      gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
                                      const_true_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));

      compare_result = or_result;
      code = EQ;
    }

  validate_condition_mode (code, GET_MODE (compare_result));

  return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
}
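/* The value returned above always has the shape
   (CODE (reg:<CC variant> N) (const_int 0)), where the register is the
   pseudo holding the compare result; the callers below (rs6000_emit_sCOND,
   rs6000_emit_cbranch, rs6000_emit_int_cmove) consume it in that form.  */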
/* Emit the RTL for an sCOND pattern.  */

void
rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
{
  rtx condition_rtx;
  enum machine_mode op_mode;
  enum rtx_code cond_code;
  rtx result = operands[0];

  condition_rtx = rs6000_generate_compare (operands[1], mode);
  cond_code = GET_CODE (condition_rtx);

  if (FLOAT_MODE_P (mode)
      && !TARGET_FPRS && TARGET_HARD_FLOAT)
    {
      rtx t;

      PUT_MODE (condition_rtx, SImode);
      t = XEXP (condition_rtx, 0);

      gcc_assert (cond_code == NE || cond_code == EQ);

      if (cond_code == NE)
        emit_insn (gen_e500_flip_gt_bit (t, t));

      emit_insn (gen_move_from_CR_gt_bit (result, t));
      return;
    }

  if (cond_code == NE
      || cond_code == GE || cond_code == LE
      || cond_code == GEU || cond_code == LEU
      || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
    {
      rtx not_result = gen_reg_rtx (CCEQmode);
      rtx not_op, rev_cond_rtx;
      enum machine_mode cc_mode;

      cc_mode = GET_MODE (XEXP (condition_rtx, 0));

      rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
                                     SImode, XEXP (condition_rtx, 0), const0_rtx);
      not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
      emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
      condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
    }

  op_mode = GET_MODE (XEXP (operands[1], 0));
  if (op_mode == VOIDmode)
    op_mode = GET_MODE (XEXP (operands[1], 1));

  if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
    {
      PUT_MODE (condition_rtx, DImode);
      convert_move (result, condition_rtx, 0);
    }
  else
    {
      PUT_MODE (condition_rtx, SImode);
      emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
    }
}
/* Emit a branch of kind CODE to location LOC.  */

void
rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
{
  rtx condition_rtx, loc_ref;

  condition_rtx = rs6000_generate_compare (operands[0], mode);
  loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
                               gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
                                                     loc_ref, pc_rtx)));
}
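/* The jump emitted above is the canonical conditional-branch RTL,

     (set (pc) (if_then_else (COND (reg) (const_int 0))
                             (label_ref LOC)
                             (pc)))

   which the branch patterns in rs6000.md match and ultimately hand to
   output_cbranch below for assembly output.  */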
/* Return the string to output a conditional branch to LABEL, which is
   the operand number of the label, or -1 if the branch is really a
   conditional return.

   OP is the conditional expression.  XEXP (OP, 0) is assumed to be a
   condition code register and its mode specifies what kind of
   comparison we made.

   REVERSED is nonzero if we should reverse the sense of the comparison.

   INSN is the insn.  */

const char *
output_cbranch (rtx op, const char *label, int reversed, rtx insn)
{
  static char string[64];
  enum rtx_code code = GET_CODE (op);
  rtx cc_reg = XEXP (op, 0);
  enum machine_mode mode = GET_MODE (cc_reg);
  int cc_regno = REGNO (cc_reg) - CR0_REGNO;
  int need_longbranch = label != NULL && get_attr_length (insn) == 8;
  int really_reversed = reversed ^ need_longbranch;
  char *s = string;
  const char *ccode;
  const char *pred;
  rtx note;

  validate_condition_mode (code, mode);

  /* Work out which way this really branches.  We could use
     reverse_condition_maybe_unordered here always but this
     makes the resulting assembler clearer.  */
  if (really_reversed)
    {
      /* Reversal of FP compares needs care -- an ordered compare
         becomes an unordered compare and vice versa.  */
      if (mode == CCFPmode)
        code = reverse_condition_maybe_unordered (code);
      else
        code = reverse_condition (code);
    }

  if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
    {
      /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
         to the GT bit.  */
      switch (code)
        {
        case EQ:
          /* Opposite of GT.  */
          code = GT;
          break;

        case NE:
          code = UNLE;
          break;

        default:
          gcc_unreachable ();
        }
    }

  switch (code)
    {
      /* Not all of these are actually distinct opcodes, but
         we distinguish them for clarity of the resulting assembler.  */
    case NE: case LTGT:
      ccode = "ne"; break;
    case EQ: case UNEQ:
      ccode = "eq"; break;
    case GE: case GEU:
      ccode = "ge"; break;
    case GT: case GTU: case UNGT:
      ccode = "gt"; break;
    case LE: case LEU:
      ccode = "le"; break;
    case LT: case LTU: case UNLT:
      ccode = "lt"; break;
    case UNORDERED: ccode = "un"; break;
    case ORDERED: ccode = "nu"; break;
    case UNGE: ccode = "nl"; break;
    case UNLE: ccode = "ng"; break;
    default:
      gcc_unreachable ();
    }

  /* Maybe we have a guess as to how likely the branch is.
     The old mnemonics don't have a way to specify this information.  */
  pred = "";
  note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
  if (note != NULL_RTX)
    {
      /* PROB is the difference from 50%.  */
      int prob = INTVAL (XEXP (note, 0)) - REG_BR_PROB_BASE / 2;

      /* Only hint for highly probable/improbable branches on newer
         cpus as static prediction overrides processor dynamic
         prediction.  For older cpus we may as well always hint, but
         assume not taken for branches that are very close to 50% as a
         mispredicted taken branch is more expensive than a
         mispredicted not-taken branch.  */
      if (rs6000_always_hint
          || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
              && br_prob_note_reliable_p (note)))
        {
          if (abs (prob) > REG_BR_PROB_BASE / 20
              && ((prob > 0) ^ need_longbranch))
            pred = "+";
          else
            pred = "-";
        }
    }

  if (label == NULL)
    s += sprintf (s, "{b%sr|b%slr%s} ", ccode, ccode, pred);
  else
    s += sprintf (s, "{b%s|b%s%s} ", ccode, ccode, pred);

  /* We need to escape any '%' characters in the reg_names string.
     Assume they'd only be the first character....  */
  if (reg_names[cc_regno + CR0_REGNO][0] == '%')
    *s++ = '%';
  s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);

  if (label != NULL)
    {
      /* If the branch distance was too far, we may have to use an
         unconditional branch to go the distance.  */
      if (need_longbranch)
        s += sprintf (s, ",$+8\n\tb %s", label);
      else
        s += sprintf (s, ",%s", label);
    }

  return string;
}
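/* As an illustration: for a predicted-taken EQ branch on CR0 to .L5 the
   string built above would be something like "{beq|beq+} 0,.L5" (the
   {old|new} mnemonic syntax), while an out-of-range target inverts the
   sense and covers the distance with an unconditional branch, giving
   roughly "{bne|bne-} 0,$+8\n\tb .L5".  */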
/* Return the string to flip the GT bit on a CR.  */
const char *
output_e500_flip_gt_bit (rtx dst, rtx src)
{
  static char string[64];
  int a, b;

  gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
              && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));

  /* GT bit.  */
  a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
  b = 4 * (REGNO (src) - CR0_REGNO) + 1;

  sprintf (string, "crnot %d,%d", a, b);
  return string;
}
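/* Each CR field occupies four condition bits, so the GT bit of CRn is
   condition bit 4*n + 1.  E.g. flipping the GT bit of CR1 into itself
   gives a = b = 4*1 + 1 = 5, and the routine returns "crnot 5,5".  */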
/* Return insn index for the vector compare instruction for given CODE,
   and DEST_MODE, OP_MODE.  Return INSN_NOT_AVAILABLE if valid insn is
   not available.  */

static int
get_vec_cmp_insn (enum rtx_code code,
                  enum machine_mode dest_mode,
                  enum machine_mode op_mode)
{
  if (!TARGET_ALTIVEC)
    return INSN_NOT_AVAILABLE;

  switch (code)
    {
    case EQ:
      if (dest_mode == V16QImode && op_mode == V16QImode)
        return UNSPEC_VCMPEQUB;
      if (dest_mode == V8HImode && op_mode == V8HImode)
        return UNSPEC_VCMPEQUH;
      if (dest_mode == V4SImode && op_mode == V4SImode)
        return UNSPEC_VCMPEQUW;
      if (dest_mode == V4SImode && op_mode == V4SFmode)
        return UNSPEC_VCMPEQFP;
      break;
    case GE:
      if (dest_mode == V4SImode && op_mode == V4SFmode)
        return UNSPEC_VCMPGEFP;
    case GT:
      if (dest_mode == V16QImode && op_mode == V16QImode)
        return UNSPEC_VCMPGTSB;
      if (dest_mode == V8HImode && op_mode == V8HImode)
        return UNSPEC_VCMPGTSH;
      if (dest_mode == V4SImode && op_mode == V4SImode)
        return UNSPEC_VCMPGTSW;
      if (dest_mode == V4SImode && op_mode == V4SFmode)
        return UNSPEC_VCMPGTFP;
      break;
    case GTU:
      if (dest_mode == V16QImode && op_mode == V16QImode)
        return UNSPEC_VCMPGTUB;
      if (dest_mode == V8HImode && op_mode == V8HImode)
        return UNSPEC_VCMPGTUH;
      if (dest_mode == V4SImode && op_mode == V4SImode)
        return UNSPEC_VCMPGTUW;
      break;
    default:
      break;
    }
  return INSN_NOT_AVAILABLE;
}
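/* Only EQ, GT and GTU (plus GE on V4SF) map directly onto AltiVec vcmp*
   instructions; everything else -- LT/LTU by swapping operands, NE and
   the unordered codes by inverting, GE/GEU/LE/LEU as "GT or EQ" -- is
   synthesized by rs6000_emit_vector_compare below.  */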
/* Emit vector compare for operands OP0 and OP1 using code RCODE.
   DMODE is expected destination mode.  This is a recursive function.  */

static rtx
rs6000_emit_vector_compare (enum rtx_code rcode,
                            rtx op0, rtx op1,
                            enum machine_mode dmode)
{
  int vec_cmp_insn;
  rtx mask;
  enum machine_mode dest_mode;
  enum machine_mode op_mode = GET_MODE (op1);

  gcc_assert (TARGET_ALTIVEC);
  gcc_assert (GET_MODE (op0) == GET_MODE (op1));

  /* Floating point vector compare instructions use destination V4SImode.
     Move destination to appropriate mode later.  */
  if (dmode == V4SFmode)
    dest_mode = V4SImode;
  else
    dest_mode = dmode;

  mask = gen_reg_rtx (dest_mode);
  vec_cmp_insn = get_vec_cmp_insn (rcode, dest_mode, op_mode);

  if (vec_cmp_insn == INSN_NOT_AVAILABLE)
    {
      bool swap_operands = false;
      bool try_again = false;

      switch (rcode)
        {
        case LT:
          rcode = GT;
          swap_operands = true;
          try_again = true;
          break;
        case LTU:
          rcode = GTU;
          swap_operands = true;
          try_again = true;
          break;
        case NE:
        case UNLE:
        case UNLT:
        case UNGE:
        case UNGT:
          /* Invert condition and try again.
             e.g., A != B becomes ~(A==B).  */
          {
            enum rtx_code rev_code;
            enum insn_code nor_code;
            rtx eq_rtx;

            rev_code = reverse_condition_maybe_unordered (rcode);
            eq_rtx = rs6000_emit_vector_compare (rev_code, op0, op1,
                                                 dest_mode);

            nor_code = optab_handler (one_cmpl_optab, (int)dest_mode)->insn_code;
            gcc_assert (nor_code != CODE_FOR_nothing);
            emit_insn (GEN_FCN (nor_code) (mask, eq_rtx));

            if (dmode != dest_mode)
              {
                rtx temp = gen_reg_rtx (dest_mode);
                convert_move (temp, mask, 0);
                return temp;
              }
            return mask;
          }
          break;
        case GE:
        case GEU:
        case LE:
        case LEU:
          /* Try GT/GTU/LT/LTU OR EQ */
          {
            rtx c_rtx, eq_rtx;
            enum insn_code ior_code;
            enum rtx_code new_code;

            switch (rcode)
              {
              case GE:
                new_code = GT;
                break;

              case GEU:
                new_code = GTU;
                break;

              case LE:
                new_code = LT;
                break;

              case LEU:
                new_code = LTU;
                break;

              default:
                gcc_unreachable ();
              }

            c_rtx = rs6000_emit_vector_compare (new_code,
                                                op0, op1, dest_mode);
            eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1,
                                                 dest_mode);

            ior_code = optab_handler (ior_optab, (int)dest_mode)->insn_code;
            gcc_assert (ior_code != CODE_FOR_nothing);
            emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
            if (dmode != dest_mode)
              {
                rtx temp = gen_reg_rtx (dest_mode);
                convert_move (temp, mask, 0);
                return temp;
              }
            return mask;
          }
          break;
        default:
          gcc_unreachable ();
        }

      if (try_again)
        {
          vec_cmp_insn = get_vec_cmp_insn (rcode, dest_mode, op_mode);
          /* You only get two chances.  */
          gcc_assert (vec_cmp_insn != INSN_NOT_AVAILABLE);
        }

      if (swap_operands)
        {
          rtx tmp;
          tmp = op0;
          op0 = op1;
          op1 = tmp;
        }
    }

  emit_insn (gen_rtx_SET (VOIDmode, mask,
                          gen_rtx_UNSPEC (dest_mode,
                                          gen_rtvec (2, op0, op1),
                                          GEN_INT (vec_cmp_insn))));
  if (dmode != dest_mode)
    {
      rtx temp = gen_reg_rtx (dest_mode);
      convert_move (temp, mask, 0);
      return temp;
    }
  return mask;
}
/* Return vector select instruction for MODE.  Return INSN_NOT_AVAILABLE
   if no valid insn exists for the given mode.  */

static int
get_vsel_insn (enum machine_mode mode)
{
  switch (mode)
    {
    case V4SImode:
      return UNSPEC_VSEL4SI;
      break;
    case V4SFmode:
      return UNSPEC_VSEL4SF;
      break;
    case V8HImode:
      return UNSPEC_VSEL8HI;
      break;
    case V16QImode:
      return UNSPEC_VSEL16QI;
      break;
    default:
      return INSN_NOT_AVAILABLE;
      break;
    }
  return INSN_NOT_AVAILABLE;
}
/* Emit vector select insn where DEST is destination using
   operands OP1, OP2 and MASK.  */

static void
rs6000_emit_vector_select (rtx dest, rtx op1, rtx op2, rtx mask)
{
  rtx t, temp;
  enum machine_mode dest_mode = GET_MODE (dest);
  int vsel_insn_index = get_vsel_insn (GET_MODE (dest));

  temp = gen_reg_rtx (dest_mode);

  /* For each vector element, select op1 when mask is 1 otherwise
     select op2.  */
  t = gen_rtx_SET (VOIDmode, temp,
                   gen_rtx_UNSPEC (dest_mode,
                                   gen_rtvec (3, op2, op1, mask),
                                   GEN_INT (vsel_insn_index)));
  emit_insn (t);
  emit_move_insn (dest, temp);
  return;
}
/* Emit vector conditional expression.
   DEST is destination.  OP1 and OP2 are two VEC_COND_EXPR operands.
   CC_OP0 and CC_OP1 are the two operands for the relation operation COND.  */

int
rs6000_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
                              rtx cond, rtx cc_op0, rtx cc_op1)
{
  enum machine_mode dest_mode = GET_MODE (dest);
  enum rtx_code rcode = GET_CODE (cond);
  rtx mask;

  if (!TARGET_ALTIVEC)
    return 0;

  /* Get the vector mask for the given relational operations.  */
  mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, dest_mode);

  rs6000_emit_vector_select (dest, op1, op2, mask);

  return 1;
}
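/* E.g. a vectorized VEC_COND_EXPR <a < b, x, y> arrives here with
   cond = (lt a b): the compare produces an all-ones/all-zeros mask per
   element, and the vsel picks the corresponding element of x or y.  */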
/* Emit a conditional move: move TRUE_COND to DEST if OP of the
   operands of the last comparison is nonzero/true, FALSE_COND if it
   is zero/false.  Return 0 if the hardware has no such operation.  */

int
rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
{
  enum rtx_code code = GET_CODE (op);
  rtx op0 = XEXP (op, 0);
  rtx op1 = XEXP (op, 1);
  REAL_VALUE_TYPE c1;
  enum machine_mode compare_mode = GET_MODE (op0);
  enum machine_mode result_mode = GET_MODE (dest);
  rtx temp;
  bool is_against_zero;

  /* These modes should always match.  */
  if (GET_MODE (op1) != compare_mode
      /* In the isel case however, we can use a compare immediate, so
         op1 may be a small constant.  */
      && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
    return 0;
  if (GET_MODE (true_cond) != result_mode)
    return 0;
  if (GET_MODE (false_cond) != result_mode)
    return 0;

  /* First, work out if the hardware can do this at all, or
     if it's too slow....  */
  if (!FLOAT_MODE_P (compare_mode))
    {
      if (TARGET_ISEL)
        return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
      return 0;
    }
  else if (TARGET_HARD_FLOAT && !TARGET_FPRS
           && SCALAR_FLOAT_MODE_P (compare_mode))
    return 0;

  is_against_zero = op1 == CONST0_RTX (compare_mode);

  /* A floating-point subtract might overflow, underflow, or produce
     an inexact result, thus changing the floating-point flags, so it
     can't be generated if we care about that.  It's safe if one side
     of the construct is zero, since then no subtract will be
     generated.  */
  if (SCALAR_FLOAT_MODE_P (compare_mode)
      && flag_trapping_math && ! is_against_zero)
    return 0;

  /* Eliminate half of the comparisons by switching operands, this
     makes the remaining code simpler.  */
  if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
      || code == LTGT || code == LT || code == UNLE)
    {
      code = reverse_condition_maybe_unordered (code);
      temp = true_cond;
      true_cond = false_cond;
      false_cond = temp;
    }

  /* UNEQ and LTGT take four instructions for a comparison with zero,
     it'll probably be faster to use a branch here too.  */
  if (code == UNEQ && HONOR_NANS (compare_mode))
    return 0;

  if (GET_CODE (op1) == CONST_DOUBLE)
    REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);

  /* We're going to try to implement comparisons by performing
     a subtract, then comparing against zero.  Unfortunately,
     Inf - Inf is NaN which is not zero, and so if we don't
     know that the operand is finite and the comparison
     would treat EQ differently from UNORDERED, we can't do it.  */
  if (HONOR_INFINITIES (compare_mode)
      && code != GT && code != UNGE
      && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
      /* Constructs of the form (a OP b ? a : b) are safe.  */
      && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
          || (! rtx_equal_p (op0, true_cond)
              && ! rtx_equal_p (op1, true_cond))))
    return 0;

  /* At this point we know we can use fsel.  */

  /* Reduce the comparison to a comparison against zero.  */
  if (! is_against_zero)
    {
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              gen_rtx_MINUS (compare_mode, op0, op1)));
      op0 = temp;
      op1 = CONST0_RTX (compare_mode);
    }

  /* If we don't care about NaNs we can reduce some of the comparisons
     down to faster ones.  */
  if (! HONOR_NANS (compare_mode))
    switch (code)
      {
      case GT:
        code = LE;
        temp = true_cond;
        true_cond = false_cond;
        false_cond = temp;
        break;
      case UNGE:
        code = GE;
        break;
      case UNEQ:
        code = EQ;
        break;
      default:
        break;
      }

  /* Now, reduce everything down to a GE.  */
  switch (code)
    {
    case GE:
      break;

    case LE:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    case ORDERED:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
      op0 = temp;
      break;

    case EQ:
      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              gen_rtx_NEG (compare_mode,
                                           gen_rtx_ABS (compare_mode, op0))));
      op0 = temp;
      break;

    case UNGE:
      /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
      temp = gen_reg_rtx (result_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              gen_rtx_IF_THEN_ELSE (result_mode,
                                                    gen_rtx_GE (VOIDmode,
                                                                op0, op1),
                                                    true_cond, false_cond)));
      false_cond = true_cond;
      true_cond = temp;

      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    case GT:
      /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
      temp = gen_reg_rtx (result_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp,
                              gen_rtx_IF_THEN_ELSE (result_mode,
                                                    gen_rtx_GE (VOIDmode,
                                                                op0, op1),
                                                    true_cond, false_cond)));
      true_cond = false_cond;
      false_cond = temp;

      temp = gen_reg_rtx (compare_mode);
      emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
      op0 = temp;
      break;

    default:
      gcc_unreachable ();
    }

  emit_insn (gen_rtx_SET (VOIDmode, dest,
                          gen_rtx_IF_THEN_ELSE (result_mode,
                                                gen_rtx_GE (VOIDmode,
                                                            op0, op1),
                                                true_cond, false_cond)));

  return 1;
}
13755 rs6000_emit_int_cmove (rtx dest
, rtx op
, rtx true_cond
, rtx false_cond
)
13757 rtx condition_rtx
, cr
;
13759 /* All isel implementations thus far are 32-bits. */
13760 if (GET_MODE (XEXP (op
, 0)) != SImode
)
13763 /* We still have to do the compare, because isel doesn't do a
13764 compare, it just looks at the CRx bits set by a previous compare
13766 condition_rtx
= rs6000_generate_compare (op
, SImode
);
13767 cr
= XEXP (condition_rtx
, 0);
13769 if (GET_MODE (cr
) == CCmode
)
13770 emit_insn (gen_isel_signed (dest
, condition_rtx
,
13771 true_cond
, false_cond
, cr
));
13773 emit_insn (gen_isel_unsigned (dest
, condition_rtx
,
13774 true_cond
, false_cond
, cr
));
const char *
output_isel (rtx *operands)
{
  enum rtx_code code;

  code = GET_CODE (operands[1]);
  if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
    {
      PUT_CODE (operands[1], reverse_condition (code));
      return "isel %0,%3,%2,%j1";
    }
  else
    return "isel %0,%2,%3,%j1";
}
void
rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
{
  enum machine_mode mode = GET_MODE (op0);
  enum rtx_code c;
  rtx target;

  if (code == SMAX || code == SMIN)
    c = GE;
  else
    c = GEU;

  if (code == SMAX || code == UMAX)
    target = emit_conditional_move (dest, c, op0, op1, mode,
                                    op0, op1, mode, 0);
  else
    target = emit_conditional_move (dest, c, op0, op1, mode,
                                    op1, op0, mode, 0);
  gcc_assert (target);
  if (target != dest)
    emit_move_insn (dest, target);
}
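/* For example, SMIN becomes the conditional move
   dest = (op0 >= op1) ? op1 : op0: the comparison is always GE (GEU for
   the unsigned variants) and only the move operands are swapped.  */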
/* Emit instructions to perform a load-reserved/store-conditional operation.
   The operation performed is an atomic
   (set M (CODE:MODE M OP))
   If not NULL, BEFORE is atomically set to M before the operation, and
   AFTER is set to M after the operation (that is, (CODE:MODE M OP)).
   If SYNC_P then a memory barrier is emitted before the operation.
   Either OP or M may be wrapped in a NOT operation.  */

void
rs6000_emit_sync (enum rtx_code code, enum machine_mode mode,
                  rtx m, rtx op, rtx before_param, rtx after_param,
                  bool sync_p)
{
  enum machine_mode used_mode;
  rtx the_op, set_before, set_after, set_atomic, cc_scratch, before, after;
  rtx used_m;
  rtvec vec;
  HOST_WIDE_INT imask = GET_MODE_MASK (mode);
  rtx shift = NULL_RTX;

  if (sync_p)
    emit_insn (gen_lwsync ());

  used_m = m;

  /* If this is smaller than SImode, we'll have to use SImode with
     adjustments.  */
  if (mode == QImode || mode == HImode)
    {
      rtx newop, oldop;

      if (MEM_ALIGN (used_m) >= 32)
        {
          int ishift = 0;
          if (BYTES_BIG_ENDIAN)
            ishift = GET_MODE_BITSIZE (SImode) - GET_MODE_BITSIZE (mode);

          shift = GEN_INT (ishift);
          used_m = change_address (used_m, SImode, 0);
        }
      else
        {
          rtx addrSI, aligned_addr;
          int shift_mask = mode == QImode ? 0x18 : 0x10;

          addrSI = gen_lowpart_common (SImode,
                                       force_reg (Pmode, XEXP (used_m, 0)));
          addrSI = force_reg (SImode, addrSI);
          shift = gen_reg_rtx (SImode);

          emit_insn (gen_rlwinm (shift, addrSI, GEN_INT (3),
                                 GEN_INT (shift_mask)));
          emit_insn (gen_xorsi3 (shift, shift, GEN_INT (shift_mask)));

          aligned_addr = expand_binop (Pmode, and_optab,
                                       XEXP (used_m, 0),
                                       GEN_INT (-4), NULL_RTX,
                                       1, OPTAB_LIB_WIDEN);
          used_m = change_address (used_m, SImode, aligned_addr);
          set_mem_align (used_m, 32);
        }
      /* It's safe to keep the old alias set of USED_M, because
         the operation is atomic and only affects the original
         USED_M.  */
      m = used_m;

      if (GET_CODE (op) == NOT)
        {
          oldop = lowpart_subreg (SImode, XEXP (op, 0), mode);
          oldop = gen_rtx_NOT (SImode, oldop);
        }
      else
        oldop = lowpart_subreg (SImode, op, mode);

      switch (code)
        {
        case IOR:
        case XOR:
          newop = expand_binop (SImode, and_optab,
                                oldop, GEN_INT (imask), NULL_RTX,
                                1, OPTAB_LIB_WIDEN);
          emit_insn (gen_ashlsi3 (newop, newop, shift));
          break;

        case NOT: /* NAND */
          newop = expand_binop (SImode, ior_optab,
                                oldop, GEN_INT (~imask), NULL_RTX,
                                1, OPTAB_LIB_WIDEN);
          emit_insn (gen_rotlsi3 (newop, newop, shift));
          break;

        case AND:
          newop = expand_binop (SImode, ior_optab,
                                oldop, GEN_INT (~imask), NULL_RTX,
                                1, OPTAB_LIB_WIDEN);
          emit_insn (gen_rotlsi3 (newop, newop, shift));
          break;

        case PLUS:
        case MINUS:
          {
            rtx mask;

            newop = expand_binop (SImode, and_optab,
                                  oldop, GEN_INT (imask), NULL_RTX,
                                  1, OPTAB_LIB_WIDEN);
            emit_insn (gen_ashlsi3 (newop, newop, shift));

            mask = gen_reg_rtx (SImode);
            emit_move_insn (mask, GEN_INT (imask));
            emit_insn (gen_ashlsi3 (mask, mask, shift));

            if (code == PLUS)
              newop = gen_rtx_PLUS (SImode, m, newop);
            else
              newop = gen_rtx_MINUS (SImode, m, newop);
            newop = gen_rtx_AND (SImode, newop, mask);
            newop = gen_rtx_IOR (SImode, newop,
                                 gen_rtx_AND (SImode,
                                              gen_rtx_NOT (SImode, mask),
                                              m));
            break;
          }

        default:
          gcc_unreachable ();
        }

      op = newop;
      used_mode = SImode;
      before = gen_reg_rtx (used_mode);
      after = gen_reg_rtx (used_mode);
    }
  else
    {
      used_mode = mode;
      before = before_param;
      after = after_param;

      if (before == NULL_RTX)
        before = gen_reg_rtx (used_mode);
      if (after == NULL_RTX)
        after = gen_reg_rtx (used_mode);
    }

  if ((code == PLUS || code == MINUS)
      && used_mode != mode)
    the_op = op;  /* Computed above.  */
  else if (GET_CODE (op) == NOT && GET_CODE (m) != NOT)
    the_op = gen_rtx_fmt_ee (code, used_mode, op, m);
  else if (code == NOT)
    the_op = gen_rtx_fmt_ee (IOR, used_mode,
                             gen_rtx_NOT (used_mode, m),
                             gen_rtx_NOT (used_mode, op));
  else
    the_op = gen_rtx_fmt_ee (code, used_mode, m, op);

  set_after = gen_rtx_SET (VOIDmode, after, the_op);
  set_before = gen_rtx_SET (VOIDmode, before, used_m);
  set_atomic = gen_rtx_SET (VOIDmode, used_m,
                            gen_rtx_UNSPEC (used_mode,
                                            gen_rtvec (1, the_op),
                                            UNSPEC_SYNC_OP));
  cc_scratch = gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (CCmode));

  if ((code == PLUS || code == MINUS) && used_mode != mode)
    vec = gen_rtvec (5, set_after, set_before, set_atomic, cc_scratch,
                     gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (SImode)));
  else
    vec = gen_rtvec (4, set_after, set_before, set_atomic, cc_scratch);
  emit_insn (gen_rtx_PARALLEL (VOIDmode, vec));

  /* Shift and mask the return values properly.  */
  if (used_mode != mode && before_param)
    {
      emit_insn (gen_lshrsi3 (before, before, shift));
      convert_move (before_param, before, 1);
    }

  if (used_mode != mode && after_param)
    {
      emit_insn (gen_lshrsi3 (after, after, shift));
      convert_move (after_param, after, 1);
    }

  /* The previous sequence will end with a branch that's dependent on
     the conditional store, so placing an isync will ensure that no
     other instructions (especially, no load or store instructions)
     can start before the atomic operation completes.  */
  if (sync_p)
    emit_insn (gen_isync ());
}
/* A subroutine of the atomic operation splitters.  Jump to LABEL if
   COND is true.  Mark the jump as unlikely to be taken.  */

static void
emit_unlikely_jump (rtx cond, rtx label)
{
  rtx very_unlikely = GEN_INT (REG_BR_PROB_BASE / 100 - 1);
  rtx x;

  x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
  x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
  add_reg_note (x, REG_BR_PROB, very_unlikely);
}
/* A subroutine of the atomic operation splitters.  Emit a load-locked
   instruction in MODE.  */

static void
emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
{
  rtx (*fn) (rtx, rtx) = NULL;
  if (mode == SImode)
    fn = gen_load_locked_si;
  else if (mode == DImode)
    fn = gen_load_locked_di;
  emit_insn (fn (reg, mem));
}
/* A subroutine of the atomic operation splitters.  Emit a store-conditional
   instruction in MODE.  */

static void
emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
{
  rtx (*fn) (rtx, rtx, rtx) = NULL;
  if (mode == SImode)
    fn = gen_store_conditional_si;
  else if (mode == DImode)
    fn = gen_store_conditional_di;

  /* Emit sync before stwcx. to address PPC405 Erratum.  */
  if (PPC405_ERRATUM77)
    emit_insn (gen_memory_barrier ());

  emit_insn (fn (res, mem, val));
}
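/* Together, emit_load_locked and emit_store_conditional give the splitters
   below the classic PowerPC reservation loop, roughly:

     1: lwarx  rT,0,rM       # load and reserve
        ...                  # compute the new value
        stwcx. rS,0,rM       # store iff the reservation still holds
        bne-   1b            # reservation lost: retry

   (ldarx/stdcx. in DImode; the PPC405 erratum workaround above inserts a
   sync in front of the store-conditional.)  */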
/* Expand an atomic fetch-and-operate pattern.  CODE is the binary operation
   to perform.  MEM is the memory on which to operate.  VAL is the second
   operand of the binary operator.  BEFORE and AFTER are optional locations to
   return the value of MEM either before or after the operation.  SCRATCH is
   a scratch register.  */

void
rs6000_split_atomic_op (enum rtx_code code, rtx mem, rtx val,
                        rtx before, rtx after, rtx scratch)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx label, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);

  emit_insn (gen_lwsync ());

  label = gen_label_rtx ();
  emit_label (label);
  label = gen_rtx_LABEL_REF (VOIDmode, label);

  if (before == NULL_RTX)
    before = scratch;
  emit_load_locked (mode, before, mem);

  if (code == NOT)
    x = gen_rtx_IOR (mode,
                     gen_rtx_NOT (mode, before),
                     gen_rtx_NOT (mode, val));
  else if (code == AND)
    x = gen_rtx_UNSPEC (mode, gen_rtvec (2, before, val), UNSPEC_AND);
  else
    x = gen_rtx_fmt_ee (code, mode, before, val);

  if (after != NULL_RTX)
    emit_insn (gen_rtx_SET (VOIDmode, after, copy_rtx (x)));
  emit_insn (gen_rtx_SET (VOIDmode, scratch, x));

  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  emit_insn (gen_isync ());
}
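/* So, for example, an SImode atomic add of VAL to MEM expands to roughly:

        lwsync
     1: lwarx   scratch,0,mem
        add     scratch,scratch,val
        stwcx.  scratch,0,mem
        bne-    1b
        isync

   with BEFORE and AFTER, when requested, holding the old and new values.  */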
/* Expand an atomic compare and swap operation.  MEM is the memory on which
   to operate.  OLDVAL is the old value to be compared.  NEWVAL is the new
   value to be stored.  SCRATCH is a scratch GPR.  */

void
rs6000_split_compare_and_swap (rtx retval, rtx mem, rtx oldval, rtx newval,
                               rtx scratch)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx label1, label2, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);

  emit_insn (gen_lwsync ());

  label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
  label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
  emit_label (XEXP (label1, 0));

  emit_load_locked (mode, retval, mem);

  x = gen_rtx_COMPARE (CCmode, retval, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, cond, x));

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label2);

  emit_move_insn (scratch, newval);
  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label1);

  emit_insn (gen_isync ());
  emit_label (XEXP (label2, 0));
}
/* Expand an atomic test and set operation.  MEM is the memory on which
   to operate.  VAL is the value set.  SCRATCH is a scratch GPR.  */

void
rs6000_split_lock_test_and_set (rtx retval, rtx mem, rtx val, rtx scratch)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx label, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);

  label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
  emit_label (XEXP (label, 0));

  emit_load_locked (mode, retval, mem);
  emit_move_insn (scratch, val);
  emit_store_conditional (mode, cond, mem, scratch);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label);

  emit_insn (gen_isync ());
}
void
rs6000_expand_compare_and_swapqhi (rtx dst, rtx mem, rtx oldval, rtx newval)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx addrSI, align, wdst, shift, mask;
  HOST_WIDE_INT shift_mask = mode == QImode ? 0x18 : 0x10;
  HOST_WIDE_INT imask = GET_MODE_MASK (mode);

  /* Shift amount for subword relative to aligned word.  */
  addrSI = force_reg (GET_MODE (XEXP (mem, 0)), XEXP (mem, 0));
  addrSI = force_reg (SImode, gen_lowpart_common (SImode, addrSI));
  shift = gen_reg_rtx (SImode);
  emit_insn (gen_rlwinm (shift, addrSI, GEN_INT (3),
                         GEN_INT (shift_mask)));
  emit_insn (gen_xorsi3 (shift, shift, GEN_INT (shift_mask)));

  /* Shift and mask old value into position within word.  */
  oldval = convert_modes (SImode, mode, oldval, 1);
  oldval = expand_binop (SImode, and_optab,
                         oldval, GEN_INT (imask), NULL_RTX,
                         1, OPTAB_LIB_WIDEN);
  emit_insn (gen_ashlsi3 (oldval, oldval, shift));

  /* Shift and mask new value into position within word.  */
  newval = convert_modes (SImode, mode, newval, 1);
  newval = expand_binop (SImode, and_optab,
                         newval, GEN_INT (imask), NULL_RTX,
                         1, OPTAB_LIB_WIDEN);
  emit_insn (gen_ashlsi3 (newval, newval, shift));

  /* Mask for insertion.  */
  mask = gen_reg_rtx (SImode);
  emit_move_insn (mask, GEN_INT (imask));
  emit_insn (gen_ashlsi3 (mask, mask, shift));

  /* Address of aligned word containing subword.  */
  align = expand_binop (Pmode, and_optab, XEXP (mem, 0), GEN_INT (-4),
                        NULL_RTX, 1, OPTAB_LIB_WIDEN);
  mem = change_address (mem, SImode, align);
  set_mem_align (mem, 32);
  MEM_VOLATILE_P (mem) = 1;

  wdst = gen_reg_rtx (SImode);
  emit_insn (gen_sync_compare_and_swapqhi_internal (wdst, mask,
                                                    oldval, newval, mem));

  /* Shift the result back.  */
  emit_insn (gen_lshrsi3 (wdst, wdst, shift));

  emit_move_insn (dst, gen_lowpart (mode, wdst));
}
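/* In other words, a QImode/HImode compare-and-swap is carried out on the
   aligned SImode word containing the subword: OLDVAL and NEWVAL are shifted
   into position, MASK selects just the subword within the word, the
   word-sized reservation loop (split below) does the actual lwarx/stwcx.
   sequence, and the result is shifted back down to the original mode.  */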
void
rs6000_split_compare_and_swapqhi (rtx dest, rtx mask,
                                  rtx oldval, rtx newval, rtx mem,
                                  rtx scratch)
{
  rtx label1, label2, x, cond = gen_rtx_REG (CCmode, CR0_REGNO);

  emit_insn (gen_lwsync ());
  label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
  label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
  emit_label (XEXP (label1, 0));

  emit_load_locked (SImode, scratch, mem);

  /* Mask subword within loaded value for comparison with oldval.
     Use UNSPEC_AND to avoid clobber.  */
  emit_insn (gen_rtx_SET (SImode, dest,
                          gen_rtx_UNSPEC (SImode,
                                          gen_rtvec (2, scratch, mask),
                                          UNSPEC_AND)));

  x = gen_rtx_COMPARE (CCmode, dest, oldval);
  emit_insn (gen_rtx_SET (VOIDmode, cond, x));

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label2);

  /* Clear subword within loaded value for insertion of new value.  */
  emit_insn (gen_rtx_SET (SImode, scratch,
                          gen_rtx_AND (SImode,
                                       gen_rtx_NOT (SImode, mask), scratch)));
  emit_insn (gen_iorsi3 (scratch, scratch, newval));
  emit_store_conditional (SImode, cond, mem, scratch);

  x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (x, label1);

  emit_insn (gen_isync ());
  emit_label (XEXP (label2, 0));
}
/* Emit instructions to move SRC to DST.  Called by splitters for
   multi-register moves.  It will emit at most one instruction for
   each register that is accessed; that is, it won't emit li/lis pairs
   (or equivalent for 64-bit code).  One of SRC or DST must be a hard
   register.  */

void
rs6000_split_multireg_move (rtx dst, rtx src)
{
  /* The register number of the first register being moved.  */
  int reg;
  /* The mode that is to be moved.  */
  enum machine_mode mode;
  /* The mode that the move is being done in, and its size.  */
  enum machine_mode reg_mode;
  int reg_mode_size;
  /* The number of registers that will be moved.  */
  int nregs;

  reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
  mode = GET_MODE (dst);
  nregs = hard_regno_nregs[reg][mode];
  if (FP_REGNO_P (reg))
    reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
        ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
  else if (ALTIVEC_REGNO_P (reg))
    reg_mode = V16QImode;
  else if (TARGET_E500_DOUBLE && mode == TFmode)
    reg_mode = DFmode;
  else
    reg_mode = word_mode;
  reg_mode_size = GET_MODE_SIZE (reg_mode);

  gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));

  if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
    {
      /* Move register range backwards, if we might have destructive
         overlap.  */
      int i;
      for (i = nregs - 1; i >= 0; i--)
        emit_insn (gen_rtx_SET (VOIDmode,
                                simplify_gen_subreg (reg_mode, dst, mode,
                                                     i * reg_mode_size),
                                simplify_gen_subreg (reg_mode, src, mode,
                                                     i * reg_mode_size)));
    }
  else
    {
      int i;
      int j = -1;
      bool used_update = false;

      if (MEM_P (src) && INT_REGNO_P (reg))
        {
          rtx breg;

          if (GET_CODE (XEXP (src, 0)) == PRE_INC
              || GET_CODE (XEXP (src, 0)) == PRE_DEC)
            {
              rtx delta_rtx;
              breg = XEXP (XEXP (src, 0), 0);
              delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
                           ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
                           : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
              emit_insn (TARGET_32BIT
                         ? gen_addsi3 (breg, breg, delta_rtx)
                         : gen_adddi3 (breg, breg, delta_rtx));
              src = replace_equiv_address (src, breg);
            }
          else if (! rs6000_offsettable_memref_p (src))
            {
              rtx basereg;
              basereg = gen_rtx_REG (Pmode, reg);
              emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
              src = replace_equiv_address (src, basereg);
            }

          breg = XEXP (src, 0);
          if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
            breg = XEXP (breg, 0);

          /* If the base register we are using to address memory is
             also a destination reg, then change that register last.  */
          if (REG_P (breg)
              && REGNO (breg) >= REGNO (dst)
              && REGNO (breg) < REGNO (dst) + nregs)
            j = REGNO (breg) - REGNO (dst);
        }

      if (GET_CODE (dst) == MEM && INT_REGNO_P (reg))
        {
          rtx breg;

          if (GET_CODE (XEXP (dst, 0)) == PRE_INC
              || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
            {
              rtx delta_rtx;
              breg = XEXP (XEXP (dst, 0), 0);
              delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
                           ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
                           : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));

              /* We have to update the breg before doing the store.
                 Use store with update, if available.  */

              if (TARGET_UPDATE)
                {
                  rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
                  emit_insn (TARGET_32BIT
                             ? (TARGET_POWERPC64
                                ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
                                : gen_movsi_update (breg, breg, delta_rtx, nsrc))
                             : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
                  used_update = true;
                }
              else
                emit_insn (TARGET_32BIT
                           ? gen_addsi3 (breg, breg, delta_rtx)
                           : gen_adddi3 (breg, breg, delta_rtx));
              dst = replace_equiv_address (dst, breg);
            }
          else
            gcc_assert (rs6000_offsettable_memref_p (dst));
        }

      for (i = 0; i < nregs; i++)
        {
          /* Calculate index to next subword.  */
          j++;
          if (j == nregs)
            j = 0;

          /* If compiler already emitted move of first word by
             store with update, no need to do anything.  */
          if (j == 0 && used_update)
            continue;

          emit_insn (gen_rtx_SET (VOIDmode,
                                  simplify_gen_subreg (reg_mode, dst, mode,
                                                       j * reg_mode_size),
                                  simplify_gen_subreg (reg_mode, src, mode,
                                                       j * reg_mode_size)));
        }
    }
}
/* This page contains routines that are used to determine what the
   function prologue and epilogue code will do and write them out.  */

/* Return the first fixed-point register that is required to be
   saved.  32 if none.  */

int
first_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 13; first_reg <= 31; first_reg++)
    if (df_regs_ever_live_p (first_reg)
        && (! call_used_regs[first_reg]
            || (first_reg == RS6000_PIC_OFFSET_TABLE_REGNUM
                && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
                    || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
                    || (TARGET_TOC && TARGET_MINIMAL_TOC)))))
      break;

#if TARGET_MACHO
  if (flag_pic
      && crtl->uses_pic_offset_table
      && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
    return RS6000_PIC_OFFSET_TABLE_REGNUM;
#endif

  return first_reg;
}
/* Similar, for FP regs.  */

int
first_fp_reg_to_save (void)
{
  int first_reg;

  /* Find lowest numbered live register.  */
  for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
    if (df_regs_ever_live_p (first_reg))
      break;

  return first_reg;
}
/* Similar, for AltiVec regs.  */

static int
first_altivec_reg_to_save (void)
{
  int i;

  /* Stack frame remains as is unless we are in AltiVec ABI.  */
  if (! TARGET_ALTIVEC_ABI)
    return LAST_ALTIVEC_REGNO + 1;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    return FIRST_ALTIVEC_REGNO + 20;

  /* Find lowest numbered live register.  */
  for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
    if (df_regs_ever_live_p (i))
      break;

  return i;
}
/* Return a 32-bit mask of the AltiVec registers we need to set in
   VRSAVE.  Bit n of the return value is 1 if Vn is live.  The MSB in
   the 32-bit word is 0.  */

static unsigned int
compute_vrsave_mask (void)
{
  unsigned int i, mask = 0;

  /* On Darwin, the unwind routines are compiled without
     TARGET_ALTIVEC, and use save_world to save/restore the
     call-saved altivec registers when necessary.  */
  if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
      && ! TARGET_ALTIVEC)
    mask |= 0xFFF;

  /* First, find out if we use _any_ altivec registers.  */
  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (df_regs_ever_live_p (i))
      mask |= ALTIVEC_REG_BIT (i);

  if (mask == 0)
    return mask;

  /* Next, remove the argument registers from the set.  These must
     be in the VRSAVE mask set by the caller, so we don't need to add
     them in again.  More importantly, the mask we compute here is
     used to generate CLOBBERs in the set_vrsave insn, and we do not
     wish the argument registers to die.  */
  for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
    mask &= ~ALTIVEC_REG_BIT (i);

  /* Similarly, remove the return value from the set.  */
  {
    bool yes = false;
    diddle_return_value (is_altivec_return_reg, &yes);
    if (yes)
      mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
  }

  return mask;
}
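/* Note the mask is in VRSAVE's big-endian bit order: assuming the usual
   definition ALTIVEC_REG_BIT (r) == (0x80000000 >> (r - FIRST_ALTIVEC_REGNO)),
   a function whose only live vector register is V20 yields
   0x80000000 >> 20 == 0x00000800.  */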
/* For a very restricted set of circumstances, we can cut down the
   size of prologues/epilogues by calling our own save/restore-the-world
   routines.  */

static void
compute_save_world_info (rs6000_stack_t *info_ptr)
{
  info_ptr->world_save_p = 1;
  info_ptr->world_save_p
    = (WORLD_SAVE_P (info_ptr)
       && DEFAULT_ABI == ABI_DARWIN
       && ! (cfun->calls_setjmp && flag_exceptions)
       && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
       && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
       && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
       && info_ptr->cr_save_p);

  /* This will not work in conjunction with sibcalls.  Make sure there
     are none.  (This check is expensive, but seldom executed.) */
  if (WORLD_SAVE_P (info_ptr))
    {
      rtx insn;
      for ( insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
        if ( GET_CODE (insn) == CALL_INSN
             && SIBLING_CALL_P (insn))
          {
            info_ptr->world_save_p = 0;
            break;
          }
    }

  if (WORLD_SAVE_P (info_ptr))
    {
      /* Even if we're not touching VRsave, make sure there's room on the
         stack for it, if it looks like we're calling SAVE_WORLD, which
         will attempt to save it.  */
      info_ptr->vrsave_size = 4;

      /* If we are going to save the world, we need to save the link register too.  */
      info_ptr->lr_save_p = 1;

      /* "Save" the VRsave register too if we're saving the world.  */
      if (info_ptr->vrsave_mask == 0)
        info_ptr->vrsave_mask = compute_vrsave_mask ();

      /* Because the Darwin register save/restore routines only handle
         F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
         check.  */
      gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
                  && (info_ptr->first_altivec_reg_save
                      >= FIRST_SAVED_ALTIVEC_REGNO));
    }
  return;
}
static void
is_altivec_return_reg (rtx reg, void *xyes)
{
  bool *yes = (bool *) xyes;
  if (REGNO (reg) == ALTIVEC_ARG_RETURN)
    *yes = true;
}
/* Calculate the stack information for the current function.  This is
   complicated by having two separate calling sequences, the AIX calling
   sequence and the V.4 calling sequence.

   AIX (and Darwin/Mac OS X) stack frames look like:
                                                          32-bit  64-bit
        SP----> +---------------------------------------+
                | back chain to caller                  | 0       0
                +---------------------------------------+
                | saved CR                              | 4       8 (8-11)
                +---------------------------------------+
                | saved LR                              | 8       16
                +---------------------------------------+
                | reserved for compilers                | 12      24
                +---------------------------------------+
                | reserved for binders                  | 16      32
                +---------------------------------------+
                | saved TOC pointer                     | 20      40
                +---------------------------------------+
                | Parameter save area (P)               | 24      48
                +---------------------------------------+
                | Alloca space (A)                      | 24+P    etc.
                +---------------------------------------+
                | Local variable space (L)              | 24+P+A
                +---------------------------------------+
                | Float/int conversion temporary (X)    | 24+P+A+L
                +---------------------------------------+
                | Save area for AltiVec registers (W)   | 24+P+A+L+X
                +---------------------------------------+
                | AltiVec alignment padding (Y)         | 24+P+A+L+X+W
                +---------------------------------------+
                | Save area for VRSAVE register (Z)     | 24+P+A+L+X+W+Y
                +---------------------------------------+
                | Save area for GP registers (G)        | 24+P+A+L+X+W+Y+Z
                +---------------------------------------+
                | Save area for FP registers (F)        | 24+P+A+L+X+W+Y+Z+G
                +---------------------------------------+
        old SP->| back chain to caller's caller         |
                +---------------------------------------+

   The required alignment for AIX configurations is two words (i.e., 8
   or 16 bytes).

   V.4 stack frames look like:

        SP----> +---------------------------------------+
                | back chain to caller                  | 0
                +---------------------------------------+
                | caller's saved LR                     | 4
                +---------------------------------------+
                | Parameter save area (P)               | 8
                +---------------------------------------+
                | Alloca space (A)                      | 8+P
                +---------------------------------------+
                | Varargs save area (V)                 | 8+P+A
                +---------------------------------------+
                | Local variable space (L)              | 8+P+A+V
                +---------------------------------------+
                | Float/int conversion temporary (X)    | 8+P+A+V+L
                +---------------------------------------+
                | Save area for AltiVec registers (W)   | 8+P+A+V+L+X
                +---------------------------------------+
                | AltiVec alignment padding (Y)         | 8+P+A+V+L+X+W
                +---------------------------------------+
                | Save area for VRSAVE register (Z)     | 8+P+A+V+L+X+W+Y
                +---------------------------------------+
                | SPE: area for 64-bit GP registers     |
                +---------------------------------------+
                | SPE alignment padding                 |
                +---------------------------------------+
                | saved CR (C)                          | 8+P+A+V+L+X+W+Y+Z
                +---------------------------------------+
                | Save area for GP registers (G)        | 8+P+A+V+L+X+W+Y+Z+C
                +---------------------------------------+
                | Save area for FP registers (F)        | 8+P+A+V+L+X+W+Y+Z+C+G
                +---------------------------------------+
        old SP->| back chain to caller's caller         |
                +---------------------------------------+

   The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
   given.  (But note below and in sysv4.h that we require only 8 and
   may round up the size of our stack frame anyways.  The historical
   reason is early versions of powerpc-linux which didn't properly
   align the stack at program startup.  A happy side-effect is that
   -mno-eabi libraries can be used with -meabi programs.)

   The EABI configuration defaults to the V.4 layout.  However,
   the stack alignment requirements may differ.  If -mno-eabi is not
   given, the required stack alignment is 8 bytes; if -mno-eabi is
   given, the required alignment is 16 bytes.  (But see V.4 comment
   above.)  */
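/* A worked example (added for illustration; not part of the original
   comment): in a 32-bit AIX frame that saves two FPRs (F = 2*8 = 16)
   and three GPRs (G = 3*4 = 12), rs6000_stack_info below computes
   fp_save_offset = -16 and gp_save_offset = -16 - 12 = -28; the save
   areas are laid out as negative offsets from the frame top, and
   total_size is then rounded up to ABI_STACK_BOUNDARY.  */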
#ifndef ABI_STACK_BOUNDARY
#define ABI_STACK_BOUNDARY STACK_BOUNDARY
#endif
static rs6000_stack_t *
rs6000_stack_info (void)
{
  static rs6000_stack_t info;
  rs6000_stack_t *info_ptr = &info;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int ehrd_size;
  int save_align;
  int first_gp;
  HOST_WIDE_INT non_fixed_size;

  memset (&info, 0, sizeof (info));

  if (TARGET_SPE)
    {
      /* Cache value so we don't rescan instruction chain over and over.  */
      if (cfun->machine->insn_chain_scanned_p == 0)
        cfun->machine->insn_chain_scanned_p
          = spe_func_has_64bit_regs_p () + 1;
      info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
    }

  /* Select which calling sequence.  */
  info_ptr->abi = DEFAULT_ABI;

  /* Calculate which registers need to be saved & save area size.  */
  info_ptr->first_gp_reg_save = first_reg_to_save ();
  /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
     even if it currently looks like we won't.  Reload may need it to
     get at a constant; if so, it will have already created a constant
     pool entry for it.  */
  if (((TARGET_TOC && TARGET_MINIMAL_TOC)
       || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
       || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
      && crtl->uses_const_pool
      && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
    first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
  else
    first_gp = info_ptr->first_gp_reg_save;

  info_ptr->gp_size = reg_size * (32 - first_gp);

  /* For the SPE, we have an additional upper 32-bits on each GPR.
     Ideally we should save the entire 64-bits only when the upper
     half is used in SIMD instructions.  Since we only record
     registers live (not the size they are used in), this proves
     difficult because we'd have to traverse the instruction chain at
     the right time, taking reload into account.  This is a real pain,
     so we opt to save the GPRs in 64-bits always if even one register
     gets used in 64-bits.  Otherwise, all the registers in the frame
     get saved in 32-bits.

     So, when we save all GPRs (except the SP) in 64-bits, the
     traditional GP save area will be empty.  */
  if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
    info_ptr->gp_size = 0;

  info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
  info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);

  info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
  info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
                                 - info_ptr->first_altivec_reg_save);

  /* Does this function call anything?  */
  info_ptr->calls_p = (! current_function_is_leaf
                       || cfun->machine->ra_needs_full_frame);

  /* Determine if we need to save the link register.  */
  if ((DEFAULT_ABI == ABI_AIX
       && crtl->profile
       && !TARGET_PROFILE_KERNEL)
#ifdef TARGET_RELOCATABLE
      || (TARGET_RELOCATABLE && (get_pool_size () != 0))
#endif
      || (info_ptr->first_fp_reg_save != 64
          && !FP_SAVE_INLINE (info_ptr->first_fp_reg_save))
      || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
      || info_ptr->calls_p
      || rs6000_ra_ever_killed ())
    {
      info_ptr->lr_save_p = 1;
      df_set_regs_ever_live (LR_REGNO, true);
    }

  /* Determine if we need to save the condition code registers.  */
  if (df_regs_ever_live_p (CR2_REGNO)
      || df_regs_ever_live_p (CR3_REGNO)
      || df_regs_ever_live_p (CR4_REGNO))
    {
      info_ptr->cr_save_p = 1;
      if (DEFAULT_ABI == ABI_V4)
        info_ptr->cr_size = reg_size;
    }

  /* If the current function calls __builtin_eh_return, then we need
     to allocate stack space for registers that will hold data for
     the exception handler.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i;
      for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
        continue;

      /* SPE saves EH registers in 64-bits.  */
      ehrd_size = i * (TARGET_SPE_ABI
                       && info_ptr->spe_64bit_regs_used != 0
                       ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
    }
  else
    ehrd_size = 0;

  /* Determine various sizes.  */
  info_ptr->reg_size = reg_size;
  info_ptr->fixed_size = RS6000_SAVE_AREA;
  info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
  info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
                                      TARGET_ALTIVEC ? 16 : 8);
  if (FRAME_GROWS_DOWNWARD)
    info_ptr->vars_size
      += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
                       + info_ptr->parm_size,
                       ABI_STACK_BOUNDARY / BITS_PER_UNIT)
         - (info_ptr->fixed_size + info_ptr->vars_size
            + info_ptr->parm_size);

  if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
    info_ptr->spe_gp_size = 8 * (32 - first_gp);
  else
    info_ptr->spe_gp_size = 0;

  if (TARGET_ALTIVEC_ABI)
    info_ptr->vrsave_mask = compute_vrsave_mask ();
  else
    info_ptr->vrsave_mask = 0;

  if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
    info_ptr->vrsave_size = 4;
  else
    info_ptr->vrsave_size = 0;

  compute_save_world_info (info_ptr);

  /* Calculate the offsets.  */
  switch (DEFAULT_ABI)
    {
    case ABI_NONE:
    default:
      gcc_unreachable ();

    case ABI_AIX:
    case ABI_DARWIN:
      info_ptr->fp_save_offset = - info_ptr->fp_size;
      info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;

      if (TARGET_ALTIVEC_ABI)
        {
          info_ptr->vrsave_save_offset
            = info_ptr->gp_save_offset - info_ptr->vrsave_size;

          /* Align stack so vector save area is on a quadword boundary.
             The padding goes above the vectors.  */
          if (info_ptr->altivec_size != 0)
            info_ptr->altivec_padding_size
              = info_ptr->vrsave_save_offset & 0xF;
          else
            info_ptr->altivec_padding_size = 0;

          info_ptr->altivec_save_offset
            = info_ptr->vrsave_save_offset
            - info_ptr->altivec_padding_size
            - info_ptr->altivec_size;
          gcc_assert (info_ptr->altivec_size == 0
                      || info_ptr->altivec_save_offset % 16 == 0);

          /* Adjust for AltiVec case.  */
          info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
        }
      else
        info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
      info_ptr->cr_save_offset = reg_size; /* first word when 64-bit.  */
      info_ptr->lr_save_offset = 2*reg_size;
      break;

    case ABI_V4:
      info_ptr->fp_save_offset = - info_ptr->fp_size;
      info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
      info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;

      if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
        {
          /* Align stack so SPE GPR save area is aligned on a
             double-word boundary.  */
          if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
            info_ptr->spe_padding_size
              = 8 - (-info_ptr->cr_save_offset % 8);
          else
            info_ptr->spe_padding_size = 0;

          info_ptr->spe_gp_save_offset
            = info_ptr->cr_save_offset
            - info_ptr->spe_padding_size
            - info_ptr->spe_gp_size;

          /* Adjust for SPE case.  */
          info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
        }
      else if (TARGET_ALTIVEC_ABI)
        {
          info_ptr->vrsave_save_offset
            = info_ptr->cr_save_offset - info_ptr->vrsave_size;

          /* Align stack so vector save area is on a quadword boundary.  */
          if (info_ptr->altivec_size != 0)
            info_ptr->altivec_padding_size
              = 16 - (-info_ptr->vrsave_save_offset % 16);
          else
            info_ptr->altivec_padding_size = 0;

          info_ptr->altivec_save_offset
            = info_ptr->vrsave_save_offset
            - info_ptr->altivec_padding_size
            - info_ptr->altivec_size;

          /* Adjust for AltiVec case.  */
          info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
        }
      else
        info_ptr->ehrd_offset = info_ptr->cr_save_offset;
      info_ptr->ehrd_offset -= ehrd_size;
      info_ptr->lr_save_offset = reg_size;
      break;
    }

  save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
  info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
                                      + info_ptr->gp_size
                                      + info_ptr->altivec_size
                                      + info_ptr->altivec_padding_size
                                      + info_ptr->spe_gp_size
                                      + info_ptr->spe_padding_size
                                      + ehrd_size
                                      + info_ptr->cr_size
                                      + info_ptr->vrsave_size,
                                      save_align);

  non_fixed_size = (info_ptr->vars_size
                    + info_ptr->parm_size
                    + info_ptr->save_size);

  info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
                                       ABI_STACK_BOUNDARY / BITS_PER_UNIT);

  /* Determine if we need to allocate any stack frame:

     For AIX we need to push the stack if a frame pointer is needed
     (because the stack might be dynamically adjusted), if we are
     debugging, if we make calls, or if the sum of fp_save, gp_save,
     and local variables are more than the space needed to save all
     non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
     + 18*8 = 288 (GPR13 reserved).

     For V.4 we don't have the stack cushion that AIX uses, but assume
     that the debugger can handle stackless frames.  */

  if (info_ptr->calls_p)
    info_ptr->push_p = 1;

  else if (DEFAULT_ABI == ABI_V4)
    info_ptr->push_p = non_fixed_size != 0;

  else if (frame_pointer_needed)
    info_ptr->push_p = 1;

  else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
    info_ptr->push_p = 1;

  else
    info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);

  /* Zero offsets if we're not saving those registers.  */
  if (info_ptr->fp_size == 0)
    info_ptr->fp_save_offset = 0;

  if (info_ptr->gp_size == 0)
    info_ptr->gp_save_offset = 0;

  if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
    info_ptr->altivec_save_offset = 0;

  if (! TARGET_ALTIVEC_ABI || info_ptr->vrsave_mask == 0)
    info_ptr->vrsave_save_offset = 0;

  if (! TARGET_SPE_ABI
      || info_ptr->spe_64bit_regs_used == 0
      || info_ptr->spe_gp_size == 0)
    info_ptr->spe_gp_save_offset = 0;

  if (! info_ptr->lr_save_p)
    info_ptr->lr_save_offset = 0;

  if (! info_ptr->cr_save_p)
    info_ptr->cr_save_offset = 0;

  return info_ptr;
}
/* Return true if the current function uses any GPRs in 64-bit SIMD
   mode.  */

static bool
spe_func_has_64bit_regs_p (void)
{
  rtx insns, insn;

  /* Functions that save and restore all the call-saved registers will
     need to save/restore the registers in 64-bits.  */
  if (crtl->calls_eh_return
      || cfun->calls_setjmp
      || crtl->has_nonlocal_goto)
    return true;

  insns = get_insns ();

  for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          rtx i;

          /* FIXME: This should be implemented with attributes...

                 (set_attr "spe64" "true")....then,
                 if (get_spe64(insn)) return true;

             It's the only reliable way to do the stuff below.  */

          i = PATTERN (insn);
          if (GET_CODE (i) == SET)
            {
              enum machine_mode mode = GET_MODE (SET_SRC (i));

              if (SPE_VECTOR_MODE (mode))
                return true;
              if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
                return true;
            }
        }
    }

  return false;
}
void
debug_stack_info (rs6000_stack_t *info)
{
  const char *abi_string;

  if (! info)
    info = rs6000_stack_info ();

  fprintf (stderr, "\nStack information for function %s:\n",
           ((current_function_decl && DECL_NAME (current_function_decl))
            ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
            : "<unknown>"));

  switch (info->abi)
    {
    default:             abi_string = "Unknown";        break;
    case ABI_NONE:       abi_string = "NONE";           break;
    case ABI_AIX:        abi_string = "AIX";            break;
    case ABI_DARWIN:     abi_string = "Darwin";         break;
    case ABI_V4:         abi_string = "V.4";            break;
    }

  fprintf (stderr, "\tABI                 = %5s\n", abi_string);

  if (TARGET_ALTIVEC_ABI)
    fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");

  if (TARGET_SPE_ABI)
    fprintf (stderr, "\tSPE ABI extensions enabled.\n");

  if (info->first_gp_reg_save != 32)
    fprintf (stderr, "\tfirst_gp_reg_save   = %5d\n", info->first_gp_reg_save);

  if (info->first_fp_reg_save != 64)
    fprintf (stderr, "\tfirst_fp_reg_save   = %5d\n", info->first_fp_reg_save);

  if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
    fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
             info->first_altivec_reg_save);

  if (info->lr_save_p)
    fprintf (stderr, "\tlr_save_p           = %5d\n", info->lr_save_p);

  if (info->cr_save_p)
    fprintf (stderr, "\tcr_save_p           = %5d\n", info->cr_save_p);

  if (info->vrsave_mask)
    fprintf (stderr, "\tvrsave_mask         = 0x%x\n", info->vrsave_mask);

  if (info->push_p)
    fprintf (stderr, "\tpush_p              = %5d\n", info->push_p);

  if (info->calls_p)
    fprintf (stderr, "\tcalls_p             = %5d\n", info->calls_p);

  if (info->gp_save_offset)
    fprintf (stderr, "\tgp_save_offset      = %5d\n", info->gp_save_offset);

  if (info->fp_save_offset)
    fprintf (stderr, "\tfp_save_offset      = %5d\n", info->fp_save_offset);

  if (info->altivec_save_offset)
    fprintf (stderr, "\taltivec_save_offset = %5d\n",
             info->altivec_save_offset);

  if (info->spe_gp_save_offset)
    fprintf (stderr, "\tspe_gp_save_offset  = %5d\n",
             info->spe_gp_save_offset);

  if (info->vrsave_save_offset)
    fprintf (stderr, "\tvrsave_save_offset  = %5d\n",
             info->vrsave_save_offset);

  if (info->lr_save_offset)
    fprintf (stderr, "\tlr_save_offset      = %5d\n", info->lr_save_offset);

  if (info->cr_save_offset)
    fprintf (stderr, "\tcr_save_offset      = %5d\n", info->cr_save_offset);

  if (info->varargs_save_offset)
    fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);

  if (info->total_size)
    fprintf (stderr, "\ttotal_size          = "HOST_WIDE_INT_PRINT_DEC"\n",
             info->total_size);

  if (info->vars_size)
    fprintf (stderr, "\tvars_size           = "HOST_WIDE_INT_PRINT_DEC"\n",
             info->vars_size);

  if (info->parm_size)
    fprintf (stderr, "\tparm_size           = %5d\n", info->parm_size);

  if (info->fixed_size)
    fprintf (stderr, "\tfixed_size          = %5d\n", info->fixed_size);

  if (info->gp_size)
    fprintf (stderr, "\tgp_size             = %5d\n", info->gp_size);

  if (info->spe_gp_size)
    fprintf (stderr, "\tspe_gp_size         = %5d\n", info->spe_gp_size);

  if (info->fp_size)
    fprintf (stderr, "\tfp_size             = %5d\n", info->fp_size);

  if (info->altivec_size)
    fprintf (stderr, "\taltivec_size        = %5d\n", info->altivec_size);

  if (info->vrsave_size)
    fprintf (stderr, "\tvrsave_size         = %5d\n", info->vrsave_size);

  if (info->altivec_padding_size)
    fprintf (stderr, "\taltivec_padding_size= %5d\n",
             info->altivec_padding_size);

  if (info->spe_padding_size)
    fprintf (stderr, "\tspe_padding_size    = %5d\n",
             info->spe_padding_size);

  if (info->cr_size)
    fprintf (stderr, "\tcr_size             = %5d\n", info->cr_size);

  if (info->save_size)
    fprintf (stderr, "\tsave_size           = %5d\n", info->save_size);

  if (info->reg_size != 4)
    fprintf (stderr, "\treg_size            = %5d\n", info->reg_size);

  fprintf (stderr, "\n");
}
rtx
rs6000_return_addr (int count, rtx frame)
{
  /* Currently we don't optimize very well between prolog and body
     code and for PIC code the code can be actually quite bad, so
     don't try to be too clever here.  */
  if (count != 0 || (DEFAULT_ABI != ABI_AIX && flag_pic))
    {
      cfun->machine->ra_needs_full_frame = 1;

      return
        gen_rtx_MEM
          (Pmode,
           memory_address
             (Pmode,
              plus_constant (copy_to_reg
                             (gen_rtx_MEM (Pmode,
                                           memory_address (Pmode, frame))),
                             RETURN_ADDRESS_OFFSET)));
    }

  cfun->machine->ra_need_lr = 1;
  return get_hard_reg_initial_val (Pmode, LR_REGNO);
}
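/* Illustrative note: this routine is the back end for
   __builtin_return_address; e.g. __builtin_return_address (0) reaches
   here with COUNT == 0 and, for non-PIC AIX code, is satisfied from
   the LR value recorded by get_hard_reg_initial_val above rather than
   from a stack slot.  */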
/* Say whether a function is a candidate for sibcall handling or not.
   We do not allow indirect calls to be optimized into sibling calls.
   Also, we can't do it if there are any vector parameters; there's
   nowhere to put the VRsave code so it works; note that functions with
   vector parameters are required to have a prototype, so the argument
   type info must be available here.  (The tail recursion case can work
   with vector parameters, but there's no way to distinguish here.) */

static bool
rs6000_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
{
  tree type;
  if (decl)
    {
      if (TARGET_ALTIVEC_VRSAVE)
        {
          for (type = TYPE_ARG_TYPES (TREE_TYPE (decl));
               type; type = TREE_CHAIN (type))
            {
              if (TREE_CODE (TREE_VALUE (type)) == VECTOR_TYPE)
                return false;
            }
        }
      if (DEFAULT_ABI == ABI_DARWIN
          || ((*targetm.binds_local_p) (decl)
              && (DEFAULT_ABI != ABI_AIX || !DECL_EXTERNAL (decl))))
        {
          tree attr_list = TYPE_ATTRIBUTES (TREE_TYPE (decl));

          if (!lookup_attribute ("longcall", attr_list)
              || lookup_attribute ("shortcall", attr_list))
            return true;
        }
    }
  return false;
}
/* NULL if INSN is valid within a low-overhead loop.
   Otherwise return why doloop cannot be applied.
   PowerPC uses the COUNT register for branch on table instructions.  */

static const char *
rs6000_invalid_within_doloop (const_rtx insn)
{
  if (CALL_P (insn))
    return "Function call in the loop.";

  if (JUMP_P (insn)
      && (GET_CODE (PATTERN (insn)) == ADDR_DIFF_VEC
          || GET_CODE (PATTERN (insn)) == ADDR_VEC))
    return "Computed branch in the loop.";

  return NULL;
}
static bool
rs6000_ra_ever_killed (void)
{
  rtx top;
  rtx reg;
  rtx insn;

  if (cfun->is_thunk)
    return 0;

  /* regs_ever_live has LR marked as used if any sibcalls are present,
     but this should not force saving and restoring in the
     pro/epilogue.  Likewise, reg_set_between_p thinks a sibcall
     clobbers LR, so that is inappropriate.  */

  /* Also, the prologue can generate a store into LR that
     doesn't really count, like this:

        move LR->R0
        bcl to set PIC register
        move LR->R31
        move R0->LR

     When we're called from the epilogue, we need to avoid counting
     this as a store.  */

  push_topmost_sequence ();
  top = get_insns ();
  pop_topmost_sequence ();
  reg = gen_rtx_REG (Pmode, LR_REGNO);

  for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          if (CALL_P (insn))
            {
              if (!SIBLING_CALL_P (insn))
                return 1;
            }
          else if (find_regno_note (insn, REG_INC, LR_REGNO))
            return 1;
          else if (set_of (reg, insn) != NULL_RTX
                   && !prologue_epilogue_contains (insn))
            return 1;
        }
    }
  return 0;
}
/* Emit instructions needed to load the TOC register.
   This is only needed when TARGET_TOC, TARGET_MINIMAL_TOC, and there is
   a constant pool; or for SVR4 -fpic.  */

void
rs6000_emit_load_toc_table (int fromprolog)
{
  rtx dest;
  dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);

  if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI != ABI_AIX && flag_pic)
    {
      char buf[30];
      rtx lab, tmp1, tmp2, got;

      ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
      lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
      if (flag_pic == 2)
        got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
      else
        got = rs6000_got_sym ();
      tmp1 = tmp2 = dest;
      if (!fromprolog)
        {
          tmp1 = gen_reg_rtx (Pmode);
          tmp2 = gen_reg_rtx (Pmode);
        }
      emit_insn (gen_load_toc_v4_PIC_1 (lab));
      emit_move_insn (tmp1,
                      gen_rtx_REG (Pmode, LR_REGNO));
      emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
      emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
    }
  else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
    {
      emit_insn (gen_load_toc_v4_pic_si ());
      emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
    }
  else if (TARGET_ELF && DEFAULT_ABI != ABI_AIX && flag_pic == 2)
    {
      char buf[30];
      rtx temp0 = (fromprolog
                   ? gen_rtx_REG (Pmode, 0)
                   : gen_reg_rtx (Pmode));

      if (fromprolog)
        {
          rtx symF, symL;

          ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
          symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

          ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
          symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

          emit_insn (gen_load_toc_v4_PIC_1 (symF));
          emit_move_insn (dest,
                          gen_rtx_REG (Pmode, LR_REGNO));
          emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
        }
      else
        {
          rtx tocsym;

          tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
          emit_insn (gen_load_toc_v4_PIC_1b (tocsym));
          emit_move_insn (dest,
                          gen_rtx_REG (Pmode, LR_REGNO));
          emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
        }
      emit_insn (gen_addsi3 (dest, temp0, dest));
    }
  else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
    {
      /* This is for AIX code running in non-PIC ELF32.  */
      char buf[30];
      rtx realsym;
      ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
      realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));

      emit_insn (gen_elf_high (dest, realsym));
      emit_insn (gen_elf_low (dest, dest, realsym));
    }
  else
    {
      gcc_assert (DEFAULT_ABI == ABI_AIX);

      if (TARGET_32BIT)
        emit_insn (gen_load_toc_aix_si (dest));
      else
        emit_insn (gen_load_toc_aix_di (dest));
    }
}
/* Emit instructions to restore the link register after determining where
   its value has been stored.  */

void
rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  rtx operands[2];

  operands[0] = source;
  operands[1] = scratch;

  if (info->lr_save_p)
    {
      rtx frame_rtx = stack_pointer_rtx;
      HOST_WIDE_INT sp_offset = 0;
      rtx tmp;

      if (frame_pointer_needed
          || cfun->calls_alloca
          || info->total_size > 32767)
        {
          tmp = gen_frame_mem (Pmode, frame_rtx);
          emit_move_insn (operands[1], tmp);
          frame_rtx = operands[1];
        }
      else if (info->push_p)
        sp_offset = info->total_size;

      tmp = plus_constant (frame_rtx, info->lr_save_offset + sp_offset);
      tmp = gen_frame_mem (Pmode, tmp);
      emit_move_insn (tmp, operands[0]);
    }
  else
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
}
static GTY(()) alias_set_type set = -1;

alias_set_type
get_TOC_alias_set (void)
{
  if (set == -1)
    set = new_alias_set ();
  return set;
}
/* This returns nonzero if the current function uses the TOC.  This is
   determined by the presence of (use (unspec ... UNSPEC_TOC)), which
   is generated by the ABI_V4 load_toc_* patterns.  */
#if TARGET_ELF
static int
uses_TOC (void)
{
  rtx insn;

  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (INSN_P (insn))
      {
        rtx pat = PATTERN (insn);
        int i;

        if (GET_CODE (pat) == PARALLEL)
          for (i = 0; i < XVECLEN (pat, 0); i++)
            {
              rtx sub = XVECEXP (pat, 0, i);
              if (GET_CODE (sub) == USE)
                {
                  sub = XEXP (sub, 0);
                  if (GET_CODE (sub) == UNSPEC
                      && XINT (sub, 1) == UNSPEC_TOC)
                    return 1;
                }
            }
      }
  return 0;
}
#endif
rtx
create_TOC_reference (rtx symbol)
{
  if (!can_create_pseudo_p ())
    df_set_regs_ever_live (TOC_REGISTER, true);
  return gen_rtx_PLUS (Pmode,
           gen_rtx_REG (Pmode, TOC_REGISTER),
             gen_rtx_CONST (Pmode,
               gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol), UNSPEC_TOCREL)));
}
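/* For illustration, the reference built above has the shape
     (plus:P (reg:P TOC_REGISTER)
             (const:P (unspec:P [(symbol_ref ...)] UNSPEC_TOCREL)))
   which later passes recognize as a TOC-relative address.  */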
/* If _Unwind_* has been called from within the same module, the
   TOC register is not guaranteed to be saved to 40(1) on function
   entry.  Save it there in that case.  */

void
rs6000_aix_emit_builtin_unwind_init (void)
{
  rtx mem;
  rtx stack_top = gen_reg_rtx (Pmode);
  rtx opcode_addr = gen_reg_rtx (Pmode);
  rtx opcode = gen_reg_rtx (SImode);
  rtx tocompare = gen_reg_rtx (SImode);
  rtx no_toc_save_needed = gen_label_rtx ();

  mem = gen_frame_mem (Pmode, hard_frame_pointer_rtx);
  emit_move_insn (stack_top, mem);

  mem = gen_frame_mem (Pmode,
                       gen_rtx_PLUS (Pmode, stack_top,
                                     GEN_INT (2 * GET_MODE_SIZE (Pmode))));
  emit_move_insn (opcode_addr, mem);
  emit_move_insn (opcode, gen_rtx_MEM (SImode, opcode_addr));
  emit_move_insn (tocompare, gen_int_mode (TARGET_32BIT ? 0x80410014
                                           : 0xE8410028, SImode));

  do_compare_rtx_and_jump (opcode, tocompare, EQ, 1,
                           SImode, NULL_RTX, NULL_RTX,
                           no_toc_save_needed);

  mem = gen_frame_mem (Pmode,
                       gen_rtx_PLUS (Pmode, stack_top,
                                     GEN_INT (5 * GET_MODE_SIZE (Pmode))));
  emit_move_insn (mem, gen_rtx_REG (Pmode, 2));
  emit_label (no_toc_save_needed);
}
/* This ties together stack memory (MEM with an alias set of frame_alias_set)
   and the change to the stack pointer.  */

static void
rs6000_emit_stack_tie (void)
{
  rtx mem = gen_frame_mem (BLKmode,
                           gen_rtx_REG (Pmode, STACK_POINTER_REGNUM));

  emit_insn (gen_stack_tie (mem));
}
/* Emit the correct code for allocating stack space, as insns.
   If COPY_R12, make sure a copy of the old frame is left in r12.
   If COPY_R11, make sure a copy of the old frame is left in r11,
   in preference to r12 if COPY_R12.
   The generated code may use hard register 0 as a temporary.  */

static void
rs6000_emit_allocate_stack (HOST_WIDE_INT size, int copy_r12, int copy_r11)
{
  rtx insn;
  rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx tmp_reg = gen_rtx_REG (Pmode, 0);
  rtx todec = gen_int_mode (-size, Pmode);
  rtx par, set, mem;

  if (INTVAL (todec) != -size)
    {
      warning (0, "stack frame too large");
      emit_insn (gen_trap ());
      return;
    }

  if (crtl->limit_stack)
    {
      if (REG_P (stack_limit_rtx)
          && REGNO (stack_limit_rtx) > 1
          && REGNO (stack_limit_rtx) <= 31)
        {
          emit_insn (TARGET_32BIT
                     ? gen_addsi3 (tmp_reg,
                                   stack_limit_rtx,
                                   GEN_INT (size))
                     : gen_adddi3 (tmp_reg,
                                   stack_limit_rtx,
                                   GEN_INT (size)));

          emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
                                    const0_rtx));
        }
      else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
               && TARGET_32BIT
               && DEFAULT_ABI == ABI_V4)
        {
          rtx toload = gen_rtx_CONST (VOIDmode,
                                      gen_rtx_PLUS (Pmode,
                                                    stack_limit_rtx,
                                                    GEN_INT (size)));

          emit_insn (gen_elf_high (tmp_reg, toload));
          emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
          emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
                                    const0_rtx));
        }
      else
        warning (0, "stack limit expression is not supported");
    }

  if (copy_r12 || copy_r11)
    emit_move_insn (copy_r11
                    ? gen_rtx_REG (Pmode, 11)
                    : gen_rtx_REG (Pmode, 12),
                    stack_reg);

  if (size > 32767)
    {
      /* Need a note here so that try_split doesn't get confused.  */
      if (get_last_insn () == NULL_RTX)
        emit_note (NOTE_INSN_DELETED);
      insn = emit_move_insn (tmp_reg, todec);
      try_split (PATTERN (insn), insn, 0);
      todec = tmp_reg;
    }

  insn = emit_insn (TARGET_32BIT
                    ? gen_movsi_update_stack (stack_reg, stack_reg,
                                              todec, stack_reg)
                    : gen_movdi_di_update_stack (stack_reg, stack_reg,
                                                 todec, stack_reg));
  /* Since we didn't use gen_frame_mem to generate the MEM, grab
     it now and set the alias set/attributes.  The above gen_*_update
     calls will generate a PARALLEL with the MEM set being the first
     operation.  */
  par = PATTERN (insn);
  gcc_assert (GET_CODE (par) == PARALLEL);
  set = XVECEXP (par, 0, 0);
  gcc_assert (GET_CODE (set) == SET);
  mem = SET_DEST (set);
  gcc_assert (MEM_P (mem));
  MEM_NOTRAP_P (mem) = 1;
  set_mem_alias_set (mem, get_frame_alias_set ());

  RTX_FRAME_RELATED_P (insn) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR,
                gen_rtx_SET (VOIDmode, stack_reg,
                             gen_rtx_PLUS (Pmode, stack_reg,
                                           GEN_INT (-size))));
}
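/* Illustrative note: on PowerPC the store-with-update forms emitted
   above correspond to "stwu r1,-size(r1)" (32-bit) or
   "stdu r1,-size(r1)" (64-bit), which write the back chain and
   decrement the stack pointer in a single instruction; sizes over
   32767 do not fit the 16-bit displacement, hence the detour through
   the r0 temporary above.  */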
/* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
   with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
   is not NULL.  It would be nice if dwarf2out_frame_debug_expr could
   deduce these equivalences by itself so it wasn't necessary to hold
   its hand so much.  */

static void
rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
                      rtx reg2, rtx rreg)
{
  rtx real, temp;

  /* copy_rtx will not make unique copies of registers, so we need to
     ensure we don't have unwanted sharing here.  */
  if (reg == reg2)
    reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));

  if (reg == rreg)
    reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));

  real = copy_rtx (PATTERN (insn));

  if (reg2 != NULL_RTX)
    real = replace_rtx (real, reg2, rreg);

  real = replace_rtx (real, reg,
                      gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
                                                        STACK_POINTER_REGNUM),
                                    GEN_INT (val)));

  /* We expect that 'real' is either a SET or a PARALLEL containing
     SETs (and possibly other stuff).  In a PARALLEL, all the SETs
     are important so they all have to be marked RTX_FRAME_RELATED_P.  */

  if (GET_CODE (real) == SET)
    {
      rtx set = real;

      temp = simplify_rtx (SET_SRC (set));
      if (temp)
        SET_SRC (set) = temp;
      temp = simplify_rtx (SET_DEST (set));
      if (temp)
        SET_DEST (set) = temp;
      if (GET_CODE (SET_DEST (set)) == MEM)
        {
          temp = simplify_rtx (XEXP (SET_DEST (set), 0));
          if (temp)
            XEXP (SET_DEST (set), 0) = temp;
        }
    }
  else
    {
      int i;

      gcc_assert (GET_CODE (real) == PARALLEL);
      for (i = 0; i < XVECLEN (real, 0); i++)
        if (GET_CODE (XVECEXP (real, 0, i)) == SET)
          {
            rtx set = XVECEXP (real, 0, i);

            temp = simplify_rtx (SET_SRC (set));
            if (temp)
              SET_SRC (set) = temp;
            temp = simplify_rtx (SET_DEST (set));
            if (temp)
              SET_DEST (set) = temp;
            if (GET_CODE (SET_DEST (set)) == MEM)
              {
                temp = simplify_rtx (XEXP (SET_DEST (set), 0));
                if (temp)
                  XEXP (SET_DEST (set), 0) = temp;
              }
            RTX_FRAME_RELATED_P (set) = 1;
          }
    }

  RTX_FRAME_RELATED_P (insn) = 1;
  add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);
}
/* Returns an insn that has a vrsave set operation with the
   appropriate CLOBBERs.  */

static rtx
generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
{
  int nclobs, i;
  rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
  rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);

  clobs[0]
    = gen_rtx_SET (VOIDmode,
                   vrsave,
                   gen_rtx_UNSPEC_VOLATILE (SImode,
                                            gen_rtvec (2, reg, vrsave),
                                            UNSPECV_SET_VRSAVE));

  nclobs = 1;

  /* We need to clobber the registers in the mask so the scheduler
     does not move sets to VRSAVE before sets of AltiVec registers.

     However, if the function receives nonlocal gotos, reload will set
     all call saved registers live.  We will end up with:

        (set (reg 999) (mem))
        (parallel [ (set (reg vrsave) (unspec blah))
                    (clobber (reg 999))])

     The clobber will cause the store into reg 999 to be dead, and
     flow will attempt to delete an epilogue insn.  In this case, we
     need an unspec use/set of the register.  */

  for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
    if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
      {
        if (!epiloguep || call_used_regs[i])
          clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
                                             gen_rtx_REG (V4SImode, i));
        else
          {
            rtx reg = gen_rtx_REG (V4SImode, i);

            clobs[nclobs++]
              = gen_rtx_SET (VOIDmode,
                             reg,
                             gen_rtx_UNSPEC (V4SImode,
                                             gen_rtvec (1, reg), 27));
          }
      }

  insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));

  for (i = 0; i < nclobs; ++i)
    XVECEXP (insn, 0, i) = clobs[i];

  return insn;
}
/* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
   Save REGNO into [FRAME_REG + OFFSET] in mode MODE.  */

static void
emit_frame_save (rtx frame_reg, rtx frame_ptr, enum machine_mode mode,
                 unsigned int regno, int offset, HOST_WIDE_INT total_size)
{
  rtx reg, offset_rtx, insn, mem, addr, int_rtx;
  rtx replacea, replaceb;

  int_rtx = GEN_INT (offset);

  /* Some cases that need register indexed addressing.  */
  if ((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
      || (TARGET_E500_DOUBLE && mode == DFmode)
      || (TARGET_SPE_ABI
          && SPE_VECTOR_MODE (mode)
          && !SPE_CONST_OFFSET_OK (offset)))
    {
      /* Whoever calls us must make sure r11 is available in the
         flow path of instructions in the prologue.  */
      offset_rtx = gen_rtx_REG (Pmode, 11);
      emit_move_insn (offset_rtx, int_rtx);

      replacea = offset_rtx;
      replaceb = int_rtx;
    }
  else
    {
      offset_rtx = int_rtx;
      replacea = NULL_RTX;
      replaceb = NULL_RTX;
    }

  reg = gen_rtx_REG (mode, regno);
  addr = gen_rtx_PLUS (Pmode, frame_reg, offset_rtx);
  mem = gen_frame_mem (mode, addr);

  insn = emit_move_insn (mem, reg);

  rs6000_frame_related (insn, frame_ptr, total_size, replacea, replaceb);
}
/* Emit an offset memory reference suitable for a frame store, while
   converting to a valid addressing mode.  */

static rtx
gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
{
  rtx int_rtx, offset_rtx;

  int_rtx = GEN_INT (offset);

  if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode))
      || (TARGET_E500_DOUBLE && mode == DFmode))
    {
      offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
      emit_move_insn (offset_rtx, int_rtx);
    }
  else
    offset_rtx = int_rtx;

  return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
}
/* Look for user-defined global regs.  We should not save and restore these,
   and cannot use stmw/lmw if there are any in its range.  */

static bool
no_global_regs_above (int first, bool gpr)
{
  int i;
  for (i = first; i < (gpr ? 32 : 64); i++)
    if (global_regs[i])
      return false;
  return true;
}
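/* For example (illustrative): a user-level global register variable
   such as "register int foo asm ("r30");" sets global_regs[30], so
   no_global_regs_above (29, true) returns false, which disables
   stmw/lmw and the out-of-line save/restore routines over that
   range.  */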
#ifndef TARGET_FIX_AND_CONTINUE
#define TARGET_FIX_AND_CONTINUE 0
#endif

/* It's really GPR 13 and FPR 14, but we need the smaller of the two.  */
#define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
#define LAST_SAVRES_REGISTER 31
#define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)

static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][8];
/* Return the symbol for an out-of-line register save/restore routine.
   We are saving/restoring GPRs if GPR is true.  */

static rtx
rs6000_savres_routine_sym (rs6000_stack_t *info, bool savep, bool gpr, bool exitp)
{
  int regno = gpr ? info->first_gp_reg_save : (info->first_fp_reg_save - 32);
  rtx sym;
  int select = ((savep ? 1 : 0) << 2
                | (gpr
                   /* On the SPE, we never have any FPRs, but we do have
                      32/64-bit versions of the routines.  */
                   ? (TARGET_SPE_ABI && info->spe_64bit_regs_used ? 1 : 0)
                   : 0) << 1
                | (exitp ? 1 : 0));

  /* Don't generate bogus routine names.  */
  gcc_assert (FIRST_SAVRES_REGISTER <= regno && regno <= LAST_SAVRES_REGISTER);

  sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];

  if (sym == NULL)
    {
      char name[30];
      const char *action;
      const char *regkind;
      const char *exit_suffix;

      action = savep ? "save" : "rest";

      /* SPE has slightly different names for its routines depending on
         whether we are saving 32-bit or 64-bit registers.  */
      if (TARGET_SPE_ABI)
        {
          /* No floating point saves on the SPE.  */
          gcc_assert (gpr);

          regkind = info->spe_64bit_regs_used ? "64gpr" : "32gpr";
        }
      else
        regkind = gpr ? "gpr" : "fpr";

      exit_suffix = exitp ? "_x" : "";

      sprintf (name, "_%s%s_%d%s", action, regkind, regno, exit_suffix);

      sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
        = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
    }

  return sym;
}
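/* For example (derived from the sprintf format above): saving GPRs
   from r29 yields "_savegpr_29", restoring FPRs from f14 with the
   "exit" variant yields "_restfpr_14_x", and the 64-bit SPE flavor
   yields "_save64gpr_29".  */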
/* Emit a sequence of insns, including a stack tie if needed, for
   resetting the stack pointer.  If SAVRES is true, then don't reset the
   stack pointer, but move the base of the frame into r11 for use by
   out-of-line register restore routines.  */

static rtx
rs6000_emit_stack_reset (rs6000_stack_t *info,
                         rtx sp_reg_rtx, rtx frame_reg_rtx,
                         int sp_offset, bool savres)
{
  /* This blockage is needed so that sched doesn't decide to move
     the sp change before the register restores.  */
  if (frame_reg_rtx != sp_reg_rtx
      || (TARGET_SPE_ABI
          && info->spe_64bit_regs_used != 0
          && info->first_gp_reg_save != 32))
    rs6000_emit_stack_tie ();

  if (frame_reg_rtx != sp_reg_rtx)
    {
      if (sp_offset != 0)
        return emit_insn (gen_addsi3 (sp_reg_rtx, frame_reg_rtx,
                                      GEN_INT (sp_offset)));
      else
        return emit_move_insn (sp_reg_rtx, frame_reg_rtx);
    }
  else if (sp_offset != 0)
    {
      /* If we are restoring registers out-of-line, we will be using the
         "exit" variants of the restore routines, which will reset the
         stack for us.  But we do need to point r11 into the right place
         for those routines.  */
      rtx dest_reg = (savres
                      ? gen_rtx_REG (Pmode, 11)
                      : sp_reg_rtx);

      rtx insn = emit_insn (gen_add3_insn (dest_reg, sp_reg_rtx,
                                           GEN_INT (sp_offset)));
      if (!savres)
        return insn;
    }
  return NULL_RTX;
}
/* Construct a parallel rtx describing the effect of a call to an
   out-of-line register save/restore routine.  */

static rtx
rs6000_make_savres_rtx (rs6000_stack_t *info,
                        rtx frame_reg_rtx, int save_area_offset,
                        enum machine_mode reg_mode,
                        bool savep, bool gpr, bool exitp)
{
  int i;
  int offset, start_reg, end_reg, n_regs;
  int reg_size = GET_MODE_SIZE (reg_mode);
  rtx sym;
  rtvec p;

  offset = 0;
  start_reg = (gpr
               ? info->first_gp_reg_save
               : info->first_fp_reg_save);
  end_reg = gpr ? 32 : 64;
  n_regs = end_reg - start_reg;
  p = rtvec_alloc ((exitp ? 4 : 3) + n_regs);

  /* If we're saving registers, then we should never say we're exiting.  */
  gcc_assert ((savep && !exitp) || !savep);

  if (exitp)
    RTVEC_ELT (p, offset++) = gen_rtx_RETURN (VOIDmode);

  RTVEC_ELT (p, offset++)
    = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 65));

  sym = rs6000_savres_routine_sym (info, savep, gpr, exitp);
  RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
  RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 11));

  for (i = 0; i < end_reg - start_reg; i++)
    {
      rtx addr, reg, mem;
      reg = gen_rtx_REG (reg_mode, start_reg + i);
      addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
                           GEN_INT (save_area_offset + reg_size*i));
      mem = gen_frame_mem (reg_mode, addr);

      RTVEC_ELT (p, i + offset) = gen_rtx_SET (VOIDmode,
                                               savep ? mem : reg,
                                               savep ? reg : mem);
    }

  return gen_rtx_PARALLEL (VOIDmode, p);
}
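/* Illustrative sketch of the PARALLEL built above for an "exit"
   GPR restore starting at r29:
     (parallel [(return)
                (clobber (reg:P 65))            ; LR
                (use (symbol_ref "_restgpr_29_x"))
                (use (reg:P 11))
                (set (reg 29) (mem ...))
                (set (reg 30) (mem ...))
                (set (reg 31) (mem ...))])  */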
/* Determine whether the gp REG is really used.  */

static bool
rs6000_reg_live_or_pic_offset_p (int reg)
{
  return ((df_regs_ever_live_p (reg)
           && (!call_used_regs[reg]
               || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
                   && TARGET_TOC && TARGET_MINIMAL_TOC)))
          || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
              && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
                  || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
}
enum {
  SAVRES_MULTIPLE = 0x1,
  SAVRES_INLINE_FPRS = 0x2,
  SAVRES_INLINE_GPRS = 0x4
};
16021 rs6000_savres_strategy (rs6000_stack_t
*info
, bool savep
,
16022 int using_static_chain_p
, int sibcall
)
16024 bool using_multiple_p
;
16026 bool savres_fprs_inline
;
16027 bool savres_gprs_inline
;
16028 bool noclobber_global_gprs
16029 = no_global_regs_above (info
->first_gp_reg_save
, /*gpr=*/true);
16031 using_multiple_p
= (TARGET_MULTIPLE
&& ! TARGET_POWERPC64
16032 && (!TARGET_SPE_ABI
16033 || info
->spe_64bit_regs_used
== 0)
16034 && info
->first_gp_reg_save
< 31
16035 && noclobber_global_gprs
);
16036 /* Don't bother to try to save things out-of-line if r11 is occupied
16037 by the static chain. It would require too much fiddling and the
16038 static chain is rarely used anyway. */
16039 common
= (using_static_chain_p
16041 || crtl
->calls_eh_return
16042 || !info
->lr_save_p
16043 || cfun
->machine
->ra_need_lr
16044 || info
->total_size
> 32767);
16045 savres_fprs_inline
= (common
16046 || info
->first_fp_reg_save
== 64
16047 || !no_global_regs_above (info
->first_fp_reg_save
,
16049 || FP_SAVE_INLINE (info
->first_fp_reg_save
));
16050 savres_gprs_inline
= (common
16051 /* Saving CR interferes with the exit routines
16052 used on the SPE, so just punt here. */
16055 && info
->spe_64bit_regs_used
!= 0
16056 && info
->cr_save_p
!= 0)
16057 || info
->first_gp_reg_save
== 32
16058 || !noclobber_global_gprs
16059 || GP_SAVE_INLINE (info
->first_gp_reg_save
));
16062 /* If we are going to use store multiple, then don't even bother
16063 with the out-of-line routines, since the store-multiple instruction
16064 will always be smaller. */
16065 savres_gprs_inline
= savres_gprs_inline
|| using_multiple_p
;
16068 /* The situation is more complicated with load multiple. We'd
16069 prefer to use the out-of-line routines for restores, since the
16070 "exit" out-of-line routines can handle the restore of LR and
16071 the frame teardown. But we can only use the out-of-line
16072 routines if we know that we've used store multiple or
16073 out-of-line routines in the prologue, i.e. if we've saved all
16074 the registers from first_gp_reg_save. Otherwise, we risk
16075 loading garbage from the stack. Furthermore, we can only use
16076 the "exit" out-of-line gpr restore if we haven't saved any
16078 bool saved_all
= !savres_gprs_inline
|| using_multiple_p
;
16080 if (saved_all
&& info
->first_fp_reg_save
!= 64)
16081 /* We can't use the exit routine; use load multiple if it's
16083 savres_gprs_inline
= savres_gprs_inline
|| using_multiple_p
;
16086 return (using_multiple_p
16087 | (savres_fprs_inline
<< 1)
16088 | (savres_gprs_inline
<< 2));
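/* Illustrative note: the return value is a bit mask decoded by the
   caller, e.g. in rs6000_emit_prologue below a result of
   SAVRES_MULTIPLE | SAVRES_INLINE_GPRS (0x5) means "use stmw and save
   the GPRs inline, but call the out-of-line FPR save routine", since
   the SAVRES_INLINE_FPRS bit is clear.  */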
16091 /* Emit function prologue as insns. */
16094 rs6000_emit_prologue (void)
16096 rs6000_stack_t
*info
= rs6000_stack_info ();
16097 enum machine_mode reg_mode
= Pmode
;
16098 int reg_size
= TARGET_32BIT
? 4 : 8;
16099 rtx sp_reg_rtx
= gen_rtx_REG (Pmode
, STACK_POINTER_REGNUM
);
16100 rtx frame_ptr_rtx
= gen_rtx_REG (Pmode
, 12);
16101 rtx frame_reg_rtx
= sp_reg_rtx
;
16102 rtx cr_save_rtx
= NULL_RTX
;
16105 int saving_FPRs_inline
;
16106 int saving_GPRs_inline
;
16107 int using_store_multiple
;
16108 int using_static_chain_p
= (cfun
->static_chain_decl
!= NULL_TREE
16109 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM
)
16110 && !call_used_regs
[STATIC_CHAIN_REGNUM
]);
16111 HOST_WIDE_INT sp_offset
= 0;
16113 if (TARGET_FIX_AND_CONTINUE
)
16115 /* gdb on darwin arranges to forward a function from the old
16116 address by modifying the first 5 instructions of the function
16117 to branch to the overriding function. This is necessary to
16118 permit function pointers that point to the old function to
16119 actually forward to the new function. */
16120 emit_insn (gen_nop ());
16121 emit_insn (gen_nop ());
16122 emit_insn (gen_nop ());
16123 emit_insn (gen_nop ());
16124 emit_insn (gen_nop ());
16127 if (TARGET_SPE_ABI
&& info
->spe_64bit_regs_used
!= 0)
16129 reg_mode
= V2SImode
;
16133 strategy
= rs6000_savres_strategy (info
, /*savep=*/true,
16134 /*static_chain_p=*/using_static_chain_p
,
16136 using_store_multiple
= strategy
& SAVRES_MULTIPLE
;
16137 saving_FPRs_inline
= strategy
& SAVRES_INLINE_FPRS
;
16138 saving_GPRs_inline
= strategy
& SAVRES_INLINE_GPRS
;
16140 /* For V.4, update stack before we do any saving and set back pointer. */
16141 if (! WORLD_SAVE_P (info
)
16143 && (DEFAULT_ABI
== ABI_V4
16144 || crtl
->calls_eh_return
))
16146 bool need_r11
= (TARGET_SPE
16147 ? (!saving_GPRs_inline
16148 && info
->spe_64bit_regs_used
== 0)
16149 : (!saving_FPRs_inline
|| !saving_GPRs_inline
));
16150 if (info
->total_size
< 32767)
16151 sp_offset
= info
->total_size
;
16153 frame_reg_rtx
= (need_r11
16154 ? gen_rtx_REG (Pmode
, 11)
16156 rs6000_emit_allocate_stack (info
->total_size
,
16157 (frame_reg_rtx
!= sp_reg_rtx
16158 && (info
->cr_save_p
16160 || info
->first_fp_reg_save
< 64
16161 || info
->first_gp_reg_save
< 32
16164 if (frame_reg_rtx
!= sp_reg_rtx
)
16165 rs6000_emit_stack_tie ();
16168 /* Handle world saves specially here. */
16169 if (WORLD_SAVE_P (info
))
16176 /* save_world expects lr in r0. */
16177 reg0
= gen_rtx_REG (Pmode
, 0);
16178 if (info
->lr_save_p
)
16180 insn
= emit_move_insn (reg0
,
16181 gen_rtx_REG (Pmode
, LR_REGNO
));
16182 RTX_FRAME_RELATED_P (insn
) = 1;
16185 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
16186 assumptions about the offsets of various bits of the stack
16188 gcc_assert (info
->gp_save_offset
== -220
16189 && info
->fp_save_offset
== -144
16190 && info
->lr_save_offset
== 8
16191 && info
->cr_save_offset
== 4
16194 && (!crtl
->calls_eh_return
16195 || info
->ehrd_offset
== -432)
16196 && info
->vrsave_save_offset
== -224
16197 && info
->altivec_save_offset
== -416);
16199 treg
= gen_rtx_REG (SImode
, 11);
16200 emit_move_insn (treg
, GEN_INT (-info
->total_size
));
16202 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
16203 in R11. It also clobbers R12, so beware! */
16205 /* Preserve CR2 for save_world prologues */
16207 sz
+= 32 - info
->first_gp_reg_save
;
16208 sz
+= 64 - info
->first_fp_reg_save
;
16209 sz
+= LAST_ALTIVEC_REGNO
- info
->first_altivec_reg_save
+ 1;
16210 p
= rtvec_alloc (sz
);
16212 RTVEC_ELT (p
, j
++) = gen_rtx_CLOBBER (VOIDmode
,
16213 gen_rtx_REG (SImode
,
16215 RTVEC_ELT (p
, j
++) = gen_rtx_USE (VOIDmode
,
16216 gen_rtx_SYMBOL_REF (Pmode
,
16218 /* We do floats first so that the instruction pattern matches
16220 for (i
= 0; i
< 64 - info
->first_fp_reg_save
; i
++)
16222 rtx reg
= gen_rtx_REG (((TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
16223 ? DFmode
: SFmode
),
16224 info
->first_fp_reg_save
+ i
);
16225 rtx addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
16226 GEN_INT (info
->fp_save_offset
16227 + sp_offset
+ 8 * i
));
16228 rtx mem
= gen_frame_mem (((TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
16229 ? DFmode
: SFmode
), addr
);
16231 RTVEC_ELT (p
, j
++) = gen_rtx_SET (VOIDmode
, mem
, reg
);
16233 for (i
= 0; info
->first_altivec_reg_save
+ i
<= LAST_ALTIVEC_REGNO
; i
++)
16235 rtx reg
= gen_rtx_REG (V4SImode
, info
->first_altivec_reg_save
+ i
);
16236 rtx addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
16237 GEN_INT (info
->altivec_save_offset
16238 + sp_offset
+ 16 * i
));
16239 rtx mem
= gen_frame_mem (V4SImode
, addr
);
16241 RTVEC_ELT (p
, j
++) = gen_rtx_SET (VOIDmode
, mem
, reg
);
16243 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
16245 rtx reg
= gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
);
16246 rtx addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
16247 GEN_INT (info
->gp_save_offset
16248 + sp_offset
+ reg_size
* i
));
16249 rtx mem
= gen_frame_mem (reg_mode
, addr
);
16251 RTVEC_ELT (p
, j
++) = gen_rtx_SET (VOIDmode
, mem
, reg
);
16255 /* CR register traditionally saved as CR2. */
16256 rtx reg
= gen_rtx_REG (reg_mode
, CR2_REGNO
);
16257 rtx addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
16258 GEN_INT (info
->cr_save_offset
16260 rtx mem
= gen_frame_mem (reg_mode
, addr
);
16262 RTVEC_ELT (p
, j
++) = gen_rtx_SET (VOIDmode
, mem
, reg
);
16264 /* Explain about use of R0. */
16265 if (info
->lr_save_p
)
16267 rtx addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
16268 GEN_INT (info
->lr_save_offset
16270 rtx mem
= gen_frame_mem (reg_mode
, addr
);
16272 RTVEC_ELT (p
, j
++) = gen_rtx_SET (VOIDmode
, mem
, reg0
);
16274 /* Explain what happens to the stack pointer. */
16276 rtx newval
= gen_rtx_PLUS (Pmode
, sp_reg_rtx
, treg
);
16277 RTVEC_ELT (p
, j
++) = gen_rtx_SET (VOIDmode
, sp_reg_rtx
, newval
);
16280 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
16281 rs6000_frame_related (insn
, frame_ptr_rtx
, info
->total_size
,
16282 treg
, GEN_INT (-info
->total_size
));
16283 sp_offset
= info
->total_size
;
16286 /* If we use the link register, get it into r0. */
16287 if (!WORLD_SAVE_P (info
) && info
->lr_save_p
)
16289 rtx addr
, reg
, mem
;
16291 insn
= emit_move_insn (gen_rtx_REG (Pmode
, 0),
16292 gen_rtx_REG (Pmode
, LR_REGNO
));
16293 RTX_FRAME_RELATED_P (insn
) = 1;
16295 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
16296 GEN_INT (info
->lr_save_offset
+ sp_offset
));
16297 reg
= gen_rtx_REG (Pmode
, 0);
16298 mem
= gen_rtx_MEM (Pmode
, addr
);
16299 /* This should not be of rs6000_sr_alias_set, because of
16300 __builtin_return_address. */
16302 insn
= emit_move_insn (mem
, reg
);
16303 rs6000_frame_related (insn
, frame_ptr_rtx
, info
->total_size
,
16304 NULL_RTX
, NULL_RTX
);
16307 /* If we need to save CR, put it into r12. */
16308 if (!WORLD_SAVE_P (info
) && info
->cr_save_p
&& frame_reg_rtx
!= frame_ptr_rtx
)
16312 cr_save_rtx
= gen_rtx_REG (SImode
, 12);
16313 insn
= emit_insn (gen_movesi_from_cr (cr_save_rtx
));
16314 RTX_FRAME_RELATED_P (insn
) = 1;
16315 /* Now, there's no way that dwarf2out_frame_debug_expr is going
16316 to understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)'.
16317 But that's OK. All we have to do is specify that _one_ condition
16318 code register is saved in this stack slot. The thrower's epilogue
16319 will then restore all the call-saved registers.
16320 We use CR2_REGNO (70) to be compatible with gcc-2.95 on Linux. */
16321 set
= gen_rtx_SET (VOIDmode
, cr_save_rtx
,
16322 gen_rtx_REG (SImode
, CR2_REGNO
));
16323 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, set
);
16326 /* Do any required saving of fpr's. If only one or two to save, do
16327 it ourselves. Otherwise, call function. */
16328 if (!WORLD_SAVE_P (info
) && saving_FPRs_inline
)
16331 for (i
= 0; i
< 64 - info
->first_fp_reg_save
; i
++)
16332 if ((df_regs_ever_live_p (info
->first_fp_reg_save
+i
)
16333 && ! call_used_regs
[info
->first_fp_reg_save
+i
]))
16334 emit_frame_save (frame_reg_rtx
, frame_ptr_rtx
,
16335 (TARGET_HARD_FLOAT
&& TARGET_DOUBLE_FLOAT
)
16337 info
->first_fp_reg_save
+ i
,
16338 info
->fp_save_offset
+ sp_offset
+ 8 * i
,
16341 else if (!WORLD_SAVE_P (info
) && info
->first_fp_reg_save
!= 64)
16345 par
= rs6000_make_savres_rtx (info
, frame_reg_rtx
,
16346 info
->fp_save_offset
+ sp_offset
,
16348 /*savep=*/true, /*gpr=*/false,
16350 insn
= emit_insn (par
);
16351 rs6000_frame_related (insn
, frame_ptr_rtx
, info
->total_size
,
16352 NULL_RTX
, NULL_RTX
);
16355 /* Save GPRs. This is done as a PARALLEL if we are using
16356 the store-multiple instructions. */
16357 if (!WORLD_SAVE_P (info
)
16359 && info
->spe_64bit_regs_used
!= 0
16360 && info
->first_gp_reg_save
!= 32)
16363 rtx spe_save_area_ptr
;
16365 /* Determine whether we can address all of the registers that need
16366 to be saved with an offset from the stack pointer that fits in
16367 the small const field for SPE memory instructions. */
16368 int spe_regs_addressable_via_sp
16369 = (SPE_CONST_OFFSET_OK(info
->spe_gp_save_offset
+ sp_offset
16370 + (32 - info
->first_gp_reg_save
- 1) * reg_size
)
16371 && saving_GPRs_inline
);
16374 if (spe_regs_addressable_via_sp
)
16376 spe_save_area_ptr
= frame_reg_rtx
;
16377 spe_offset
= info
->spe_gp_save_offset
+ sp_offset
;
16381 /* Make r11 point to the start of the SPE save area. We need
16382 to be careful here if r11 is holding the static chain. If
16383 it is, then temporarily save it in r0. We would use r0 as
16384 our base register here, but using r0 as a base register in
16385 loads and stores means something different from what we
16387 int ool_adjust
= (saving_GPRs_inline
16389 : (info
->first_gp_reg_save
16390 - (FIRST_SAVRES_REGISTER
+1))*8);
16391 HOST_WIDE_INT offset
= (info
->spe_gp_save_offset
16392 + sp_offset
- ool_adjust
);
16394 if (using_static_chain_p
)
16396 rtx r0
= gen_rtx_REG (Pmode
, 0);
16397 gcc_assert (info
->first_gp_reg_save
> 11);
16399 emit_move_insn (r0
, gen_rtx_REG (Pmode
, 11));
16402 spe_save_area_ptr
= gen_rtx_REG (Pmode
, 11);
16403 insn
= emit_insn (gen_addsi3 (spe_save_area_ptr
,
16405 GEN_INT (offset
)));
16406 /* We need to make sure the move to r11 gets noted for
16407 properly outputting unwind information. */
16408 if (!saving_GPRs_inline
)
16409 rs6000_frame_related (insn
, frame_reg_rtx
, offset
,
16410 NULL_RTX
, NULL_RTX
);
16414 if (saving_GPRs_inline
)
16416 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
16417 if (rs6000_reg_live_or_pic_offset_p (info
->first_gp_reg_save
+ i
))
16419 rtx reg
= gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
);
16420 rtx offset
, addr
, mem
;
16422 /* We're doing all this to ensure that the offset fits into
16423 the immediate offset of 'evstdd'. */
16424 gcc_assert (SPE_CONST_OFFSET_OK (reg_size
* i
+ spe_offset
));
16426 offset
= GEN_INT (reg_size
* i
+ spe_offset
);
16427 addr
= gen_rtx_PLUS (Pmode
, spe_save_area_ptr
, offset
);
16428 mem
= gen_rtx_MEM (V2SImode
, addr
);
16430 insn
= emit_move_insn (mem
, reg
);
16432 rs6000_frame_related (insn
, spe_save_area_ptr
,
16433 info
->spe_gp_save_offset
16434 + sp_offset
+ reg_size
* i
,
16435 offset
, const0_rtx
);
16442 par
= rs6000_make_savres_rtx (info
, gen_rtx_REG (Pmode
, 11),
16444 /*savep=*/true, /*gpr=*/true,
16446 insn
= emit_insn (par
);
16447 rs6000_frame_related (insn
, frame_ptr_rtx
, info
->total_size
,
16448 NULL_RTX
, NULL_RTX
);
16452 /* Move the static chain pointer back. */
16453 if (using_static_chain_p
&& !spe_regs_addressable_via_sp
)
16454 emit_move_insn (gen_rtx_REG (Pmode
, 11), gen_rtx_REG (Pmode
, 0));
16456 else if (!WORLD_SAVE_P (info
) && !saving_GPRs_inline
)
16460 /* Need to adjust r11 if we saved any FPRs. */
16461 if (info
->first_fp_reg_save
!= 64)
16463 rtx r11
= gen_rtx_REG (reg_mode
, 11);
16464 rtx offset
= GEN_INT (info
->total_size
16465 + (-8 * (64-info
->first_fp_reg_save
)));
16466 rtx ptr_reg
= (sp_reg_rtx
== frame_reg_rtx
16467 ? sp_reg_rtx
: r11
);
16469 emit_insn (TARGET_32BIT
16470 ? gen_addsi3 (r11
, ptr_reg
, offset
)
16471 : gen_adddi3 (r11
, ptr_reg
, offset
));
16474 par
= rs6000_make_savres_rtx (info
, frame_reg_rtx
,
16475 info
->gp_save_offset
+ sp_offset
,
16477 /*savep=*/true, /*gpr=*/true,
16479 insn
= emit_insn (par
);
16480 rs6000_frame_related (insn
, frame_ptr_rtx
, info
->total_size
,
16481 NULL_RTX
, NULL_RTX
);
16483 else if (!WORLD_SAVE_P (info
) && using_store_multiple
)
16487 p
= rtvec_alloc (32 - info
->first_gp_reg_save
);
16488 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
16490 rtx addr
, reg
, mem
;
16491 reg
= gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
);
16492 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
16493 GEN_INT (info
->gp_save_offset
16496 mem
= gen_frame_mem (reg_mode
, addr
);
16498 RTVEC_ELT (p
, i
) = gen_rtx_SET (VOIDmode
, mem
, reg
);
16500 insn
= emit_insn (gen_rtx_PARALLEL (VOIDmode
, p
));
16501 rs6000_frame_related (insn
, frame_ptr_rtx
, info
->total_size
,
16502 NULL_RTX
, NULL_RTX
);
16504 else if (!WORLD_SAVE_P (info
))
16507 for (i
= 0; i
< 32 - info
->first_gp_reg_save
; i
++)
16508 if (rs6000_reg_live_or_pic_offset_p (info
->first_gp_reg_save
+ i
))
16510 rtx addr
, reg
, mem
;
16511 reg
= gen_rtx_REG (reg_mode
, info
->first_gp_reg_save
+ i
);
16513 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
16514 GEN_INT (info
->gp_save_offset
16517 mem
= gen_frame_mem (reg_mode
, addr
);
16519 insn
= emit_move_insn (mem
, reg
);
16520 rs6000_frame_related (insn
, frame_ptr_rtx
, info
->total_size
,
16521 NULL_RTX
, NULL_RTX
);
16525 /* ??? There's no need to emit actual instructions here, but it's the
16526 easiest way to get the frame unwind information emitted. */
16527 if (crtl
->calls_eh_return
)
16529 unsigned int i
, regno
;
16531 /* In AIX ABI we need to pretend we save r2 here. */
16534 rtx addr
, reg
, mem
;
16536 reg
= gen_rtx_REG (reg_mode
, 2);
16537 addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
16538 GEN_INT (sp_offset
+ 5 * reg_size
));
16539 mem
= gen_frame_mem (reg_mode
, addr
);
16541 insn
= emit_move_insn (mem
, reg
);
16542 rs6000_frame_related (insn
, frame_ptr_rtx
, info
->total_size
,
16543 NULL_RTX
, NULL_RTX
);
16544 PATTERN (insn
) = gen_blockage ();
16549 regno
= EH_RETURN_DATA_REGNO (i
);
16550 if (regno
== INVALID_REGNUM
)
16553 emit_frame_save (frame_reg_rtx
, frame_ptr_rtx
, reg_mode
, regno
,
16554 info
->ehrd_offset
+ sp_offset
16555 + reg_size
* (int) i
,
16560 /* Save CR if we use any that must be preserved. */
16561 if (!WORLD_SAVE_P (info
) && info
->cr_save_p
)
16563 rtx addr
= gen_rtx_PLUS (Pmode
, frame_reg_rtx
,
16564 GEN_INT (info
->cr_save_offset
+ sp_offset
));
16565 rtx mem
= gen_frame_mem (SImode
, addr
);
16566 /* See the large comment above about why CR2_REGNO is used. */
16567 rtx magic_eh_cr_reg
= gen_rtx_REG (SImode
, CR2_REGNO
);
16569 /* If r12 was used to hold the original sp, copy cr into r0 now
16571 if (REGNO (frame_reg_rtx
) == 12)
16575 cr_save_rtx
= gen_rtx_REG (SImode
, 0);
16576 insn
= emit_insn (gen_movesi_from_cr (cr_save_rtx
));
16577 RTX_FRAME_RELATED_P (insn
) = 1;
16578 set
= gen_rtx_SET (VOIDmode
, cr_save_rtx
, magic_eh_cr_reg
);
16579 add_reg_note (insn
, REG_FRAME_RELATED_EXPR
, set
);
16581 insn
= emit_move_insn (mem
, cr_save_rtx
);
16583 rs6000_frame_related (insn
, frame_ptr_rtx
, info
->total_size
,
16584 NULL_RTX
, NULL_RTX
);
16587 /* Update stack and set back pointer unless this is V.4,
16588 for which it was done previously. */
16589 if (!WORLD_SAVE_P (info
) && info
->push_p
16590 && !(DEFAULT_ABI
== ABI_V4
|| crtl
->calls_eh_return
))
16592 if (info
->total_size
< 32767)
16593 sp_offset
= info
->total_size
;
16595 frame_reg_rtx
= frame_ptr_rtx
;
16596 rs6000_emit_allocate_stack (info
->total_size
,
16597 (frame_reg_rtx
!= sp_reg_rtx
16598 && ((info
->altivec_size
!= 0)
16599 || (info
->vrsave_mask
!= 0)
16602 if (frame_reg_rtx
!= sp_reg_rtx
)
16603 rs6000_emit_stack_tie ();
  /* Set frame pointer, if needed.  */
  if (frame_pointer_needed)
    {
      insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
			     sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  /* Save AltiVec registers if needed.  Save here because the red zone does
     not include AltiVec registers.  */
  if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI && info->altivec_size != 0)
    {
      int i;

      /* There should be a non-inline version of this, for when we
	 are saving lots of vector registers.  */
      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	  {
	    rtx areg, savereg, mem;
	    int offset;

	    offset = info->altivec_save_offset + sp_offset
	      + 16 * (i - info->first_altivec_reg_save);

	    savereg = gen_rtx_REG (V4SImode, i);

	    areg = gen_rtx_REG (Pmode, 0);
	    emit_move_insn (areg, GEN_INT (offset));

	    /* AltiVec addressing mode is [reg+reg].  */
	    mem = gen_frame_mem (V4SImode,
				 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));

	    insn = emit_move_insn (mem, savereg);

	    rs6000_frame_related (insn, frame_ptr_rtx, info->total_size,
				  areg, GEN_INT (offset));
	  }
    }
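  /* The [reg+reg] form means the offset must be materialized first,
     e.g. (illustrative only):  li r0,offset ; stvx v20,rFRAME,r0.
     AltiVec loads/stores have no D-form displacement, which is why
     areg is loaded with the offset above.  */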
  /* VRSAVE is a bit vector representing which AltiVec registers
     are used.  The OS uses this to determine which vector
     registers to save on a context switch.  We need to save
     VRSAVE on the stack frame, add whatever AltiVec registers we
     used in this function, and do the corresponding magic in the
     epilogue.  */

  if (TARGET_ALTIVEC && TARGET_ALTIVEC_VRSAVE
      && info->vrsave_mask != 0)
    {
      rtx reg, mem, vrsave;
      int offset;

      /* Get VRSAVE onto a GPR.  Note that ABI_V4 might be using r12
	 as frame_reg_rtx and r11 as the static chain pointer for
	 nested functions.  */
      reg = gen_rtx_REG (SImode, 0);
      vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
      if (TARGET_MACHO)
	emit_insn (gen_get_vrsave_internal (reg));
      else
	emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));

      if (!WORLD_SAVE_P (info))
	{
	  /* Save VRSAVE.  */
	  offset = info->vrsave_save_offset + sp_offset;
	  mem = gen_frame_mem (SImode,
			       gen_rtx_PLUS (Pmode, frame_reg_rtx,
					     GEN_INT (offset)));
	  insn = emit_move_insn (mem, reg);
	}

      /* Include the registers in the mask.  */
      emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));

      insn = emit_insn (generate_set_vrsave (reg, info, 0));
    }
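  /* Worked example (assuming the usual ALTIVEC_REG_BIT mapping, with
     v0 at the most-significant bit): a function that uses only
     v20..v31 has info->vrsave_mask == 0x00000fff, so the ior above
     sets exactly the low twelve bits in the updated VRSAVE value.  */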
  /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up.  */
  if ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
      || (DEFAULT_ABI == ABI_V4
	  && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
	  && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM)))
    {
      /* If emit_load_toc_table will use the link register, we need to save
	 it.  We use R12 for this purpose because emit_load_toc_table
	 can use register 0.  This allows us to use a plain 'blr' to return
	 from the procedure more often.  */
      int save_LR_around_toc_setup = (TARGET_ELF
				      && DEFAULT_ABI != ABI_AIX
				      && flag_pic
				      && ! info->lr_save_p
				      && EDGE_COUNT (EXIT_BLOCK_PTR->preds) > 0);
      if (save_LR_around_toc_setup)
	{
	  rtx lr = gen_rtx_REG (Pmode, LR_REGNO);

	  insn = emit_move_insn (frame_ptr_rtx, lr);
	  RTX_FRAME_RELATED_P (insn) = 1;

	  rs6000_emit_load_toc_table (TRUE);

	  insn = emit_move_insn (lr, frame_ptr_rtx);
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
      else
	rs6000_emit_load_toc_table (TRUE);
    }
#if TARGET_MACHO
  if (DEFAULT_ABI == ABI_DARWIN
      && flag_pic && crtl->uses_pic_offset_table)
    {
      rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
      rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);

      /* Save and restore LR locally around this call (in R0).  */
      if (!info->lr_save_p)
	emit_move_insn (gen_rtx_REG (Pmode, 0), lr);

      emit_insn (gen_load_macho_picbase (src));

      emit_move_insn (gen_rtx_REG (Pmode,
				   RS6000_PIC_OFFSET_TABLE_REGNUM),
		      lr);

      if (!info->lr_save_p)
	emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
    }
#endif
}
/* Write function prologue.  */

static void
rs6000_output_function_prologue (FILE *file,
				 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  rs6000_stack_t *info = rs6000_stack_info ();

  if (TARGET_DEBUG_STACK)
    debug_stack_info (info);

  /* Write .extern for any function we will call to save and restore
     fp values.  */
  if (info->first_fp_reg_save < 64
      && !FP_SAVE_INLINE (info->first_fp_reg_save))
    fprintf (file, "\t.extern %s%d%s\n\t.extern %s%d%s\n",
	     SAVE_FP_PREFIX, info->first_fp_reg_save - 32, SAVE_FP_SUFFIX,
	     RESTORE_FP_PREFIX, info->first_fp_reg_save - 32, RESTORE_FP_SUFFIX);

  /* Write .extern for AIX common mode routines, if needed.  */
  if (! TARGET_POWER && ! TARGET_POWERPC && ! common_mode_defined)
    {
      fputs ("\t.extern __mulh\n", file);
      fputs ("\t.extern __mull\n", file);
      fputs ("\t.extern __divss\n", file);
      fputs ("\t.extern __divus\n", file);
      fputs ("\t.extern __quoss\n", file);
      fputs ("\t.extern __quous\n", file);
      common_mode_defined = 1;
    }

  if (! HAVE_prologue)
    {
      start_sequence ();

      /* A NOTE_INSN_DELETED is supposed to be at the start and end of
	 the "toplevel" insn chain.  */
      emit_note (NOTE_INSN_DELETED);
      rs6000_emit_prologue ();
      emit_note (NOTE_INSN_DELETED);

      /* Expand INSN_ADDRESSES so final() doesn't crash.  */
      {
	rtx insn;
	unsigned addr = 0;
	for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
	  {
	    INSN_ADDRESSES_NEW (insn, addr);
	    addr += 4;
	  }
      }

      if (TARGET_DEBUG_STACK)
	debug_rtx_list (get_insns (), 100);
      final (get_insns (), file, FALSE);
      end_sequence ();
    }

  rs6000_pic_labelno++;
}
/* Non-zero if vmx regs are restored before the frame pop, zero if
   we restore after the pop when possible.  */
#define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0

/* Reload CR from REG.  */

static void
rs6000_restore_saved_cr (rtx reg, int using_mfcr_multiple)
{
  int count = 0;
  int i;

  if (using_mfcr_multiple)
    {
      for (i = 0; i < 8; i++)
	if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
	  count++;
      gcc_assert (count);
    }

  if (using_mfcr_multiple && count > 1)
    {
      rtvec p;
      int ndx;

      p = rtvec_alloc (count);

      ndx = 0;
      for (i = 0; i < 8; i++)
	if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
	  {
	    rtvec r = rtvec_alloc (2);
	    RTVEC_ELT (r, 0) = reg;
	    RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
	    RTVEC_ELT (p, ndx) =
	      gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO+i),
			   gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
	    ndx++;
	  }
      emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      gcc_assert (ndx == count);
    }
  else
    for (i = 0; i < 8; i++)
      if (df_regs_ever_live_p (CR0_REGNO+i) && ! call_used_regs[CR0_REGNO+i])
	{
	  emit_insn (gen_movsi_to_cr_one (gen_rtx_REG (CCmode,
						       CR0_REGNO+i),
					  reg));
	}
}
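/* Example of the one-field path above (illustrative): restoring CR2
   alone uses i == 2, so the field mask passed to the move pattern is
   1 << (7-2) == 0x20, i.e. only the third CR field is written from
   the GPR holding the saved value.  */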
/* Return true if OFFSET from stack pointer can be clobbered by signals.
   V.4 doesn't have any stack cushion, AIX ABIs have 220 or 288 bytes
   below stack pointer not clobbered by signals.  */

static bool
offset_below_red_zone_p (HOST_WIDE_INT offset)
{
  return offset < (DEFAULT_ABI == ABI_V4
		   ? 0
		   : TARGET_32BIT ? -220 : -288);
}
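/* Worked example: on 32-bit AIX an offset of -200 lies inside the
   220-byte cushion and the function returns false, while -240 lies
   below it and returns true; under the V.4 ABI any negative offset
   returns true because there is no cushion at all.  */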
/* Emit function epilogue as insns.  */

void
rs6000_emit_epilogue (int sibcall)
{
  rs6000_stack_t *info;
  int restoring_GPRs_inline;
  int restoring_FPRs_inline;
  int using_load_multiple;
  int using_mtcr_multiple;
  int use_backchain_to_restore_sp;
  int restore_lr;
  int strategy;
  int sp_offset = 0;
  rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
  rtx frame_reg_rtx = sp_reg_rtx;
  rtx cfa_restores = NULL_RTX;
  rtx insn = NULL_RTX;
  enum machine_mode reg_mode = Pmode;
  int reg_size = TARGET_32BIT ? 4 : 8;
  int i;

  info = rs6000_stack_info ();

  if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
    {
      reg_mode = V2SImode;
      reg_size = 8;
    }

  strategy = rs6000_savres_strategy (info, /*savep=*/false,
				     /*static_chain_p=*/0, sibcall);
  using_load_multiple = strategy & SAVRES_MULTIPLE;
  restoring_FPRs_inline = strategy & SAVRES_INLINE_FPRS;
  restoring_GPRs_inline = strategy & SAVRES_INLINE_GPRS;
  using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
			 || rs6000_cpu == PROCESSOR_PPC603
			 || rs6000_cpu == PROCESSOR_PPC750
			 || optimize_size);
  /* Restore via the backchain when we have a large frame, since this
     is more efficient than an addis, addi pair.  The second condition
     here will not trigger at the moment; we don't actually need a
     frame pointer for alloca, but the generic parts of the compiler
     give us one anyway.  */
  use_backchain_to_restore_sp = (info->total_size > 32767
				 || (info->total_size
				     + (info->lr_save_p ? info->lr_save_offset : 0)
				     > 32767)
				 || (cfun->calls_alloca
				     && !frame_pointer_needed));
  restore_lr = (info->lr_save_p
		&& restoring_GPRs_inline
		&& restoring_FPRs_inline);
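  /* The backchain trick relied on below: the ABIs keep the caller's
     stack pointer in the word at 0(r1), so a single load such as
     lwz r11,0(r1) (illustrative) recovers the old sp no matter how
     large the frame is, whereas adding a constant larger than 32767
     back to r1 would need an addis/addi pair.  */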
  if (WORLD_SAVE_P (info))
    {
      int i, j;
      char rname[30];
      const char *alloc_rname;
      rtvec p;

      /* eh_rest_world_r10 will return to the location saved in the LR
	 stack slot (which is not likely to be our caller.)
	 Input: R10 -- stack adjustment.  Clobbers R0, R11, R12, R7, R8.
	 rest_world is similar, except any R10 parameter is ignored.
	 The exception-handling stuff that was here in 2.95 is no
	 longer necessary.  */

      p = rtvec_alloc (9
		       + 4
		       + 32 - info->first_gp_reg_save
		       + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
		       + 63 + 1 - info->first_fp_reg_save);

      strcpy (rname, ((crtl->calls_eh_return) ?
		      "*eh_rest_world_r10" : "*rest_world"));
      alloc_rname = ggc_strdup (rname);

      j = 0;
      RTVEC_ELT (p, j++) = gen_rtx_RETURN (VOIDmode);
      RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
					gen_rtx_REG (Pmode,
						     LR_REGNO));
      RTVEC_ELT (p, j++)
	= gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
      /* The instruction pattern requires a clobber here;
	 it is shared with the restVEC helper. */
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));

      {
	/* CR register traditionally saved as CR2.  */
	rtx reg = gen_rtx_REG (reg_mode, CR2_REGNO);
	rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
				 GEN_INT (info->cr_save_offset));
	rtx mem = gen_frame_mem (reg_mode, addr);

	RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
      }

      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	{
	  rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
	  rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
				   GEN_INT (info->gp_save_offset
					    + reg_size * i));
	  rtx mem = gen_frame_mem (reg_mode, addr);

	  RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
	}
      for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
	{
	  rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
	  rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
				   GEN_INT (info->altivec_save_offset
					    + 16 * i));
	  rtx mem = gen_frame_mem (V4SImode, addr);

	  RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
	}
      for (i = 0; info->first_fp_reg_save + i <= 63; i++)
	{
	  rtx reg = gen_rtx_REG (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
				  ? DFmode : SFmode),
				 info->first_fp_reg_save + i);
	  rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
				   GEN_INT (info->fp_save_offset
					    + 8 * i));
	  rtx mem = gen_frame_mem (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
				    ? DFmode : SFmode), addr);

	  RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, reg, mem);
	}
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
      RTVEC_ELT (p, j++)
	= gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
      RTVEC_ELT (p, j++)
	= gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
      emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));

      return;
    }
  /* frame_reg_rtx + sp_offset points to the top of this stack frame.  */
  if (info->push_p)
    sp_offset = info->total_size;

  /* Restore AltiVec registers if we must do so before adjusting the
     stack.  */
  if (TARGET_ALTIVEC_ABI
      && info->altivec_size != 0
      && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	  || (DEFAULT_ABI != ABI_V4
	      && offset_below_red_zone_p (info->altivec_save_offset))))
    {
      int i;

      if (use_backchain_to_restore_sp)
	{
	  frame_reg_rtx = gen_rtx_REG (Pmode, 11);
	  emit_move_insn (frame_reg_rtx,
			  gen_rtx_MEM (Pmode, sp_reg_rtx));
	  sp_offset = 0;
	}
      else if (frame_pointer_needed)
	frame_reg_rtx = hard_frame_pointer_rtx;

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	  {
	    rtx addr, areg, mem, reg;

	    areg = gen_rtx_REG (Pmode, 0);
	    emit_move_insn
	      (areg, GEN_INT (info->altivec_save_offset
			      + sp_offset
			      + 16 * (i - info->first_altivec_reg_save)));

	    /* AltiVec addressing mode is [reg+reg].  */
	    addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
	    mem = gen_frame_mem (V4SImode, addr);

	    reg = gen_rtx_REG (V4SImode, i);
	    emit_move_insn (reg, mem);
	    if (offset_below_red_zone_p (info->altivec_save_offset
					 + (i - info->first_altivec_reg_save)
					 * 16))
	      cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
					     cfa_restores);
	  }
    }

  /* Restore VRSAVE if we must do so before adjusting the stack.  */
  if (TARGET_ALTIVEC
      && TARGET_ALTIVEC_VRSAVE
      && info->vrsave_mask != 0
      && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	  || (DEFAULT_ABI != ABI_V4
	      && offset_below_red_zone_p (info->vrsave_save_offset))))
    {
      rtx addr, mem, reg;

      if (frame_reg_rtx == sp_reg_rtx)
	{
	  if (use_backchain_to_restore_sp)
	    {
	      frame_reg_rtx = gen_rtx_REG (Pmode, 11);
	      emit_move_insn (frame_reg_rtx,
			      gen_rtx_MEM (Pmode, sp_reg_rtx));
	      sp_offset = 0;
	    }
	  else if (frame_pointer_needed)
	    frame_reg_rtx = hard_frame_pointer_rtx;
	}

      addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
			   GEN_INT (info->vrsave_save_offset + sp_offset));
      mem = gen_frame_mem (SImode, addr);
      reg = gen_rtx_REG (SImode, 12);
      emit_move_insn (reg, mem);

      emit_insn (generate_set_vrsave (reg, info, 1));
    }
  insn = NULL_RTX;
  /* If we have a large stack frame, restore the old stack pointer
     using the backchain.  */
  if (use_backchain_to_restore_sp)
    {
      if (frame_reg_rtx == sp_reg_rtx)
	{
	  /* Under V.4, don't reset the stack pointer until after we're done
	     loading the saved registers.  */
	  if (DEFAULT_ABI == ABI_V4)
	    frame_reg_rtx = gen_rtx_REG (Pmode, 11);

	  insn = emit_move_insn (frame_reg_rtx,
				 gen_rtx_MEM (Pmode, sp_reg_rtx));
	  sp_offset = 0;
	}
      else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
	       && DEFAULT_ABI == ABI_V4)
	/* frame_reg_rtx has been set up by the altivec restore.  */
	;
      else
	{
	  insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
	  frame_reg_rtx = sp_reg_rtx;
	}
    }
  /* If we have a frame pointer, we can restore the old stack pointer
     from it.  */
  else if (frame_pointer_needed)
    {
      frame_reg_rtx = sp_reg_rtx;
      if (DEFAULT_ABI == ABI_V4)
	frame_reg_rtx = gen_rtx_REG (Pmode, 11);

      insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
				       GEN_INT (info->total_size)));
      sp_offset = 0;
    }
  else if (info->push_p
	   && DEFAULT_ABI != ABI_V4
	   && !crtl->calls_eh_return)
    {
      insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
				       GEN_INT (info->total_size)));
      sp_offset = 0;
    }
  if (insn && frame_reg_rtx == sp_reg_rtx)
    {
      if (cfa_restores)
	{
	  REG_NOTES (insn) = cfa_restores;
	  cfa_restores = NULL_RTX;
	}
      add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  /* Restore AltiVec registers if we have not done so already.  */
  if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
      && TARGET_ALTIVEC_ABI
      && info->altivec_size != 0
      && (DEFAULT_ABI == ABI_V4
	  || !offset_below_red_zone_p (info->altivec_save_offset)))
    {
      int i;

      for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
	if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
	  {
	    rtx addr, areg, mem, reg;

	    areg = gen_rtx_REG (Pmode, 0);
	    emit_move_insn
	      (areg, GEN_INT (info->altivec_save_offset
			      + sp_offset
			      + 16 * (i - info->first_altivec_reg_save)));

	    /* AltiVec addressing mode is [reg+reg].  */
	    addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
	    mem = gen_frame_mem (V4SImode, addr);

	    reg = gen_rtx_REG (V4SImode, i);
	    emit_move_insn (reg, mem);
	    if (DEFAULT_ABI == ABI_V4)
	      cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
					     cfa_restores);
	  }
    }

  /* Restore VRSAVE if we have not done so already.  */
  if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
      && TARGET_ALTIVEC
      && TARGET_ALTIVEC_VRSAVE
      && info->vrsave_mask != 0
      && (DEFAULT_ABI == ABI_V4
	  || !offset_below_red_zone_p (info->vrsave_save_offset)))
    {
      rtx addr, mem, reg;

      addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
			   GEN_INT (info->vrsave_save_offset + sp_offset));
      mem = gen_frame_mem (SImode, addr);
      reg = gen_rtx_REG (SImode, 12);
      emit_move_insn (reg, mem);

      emit_insn (generate_set_vrsave (reg, info, 1));
    }
  /* Get the old lr if we saved it.  If we are restoring registers
     out-of-line, then the out-of-line routines can do this for us.  */
  if (restore_lr)
    {
      rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx,
				      info->lr_save_offset + sp_offset);

      emit_move_insn (gen_rtx_REG (Pmode, 0), mem);
    }

  /* Get the old cr if we saved it.  */
  if (info->cr_save_p)
    {
      rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
			       GEN_INT (info->cr_save_offset + sp_offset));
      rtx mem = gen_frame_mem (SImode, addr);

      emit_move_insn (gen_rtx_REG (SImode, 12), mem);
    }

  /* Set LR here to try to overlap restores below.  LR is always saved
     above incoming stack, so it never needs REG_CFA_RESTORE.  */
  if (restore_lr)
    emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO),
		    gen_rtx_REG (Pmode, 0));

  /* Load exception handler data registers, if needed.  */
  if (crtl->calls_eh_return)
    {
      unsigned int i, regno;

      if (TARGET_AIX)
	{
	  rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
				   GEN_INT (sp_offset + 5 * reg_size));
	  rtx mem = gen_frame_mem (reg_mode, addr);

	  emit_move_insn (gen_rtx_REG (reg_mode, 2), mem);
	}

      for (i = 0; ; ++i)
	{
	  rtx mem;

	  regno = EH_RETURN_DATA_REGNO (i);
	  if (regno == INVALID_REGNUM)
	    break;

	  mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
				      info->ehrd_offset + sp_offset
				      + reg_size * (int) i);

	  emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
	}
    }
  /* Restore GPRs.  This is done as a PARALLEL if we are using
     the load-multiple instructions.  */
  if (TARGET_SPE_ABI
      && info->spe_64bit_regs_used != 0
      && info->first_gp_reg_save != 32)
    {
      /* Determine whether we can address all of the registers that need
	 to be saved with an offset from the stack pointer that fits in
	 the small const field for SPE memory instructions.  */
      int spe_regs_addressable_via_sp
	= (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + sp_offset
				+ (32 - info->first_gp_reg_save - 1) * reg_size)
	   && restoring_GPRs_inline);
      int spe_offset;

      if (spe_regs_addressable_via_sp)
	spe_offset = info->spe_gp_save_offset + sp_offset;
      else
	{
	  rtx old_frame_reg_rtx = frame_reg_rtx;
	  /* Make r11 point to the start of the SPE save area.  We worried about
	     not clobbering it when we were saving registers in the prologue.
	     There's no need to worry here because the static chain is passed
	     anew to every function.  */
	  int ool_adjust = (restoring_GPRs_inline
			    ? 0
			    : (info->first_gp_reg_save
			       - (FIRST_SAVRES_REGISTER+1))*8);

	  if (frame_reg_rtx == sp_reg_rtx)
	    frame_reg_rtx = gen_rtx_REG (Pmode, 11);
	  emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
				 GEN_INT (info->spe_gp_save_offset
					  + sp_offset
					  - ool_adjust)));
	  /* Keep the invariant that frame_reg_rtx + sp_offset points
	     at the top of the stack frame.  */
	  sp_offset = -info->spe_gp_save_offset;

	  spe_offset = 0;
	}
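      /* Illustrative constraint (assuming SPE_CONST_OFFSET_OK accepts
	 only 8-byte-aligned displacements up to 248): the evldd
	 immediate field is small, so when the save area is farther
	 away r11 is pointed at it first and spe_offset is folded to 0
	 above.  */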
      if (restoring_GPRs_inline)
	{
	  for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	    if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
	      {
		rtx offset, addr, mem, reg;

		/* We're doing all this to ensure that the immediate offset
		   fits into the immediate field of 'evldd'.  */
		gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));

		offset = GEN_INT (spe_offset + reg_size * i);
		addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
		mem = gen_rtx_MEM (V2SImode, addr);
		reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);

		insn = emit_move_insn (reg, mem);
		if (DEFAULT_ABI == ABI_V4)
		  {
		    if (frame_pointer_needed
			&& info->first_gp_reg_save + i
			   == HARD_FRAME_POINTER_REGNUM)
		      {
			add_reg_note (insn, REG_CFA_DEF_CFA,
				      plus_constant (frame_reg_rtx,
						     sp_offset));
			RTX_FRAME_RELATED_P (insn) = 1;
		      }

		    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
						   cfa_restores);
		  }
	      }
	}
      else
	{
	  rtx par;

	  par = rs6000_make_savres_rtx (info, gen_rtx_REG (Pmode, 11),
					0, reg_mode,
					/*savep=*/false, /*gpr=*/true,
					/*exitp=*/true);
	  emit_jump_insn (par);
	  /* We don't want anybody else emitting things after we jumped
	     back.  */
	  return;
	}
    }
  else if (!restoring_GPRs_inline)
    {
      /* We are jumping to an out-of-line function.  */
      bool can_use_exit = info->first_fp_reg_save == 64;
      rtx par;

      /* Emit stack reset code if we need it.  */
      if (can_use_exit)
	rs6000_emit_stack_reset (info, sp_reg_rtx, frame_reg_rtx,
				 sp_offset, can_use_exit);
      else
	emit_insn (gen_addsi3 (gen_rtx_REG (Pmode, 11),
			       sp_reg_rtx,
			       GEN_INT (sp_offset - info->fp_size)));

      par = rs6000_make_savres_rtx (info, frame_reg_rtx,
				    info->gp_save_offset, reg_mode,
				    /*savep=*/false, /*gpr=*/true,
				    /*exitp=*/can_use_exit);

      if (can_use_exit)
	{
	  if (info->cr_save_p)
	    {
	      rs6000_restore_saved_cr (gen_rtx_REG (SImode, 12),
				       using_mtcr_multiple);
	      if (DEFAULT_ABI == ABI_V4)
		cfa_restores
		  = alloc_reg_note (REG_CFA_RESTORE,
				    gen_rtx_REG (SImode, CR2_REGNO),
				    cfa_restores);
	    }

	  emit_jump_insn (par);

	  /* We don't want anybody else emitting things after we jumped
	     back.  */
	  return;
	}
      else
	{
	  insn = emit_insn (par);
	  if (DEFAULT_ABI == ABI_V4)
	    {
	      if (frame_pointer_needed)
		{
		  add_reg_note (insn, REG_CFA_DEF_CFA,
				plus_constant (frame_reg_rtx, sp_offset));
		  RTX_FRAME_RELATED_P (insn) = 1;
		}

	      for (i = info->first_gp_reg_save; i < 32; i++)
		cfa_restores
		  = alloc_reg_note (REG_CFA_RESTORE,
				    gen_rtx_REG (reg_mode, i), cfa_restores);
	    }
	}
    }
  else if (using_load_multiple)
    {
      rtvec p;
      p = rtvec_alloc (32 - info->first_gp_reg_save);
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	{
	  rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
				   GEN_INT (info->gp_save_offset
					    + sp_offset
					    + reg_size * i));
	  rtx mem = gen_frame_mem (reg_mode, addr);
	  rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);

	  RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, reg, mem);
	  if (DEFAULT_ABI == ABI_V4)
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
					   cfa_restores);
	}
      insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
      if (DEFAULT_ABI == ABI_V4 && frame_pointer_needed)
	{
	  add_reg_note (insn, REG_CFA_DEF_CFA,
			plus_constant (frame_reg_rtx, sp_offset));
	  RTX_FRAME_RELATED_P (insn) = 1;
	}
    }
  else
    {
      for (i = 0; i < 32 - info->first_gp_reg_save; i++)
	if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
	  {
	    rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
				     GEN_INT (info->gp_save_offset
					      + sp_offset
					      + reg_size * i));
	    rtx mem = gen_frame_mem (reg_mode, addr);
	    rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);

	    insn = emit_move_insn (reg, mem);
	    if (DEFAULT_ABI == ABI_V4)
	      {
		if (frame_pointer_needed
		    && info->first_gp_reg_save + i
		       == HARD_FRAME_POINTER_REGNUM)
		  {
		    add_reg_note (insn, REG_CFA_DEF_CFA,
				  plus_constant (frame_reg_rtx, sp_offset));
		    RTX_FRAME_RELATED_P (insn) = 1;
		  }

		cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
					       cfa_restores);
	      }
	  }
    }
  /* Restore fpr's if we need to do it without calling a function.  */
  if (restoring_FPRs_inline)
    for (i = 0; i < 64 - info->first_fp_reg_save; i++)
      if ((df_regs_ever_live_p (info->first_fp_reg_save+i)
	   && ! call_used_regs[info->first_fp_reg_save+i]))
	{
	  rtx addr, mem, reg;
	  addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
			       GEN_INT (info->fp_save_offset
					+ sp_offset
					+ 8 * i));
	  mem = gen_frame_mem (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
				? DFmode : SFmode), addr);
	  reg = gen_rtx_REG (((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
			      ? DFmode : SFmode),
			     info->first_fp_reg_save + i);

	  emit_move_insn (reg, mem);
	  if (DEFAULT_ABI == ABI_V4)
	    cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
					   cfa_restores);
	}

  /* If we saved cr, restore it here.  Just those that were used.  */
  if (info->cr_save_p)
    {
      rs6000_restore_saved_cr (gen_rtx_REG (SImode, 12), using_mtcr_multiple);
      if (DEFAULT_ABI == ABI_V4)
	cfa_restores
	  = alloc_reg_note (REG_CFA_RESTORE, gen_rtx_REG (SImode, CR2_REGNO),
			    cfa_restores);
    }

  /* If this is V.4, unwind the stack pointer after all of the loads
     have been done.  */
  insn = rs6000_emit_stack_reset (info, sp_reg_rtx, frame_reg_rtx,
				  sp_offset, !restoring_FPRs_inline);
  if (insn)
    {
      if (cfa_restores)
	{
	  REG_NOTES (insn) = cfa_restores;
	  cfa_restores = NULL_RTX;
	}
      add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
      RTX_FRAME_RELATED_P (insn) = 1;
    }
  if (crtl->calls_eh_return)
    {
      rtx sa = EH_RETURN_STACKADJ_RTX;
      emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
    }

  if (!sibcall)
    {
      rtvec p;
      if (! restoring_FPRs_inline)
	p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
      else
	p = rtvec_alloc (2);

      RTVEC_ELT (p, 0) = gen_rtx_RETURN (VOIDmode);
      RTVEC_ELT (p, 1) = (restoring_FPRs_inline
			  ? gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 65))
			  : gen_rtx_CLOBBER (VOIDmode,
					     gen_rtx_REG (Pmode, 65)));

      /* If we have to restore more than two FP registers, branch to the
	 restore function.  It will return to our caller.  */
      if (! restoring_FPRs_inline)
	{
	  rtx sym;

	  sym = rs6000_savres_routine_sym (info,
					   /*savep=*/false,
					   /*gpr=*/false,
					   /*exitp=*/true);
	  RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
	  RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode,
					  gen_rtx_REG (Pmode, 11));
	  for (i = 0; i < 64 - info->first_fp_reg_save; i++)
	    {
	      rtx addr, mem;
	      addr = gen_rtx_PLUS (Pmode, sp_reg_rtx,
				   GEN_INT (info->fp_save_offset + 8*i));
	      mem = gen_frame_mem (DFmode, addr);

	      RTVEC_ELT (p, i+4) =
		gen_rtx_SET (VOIDmode,
			     gen_rtx_REG (DFmode, info->first_fp_reg_save + i),
			     mem);
	    }
	}

      emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
    }
}
/* Write function epilogue.  */

static void
rs6000_output_function_epilogue (FILE *file,
				 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
{
  if (! HAVE_epilogue)
    {
      rtx insn = get_last_insn ();
      /* If the last insn was a BARRIER, we don't have to write anything except
	 the trace table.  */
      if (GET_CODE (insn) == NOTE)
	insn = prev_nonnote_insn (insn);
      if (insn == 0 || GET_CODE (insn) != BARRIER)
	{
	  /* This is slightly ugly, but at least we don't have two
	     copies of the epilogue-emitting code.  */
	  start_sequence ();

	  /* A NOTE_INSN_DELETED is supposed to be at the start
	     and end of the "toplevel" insn chain.  */
	  emit_note (NOTE_INSN_DELETED);
	  rs6000_emit_epilogue (FALSE);
	  emit_note (NOTE_INSN_DELETED);

	  /* Expand INSN_ADDRESSES so final() doesn't crash.  */
	  {
	    rtx insn;
	    unsigned addr = 0;
	    for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
	      {
		INSN_ADDRESSES_NEW (insn, addr);
		addr += 4;
	      }
	  }

	  if (TARGET_DEBUG_STACK)
	    debug_rtx_list (get_insns (), 100);
	  final (get_insns (), file, FALSE);
	  end_sequence ();
	}
    }

#if TARGET_MACHO
  macho_branch_islands ();
  /* Mach-O doesn't support labels at the end of objects, so if
     it looks like we might want one, insert a NOP.  */
  {
    rtx insn = get_last_insn ();
    while (insn
	   && NOTE_P (insn)
	   && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
      insn = PREV_INSN (insn);
    if (insn
	&& (LABEL_P (insn)
	    || (NOTE_P (insn)
		&& NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
      fputs ("\tnop\n", file);
  }
#endif

  /* Output a traceback table here.  See /usr/include/sys/debug.h for info
     on its format.

     We don't output a traceback table if -finhibit-size-directive was
     used.  The documentation for -finhibit-size-directive reads
     ``don't output a @code{.size} assembler directive, or anything
     else that would cause trouble if the function is split in the
     middle, and the two halves are placed at locations far apart in
     memory.''  The traceback table has this property, since it
     includes the offset from the start of the function to the
     traceback table itself.

     System V.4 PowerPC's (and the embedded ABI derived from it) use a
     different traceback table.  */
  if (DEFAULT_ABI == ABI_AIX && ! flag_inhibit_size_directive
      && rs6000_traceback != traceback_none && !cfun->is_thunk)
    {
      const char *fname = NULL;
      const char *language_string = lang_hooks.name;
      int fixed_parms = 0, float_parms = 0, parm_info = 0;
      int i;
      int optional_tbtab;
      rs6000_stack_t *info = rs6000_stack_info ();

      if (rs6000_traceback == traceback_full)
	optional_tbtab = 1;
      else if (rs6000_traceback == traceback_part)
	optional_tbtab = 0;
      else
	optional_tbtab = !optimize_size && !TARGET_ELF;

      if (optional_tbtab)
	{
	  fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
	  while (*fname == '.')	/* V.4 encodes . in the name */
	    fname++;

	  /* Need label immediately before tbtab, so we can compute
	     its offset from the function start.  */
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
	  ASM_OUTPUT_LABEL (file, fname);
	}
      /* The .tbtab pseudo-op can only be used for the first eight
	 expressions, since it can't handle the possibly variable
	 length fields that follow.  However, if you omit the optional
	 fields, the assembler outputs zeros for all optional fields
	 anyway, giving each variable length field its minimum length
	 (as defined in sys/debug.h).  Thus we cannot use the .tbtab
	 pseudo-op at all.  */

      /* An all-zero word flags the start of the tbtab, for debuggers
	 that have to find it by searching forward from the entry
	 point or from the current pc.  */
      fputs ("\t.long 0\n", file);

      /* Tbtab format type.  Use format type 0.  */
      fputs ("\t.byte 0,", file);

      /* Language type.  Unfortunately, there does not seem to be any
	 official way to discover the language being compiled, so we
	 use language_string.
	 C is 0.  Fortran is 1.  Pascal is 2.  Ada is 3.  C++ is 9.
	 Java is 13.  Objective-C is 14.  Objective-C++ isn't assigned
	 a number, so for now use 9.  */
      if (! strcmp (language_string, "GNU C"))
	i = 0;
      else if (! strcmp (language_string, "GNU F77")
	       || ! strcmp (language_string, "GNU Fortran"))
	i = 1;
      else if (! strcmp (language_string, "GNU Pascal"))
	i = 2;
      else if (! strcmp (language_string, "GNU Ada"))
	i = 3;
      else if (! strcmp (language_string, "GNU C++")
	       || ! strcmp (language_string, "GNU Objective-C++"))
	i = 9;
      else if (! strcmp (language_string, "GNU Java"))
	i = 13;
      else if (! strcmp (language_string, "GNU Objective-C"))
	i = 14;
      else
	gcc_unreachable ();
      fprintf (file, "%d,", i);

      /* 8 single bit fields: global linkage (not set for C extern linkage,
	 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
	 from start of procedure stored in tbtab, internal function, function
	 has controlled storage, function has no toc, function uses fp,
	 function logs/aborts fp operations.  */
      /* Assume that fp operations are used if any fp reg must be saved.  */
      fprintf (file, "%d,",
	       (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));

      /* 6 bitfields: function is interrupt handler, name present in
	 proc table, function calls alloca, on condition directives
	 (controls stack walks, 3 bits), saves condition reg, saves
	 link reg.  */
      /* The `function calls alloca' bit seems to be set whenever reg 31 is
	 set up as a frame pointer, even when there is no alloca call.  */
      fprintf (file, "%d,",
	       ((optional_tbtab << 6)
		| ((optional_tbtab & frame_pointer_needed) << 5)
		| (info->cr_save_p << 1)
		| (info->lr_save_p)));

      /* 3 bitfields: saves backchain, fixup code, number of fpr saved
	 (6 bits).  */
      fprintf (file, "%d,",
	       (info->push_p << 7) | (64 - info->first_fp_reg_save));

      /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits).  */
      fprintf (file, "%d,", (32 - first_reg_to_save ()));
      if (optional_tbtab)
	{
	  /* Compute the parameter info from the function decl argument
	     list.  */
	  tree decl;
	  int next_parm_info_bit = 31;

	  for (decl = DECL_ARGUMENTS (current_function_decl);
	       decl; decl = TREE_CHAIN (decl))
	    {
	      rtx parameter = DECL_INCOMING_RTL (decl);
	      enum machine_mode mode = GET_MODE (parameter);

	      if (GET_CODE (parameter) == REG)
		{
		  if (SCALAR_FLOAT_MODE_P (mode))
		    {
		      int bits;

		      float_parms++;

		      switch (mode)
			{
			case SFmode:
			case SDmode:
			  bits = 0x2;
			  break;

			case DFmode:
			case DDmode:
			case TFmode:
			case TDmode:
			  bits = 0x3;
			  break;

			default:
			  gcc_unreachable ();
			}

		      /* If only one bit will fit, don't or in this entry.  */
		      if (next_parm_info_bit > 0)
			parm_info |= (bits << (next_parm_info_bit - 1));
		      next_parm_info_bit -= 2;
		    }
		  else
		    {
		      fixed_parms += ((GET_MODE_SIZE (mode)
				       + (UNITS_PER_WORD - 1))
				      / UNITS_PER_WORD);
		      next_parm_info_bit -= 1;
		    }
		}
	    }
	}
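      /* Worked example of the encoding above: a double argument
	 followed by an int argument stores bits 11 (double float) at
	 positions 31..30 and then skips one bit for the fixed word,
	 leaving parm_info == 0xc0000000 with fixed_parms == 1 and
	 float_parms == 1.  */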
      /* Number of fixed point parameters.  */
      /* This is actually the number of words of fixed point parameters; thus
	 an 8 byte struct counts as 2; and thus the maximum value is 8.  */
      fprintf (file, "%d,", fixed_parms);

      /* 2 bitfields: number of floating point parameters (7 bits), parameters
	 on stack (1 bit).  */
      /* This is actually the number of fp registers that hold parameters;
	 and thus the maximum value is 13.  */
      /* Set parameters on stack bit if parameters are not in their original
	 registers, regardless of whether they are on the stack?  Xlc
	 seems to set the bit when not optimizing.  */
      fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));

      if (! optional_tbtab)
	return;

      /* Optional fields follow.  Some are variable length.  */

      /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
	 11 double float.  */
      /* There is an entry for each parameter in a register, in the order that
	 they occur in the parameter list.  Any intervening arguments on the
	 stack are ignored.  If the list overflows a long (max possible length
	 34 bits) then completely leave off all elements that don't fit.  */
      /* Only emit this long if there was at least one parameter.  */
      if (fixed_parms || float_parms)
	fprintf (file, "\t.long %d\n", parm_info);

      /* Offset from start of code to tb table.  */
      fputs ("\t.long ", file);
      ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
      if (TARGET_AIX)
	RS6000_OUTPUT_BASENAME (file, fname);
      else
	assemble_name (file, fname);
      putc ('-', file);
      rs6000_output_function_entry (file, fname);
      putc ('\n', file);

      /* Interrupt handler mask.  */
      /* Omit this long, since we never set the interrupt handler bit
	 above.  */

      /* Number of CTL (controlled storage) anchors.  */
      /* Omit this long, since the has_ctl bit is never set above.  */

      /* Displacement into stack of each CTL anchor.  */
      /* Omit this list of longs, because there are no CTL anchors.  */

      /* Length of function name.  */
      if (*fname == '*')
	++fname;
      fprintf (file, "\t.short %d\n", (int) strlen (fname));

      /* Function name.  */
      assemble_string (fname, strlen (fname));

      /* Register for alloca automatic storage; this is always reg 31.
	 Only emit this if the alloca bit was set above.  */
      if (frame_pointer_needed)
	fputs ("\t.byte 31\n", file);

      fputs ("\t.align 2\n", file);
    }
}
/* A C compound statement that outputs the assembler code for a thunk
   function, used to implement C++ virtual function calls with
   multiple inheritance.  The thunk acts as a wrapper around a virtual
   function, adjusting the implicit object parameter before handing
   control off to the real function.

   First, emit code to add the integer DELTA to the location that
   contains the incoming first argument.  Assume that this argument
   contains a pointer, and is the one used to pass the `this' pointer
   in C++.  This is the incoming argument *before* the function
   prologue, e.g. `%o0' on a sparc.  The addition must preserve the
   values of all other incoming arguments.

   After the addition, emit code to jump to FUNCTION, which is a
   `FUNCTION_DECL'.  This is a direct pure jump, not a call, and does
   not touch the return address.  Hence returning from FUNCTION will
   return to whoever called the current `thunk'.

   The effect must be as if FUNCTION had been called directly with the
   adjusted first argument.  This macro is responsible for emitting
   all of the code for a thunk function; output_function_prologue()
   and output_function_epilogue() are not invoked.

   The THUNK_FNDECL is redundant.  (DELTA and FUNCTION have already
   been extracted from it.)  It might possibly be useful on some
   targets, but probably not.

   If you do not define this macro, the target-independent code in the
   C++ frontend will generate a less efficient heavyweight thunk that
   calls FUNCTION instead of jumping to it.  The generic approach does
   not support varargs.  */

static void
rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
			HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
			tree function)
{
  rtx this_rtx, insn, funexp;

  reload_completed = 1;
  epilogue_completed = 1;

  /* Mark the end of the (empty) prologue.  */
  emit_note (NOTE_INSN_PROLOGUE_END);

  /* Find the "this" pointer.  If the function returns a structure,
     the structure return pointer is in r3.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    this_rtx = gen_rtx_REG (Pmode, 4);
  else
    this_rtx = gen_rtx_REG (Pmode, 3);

  /* Apply the constant offset, if required.  */
  if (delta)
    {
      rtx delta_rtx = GEN_INT (delta);
      emit_insn (TARGET_32BIT
		 ? gen_addsi3 (this_rtx, this_rtx, delta_rtx)
		 : gen_adddi3 (this_rtx, this_rtx, delta_rtx));
    }

  /* Apply the offset from the vtable, if required.  */
  if (vcall_offset)
    {
      rtx vcall_offset_rtx = GEN_INT (vcall_offset);
      rtx tmp = gen_rtx_REG (Pmode, 12);

      emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
      if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
	{
	  emit_insn (TARGET_32BIT
		     ? gen_addsi3 (tmp, tmp, vcall_offset_rtx)
		     : gen_adddi3 (tmp, tmp, vcall_offset_rtx));
	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
	}
      else
	{
	  rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);

	  emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
	}
      emit_insn (TARGET_32BIT
		 ? gen_addsi3 (this_rtx, this_rtx, tmp)
		 : gen_adddi3 (this_rtx, this_rtx, tmp));
    }

  /* Generate a tail call to the target function.  */
  if (!TREE_USED (function))
    {
      assemble_external (function);
      TREE_USED (function) = 1;
    }
  funexp = XEXP (DECL_RTL (function), 0);
  funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);

#if TARGET_MACHO
  if (MACHOPIC_INDIRECT)
    funexp = machopic_indirect_call_target (funexp);
#endif

  /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
     generate sibcall RTL explicitly.  */
  insn = emit_call_insn (
	   gen_rtx_PARALLEL (VOIDmode,
	     gen_rtvec (4,
			gen_rtx_CALL (VOIDmode,
				      funexp, const0_rtx),
			gen_rtx_USE (VOIDmode, const0_rtx),
			gen_rtx_USE (VOIDmode,
				     gen_rtx_REG (SImode,
						  LR_REGNO)),
			gen_rtx_RETURN (VOIDmode))));
  SIBLING_CALL_P (insn) = 1;
  emit_barrier ();

  /* Run just enough of rest_of_compilation to get the insns emitted.
     There's not really enough bulk here to make other passes such as
     instruction scheduling worth while.  Note that use_thunk calls
     assemble_start_function and assemble_end_function.  */
  insn = get_insns ();
  insn_locators_alloc ();
  shorten_branches (insn);
  final_start_function (insn, file, 1);
  final (insn, file, 1);
  final_end_function ();
  free_after_compilation (cfun);

  reload_completed = 0;
  epilogue_completed = 0;
}
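/* Illustrative output for a simple non-virtual thunk (delta == 8, no
   vcall offset) on a 32-bit target:

       addi r3,r3,8
       b    target_function

   The exact instructions come from the addsi3 and sibcall patterns;
   this is a sketch of the expected shape, not a guaranteed listing.  */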
/* A quick summary of the various types of 'constant-pool tables'
   used by the code above:

Target	     Flags	      Name	       One table per
AIX	     (none)	      AIX TOC	       object file
AIX	     -mfull-toc	      AIX TOC	       object file
AIX	     -mminimal-toc    AIX minimal TOC  translation unit
SVR4/EABI    (none)	      SVR4 SDATA       object file
SVR4/EABI    -fpic	      SVR4 pic	       object file
SVR4/EABI    -fPIC	      SVR4 PIC	       translation unit
SVR4/EABI    -mrelocatable    EABI TOC	       function
SVR4/EABI    -maix	      AIX TOC	       object file
SVR4/EABI    -maix -mminimal-toc
			      AIX minimal TOC  translation unit

Name		     Reg.   Set by   entries   contains:
			    made by  addrs?    fp?	sum?

AIX TOC		     2	    crt0     as	       Y	option	option
AIX minimal TOC	     30	    prolog   gcc       Y	Y	option
SVR4 SDATA	     13	    crt0     gcc       N	Y	N
SVR4 pic	     30	    prolog   ld	       Y	not yet	N
SVR4 PIC	     30	    prolog   gcc       Y	option	option
EABI TOC	     30	    prolog   gcc       Y	option	option

*/
/* Hash functions for the hash table.  */

static unsigned
rs6000_hash_constant (rtx k)
{
  enum rtx_code code = GET_CODE (k);
  enum machine_mode mode = GET_MODE (k);
  unsigned result = (code << 3) ^ mode;
  const char *format;
  int flen, fidx;

  format = GET_RTX_FORMAT (code);
  flen = strlen (format);
  fidx = 0;

  switch (code)
    {
    case LABEL_REF:
      return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));

    case CONST_DOUBLE:
      if (mode != VOIDmode)
	return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
      flen = 2;
      break;

    case CODE_LABEL:
      fidx = 3;
      break;

    default:
      break;
    }

  for (; fidx < flen; fidx++)
    switch (format[fidx])
      {
      case 's':
	{
	  unsigned i, len;
	  const char *str = XSTR (k, fidx);
	  len = strlen (str);
	  result = result * 613 + len;
	  for (i = 0; i < len; i++)
	    result = result * 613 + (unsigned) str[i];
	  break;
	}
      case 'u':
      case 'e':
	result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
	break;
      case 'i':
      case 'n':
	result = result * 613 + (unsigned) XINT (k, fidx);
	break;
      case 'w':
	if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
	  result = result * 613 + (unsigned) XWINT (k, fidx);
	else
	  {
	    size_t i;
	    for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
	      result = result * 613 + (unsigned) (XWINT (k, fidx)
						  >> CHAR_BIT * i);
	  }
	break;
      case '0':
	break;
      default:
	gcc_unreachable ();
      }

  return result;
}
static hashval_t
toc_hash_function (const void *hash_entry)
{
  const struct toc_hash_struct *thc =
    (const struct toc_hash_struct *) hash_entry;
  return rs6000_hash_constant (thc->key) ^ thc->key_mode;
}

/* Compare H1 and H2 for equivalence.  */

static int
toc_hash_eq (const void *h1, const void *h2)
{
  rtx r1 = ((const struct toc_hash_struct *) h1)->key;
  rtx r2 = ((const struct toc_hash_struct *) h2)->key;

  if (((const struct toc_hash_struct *) h1)->key_mode
      != ((const struct toc_hash_struct *) h2)->key_mode)
    return 0;

  return rtx_equal_p (r1, r2);
}
/* These are the names given by the C++ front-end to vtables, and
   vtable-like objects.  Ideally, this logic should not be here;
   instead, there should be some programmatic way of inquiring as
   to whether or not an object is a vtable.  */

#define VTABLE_NAME_P(NAME)				\
  (strncmp ("_vt.", name, strlen ("_vt.")) == 0		\
  || strncmp ("_ZTV", name, strlen ("_ZTV")) == 0	\
  || strncmp ("_ZTT", name, strlen ("_ZTT")) == 0	\
  || strncmp ("_ZTI", name, strlen ("_ZTI")) == 0	\
  || strncmp ("_ZTC", name, strlen ("_ZTC")) == 0)

#ifdef NO_DOLLAR_IN_LABEL
/* Return a GGC-allocated character string translating dollar signs in
   input NAME to underscores.  Used by XCOFF ASM_OUTPUT_LABELREF.  */

const char *
rs6000_xcoff_strip_dollar (const char *name)
{
  char *strip, *p;
  int len;

  p = strchr (name, '$');

  if (p == 0 || p == name)
    return name;

  len = strlen (name);
  strip = (char *) alloca (len + 1);
  strcpy (strip, name);
  p = strchr (strip, '$');
  while (p)
    {
      *p = '_';
      p = strchr (p + 1, '$');
    }

  return ggc_alloc_string (strip, len);
}
#endif
void
rs6000_output_symbol_ref (FILE *file, rtx x)
{
  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  const char *name = XSTR (x, 0);

  if (VTABLE_NAME_P (name))
    {
      RS6000_OUTPUT_BASENAME (file, name);
    }
  else
    assemble_name (file, name);
}
/* Output a TOC entry.  We derive the entry name from what is being
   written.  */

void
output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
{
  char buf[256];
  const char *name = buf;
  rtx base = x;
  HOST_WIDE_INT offset = 0;

  gcc_assert (!TARGET_NO_TOC);

  /* When the linker won't eliminate them, don't output duplicate
     TOC entries (this happens on AIX if there is any kind of TOC,
     and on SVR4 under -fPIC or -mrelocatable).  Don't do this for
     CODE_LABELs.  */
  if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
    {
      struct toc_hash_struct *h;
      void * * found;

      /* Create toc_hash_table.  This can't be done at OVERRIDE_OPTIONS
	 time because GGC is not initialized at that point.  */
      if (toc_hash_table == NULL)
	toc_hash_table = htab_create_ggc (1021, toc_hash_function,
					  toc_hash_eq, NULL);

      h = GGC_NEW (struct toc_hash_struct);
      h->key = x;
      h->key_mode = mode;
      h->labelno = labelno;

      found = htab_find_slot (toc_hash_table, h, INSERT);
      if (*found == NULL)
	*found = h;
      else  /* This is indeed a duplicate.
	       Set this label equal to that label.  */
	{
	  fputs ("\t.set ", file);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d,", labelno);
	  ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
	  fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
				   found)->labelno));
	  return;
	}
    }

  /* If we're going to put a double constant in the TOC, make sure it's
     aligned properly when strict alignment is on.  */
  if (GET_CODE (x) == CONST_DOUBLE
      && STRICT_ALIGNMENT
      && GET_MODE_BITSIZE (mode) >= 64
      && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC)) {
    ASM_OUTPUT_ALIGN (file, 3);
  }

  (*targetm.asm_out.internal_label) (file, "LC", labelno);

  /* Handle FP constants specially.  Note that if we have a minimal
     TOC, things we put here aren't actually in the TOC, so we can allow
     FP constants.  */
  if (GET_CODE (x) == CONST_DOUBLE &&
      (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
    {
      REAL_VALUE_TYPE rv;
      long k[4];

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
      else
	REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff,
		     k[2] & 0xffffffff, k[3] & 0xffffffff);
	  fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff,
		   k[2] & 0xffffffff, k[3] & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff,
		     k[2] & 0xffffffff, k[3] & 0xffffffff);
	  fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff,
		   k[2] & 0xffffffff, k[3] & 0xffffffff);
	  return;
	}
    }
  else if (GET_CODE (x) == CONST_DOUBLE &&
	   (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
    {
      REAL_VALUE_TYPE rv;
      long k[2];

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);

      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
      else
	REAL_VALUE_TO_TARGET_DOUBLE (rv, k);

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FD_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff);
	  fprintf (file, "0x%lx%08lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FD_%lx_%lx[TC],",
		     k[0] & 0xffffffff, k[1] & 0xffffffff);
	  fprintf (file, "0x%lx,0x%lx\n",
		   k[0] & 0xffffffff, k[1] & 0xffffffff);
	  return;
	}
    }
  else if (GET_CODE (x) == CONST_DOUBLE &&
	   (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
    {
      REAL_VALUE_TYPE rv;
      long l;

      REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
      if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
	REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
      else
	REAL_VALUE_TO_TARGET_SINGLE (rv, l);

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
	  fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
	  return;
	}
      else
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs ("\t.long ", file);
	  else
	    fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
	  fprintf (file, "0x%lx\n", l & 0xffffffff);
	  return;
	}
    }
  else if (GET_MODE (x) == VOIDmode
	   && (GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST_DOUBLE))
    {
      unsigned HOST_WIDE_INT low;
      HOST_WIDE_INT high;

      if (GET_CODE (x) == CONST_DOUBLE)
	{
	  low = CONST_DOUBLE_LOW (x);
	  high = CONST_DOUBLE_HIGH (x);
	}
      else
#if HOST_BITS_PER_WIDE_INT == 32
	{
	  low = INTVAL (x);
	  high = (low & 0x80000000) ? ~0 : 0;
	}
#else
	{
	  low = INTVAL (x) & 0xffffffff;
	  high = (HOST_WIDE_INT) INTVAL (x) >> 32;
	}
#endif

      /* TOC entries are always Pmode-sized, but since this
	 is a bigendian machine then if we're putting smaller
	 integer constants in the TOC we have to pad them.
	 (This is still a win over putting the constants in
	 a separate constant pool, because then we'd have
	 to have both a TOC entry _and_ the actual constant.)

	 For a 32-bit target, CONST_INT values are loaded and shifted
	 entirely within `low' and can be stored in one TOC entry.  */

      /* It would be easy to make this work, but it doesn't now.  */
      gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));

      if (POINTER_SIZE > GET_MODE_BITSIZE (mode))
	{
#if HOST_BITS_PER_WIDE_INT == 32
	  lshift_double (low, high, POINTER_SIZE - GET_MODE_BITSIZE (mode),
			 POINTER_SIZE, &low, &high, 0);
#else
	  low |= high << 32;
	  low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
	  high = (HOST_WIDE_INT) low >> 32;
	  low &= 0xffffffff;
#endif
	}

      if (TARGET_64BIT)
	{
	  if (TARGET_MINIMAL_TOC)
	    fputs (DOUBLE_INT_ASM_OP, file);
	  else
	    fprintf (file, "\t.tc ID_%lx_%lx[TC],",
		     (long) high & 0xffffffff, (long) low & 0xffffffff);
	  fprintf (file, "0x%lx%08lx\n",
		   (long) high & 0xffffffff, (long) low & 0xffffffff);
	  return;
	}
      else
	{
	  if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
	    {
	      if (TARGET_MINIMAL_TOC)
		fputs ("\t.long ", file);
	      else
		fprintf (file, "\t.tc ID_%lx_%lx[TC],",
			 (long) high & 0xffffffff, (long) low & 0xffffffff);
	      fprintf (file, "0x%lx,0x%lx\n",
		       (long) high & 0xffffffff, (long) low & 0xffffffff);
	    }
	  else
	    {
	      if (TARGET_MINIMAL_TOC)
		fputs ("\t.long ", file);
	      else
		fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
	      fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
	    }
	  return;
	}
    }

  if (GET_CODE (x) == CONST)
    {
      gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS);

      base = XEXP (XEXP (x, 0), 0);
      offset = INTVAL (XEXP (XEXP (x, 0), 1));
    }

  switch (GET_CODE (base))
    {
    case SYMBOL_REF:
      name = XSTR (base, 0);
      break;

    case LABEL_REF:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L",
				   CODE_LABEL_NUMBER (XEXP (base, 0)));
      break;

    case CODE_LABEL:
      ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
      break;

    default:
      gcc_unreachable ();
    }

  if (TARGET_MINIMAL_TOC)
    fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
  else
    {
      fputs ("\t.tc ", file);
      RS6000_OUTPUT_BASENAME (file, name);

      if (offset < 0)
	fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
      else if (offset)
	fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);

      fputs ("[TC],", file);
    }

  /* Currently C++ toc references to vtables can be emitted before it
     is decided whether the vtable is public or private.  If this is
     the case, then the linker will eventually complain that there is
     a TOC reference to an unknown section.  Thus, for vtables only,
     we emit the TOC reference to reference the symbol and not the
     section.  */
  if (VTABLE_NAME_P (name))
    {
      RS6000_OUTPUT_BASENAME (file, name);
      if (offset < 0)
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
      else if (offset > 0)
	fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
    }
  else
    output_addr_const (file, x);
  putc ('\n', file);
}
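/* Worked example: the IEEE double 1.0 is 0x3ff00000_00000000, so on a
   32-bit target without -mminimal-toc the DFmode arm above emits
   (illustratively)

       .tc FD_3ff00000_0[TC],0x3ff00000,0x0

   i.e. one TOC entry whose name encodes the constant's bit pattern.  */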
/* Output an assembler pseudo-op to write an ASCII string of N characters
   starting at P to FILE.

   On the RS/6000, we have to do this using the .byte operation and
   write out special characters outside the quoted string.
   Also, the assembler is broken; very long strings are truncated,
   so we must artificially break them up early.  */

void
output_ascii (FILE *file, const char *p, int n)
{
  char c;
  int i, count_string;
  const char *for_string = "\t.byte \"";
  const char *for_decimal = "\t.byte ";
  const char *to_close = NULL;

  count_string = 0;
  for (i = 0; i < n; i++)
    {
      c = *p++;
      if (c >= ' ' && c < 0177)
	{
	  if (for_string)
	    fputs (for_string, file);
	  putc (c, file);

	  /* Write two quotes to get one.  */
	  if (c == '"')
	    {
	      putc (c, file);
	      ++count_string;
	    }

	  for_string = NULL;
	  for_decimal = "\"\n\t.byte ";
	  to_close = "\"\n";
	  ++count_string;

	  if (count_string >= 512)
	    {
	      fputs (to_close, file);

	      for_string = "\t.byte \"";
	      for_decimal = "\t.byte ";
	      to_close = NULL;
	      count_string = 0;
	    }
	}
      else
	{
	  if (for_decimal)
	    fputs (for_decimal, file);
	  fprintf (file, "%d", c);

	  for_string = "\n\t.byte \"";
	  for_decimal = ", ";
	  to_close = "\n";
	  count_string = 0;
	}
    }

  /* Now close the string if we have written one.  Then end the line.  */
  if (to_close)
    fputs (to_close, file);
}
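/* Worked example: for the 3-character string "Hi\n" the routine emits

       .byte "Hi"
       .byte 10

   The printable run stays quoted while the newline is written as a
   decimal value on its own .byte directive.  */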
/* Generate a unique section name for FILENAME for a section type
   represented by SECTION_DESC.  Output goes into BUF.

   SECTION_DESC can be any string, as long as it is different for each
   possible section type.

   We name the section in the same manner as xlc.  The name begins with an
   underscore followed by the filename (after stripping any leading directory
   names) with the last period replaced by the string SECTION_DESC.  If
   FILENAME does not contain a period, SECTION_DESC is appended to the end of
   the name.  */

void
rs6000_gen_section_name (char **buf, const char *filename,
			 const char *section_desc)
{
  const char *q, *after_last_slash, *last_period = 0;
  char *p;
  int len;

  after_last_slash = filename;
  for (q = filename; *q; q++)
    {
      if (*q == '/')
	after_last_slash = q + 1;
      else if (*q == '.')
	last_period = q;
    }

  len = strlen (after_last_slash) + strlen (section_desc) + 2;
  *buf = (char *) xmalloc (len);

  p = *buf;
  *p++ = '_';

  for (q = after_last_slash; *q; q++)
    {
      if (q == last_period)
	{
	  strcpy (p, section_desc);
	  p += strlen (section_desc);
	  break;
	}

      else if (ISALNUM (*q))
	*p++ = *q;
    }

  if (last_period == 0)
    strcpy (p, section_desc);
  else
    *p = '\0';
}
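/* Worked example: filename "dir/foo.c" with SECTION_DESC "bss_"
   yields "_foobss_": the directory is stripped, the leading underscore
   prepended, and the final period replaced by the descriptor, matching
   the xlc convention described above.  */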
/* Emit profile function.  */

void
output_profile_hook (int labelno ATTRIBUTE_UNUSED)
{
  /* Non-standard profiling for kernels, which just saves LR then calls
     _mcount without worrying about arg saves.  The idea is to change
     the function prologue as little as possible as it isn't easy to
     account for arg save/restore code added just for _mcount.  */
  if (TARGET_PROFILE_KERNEL)
    return;

  if (DEFAULT_ABI == ABI_AIX)
    {
#ifndef NO_PROFILE_COUNTERS
# define NO_PROFILE_COUNTERS 0
#endif
      if (NO_PROFILE_COUNTERS)
	emit_library_call (init_one_libfunc (RS6000_MCOUNT),
			   LCT_NORMAL, VOIDmode, 0);
      else
	{
	  char buf[30];
	  const char *label_name;
	  rtx fun;

	  ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
	  label_name = (*targetm.strip_name_encoding) (ggc_strdup (buf));
	  fun = gen_rtx_SYMBOL_REF (Pmode, label_name);

	  emit_library_call (init_one_libfunc (RS6000_MCOUNT),
			     LCT_NORMAL, VOIDmode, 1, fun, Pmode);
	}
    }
  else if (DEFAULT_ABI == ABI_DARWIN)
    {
      const char *mcount_name = RS6000_MCOUNT;
      int caller_addr_regno = LR_REGNO;

      /* Be conservative and always set this, at least for now.  */
      crtl->uses_pic_offset_table = 1;

#if TARGET_MACHO
      /* For PIC code, set up a stub and collect the caller's address
	 from r0, which is where the prologue puts it.  */
      if (MACHOPIC_INDIRECT
	  && crtl->uses_pic_offset_table)
	caller_addr_regno = 0;
#endif
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
			 LCT_NORMAL, VOIDmode, 1,
			 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
    }
}
/* Write function profiler code.  */

void
output_function_profiler (FILE *file, int labelno)
{
  char buf[100];

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_V4:
      if (!TARGET_32BIT)
        {
          warning (0, "no profiling of 64-bit code for this ABI");
          return;
        }
      ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
      fprintf (file, "\tmflr %s\n", reg_names[0]);
      if (NO_PROFILE_COUNTERS)
        {
          asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
                       reg_names[0], reg_names[1]);
        }
      else if (TARGET_SECURE_PLT && flag_pic)
        {
          asm_fprintf (file, "\tbcl 20,31,1f\n1:\n\t{st|stw} %s,4(%s)\n",
                       reg_names[0], reg_names[1]);
          asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
          asm_fprintf (file, "\t{cau|addis} %s,%s,",
                       reg_names[12], reg_names[12]);
          assemble_name (file, buf);
          asm_fprintf (file, "-1b@ha\n\t{cal|la} %s,", reg_names[0]);
          assemble_name (file, buf);
          asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
        }
      else if (flag_pic == 1)
        {
          fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
          asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
                       reg_names[0], reg_names[1]);
          asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
          asm_fprintf (file, "\t{l|lwz} %s,", reg_names[0]);
          assemble_name (file, buf);
          asm_fprintf (file, "@got(%s)\n", reg_names[12]);
        }
      else if (flag_pic > 1)
        {
          asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
                       reg_names[0], reg_names[1]);
          /* Now, we need to get the address of the label.  */
          fputs ("\tbcl 20,31,1f\n\t.long ", file);
          assemble_name (file, buf);
          fputs ("-.\n1:", file);
          asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
          asm_fprintf (file, "\t{l|lwz} %s,0(%s)\n",
                       reg_names[0], reg_names[11]);
          asm_fprintf (file, "\t{cax|add} %s,%s,%s\n",
                       reg_names[0], reg_names[0], reg_names[11]);
        }
      else
        {
          asm_fprintf (file, "\t{liu|lis} %s,", reg_names[12]);
          assemble_name (file, buf);
          fputs ("@ha\n", file);
          asm_fprintf (file, "\t{st|stw} %s,4(%s)\n",
                       reg_names[0], reg_names[1]);
          asm_fprintf (file, "\t{cal|la} %s,", reg_names[0]);
          assemble_name (file, buf);
          asm_fprintf (file, "@l(%s)\n", reg_names[12]);
        }

      /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH.  */
      fprintf (file, "\tbl %s%s\n",
               RS6000_MCOUNT, flag_pic ? "@plt" : "");
      break;

    case ABI_AIX:
    case ABI_DARWIN:
      if (!TARGET_PROFILE_KERNEL)
        {
          /* Don't do anything, done in output_profile_hook ().  */
        }
      else
        {
          gcc_assert (!TARGET_32BIT);

          asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
          asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);

          if (cfun->static_chain_decl != NULL)
            {
              asm_fprintf (file, "\tstd %s,24(%s)\n",
                           reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
              fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
              asm_fprintf (file, "\tld %s,24(%s)\n",
                           reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
            }
          else
            fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
        }
      break;
    }
}
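
/* Illustrative sketch (not emitted verbatim; register spelling and the
   expansion of RS6000_MCOUNT vary per target): for ABI_V4, -m32,
   non-PIC, with profile counters, the code above produces a sequence
   of the form, LP<N> being the per-function counter label:

        mflr 0
        lis 12,LP<N>@ha
        stw 0,4(1)
        la 0,LP<N>@l(12)
        bl _mcount
*/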
/* The following variable value is the last issued insn.  */

static rtx last_scheduled_insn;

/* The following variable helps to balance issuing of load and
   store instructions.  */

static int load_store_pendulum;
/* Power4 load update and store update instructions are cracked into a
   load or store and an integer insn which are executed in the same cycle.
   Branches have their own dispatch slot which does not count against the
   GCC issue rate, but it changes the program flow so there are no other
   instructions to issue in this cycle.  */

static int
rs6000_variable_issue (FILE *stream ATTRIBUTE_UNUSED,
                       int verbose ATTRIBUTE_UNUSED,
                       rtx insn, int more)
{
  last_scheduled_insn = insn;
  if (GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    {
      cached_can_issue_more = more;
      return cached_can_issue_more;
    }

  if (insn_terminates_group_p (insn, current_group))
    {
      cached_can_issue_more = 0;
      return cached_can_issue_more;
    }

  /* If there is no reservation, but we reach here anyway, punt.  */
  if (recog_memoized (insn) < 0)
    return more;

  if (rs6000_sched_groups)
    {
      if (is_microcoded_insn (insn))
        cached_can_issue_more = 0;
      else if (is_cracked_insn (insn))
        cached_can_issue_more = more > 2 ? more - 2 : 0;
      else
        cached_can_issue_more = more - 1;

      return cached_can_issue_more;
    }

  if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
    return 0;

  cached_can_issue_more = more - 1;
  return cached_can_issue_more;
}
/* Adjust the cost of a scheduling dependency.  Return the new cost of
   a dependency LINK or INSN on DEP_INSN.  COST is the current cost.  */

static int
rs6000_adjust_cost (rtx insn, rtx link, rtx dep_insn, int cost)
{
  enum attr_type attr_type;

  if (! recog_memoized (insn))
    return 0;

  switch (REG_NOTE_KIND (link))
    {
    case REG_DEP_TRUE:
      {
        /* Data dependency; DEP_INSN writes a register that INSN reads
           some cycles later.  */

        /* Separate a load from a narrower, dependent store.  */
        if (rs6000_sched_groups
            && GET_CODE (PATTERN (insn)) == SET
            && GET_CODE (PATTERN (dep_insn)) == SET
            && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
            && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
            && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
                > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
          return cost + 14;

        attr_type = get_attr_type (insn);

        switch (attr_type)
          {
          case TYPE_JMPREG:
            /* Tell the first scheduling pass about the latency between
               a mtctr and bctr (and mtlr and br/blr).  The first
               scheduling pass will not know about this latency since
               the mtctr instruction, which has the latency associated
               to it, will be generated by reload.  */
            return TARGET_POWER ? 5 : 4;
          case TYPE_BRANCH:
            /* Leave some extra cycles between a compare and its
               dependent branch, to inhibit expensive mispredicts.  */
            if ((rs6000_cpu_attr == CPU_PPC603
                 || rs6000_cpu_attr == CPU_PPC604
                 || rs6000_cpu_attr == CPU_PPC604E
                 || rs6000_cpu_attr == CPU_PPC620
                 || rs6000_cpu_attr == CPU_PPC630
                 || rs6000_cpu_attr == CPU_PPC750
                 || rs6000_cpu_attr == CPU_PPC7400
                 || rs6000_cpu_attr == CPU_PPC7450
                 || rs6000_cpu_attr == CPU_POWER4
                 || rs6000_cpu_attr == CPU_POWER5
                 || rs6000_cpu_attr == CPU_CELL)
                && recog_memoized (dep_insn)
                && (INSN_CODE (dep_insn) >= 0))

              switch (get_attr_type (dep_insn))
                {
                case TYPE_CMP:
                case TYPE_COMPARE:
                case TYPE_DELAYED_COMPARE:
                case TYPE_IMUL_COMPARE:
                case TYPE_LMUL_COMPARE:
                case TYPE_FPCOMPARE:
                case TYPE_CR_LOGICAL:
                case TYPE_DELAYED_CR:
                  return cost + 2;
                default:
                  break;
                }
            break;

          case TYPE_STORE:
          case TYPE_STORE_U:
          case TYPE_STORE_UX:
          case TYPE_FPSTORE:
          case TYPE_FPSTORE_U:
          case TYPE_FPSTORE_UX:
            if ((rs6000_cpu == PROCESSOR_POWER6)
                && recog_memoized (dep_insn)
                && (INSN_CODE (dep_insn) >= 0))
              {
                if (GET_CODE (PATTERN (insn)) != SET)
                  /* If this happens, we have to extend this to schedule
                     optimally.  Return default for now.  */
                  return cost;

                /* Adjust the cost for the case where the value written
                   by a fixed point operation is used as the address
                   gen value on a store.  */
                switch (get_attr_type (dep_insn))
                  {
                  case TYPE_LOAD:
                  case TYPE_LOAD_U:
                  case TYPE_LOAD_UX:
                  case TYPE_CNTLZ:
                    {
                      if (! store_data_bypass_p (dep_insn, insn))
                        return 4;
                      break;
                    }
                  case TYPE_LOAD_EXT:
                  case TYPE_LOAD_EXT_U:
                  case TYPE_LOAD_EXT_UX:
                  case TYPE_VAR_SHIFT_ROTATE:
                  case TYPE_VAR_DELAYED_COMPARE:
                    {
                      if (! store_data_bypass_p (dep_insn, insn))
                        return 6;
                      break;
                    }
                  case TYPE_INTEGER:
                  case TYPE_COMPARE:
                  case TYPE_FAST_COMPARE:
                  case TYPE_EXTS:
                  case TYPE_SHIFT:
                  case TYPE_INSERT_WORD:
                  case TYPE_INSERT_DWORD:
                  case TYPE_FPLOAD_U:
                  case TYPE_FPLOAD_UX:
                  case TYPE_STORE_U:
                  case TYPE_STORE_UX:
                  case TYPE_FPSTORE_U:
                  case TYPE_FPSTORE_UX:
                    {
                      if (! store_data_bypass_p (dep_insn, insn))
                        return 3;
                      break;
                    }
                  case TYPE_IMUL:
                  case TYPE_IMUL2:
                  case TYPE_IMUL3:
                  case TYPE_LMUL:
                  case TYPE_IMUL_COMPARE:
                  case TYPE_LMUL_COMPARE:
                    {
                      if (! store_data_bypass_p (dep_insn, insn))
                        return 17;
                      break;
                    }
                  case TYPE_IDIV:
                    {
                      if (! store_data_bypass_p (dep_insn, insn))
                        return 45;
                      break;
                    }
                  case TYPE_LDIV:
                    {
                      if (! store_data_bypass_p (dep_insn, insn))
                        return 57;
                      break;
                    }
                  default:
                    break;
                  }
              }
            break;

          case TYPE_LOAD:
          case TYPE_LOAD_U:
          case TYPE_LOAD_UX:
          case TYPE_LOAD_EXT:
          case TYPE_LOAD_EXT_U:
          case TYPE_LOAD_EXT_UX:
            if ((rs6000_cpu == PROCESSOR_POWER6)
                && recog_memoized (dep_insn)
                && (INSN_CODE (dep_insn) >= 0))
              {
                /* Adjust the cost for the case where the value written
                   by a fixed point instruction is used within the address
                   gen portion of a subsequent load(u)(x).  */
                switch (get_attr_type (dep_insn))
                  {
                  case TYPE_LOAD:
                  case TYPE_LOAD_U:
                  case TYPE_LOAD_UX:
                  case TYPE_CNTLZ:
                    {
                      if (set_to_load_agen (dep_insn, insn))
                        return 4;
                      break;
                    }
                  case TYPE_LOAD_EXT:
                  case TYPE_LOAD_EXT_U:
                  case TYPE_LOAD_EXT_UX:
                  case TYPE_VAR_SHIFT_ROTATE:
                  case TYPE_VAR_DELAYED_COMPARE:
                    {
                      if (set_to_load_agen (dep_insn, insn))
                        return 6;
                      break;
                    }
                  case TYPE_INTEGER:
                  case TYPE_COMPARE:
                  case TYPE_FAST_COMPARE:
                  case TYPE_EXTS:
                  case TYPE_SHIFT:
                  case TYPE_INSERT_WORD:
                  case TYPE_INSERT_DWORD:
                  case TYPE_FPLOAD_U:
                  case TYPE_FPLOAD_UX:
                  case TYPE_STORE_U:
                  case TYPE_STORE_UX:
                  case TYPE_FPSTORE_U:
                  case TYPE_FPSTORE_UX:
                    {
                      if (set_to_load_agen (dep_insn, insn))
                        return 3;
                      break;
                    }
                  case TYPE_IMUL:
                  case TYPE_IMUL2:
                  case TYPE_IMUL3:
                  case TYPE_LMUL:
                  case TYPE_IMUL_COMPARE:
                  case TYPE_LMUL_COMPARE:
                    {
                      if (set_to_load_agen (dep_insn, insn))
                        return 17;
                      break;
                    }
                  case TYPE_IDIV:
                    {
                      if (set_to_load_agen (dep_insn, insn))
                        return 45;
                      break;
                    }
                  case TYPE_LDIV:
                    {
                      if (set_to_load_agen (dep_insn, insn))
                        return 57;
                      break;
                    }
                  default:
                    break;
                  }
              }
            break;

          case TYPE_FPLOAD:
            if ((rs6000_cpu == PROCESSOR_POWER6)
                && recog_memoized (dep_insn)
                && (INSN_CODE (dep_insn) >= 0)
                && (get_attr_type (dep_insn) == TYPE_MFFGPR))
              return 2;

          default:
            break;
          }

        /* Fall out to return default cost.  */
      }
      break;

    case REG_DEP_OUTPUT:
      /* Output dependency; DEP_INSN writes a register that INSN writes some
         cycles later.  */
      if ((rs6000_cpu == PROCESSOR_POWER6)
          && recog_memoized (dep_insn)
          && (INSN_CODE (dep_insn) >= 0))
        {
          attr_type = get_attr_type (insn);

          switch (attr_type)
            {
            case TYPE_FP:
              if (get_attr_type (dep_insn) == TYPE_FP)
                return 1;
              break;
            case TYPE_FPLOAD:
              if (get_attr_type (dep_insn) == TYPE_MFFGPR)
                return 2;
              break;
            default:
              break;
            }
        }
    case REG_DEP_ANTI:
      /* Anti dependency; DEP_INSN reads a register that INSN writes
         some cycles later.  */
      return 0;

    default:
      gcc_unreachable ();
    }

  return cost;
}
/* The function returns true if INSN is microcoded.
   Return false otherwise.  */

static bool
is_microcoded_insn (rtx insn)
{
  if (!insn || !INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_cpu_attr == CPU_CELL)
    return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_LOAD_EXT_U
          || type == TYPE_LOAD_EXT_UX
          || type == TYPE_LOAD_UX
          || type == TYPE_STORE_UX
          || type == TYPE_MFCR)
        return true;
    }

  return false;
}
/* The function returns true if INSN is cracked into 2 instructions
   by the processor (and therefore occupies 2 issue slots).  */

static bool
is_cracked_insn (rtx insn)
{
  if (!insn || !INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_LOAD_U || type == TYPE_STORE_U
          || type == TYPE_FPLOAD_U || type == TYPE_FPSTORE_U
          || type == TYPE_FPLOAD_UX || type == TYPE_FPSTORE_UX
          || type == TYPE_LOAD_EXT || type == TYPE_DELAYED_CR
          || type == TYPE_COMPARE || type == TYPE_DELAYED_COMPARE
          || type == TYPE_IMUL_COMPARE || type == TYPE_LMUL_COMPARE
          || type == TYPE_IDIV || type == TYPE_LDIV
          || type == TYPE_INSERT_WORD)
        return true;
    }

  return false;
}
/* The function returns true if INSN can be issued only from
   the branch slot.  */

static bool
is_branch_slot_insn (rtx insn)
{
  if (!insn || !INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  if (rs6000_sched_groups)
    {
      enum attr_type type = get_attr_type (insn);
      if (type == TYPE_BRANCH || type == TYPE_JMPREG)
        return true;
      return false;
    }

  return false;
}
/* The function returns true if OUT_INSN sets a value that is
   used in the address generation computation of IN_INSN.  */

static bool
set_to_load_agen (rtx out_insn, rtx in_insn)
{
  rtx out_set, in_set;

  /* For performance reasons, only handle the simple case where
     both loads are a single_set.  */
  out_set = single_set (out_insn);
  if (out_set)
    {
      in_set = single_set (in_insn);
      if (in_set)
        return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
    }

  return false;
}
/* The function returns true if the target storage location of
   INSN1 is adjacent to the target storage location of INSN2;
   that is, return 1 if the memory locations are adjacent.  */

static bool
adjacent_mem_locations (rtx insn1, rtx insn2)
{
  rtx a = get_store_dest (PATTERN (insn1));
  rtx b = get_store_dest (PATTERN (insn2));

  if ((GET_CODE (XEXP (a, 0)) == REG
       || (GET_CODE (XEXP (a, 0)) == PLUS
           && GET_CODE (XEXP (XEXP (a, 0), 1)) == CONST_INT))
      && (GET_CODE (XEXP (b, 0)) == REG
          || (GET_CODE (XEXP (b, 0)) == PLUS
              && GET_CODE (XEXP (XEXP (b, 0), 1)) == CONST_INT)))
    {
      HOST_WIDE_INT val0 = 0, val1 = 0, val_diff;
      rtx reg0, reg1;

      if (GET_CODE (XEXP (a, 0)) == PLUS)
        {
          reg0 = XEXP (XEXP (a, 0), 0);
          val0 = INTVAL (XEXP (XEXP (a, 0), 1));
        }
      else
        reg0 = XEXP (a, 0);

      if (GET_CODE (XEXP (b, 0)) == PLUS)
        {
          reg1 = XEXP (XEXP (b, 0), 0);
          val1 = INTVAL (XEXP (XEXP (b, 0), 1));
        }
      else
        reg1 = XEXP (b, 0);

      val_diff = val1 - val0;

      return ((REGNO (reg0) == REGNO (reg1))
              && ((MEM_SIZE (a) && val_diff == INTVAL (MEM_SIZE (a)))
                  || (MEM_SIZE (b) && val_diff == -INTVAL (MEM_SIZE (b)))));
    }

  return false;
}
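
/* For example, the pair of word stores

        stw r3,4(r9)
        stw r4,8(r9)

   is adjacent: both addresses use base register r9, and val_diff
   (8 - 4) equals the MEM_SIZE of the first store.  The reversed
   issue order matches via the -INTVAL (MEM_SIZE (b)) test.  */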
/* A C statement (sans semicolon) to update the integer scheduling
   priority INSN_PRIORITY (INSN).  Increase the priority to execute the
   INSN earlier, reduce the priority to execute INSN later.  Do not
   define this macro if you do not need to adjust the scheduling
   priorities of insns.  */

static int
rs6000_adjust_priority (rtx insn ATTRIBUTE_UNUSED, int priority)
{
  /* On machines (like the 750) which have asymmetric integer units,
     where one integer unit can do multiply and divides and the other
     can't, reduce the priority of multiply/divide so it is scheduled
     before other integer operations.  */

#if 0
  if (! INSN_P (insn))
    return priority;

  if (GET_CODE (PATTERN (insn)) == USE)
    return priority;

  switch (rs6000_cpu_attr) {
  case CPU_PPC750:
    switch (get_attr_type (insn))
      {
      default:
        break;

      case TYPE_IMUL:
      case TYPE_IDIV:
        fprintf (stderr, "priority was %#x (%d) before adjustment\n",
                 priority, priority);
        if (priority >= 0 && priority < 0x01000000)
          priority >>= 3;
        break;
      }
  }
#endif

  if (insn_must_be_first_in_group (insn)
      && reload_completed
      && current_sched_info->sched_max_insns_priority
      && rs6000_sched_restricted_insns_priority)
    {
      /* Prioritize insns that can be dispatched only in the first
         dispatch slot.  */
      if (rs6000_sched_restricted_insns_priority == 1)
        /* Attach highest priority to insn.  This means that in
           haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
           precede 'priority' (critical path) considerations.  */
        return current_sched_info->sched_max_insns_priority;
      else if (rs6000_sched_restricted_insns_priority == 2)
        /* Increase priority of insn by a minimal amount.  This means that in
           haifa-sched.c:ready_sort(), only 'priority' (critical path)
           considerations precede dispatch-slot restriction considerations.  */
        return (priority + 1);
    }

  if (rs6000_cpu == PROCESSOR_POWER6
      && ((load_store_pendulum == -2 && is_load_insn (insn))
          || (load_store_pendulum == 2 && is_store_insn (insn))))
    /* Attach highest priority to insn if the scheduler has just issued two
       stores and this instruction is a load, or two loads and this instruction
       is a store.  Power6 wants loads and stores scheduled alternately
       when possible.  */
    return current_sched_info->sched_max_insns_priority;

  return priority;
}
/* Return true if the instruction is nonpipelined on the Cell.  */
static bool
is_nonpipeline_insn (rtx insn)
{
  enum attr_type type;
  if (!insn || !INSN_P (insn)
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  type = get_attr_type (insn);
  if (type == TYPE_IMUL
      || type == TYPE_IMUL2
      || type == TYPE_IMUL3
      || type == TYPE_LMUL
      || type == TYPE_IDIV
      || type == TYPE_LDIV
      || type == TYPE_SDIV
      || type == TYPE_DDIV
      || type == TYPE_SSQRT
      || type == TYPE_DSQRT
      || type == TYPE_MFCR
      || type == TYPE_MFCRF
      || type == TYPE_MFJMPR)
    return true;
  return false;
}
/* Return how many instructions the machine can issue per cycle.  */

static int
rs6000_issue_rate (void)
{
  /* Use issue rate of 1 for first scheduling pass to decrease degradation.  */
  if (!reload_completed)
    return 1;

  switch (rs6000_cpu_attr) {
  case CPU_RIOS1:  /* ? */
  case CPU_RS64A:
  case CPU_PPC601: /* ? */
  case CPU_PPC7450:
    return 3;
  case CPU_PPC440:
  case CPU_PPC603:
  case CPU_PPC750:
  case CPU_PPC7400:
  case CPU_PPC8540:
  case CPU_CELL:
  case CPU_PPCE300C2:
  case CPU_PPCE300C3:
  case CPU_PPCE500MC:
    return 2;
  case CPU_RIOS2:
  case CPU_PPC604:
  case CPU_PPC604E:
  case CPU_PPC620:
  case CPU_PPC630:
    return 4;
  case CPU_POWER4:
  case CPU_POWER5:
  case CPU_POWER6:
    return 5;
  default:
    return 1;
  }
}
/* Return how many instructions to look ahead for better insn
   scheduling.  */

static int
rs6000_use_sched_lookahead (void)
{
  if (rs6000_cpu_attr == CPU_PPC8540)
    return 4;
  if (rs6000_cpu_attr == CPU_CELL)
    return (reload_completed ? 8 : 0);
  return 0;
}
/* We are choosing insn from the ready queue.  Return nonzero if INSN can be
   chosen.  */

static int
rs6000_use_sched_lookahead_guard (rtx insn)
{
  if (rs6000_cpu_attr != CPU_CELL)
    return 1;

  if (insn == NULL_RTX || !INSN_P (insn))
    abort ();

  if (!reload_completed
      || is_nonpipeline_insn (insn)
      || is_microcoded_insn (insn))
    return 0;

  return 1;
}
/* Determine if PAT refers to memory.  */

static bool
is_mem_ref (rtx pat)
{
  const char * fmt;
  int i, j;
  bool ret = false;

  /* stack_tie does not produce any real memory traffic.  */
  if (GET_CODE (pat) == UNSPEC
      && XINT (pat, 1) == UNSPEC_TIE)
    return false;

  if (GET_CODE (pat) == MEM)
    return true;

  /* Recursively process the pattern.  */
  fmt = GET_RTX_FORMAT (GET_CODE (pat));

  for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0 && !ret; i--)
    {
      if (fmt[i] == 'e')
        ret |= is_mem_ref (XEXP (pat, i));
      else if (fmt[i] == 'E')
        for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
          ret |= is_mem_ref (XVECEXP (pat, i, j));
    }

  return ret;
}
/* Determine if PAT is a PATTERN of a load insn.  */

static bool
is_load_insn1 (rtx pat)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return is_mem_ref (SET_SRC (pat));

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
        if (is_load_insn1 (XVECEXP (pat, 0, i)))
          return true;
    }

  return false;
}

/* Determine if INSN loads from memory.  */

static bool
is_load_insn (rtx insn)
{
  if (!insn || !INSN_P (insn))
    return false;

  if (GET_CODE (insn) == CALL_INSN)
    return false;

  return is_load_insn1 (PATTERN (insn));
}
/* Determine if PAT is a PATTERN of a store insn.  */

static bool
is_store_insn1 (rtx pat)
{
  if (!pat || pat == NULL_RTX)
    return false;

  if (GET_CODE (pat) == SET)
    return is_mem_ref (SET_DEST (pat));

  if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
        if (is_store_insn1 (XVECEXP (pat, 0, i)))
          return true;
    }

  return false;
}

/* Determine if INSN stores to memory.  */

static bool
is_store_insn (rtx insn)
{
  if (!insn || !INSN_P (insn))
    return false;

  return is_store_insn1 (PATTERN (insn));
}
/* Return the dest of a store insn.  */

static rtx
get_store_dest (rtx pat)
{
  gcc_assert (is_store_insn1 (pat));

  if (GET_CODE (pat) == SET)
    return SET_DEST (pat);
  else if (GET_CODE (pat) == PARALLEL)
    {
      int i;

      for (i = 0; i < XVECLEN (pat, 0); i++)
        {
          rtx inner_pat = XVECEXP (pat, 0, i);
          if (GET_CODE (inner_pat) == SET
              && is_mem_ref (SET_DEST (inner_pat)))
            return inner_pat;
        }
    }
  /* We shouldn't get here, because we should have either a simple
     store insn or a store with update which are covered above.  */
  gcc_unreachable ();
}
/* Returns whether the dependence between INSN and NEXT is considered
   costly by the given target.  */

static bool
rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
{
  rtx insn;
  rtx next;

  /* If the flag is not enabled - no dependence is considered costly;
     allow all dependent insns in the same group.
     This is the most aggressive option.  */
  if (rs6000_sched_costly_dep == no_dep_costly)
    return false;

  /* If the flag is set to 1 - a dependence is always considered costly;
     do not allow dependent instructions in the same group.
     This is the most conservative option.  */
  if (rs6000_sched_costly_dep == all_deps_costly)
    return true;

  insn = DEP_PRO (dep);
  next = DEP_CON (dep);

  if (rs6000_sched_costly_dep == store_to_load_dep_costly
      && is_load_insn (next)
      && is_store_insn (insn))
    /* Prevent load after store in the same group.  */
    return true;

  if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
      && is_load_insn (next)
      && is_store_insn (insn)
      && DEP_TYPE (dep) == REG_DEP_TRUE)
    /* Prevent load after store in the same group if it is a true
       dependence.  */
    return true;

  /* The flag is set to X; dependences with latency >= X are considered costly,
     and will not be scheduled in the same group.  */
  if (rs6000_sched_costly_dep <= max_dep_latency
      && ((cost - distance) >= (int)rs6000_sched_costly_dep))
    return true;

  return false;
}
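
/* For example (illustrative): under -msched-costly-dep=store_to_load,
   a store followed by any dependent load is kept out of the store's
   dispatch group; under a numeric setting such as -msched-costly-dep=2,
   only dependences whose remaining latency (cost - distance) is at
   least 2 are split into separate groups.  */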
/* Return the next insn after INSN that is found before TAIL is reached,
   skipping any "non-active" insns - insns that will not actually occupy
   an issue slot.  Return NULL_RTX if such an insn is not found.  */

static rtx
get_next_active_insn (rtx insn, rtx tail)
{
  if (insn == NULL_RTX || insn == tail)
    return NULL_RTX;

  while (1)
    {
      insn = NEXT_INSN (insn);
      if (insn == NULL_RTX || insn == tail)
        return NULL_RTX;

      if (CALL_P (insn)
          || JUMP_P (insn)
          || (NONJUMP_INSN_P (insn)
              && GET_CODE (PATTERN (insn)) != USE
              && GET_CODE (PATTERN (insn)) != CLOBBER
              && INSN_CODE (insn) != CODE_FOR_stack_tie))
        break;
    }
  return insn;
}
/* We are about to begin issuing insns for this clock cycle.  */

static int
rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
                      rtx *ready ATTRIBUTE_UNUSED,
                      int *pn_ready ATTRIBUTE_UNUSED,
                      int clock_var ATTRIBUTE_UNUSED)
{
  int n_ready = *pn_ready;

  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder :\n");

  /* Reorder the ready list, if the second to last ready insn
     is a nonpipelined insn.  */
  if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
    {
      if (is_nonpipeline_insn (ready[n_ready - 1])
          && (recog_memoized (ready[n_ready - 2]) > 0))
        /* Simply swap first two insns.  */
        {
          rtx tmp = ready[n_ready - 1];
          ready[n_ready - 1] = ready[n_ready - 2];
          ready[n_ready - 2] = tmp;
        }
    }

  if (rs6000_cpu == PROCESSOR_POWER6)
    load_store_pendulum = 0;

  return rs6000_issue_rate ();
}
/* Like rs6000_sched_reorder, but called after issuing each insn.  */

static int
rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx *ready,
                       int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
{
  if (sched_verbose)
    fprintf (dump, "// rs6000_sched_reorder2 :\n");

  /* For Power6, we need to handle some special cases to try and keep the
     store queue from overflowing and triggering expensive flushes.

     This code monitors how load and store instructions are being issued
     and skews the ready list one way or the other to increase the likelihood
     that a desired instruction is issued at the proper time.

     A couple of things are done.  First, we maintain a "load_store_pendulum"
     to track the current state of load/store issue.

       - If the pendulum is at zero, then no loads or stores have been
         issued in the current cycle so we do nothing.

       - If the pendulum is 1, then a single load has been issued in this
         cycle and we attempt to locate another load in the ready list to
         issue with it.

       - If the pendulum is -2, then two stores have already been
         issued in this cycle, so we increase the priority of the first load
         in the ready list to increase its likelihood of being chosen first
         in the next cycle.

       - If the pendulum is -1, then a single store has been issued in this
         cycle and we attempt to locate another store in the ready list to
         issue with it, preferring a store to an adjacent memory location to
         facilitate store pairing in the store queue.

       - If the pendulum is 2, then two loads have already been
         issued in this cycle, so we increase the priority of the first store
         in the ready list to increase its likelihood of being chosen first
         in the next cycle.

       - If the pendulum < -2 or > 2, then do nothing.

     Note: This code covers the most common scenarios.  There exist non
           load/store instructions which make use of the LSU and which
           would need to be accounted for to strictly model the behavior
           of the machine.  Those instructions are currently unaccounted
           for to help minimize compile time overhead of this code.
   */
  if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
    {
      int pos;
      int i;
      rtx tmp;

      if (is_store_insn (last_scheduled_insn))
        /* Issuing a store, swing the load_store_pendulum to the left.  */
        load_store_pendulum--;
      else if (is_load_insn (last_scheduled_insn))
        /* Issuing a load, swing the load_store_pendulum to the right.  */
        load_store_pendulum++;
      else
        return cached_can_issue_more;

      /* If the pendulum is balanced, or there is only one instruction on
         the ready list, then all is well, so return.  */
      if ((load_store_pendulum == 0) || (*pn_ready <= 1))
        return cached_can_issue_more;

      if (load_store_pendulum == 1)
        {
          /* A load has been issued in this cycle.  Scan the ready list
             for another load to issue with it.  */
          pos = *pn_ready-1;

          while (pos >= 0)
            {
              if (is_load_insn (ready[pos]))
                {
                  /* Found a load.  Move it to the head of the ready list,
                     and adjust its priority so that it is more likely to
                     stay there.  */
                  tmp = ready[pos];
                  for (i=pos; i<*pn_ready-1; i++)
                    ready[i] = ready[i + 1];
                  ready[*pn_ready-1] = tmp;

                  if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                    INSN_PRIORITY (tmp)++;
                  break;
                }
              pos--;
            }
        }
      else if (load_store_pendulum == -2)
        {
          /* Two stores have been issued in this cycle.  Increase the
             priority of the first load in the ready list to favor it for
             issuing in the next cycle.  */
          pos = *pn_ready-1;

          while (pos >= 0)
            {
              if (is_load_insn (ready[pos])
                  && !sel_sched_p ()
                  && INSN_PRIORITY_KNOWN (ready[pos]))
                {
                  INSN_PRIORITY (ready[pos])++;

                  /* Adjust the pendulum to account for the fact that a load
                     was found and increased in priority.  This is to prevent
                     increasing the priority of multiple loads.  */
                  load_store_pendulum--;

                  break;
                }
              pos--;
            }
        }
      else if (load_store_pendulum == -1)
        {
          /* A store has been issued in this cycle.  Scan the ready list for
             another store to issue with it, preferring a store to an adjacent
             memory location.  */
          int first_store_pos = -1;

          pos = *pn_ready-1;

          while (pos >= 0)
            {
              if (is_store_insn (ready[pos]))
                {
                  /* Maintain the index of the first store found on the
                     list.  */
                  if (first_store_pos == -1)
                    first_store_pos = pos;

                  if (is_store_insn (last_scheduled_insn)
                      && adjacent_mem_locations (last_scheduled_insn,ready[pos]))
                    {
                      /* Found an adjacent store.  Move it to the head of the
                         ready list, and adjust its priority so that it is
                         more likely to stay there.  */
                      tmp = ready[pos];
                      for (i=pos; i<*pn_ready-1; i++)
                        ready[i] = ready[i + 1];
                      ready[*pn_ready-1] = tmp;

                      if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                        INSN_PRIORITY (tmp)++;

                      first_store_pos = -1;

                      break;
                    }
                }
              pos--;
            }

          if (first_store_pos >= 0)
            {
              /* An adjacent store wasn't found, but a non-adjacent store was,
                 so move the non-adjacent store to the front of the ready
                 list, and adjust its priority so that it is more likely to
                 stay there.  */
              tmp = ready[first_store_pos];
              for (i=first_store_pos; i<*pn_ready-1; i++)
                ready[i] = ready[i + 1];
              ready[*pn_ready-1] = tmp;
              if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
                INSN_PRIORITY (tmp)++;
            }
        }
      else if (load_store_pendulum == 2)
        {
          /* Two loads have been issued in this cycle.  Increase the priority
             of the first store in the ready list to favor it for issuing in
             the next cycle.  */
          pos = *pn_ready-1;

          while (pos >= 0)
            {
              if (is_store_insn (ready[pos])
                  && !sel_sched_p ()
                  && INSN_PRIORITY_KNOWN (ready[pos]))
                {
                  INSN_PRIORITY (ready[pos])++;

                  /* Adjust the pendulum to account for the fact that a store
                     was found and increased in priority.  This is to prevent
                     increasing the priority of multiple stores.  */
                  load_store_pendulum++;

                  break;
                }
              pos--;
            }
        }
    }

  return cached_can_issue_more;
}
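
/* A short worked trace of the pendulum logic above (illustrative):
   a cycle starts with the pendulum at 0; a load issues -> pendulum 1,
   so the ready list is scanned for a second load to pair with it;
   another load issues -> pendulum 2, so the first store on the ready
   list gets a priority boost, steering the following cycle back
   toward stores and keeping the Power6 store queue balanced.  */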
/* Return whether the presence of INSN causes a dispatch group termination
   of group WHICH_GROUP.

   If WHICH_GROUP == current_group, this function will return true if INSN
   causes the termination of the current group (i.e, the dispatch group to
   which INSN belongs).  This means that INSN will be the last insn in the
   group it belongs to.

   If WHICH_GROUP == previous_group, this function will return true if INSN
   causes the termination of the previous group (i.e, the dispatch group that
   precedes the group to which INSN belongs).  This means that INSN will be
   the first insn in the group it belongs to.  */

static bool
insn_terminates_group_p (rtx insn, enum group_termination which_group)
{
  bool first, last;

  if (! insn)
    return false;

  first = insn_must_be_first_in_group (insn);
  last = insn_must_be_last_in_group (insn);

  if (first && last)
    return true;

  if (which_group == current_group)
    return last;
  else if (which_group == previous_group)
    return first;

  return false;
}


static bool
insn_must_be_first_in_group (rtx insn)
{
  enum attr_type type;

  if (!insn
      || insn == NULL_RTX
      || GET_CODE (insn) == NOTE
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_cpu)
    {
    case PROCESSOR_POWER5:
      if (is_cracked_insn (insn))
        return true;
    case PROCESSOR_POWER4:
      if (is_microcoded_insn (insn))
        return true;

      if (!rs6000_sched_groups)
        return false;

      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_MFCR:
        case TYPE_MFCRF:
        case TYPE_MTCR:
        case TYPE_DELAYED_CR:
        case TYPE_CR_LOGICAL:
        case TYPE_MTJMPR:
        case TYPE_MFJMPR:
        case TYPE_IDIV:
        case TYPE_LDIV:
        case TYPE_LOAD_L:
        case TYPE_STORE_C:
        case TYPE_ISYNC:
        case TYPE_SYNC:
          return true;
        default:
          break;
        }
      break;
    case PROCESSOR_POWER6:
      type = get_attr_type (insn);

      switch (type)
        {
        case TYPE_INSERT_DWORD:
        case TYPE_EXTS:
        case TYPE_CNTLZ:
        case TYPE_SHIFT:
        case TYPE_VAR_SHIFT_ROTATE:
        case TYPE_TRAP:
        case TYPE_IMUL:
        case TYPE_IMUL2:
        case TYPE_IMUL3:
        case TYPE_LMUL:
        case TYPE_IDIV:
        case TYPE_INSERT_WORD:
        case TYPE_DELAYED_COMPARE:
        case TYPE_IMUL_COMPARE:
        case TYPE_LMUL_COMPARE:
        case TYPE_FPCOMPARE:
        case TYPE_MFCR:
        case TYPE_MTCR:
        case TYPE_MFJMPR:
        case TYPE_MTJMPR:
        case TYPE_ISYNC:
        case TYPE_SYNC:
        case TYPE_LOAD_L:
        case TYPE_STORE_C:
        case TYPE_LOAD_U:
        case TYPE_LOAD_UX:
        case TYPE_LOAD_EXT_UX:
        case TYPE_STORE_U:
        case TYPE_STORE_UX:
        case TYPE_FPLOAD_U:
        case TYPE_FPLOAD_UX:
        case TYPE_FPSTORE_U:
        case TYPE_FPSTORE_UX:
          return true;
        default:
          break;
        }
      break;
    default:
      break;
    }

  return false;
}

static bool
insn_must_be_last_in_group (rtx insn)
{
  enum attr_type type;

  if (!insn
      || insn == NULL_RTX
      || GET_CODE (insn) == NOTE
      || GET_CODE (PATTERN (insn)) == USE
      || GET_CODE (PATTERN (insn)) == CLOBBER)
    return false;

  switch (rs6000_cpu) {
  case PROCESSOR_POWER4:
  case PROCESSOR_POWER5:
    if (is_microcoded_insn (insn))
      return true;

    if (is_branch_slot_insn (insn))
      return true;

    break;
  case PROCESSOR_POWER6:
    type = get_attr_type (insn);

    switch (type)
      {
      case TYPE_EXTS:
      case TYPE_CNTLZ:
      case TYPE_SHIFT:
      case TYPE_VAR_SHIFT_ROTATE:
      case TYPE_TRAP:
      case TYPE_IMUL:
      case TYPE_IMUL2:
      case TYPE_IMUL3:
      case TYPE_LMUL:
      case TYPE_IDIV:
      case TYPE_DELAYED_COMPARE:
      case TYPE_IMUL_COMPARE:
      case TYPE_LMUL_COMPARE:
      case TYPE_FPCOMPARE:
      case TYPE_MFCR:
      case TYPE_MTCR:
      case TYPE_MFJMPR:
      case TYPE_MTJMPR:
      case TYPE_ISYNC:
      case TYPE_SYNC:
      case TYPE_LOAD_L:
      case TYPE_STORE_C:
        return true;
      default:
        break;
      }
    break;
  default:
    break;
  }

  return false;
}
/* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
   dispatch group) from the insns in GROUP_INSNS.  Return false otherwise.  */

static bool
is_costly_group (rtx *group_insns, rtx next_insn)
{
  int i;
  int issue_rate = rs6000_issue_rate ();

  for (i = 0; i < issue_rate; i++)
    {
      sd_iterator_def sd_it;
      dep_t dep;
      rtx insn = group_insns[i];

      if (!insn)
        continue;

      FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep)
        {
          rtx next = DEP_CON (dep);

          if (next == next_insn
              && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
            return true;
        }
    }

  return false;
}
/* Utility of the function redefine_groups.
   Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
   in the same dispatch group.  If so, insert nops before NEXT_INSN, in order
   to keep it "far" (in a separate group) from GROUP_INSNS, following
   one of the following schemes, depending on the value of the flag
   -minsert_sched_nops = X:
   (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
       in order to force NEXT_INSN into a separate group.
   (2) X < sched_finish_regroup_exact: insert exactly X nops.
   GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
   insertion (has a group just ended, how many vacant issue slots remain in the
   last group, and how many dispatch groups were encountered so far).  */

static int
force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
                 rtx next_insn, bool *group_end, int can_issue_more,
                 int *group_count)
{
  rtx nop;
  bool force;
  int issue_rate = rs6000_issue_rate ();
  bool end = *group_end;
  int i;

  if (next_insn == NULL_RTX)
    return can_issue_more;

  if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
    return can_issue_more;

  force = is_costly_group (group_insns, next_insn);
  if (!force)
    return can_issue_more;

  if (sched_verbose > 6)
    fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
             *group_count ,can_issue_more);

  if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
    {
      if (*group_end)
        can_issue_more = 0;

      /* Since only a branch can be issued in the last issue_slot, it is
         sufficient to insert 'can_issue_more - 1' nops if next_insn is not
         a branch.  If next_insn is a branch, we insert 'can_issue_more' nops;
         in this case the last nop will start a new group and the branch
         will be forced to the new group.  */
      if (can_issue_more && !is_branch_slot_insn (next_insn))
        can_issue_more--;

      while (can_issue_more > 0)
        {
          nop = gen_nop ();
          emit_insn_before (nop, next_insn);
          can_issue_more--;
        }

      *group_end = true;
      return 0;
    }

  if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
    {
      int n_nops = rs6000_sched_insert_nops;

      /* Nops can't be issued from the branch slot, so the effective
         issue_rate for nops is 'issue_rate - 1'.  */
      if (can_issue_more == 0)
        can_issue_more = issue_rate;
      can_issue_more--;
      if (can_issue_more == 0)
        {
          can_issue_more = issue_rate - 1;
          (*group_count)++;
          end = true;
          for (i = 0; i < issue_rate; i++)
            {
              group_insns[i] = 0;
            }
        }

      while (n_nops > 0)
        {
          nop = gen_nop ();
          emit_insn_before (nop, next_insn);
          if (can_issue_more == issue_rate - 1) /* new group begins */
            end = false;
          can_issue_more--;
          if (can_issue_more == 0)
            {
              can_issue_more = issue_rate - 1;
              (*group_count)++;
              end = true;
              for (i = 0; i < issue_rate; i++)
                {
                  group_insns[i] = 0;
                }
            }

          n_nops--;
        }

      /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1').  */
      can_issue_more++;

      /* Is next_insn going to start a new group?  */
      *group_end
        = (end
           || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
           || (can_issue_more <= 2 && is_cracked_insn (next_insn))
           || (can_issue_more < issue_rate &&
               insn_terminates_group_p (next_insn, previous_group)));
      if (*group_end && end)
        (*group_count)--;

      if (sched_verbose > 6)
        fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
                 *group_count, can_issue_more);
      return can_issue_more;
    }

  return can_issue_more;
}
/* This function tries to synch the dispatch groups that the compiler "sees"
   with the dispatch groups that the processor dispatcher is expected to
   form in practice.  It tries to achieve this synchronization by forcing the
   estimated processor grouping on the compiler (as opposed to the function
   'pad_groups' which tries to force the scheduler's grouping on the
   processor).

   The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
   examines the (estimated) dispatch groups that will be formed by the
   processor dispatcher.  It marks these group boundaries to reflect the
   estimated processor grouping, overriding the grouping that the scheduler
   had marked.  Depending on the value of the flag '-minsert-sched-nops' this
   function can force certain insns into separate groups or force a certain
   distance between them by inserting nops, for example, if there exists a
   "costly dependence" between the insns.

   The function estimates the group boundaries that the processor will form as
   follows:  It keeps track of how many vacant issue slots are available after
   each insn.  A subsequent insn will start a new group if one of the following
   4 cases applies:
   - no more vacant issue slots remain in the current dispatch group.
   - only the last issue slot, which is the branch slot, is vacant, but the
     next insn is not a branch.
   - only the last 2 or less issue slots, including the branch slot, are
     vacant, which means that a cracked insn (which occupies two issue slots)
     can't be issued in this group.
   - less than 'issue_rate' slots are vacant, and the next insn always needs
     to start a new group.  */

static int
redefine_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
{
  rtx insn, next_insn;
  int issue_rate;
  int can_issue_more;
  int slot, i;
  bool group_end;
  int group_count = 0;
  rtx *group_insns;

  /* Initialize.  */
  issue_rate = rs6000_issue_rate ();
  group_insns = XALLOCAVEC (rtx, issue_rate);
  for (i = 0; i < issue_rate; i++)
    {
      group_insns[i] = 0;
    }
  can_issue_more = issue_rate;
  slot = 0;
  insn = get_next_active_insn (prev_head_insn, tail);
  group_end = false;

  while (insn != NULL_RTX)
    {
      slot = (issue_rate - can_issue_more);
      group_insns[slot] = insn;
      can_issue_more =
        rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
      if (insn_terminates_group_p (insn, current_group))
        can_issue_more = 0;

      next_insn = get_next_active_insn (insn, tail);
      if (next_insn == NULL_RTX)
        return group_count + 1;

      /* Is next_insn going to start a new group?  */
      group_end
        = (can_issue_more == 0
           || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
           || (can_issue_more <= 2 && is_cracked_insn (next_insn))
           || (can_issue_more < issue_rate &&
               insn_terminates_group_p (next_insn, previous_group)));

      can_issue_more = force_new_group (sched_verbose, dump, group_insns,
                                        next_insn, &group_end, can_issue_more,
                                        &group_count);

      if (group_end)
        {
          group_count++;
          can_issue_more = 0;
          for (i = 0; i < issue_rate; i++)
            {
              group_insns[i] = 0;
            }
        }

      if (GET_MODE (next_insn) == TImode && can_issue_more)
        PUT_MODE (next_insn, VOIDmode);
      else if (!can_issue_more && GET_MODE (next_insn) != TImode)
        PUT_MODE (next_insn, TImode);

      insn = next_insn;
      if (can_issue_more == 0)
        can_issue_more = issue_rate;
    } /* while */

  return group_count;
}
/* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
   dispatch group boundaries that the scheduler had marked.  Pad with nops
   any dispatch groups which have vacant issue slots, in order to force the
   scheduler's grouping on the processor dispatcher.  The function
   returns the number of dispatch groups found.  */

static int
pad_groups (FILE *dump, int sched_verbose, rtx prev_head_insn, rtx tail)
{
  rtx insn, next_insn;
  rtx nop;
  int issue_rate;
  int can_issue_more;
  int group_end;
  int group_count = 0;

  /* Initialize issue_rate.  */
  issue_rate = rs6000_issue_rate ();
  can_issue_more = issue_rate;

  insn = get_next_active_insn (prev_head_insn, tail);
  next_insn = get_next_active_insn (insn, tail);

  while (insn != NULL_RTX)
    {
      can_issue_more =
        rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);

      group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);

      if (next_insn == NULL_RTX)
        break;

      if (group_end)
        {
          /* If the scheduler had marked group termination at this location
             (between insn and next_insn), and neither insn nor next_insn will
             force group termination, pad the group with nops to force group
             termination.  */
          if (can_issue_more
              && (rs6000_sched_insert_nops == sched_finish_pad_groups)
              && !insn_terminates_group_p (insn, current_group)
              && !insn_terminates_group_p (next_insn, previous_group))
            {
              if (!is_branch_slot_insn (next_insn))
                can_issue_more--;

              while (can_issue_more)
                {
                  nop = gen_nop ();
                  emit_insn_before (nop, next_insn);
                  can_issue_more--;
                }
            }

          can_issue_more = issue_rate;
          group_count++;
        }

      insn = next_insn;
      next_insn = get_next_active_insn (insn, tail);
    }

  return group_count;
}
/* We're beginning a new block.  Initialize data structures as necessary.  */

static void
rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
                   int sched_verbose ATTRIBUTE_UNUSED,
                   int max_ready ATTRIBUTE_UNUSED)
{
  last_scheduled_insn = NULL_RTX;
  load_store_pendulum = 0;
}
/* The following function is called at the end of scheduling BB.
   After reload, it inserts nops at insn group bundling.  */

static void
rs6000_sched_finish (FILE *dump, int sched_verbose)
{
  int n_groups;

  if (sched_verbose)
    fprintf (dump, "=== Finishing schedule.\n");

  if (reload_completed && rs6000_sched_groups)
    {
      /* Do not run sched_finish hook when selective scheduling enabled.  */
      if (sel_sched_p ())
        return;

      if (rs6000_sched_insert_nops == sched_finish_none)
        return;

      if (rs6000_sched_insert_nops == sched_finish_pad_groups)
        n_groups = pad_groups (dump, sched_verbose,
                               current_sched_info->prev_head,
                               current_sched_info->next_tail);
      else
        n_groups = redefine_groups (dump, sched_verbose,
                                    current_sched_info->prev_head,
                                    current_sched_info->next_tail);

      if (sched_verbose >= 6)
        {
          fprintf (dump, "ngroups = %d\n", n_groups);
          print_rtl (dump, current_sched_info->prev_head);
          fprintf (dump, "Done finish_sched\n");
        }
    }
}
struct _rs6000_sched_context
{
  short cached_can_issue_more;
  rtx last_scheduled_insn;
  int load_store_pendulum;
};

typedef struct _rs6000_sched_context rs6000_sched_context_def;
typedef rs6000_sched_context_def *rs6000_sched_context_t;

/* Allocate store for new scheduling context.  */
static void *
rs6000_alloc_sched_context (void)
{
  return xmalloc (sizeof (rs6000_sched_context_def));
}

/* If CLEAN_P is true then initializes _SC with clean data,
   and from the global context otherwise.  */
static void
rs6000_init_sched_context (void *_sc, bool clean_p)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  if (clean_p)
    {
      sc->cached_can_issue_more = 0;
      sc->last_scheduled_insn = NULL_RTX;
      sc->load_store_pendulum = 0;
    }
  else
    {
      sc->cached_can_issue_more = cached_can_issue_more;
      sc->last_scheduled_insn = last_scheduled_insn;
      sc->load_store_pendulum = load_store_pendulum;
    }
}

/* Sets the global scheduling context to the one pointed to by _SC.  */
static void
rs6000_set_sched_context (void *_sc)
{
  rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;

  gcc_assert (sc != NULL);

  cached_can_issue_more = sc->cached_can_issue_more;
  last_scheduled_insn = sc->last_scheduled_insn;
  load_store_pendulum = sc->load_store_pendulum;
}

/* Free _SC.  */
static void
rs6000_free_sched_context (void *_sc)
{
  gcc_assert (_sc != NULL);

  free (_sc);
}
/* Length in units of the trampoline for entering a nested function.  */

int
rs6000_trampoline_size (void)
{
  int ret = 0;

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

    case ABI_AIX:
      ret = (TARGET_32BIT) ? 12 : 24;
      break;

    case ABI_DARWIN:
    case ABI_V4:
      ret = (TARGET_32BIT) ? 40 : 48;
      break;
    }

  return ret;
}
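
/* The AIX numbers above are the size of the 3-word function descriptor
   built in rs6000_initialize_trampoline below (3 * 4 bytes for -m32,
   3 * 8 for -m64); the V.4/Darwin numbers cover the region that the
   runtime helper __trampoline_setup fills in.  */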
/* Emit RTL insns to initialize the variable parts of a trampoline.
   FNADDR is an RTX for the address of the function's pure code.
   CXT is an RTX for the static chain value for the function.  */

void
rs6000_initialize_trampoline (rtx addr, rtx fnaddr, rtx cxt)
{
  int regsize = (TARGET_32BIT) ? 4 : 8;
  rtx ctx_reg = force_reg (Pmode, cxt);

  switch (DEFAULT_ABI)
    {
    default:
      gcc_unreachable ();

/* Macros to shorten the code expansions below.  */
#define MEM_DEREF(addr) gen_rtx_MEM (Pmode, memory_address (Pmode, addr))
#define MEM_PLUS(addr,offset) \
  gen_rtx_MEM (Pmode, memory_address (Pmode, plus_constant (addr, offset)))

    /* Under AIX, just build the 3 word function descriptor.  */
    case ABI_AIX:
      {
        rtx fn_reg = gen_reg_rtx (Pmode);
        rtx toc_reg = gen_reg_rtx (Pmode);
        emit_move_insn (fn_reg, MEM_DEREF (fnaddr));
        emit_move_insn (toc_reg, MEM_PLUS (fnaddr, regsize));
        emit_move_insn (MEM_DEREF (addr), fn_reg);
        emit_move_insn (MEM_PLUS (addr, regsize), toc_reg);
        emit_move_insn (MEM_PLUS (addr, 2*regsize), ctx_reg);
      }
      break;

    /* Under V.4/eabi/darwin, __trampoline_setup does the real work.  */
    case ABI_DARWIN:
    case ABI_V4:
      emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
                         LCT_NORMAL, VOIDmode, 4,
                         addr, Pmode,
                         GEN_INT (rs6000_trampoline_size ()), SImode,
                         fnaddr, Pmode,
                         ctx_reg, Pmode);
      break;
    }
}
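
/* Layout sketch of the AIX descriptor built above, with regsize = 4
   (-m32) or 8 (-m64):

        addr + 0*regsize : code address (copied from FNADDR's descriptor)
        addr + 1*regsize : TOC pointer  (copied from FNADDR's descriptor)
        addr + 2*regsize : static chain (CXT)
*/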
/* Table of valid machine attributes.  */

const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL }
};
/* Handle the "altivec" attribute.  The attribute may have
   arguments as follows:

        __attribute__((altivec(vector__)))
        __attribute__((altivec(pixel__)))       (always followed by 'unsigned short')
        __attribute__((altivec(bool__)))        (always followed by 'unsigned')

   and may appear more than once (e.g., 'vector bool char') in a
   given declaration.  */

static tree
rs6000_handle_altivec_attribute (tree *node,
                                 tree name ATTRIBUTE_UNUSED,
                                 tree args,
                                 int flags ATTRIBUTE_UNUSED,
                                 bool *no_add_attrs)
{
  tree type = *node, result = NULL_TREE;
  enum machine_mode mode;
  int unsigned_p;
  char altivec_type
    = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
        && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
       ? *IDENTIFIER_POINTER (TREE_VALUE (args))
       : '?');

  while (POINTER_TYPE_P (type)
         || TREE_CODE (type) == FUNCTION_TYPE
         || TREE_CODE (type) == METHOD_TYPE
         || TREE_CODE (type) == ARRAY_TYPE)
    type = TREE_TYPE (type);

  mode = TYPE_MODE (type);

  /* Check for invalid AltiVec type qualifiers.  */
  if (type == long_unsigned_type_node || type == long_integer_type_node)
    {
      if (TARGET_64BIT)
        error ("use of %<long%> in AltiVec types is invalid for 64-bit code");
      else if (rs6000_warn_altivec_long)
        warning (0, "use of %<long%> in AltiVec types is deprecated; use %<int%>");
    }
  else if (type == long_long_unsigned_type_node
           || type == long_long_integer_type_node)
    error ("use of %<long long%> in AltiVec types is invalid");
  else if (type == double_type_node)
    error ("use of %<double%> in AltiVec types is invalid");
  else if (type == long_double_type_node)
    error ("use of %<long double%> in AltiVec types is invalid");
  else if (type == boolean_type_node)
    error ("use of boolean types in AltiVec types is invalid");
  else if (TREE_CODE (type) == COMPLEX_TYPE)
    error ("use of %<complex%> in AltiVec types is invalid");
  else if (DECIMAL_FLOAT_MODE_P (mode))
    error ("use of decimal floating point types in AltiVec types is invalid");

  switch (altivec_type)
    {
    case 'v':
      unsigned_p = TYPE_UNSIGNED (type);
      switch (mode)
        {
        case SImode:
          result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
          break;
        case HImode:
          result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
          break;
        case QImode:
          result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
          break;
        case SFmode: result = V4SF_type_node; break;
          /* If the user says 'vector int bool', we may be handed the 'bool'
             attribute _before_ the 'vector' attribute, and so select the
             proper type in the 'b' case below.  */
        case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
          result = type;
        default: break;
        }
      break;
    case 'b':
      switch (mode)
        {
        case SImode: case V4SImode: result = bool_V4SI_type_node; break;
        case HImode: case V8HImode: result = bool_V8HI_type_node; break;
        case QImode: case V16QImode: result = bool_V16QI_type_node;
        default: break;
        }
      break;
    case 'p':
      switch (mode)
        {
        case V8HImode: result = pixel_V8HI_type_node;
        default: break;
        }
    default: break;
    }

  /* Propagate qualifiers attached to the element type
     onto the vector type.  */
  if (result && result != type && TYPE_QUALS (type))
    result = build_qualified_type (result, TYPE_QUALS (type));

  *no_add_attrs = true;  /* No need to hang on to the attribute.  */

  if (result)
    *node = lang_hooks.types.reconstruct_complex_type (*node, result);

  return NULL_TREE;
}
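
/* Illustrative use: the AltiVec 'vector' keyword is implemented in
   terms of this attribute, so (modulo front-end plumbing)

        __attribute__((altivec(vector__))) unsigned int vui;

   gives VUI the unsigned_V4SI_type_node type selected in the 'v'/SImode
   case above, i.e. the type otherwise spelled 'vector unsigned int'.  */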
/* AltiVec defines four built-in scalar types that serve as vector
   elements; we must teach the compiler how to mangle them.  */

static const char *
rs6000_mangle_type (const_tree type)
{
  type = TYPE_MAIN_VARIANT (type);

  if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
      && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
    return NULL;

  if (type == bool_char_type_node) return "U6__boolc";
  if (type == bool_short_type_node) return "U6__bools";
  if (type == pixel_type_node) return "u7__pixel";
  if (type == bool_int_type_node) return "U6__booli";

  /* Mangle IBM extended float long double as `g' (__float128) on
     powerpc*-linux where long-double-64 previously was the default.  */
  if (TYPE_MAIN_VARIANT (type) == long_double_type_node
      && TARGET_ELF
      && TARGET_LONG_DOUBLE_128
      && !TARGET_IEEEQUAD)
    return "g";

  /* For all other types, use normal C++ mangling.  */
  return NULL;
}
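
/* For example (illustrative): the built-in 'bool int' element type
   used by 'vector bool int' contributes the "U6__booli" string above
   to a C++ mangled signature, while IBM extended long double mangles
   as "g", the same as __float128.  */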
/* Handle a "longcall" or "shortcall" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
rs6000_handle_longcall_attribute (tree *node, tree name,
                                  tree args ATTRIBUTE_UNUSED,
                                  int flags ATTRIBUTE_UNUSED,
                                  bool *no_add_attrs)
{
  if (TREE_CODE (*node) != FUNCTION_TYPE
      && TREE_CODE (*node) != FIELD_DECL
      && TREE_CODE (*node) != TYPE_DECL)
    {
      warning (OPT_Wattributes, "%qE attribute only applies to functions",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* Set longcall attributes on all functions declared when
   rs6000_default_long_calls is true.  */
static void
rs6000_set_default_type_attributes (tree type)
{
  if (rs6000_default_long_calls
      && (TREE_CODE (type) == FUNCTION_TYPE
          || TREE_CODE (type) == METHOD_TYPE))
    TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
                                        NULL_TREE,
                                        TYPE_ATTRIBUTES (type));

#if TARGET_MACHO
  darwin_set_default_type_attributes (type);
#endif
}
/* Return a reference suitable for calling a function with the
   longcall attribute.  */

rtx
rs6000_longcall_ref (rtx call_ref)
{
  const char *call_name;
  tree node;

  if (GET_CODE (call_ref) != SYMBOL_REF)
    return call_ref;

  /* System V adds '.' to the internal name, so skip them.  */
  call_name = XSTR (call_ref, 0);
  if (*call_name == '.')
    {
      while (*call_name == '.')
        call_name++;

      node = get_identifier (call_name);
      call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
    }

  return force_reg (Pmode, call_ref);
}
#ifndef TARGET_USE_MS_BITFIELD_LAYOUT
#define TARGET_USE_MS_BITFIELD_LAYOUT 0
#endif

/* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
   struct attribute_spec.handler.  */
static tree
rs6000_handle_struct_attribute (tree *node, tree name,
                                tree args ATTRIBUTE_UNUSED,
                                int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
{
  tree *type = NULL;
  if (DECL_P (*node))
    {
      if (TREE_CODE (*node) == TYPE_DECL)
        type = &TREE_TYPE (*node);
    }
  else
    type = node;

  if (!(type && (TREE_CODE (*type) == RECORD_TYPE
                 || TREE_CODE (*type) == UNION_TYPE)))
    {
      warning (OPT_Wattributes, "%qE attribute ignored", name);
      *no_add_attrs = true;
    }

  else if ((is_attribute_p ("ms_struct", name)
            && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
           || ((is_attribute_p ("gcc_struct", name)
                && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
    {
      warning (OPT_Wattributes, "%qE incompatible attribute ignored",
               name);
      *no_add_attrs = true;
    }

  return NULL_TREE;
}

static bool
rs6000_ms_bitfield_layout_p (const_tree record_type)
{
  return (TARGET_USE_MS_BITFIELD_LAYOUT &&
          !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
    || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
}
#ifdef USING_ELFOS_H

/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if (DEFAULT_ABI == ABI_AIX
      && TARGET_MINIMAL_TOC
      && !TARGET_RELOCATABLE)
    {
      if (!toc_initialized)
        {
          toc_initialized = 1;
          fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
          (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
          fprintf (asm_out_file, "\t.tc ");
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
          fprintf (asm_out_file, "\n");

          fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
          fprintf (asm_out_file, " = .+32768\n");
        }
      else
        fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
    }
  else if (DEFAULT_ABI == ABI_AIX && !TARGET_RELOCATABLE)
    fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
  else
    {
      fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
      if (!toc_initialized)
        {
          ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
          fprintf (asm_out_file, " = .+32768\n");
          toc_initialized = 1;
        }
    }
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_elf_asm_init_sections (void)
{
  toc_section
    = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);

  sdata2_section
    = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
                           SDATA2_SECTION_ASM_OP);
}

/* Implement TARGET_SELECT_RTX_SECTION.  */

static section *
rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
                               unsigned HOST_WIDE_INT align)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return default_elf_select_rtx_section (mode, x, align);
}
/* For a SYMBOL_REF, set generic flags and then perform some
   target-specific processing.

   When the AIX ABI is requested on a non-AIX system, replace the
   function name with the real name (with a leading .) rather than the
   function descriptor name.  This saves a lot of overriding code to
   read the prefixes.  */

static void
rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
{
  default_encode_section_info (decl, rtl, first);

  if (first
      && TREE_CODE (decl) == FUNCTION_DECL
      && !TARGET_AIX
      && DEFAULT_ABI == ABI_AIX)
    {
      rtx sym_ref = XEXP (rtl, 0);
      size_t len = strlen (XSTR (sym_ref, 0));
      char *str = XALLOCAVEC (char, len + 2);
      str[0] = '.';
      memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
      XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
    }
}
static inline bool
compare_section_name (const char *section, const char *templ)
{
  int len;

  len = strlen (templ);
  return (strncmp (section, templ, len) == 0
          && (section[len] == 0 || section[len] == '.'));
}
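
/* For example, compare_section_name (".sdata.foo", ".sdata") and
   compare_section_name (".sdata", ".sdata") are both true, while
   compare_section_name (".sdata2", ".sdata") is false because the
   character after the matched prefix is '2', not '.' or NUL.  */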
bool
rs6000_elf_in_small_data_p (const_tree decl)
{
  if (rs6000_sdata == SDATA_NONE)
    return false;

  /* We want to merge strings, so we never consider them small data.  */
  if (TREE_CODE (decl) == STRING_CST)
    return false;

  /* Functions are never in the small data area.  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    return false;

  if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
    {
      const char *section = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
      if (compare_section_name (section, ".sdata")
          || compare_section_name (section, ".sdata2")
          || compare_section_name (section, ".gnu.linkonce.s")
          || compare_section_name (section, ".sbss")
          || compare_section_name (section, ".sbss2")
          || compare_section_name (section, ".gnu.linkonce.sb")
          || strcmp (section, ".PPC.EMB.sdata0") == 0
          || strcmp (section, ".PPC.EMB.sbss0") == 0)
        return true;
    }
  else
    {
      HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));

      if (size > 0
          && (unsigned HOST_WIDE_INT) size <= g_switch_value
          /* If it's not public, and we're not going to reference it there,
             there's no need to put it in the small data section.  */
          && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
        return true;
    }

  return false;
}

#endif /* USING_ELFOS_H */
/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  */

static bool
rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
{
  return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
}
/* Return a REG that occurs in ADDR with coefficient 1.
   ADDR can be effectively incremented by incrementing REG.

   r0 is special and we must not select it as an address
   register by this routine since our caller will try to
   increment the returned register via an "la" instruction.  */

rtx
find_addr_reg (rtx addr)
{
  while (GET_CODE (addr) == PLUS)
    {
      if (GET_CODE (XEXP (addr, 0)) == REG
          && REGNO (XEXP (addr, 0)) != 0)
        addr = XEXP (addr, 0);
      else if (GET_CODE (XEXP (addr, 1)) == REG
               && REGNO (XEXP (addr, 1)) != 0)
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 0)))
        addr = XEXP (addr, 1);
      else if (CONSTANT_P (XEXP (addr, 1)))
        addr = XEXP (addr, 0);
      else
        gcc_unreachable ();
    }
  gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
  return addr;
}

void
rs6000_fatal_bad_address (rtx op)
{
  fatal_insn ("bad address", op);
}
#if TARGET_MACHO

static tree branch_island_list = 0;

/* Remember to generate a branch island for far calls to the given
   function.  */

static void
add_compiler_branch_island (tree label_name, tree function_name,
                            int line_number)
{
  tree branch_island = build_tree_list (function_name, label_name);
  TREE_TYPE (branch_island) = build_int_cst (NULL_TREE, line_number);
  TREE_CHAIN (branch_island) = branch_island_list;
  branch_island_list = branch_island;
}

#define BRANCH_ISLAND_LABEL_NAME(BRANCH_ISLAND)     TREE_VALUE (BRANCH_ISLAND)
#define BRANCH_ISLAND_FUNCTION_NAME(BRANCH_ISLAND)  TREE_PURPOSE (BRANCH_ISLAND)
#define BRANCH_ISLAND_LINE_NUMBER(BRANCH_ISLAND)    \
                TREE_INT_CST_LOW (TREE_TYPE (BRANCH_ISLAND))
/* Generate far-jump branch islands for everything on the
   branch_island_list.  Invoked immediately after the last instruction
   of the epilogue has been emitted; the branch-islands must be
   appended to, and contiguous with, the function body.  Mach-O stubs
   are generated in machopic_output_stub().  */

static void
macho_branch_islands (void)
{
  char tmp_buf[512];
  tree branch_island;

  for (branch_island = branch_island_list;
       branch_island;
       branch_island = TREE_CHAIN (branch_island))
    {
      const char *label =
	IDENTIFIER_POINTER (BRANCH_ISLAND_LABEL_NAME (branch_island));
      const char *name =
	IDENTIFIER_POINTER (BRANCH_ISLAND_FUNCTION_NAME (branch_island));
      char name_buf[512];
      /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF().  */
      if (name[0] == '*' || name[0] == '&')
	strcpy (name_buf, name+1);
      else
	{
	  name_buf[0] = '_';
	  strcpy (name_buf+1, name);
	}
      strcpy (tmp_buf, "\n");
      strcat (tmp_buf, label);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
	dbxout_stabd (N_SLINE, BRANCH_ISLAND_LINE_NUMBER (branch_island));
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
      if (flag_pic)
	{
	  strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
	  strcat (tmp_buf, label);
	  strcat (tmp_buf, "_pic\n");
	  strcat (tmp_buf, label);
	  strcat (tmp_buf, "_pic:\n\tmflr r11\n");

	  strcat (tmp_buf, "\taddis r11,r11,ha16(");
	  strcat (tmp_buf, name_buf);
	  strcat (tmp_buf, " - ");
	  strcat (tmp_buf, label);
	  strcat (tmp_buf, "_pic)\n");

	  strcat (tmp_buf, "\tmtlr r0\n");

	  strcat (tmp_buf, "\taddi r12,r11,lo16(");
	  strcat (tmp_buf, name_buf);
	  strcat (tmp_buf, " - ");
	  strcat (tmp_buf, label);
	  strcat (tmp_buf, "_pic)\n");

	  strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
	}
      else
	{
	  strcat (tmp_buf, ":\nlis r12,hi16(");
	  strcat (tmp_buf, name_buf);
	  strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
	  strcat (tmp_buf, name_buf);
	  strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
	}
      output_asm_insn (tmp_buf, 0);
#if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
      if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
	dbxout_stabd (N_SLINE, BRANCH_ISLAND_LINE_NUMBER (branch_island));
#endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
    }

  branch_island_list = 0;
}
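/* For illustration, the non-PIC strcat sequence above builds an
   island that comes out roughly as (label and function name are
   examples only):

	L42: lis r12,hi16(_foo)
	     ori r12,r12,lo16(_foo)
	     mtctr r12
	     bctr

   i.e. the full 32-bit address of the callee is materialized in r12
   and reached via the count register, so the island itself has
   unlimited range.  */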
/* NO_PREVIOUS_DEF checks in the link list whether the function name is
   already there or not.  */

static bool
no_previous_def (tree function_name)
{
  tree branch_island;
  for (branch_island = branch_island_list;
       branch_island;
       branch_island = TREE_CHAIN (branch_island))
    if (function_name == BRANCH_ISLAND_FUNCTION_NAME (branch_island))
      return false;
  return true;
}

/* GET_PREV_LABEL gets the label name from the previous definition of
   the function.  */

static tree
get_prev_label (tree function_name)
{
  tree branch_island;
  for (branch_island = branch_island_list;
       branch_island;
       branch_island = TREE_CHAIN (branch_island))
    if (function_name == BRANCH_ISLAND_FUNCTION_NAME (branch_island))
      return BRANCH_ISLAND_LABEL_NAME (branch_island);
  return 0;
}
#ifndef DARWIN_LINKER_GENERATES_ISLANDS
#define DARWIN_LINKER_GENERATES_ISLANDS 0
#endif

/* KEXTs still need branch islands.  */
#define DARWIN_GENERATE_ISLANDS (!DARWIN_LINKER_GENERATES_ISLANDS \
				 || flag_mkernel || flag_apple_kext)
/* INSN is either a function call or a millicode call.  It may have an
   unconditional jump in its delay slot.

   CALL_DEST is the routine we are calling.  */

const char *
output_call (rtx insn, rtx *operands, int dest_operand_number,
	     int cookie_operand_number)
{
  static char buf[256];
  if (DARWIN_GENERATE_ISLANDS
      && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
      && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
    {
      tree labelname;
      tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));

      if (no_previous_def (funname))
	{
	  rtx label_rtx = gen_label_rtx ();
	  char *label_buf, temp_buf[256];
	  ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
				       CODE_LABEL_NUMBER (label_rtx));
	  label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
	  labelname = get_identifier (label_buf);
	  add_compiler_branch_island (labelname, funname, insn_line (insn));
	}
      else
	labelname = get_prev_label (funname);

      /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
	 instruction will reach 'foo', otherwise link as 'bl L42'".
	 "L42" should be a 'branch island', that will do a far jump to
	 'foo'.  Branch islands are generated in
	 macho_branch_islands().  */
      sprintf (buf, "jbsr %%z%d,%.246s",
	       dest_operand_number, IDENTIFIER_POINTER (labelname));
    }
  else
    sprintf (buf, "bl %%z%d", dest_operand_number);
  return buf;
}
/* Generate PIC and indirect symbol stubs.  */

void
machopic_output_stub (FILE *file, const char *symb, const char *stub)
{
  unsigned int length;
  char *symbol_name, *lazy_ptr_name;
  char *local_label_0;
  static int label = 0;

  /* Lose our funky encoding stuff so it doesn't contaminate the stub.  */
  symb = (*targetm.strip_name_encoding) (symb);

  length = strlen (symb);

  symbol_name = XALLOCAVEC (char, length + 32);
  GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);

  lazy_ptr_name = XALLOCAVEC (char, length + 32);
  GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);

  if (flag_pic == 2)
    switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
  else
    switch_to_section (darwin_sections[machopic_symbol_stub1_section]);

  if (flag_pic == 2)
    {
      fprintf (file, "\t.align 5\n");

      fprintf (file, "%s:\n", stub);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

      label++;
      local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
      sprintf (local_label_0, "\"L%011d$spb\"", label);

      fprintf (file, "\tmflr r0\n");
      fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
      fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
      fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
	       lazy_ptr_name, local_label_0);
      fprintf (file, "\tmtlr r0\n");
      fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
	       (TARGET_64BIT ? "ldu" : "lwzu"),
	       lazy_ptr_name, local_label_0);
      fprintf (file, "\tmtctr r12\n");
      fprintf (file, "\tbctr\n");
    }
  else
    {
      fprintf (file, "\t.align 4\n");

      fprintf (file, "%s:\n", stub);
      fprintf (file, "\t.indirect_symbol %s\n", symbol_name);

      fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
      fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
	       (TARGET_64BIT ? "ldu" : "lwzu"),
	       lazy_ptr_name);
      fprintf (file, "\tmtctr r12\n");
      fprintf (file, "\tbctr\n");
    }

  switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
  fprintf (file, "%s:\n", lazy_ptr_name);
  fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
  fprintf (file, "%sdyld_stub_binding_helper\n",
	   (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
}
/* Legitimize PIC addresses.  If the address is already
   position-independent, we return ORIG.  Newly generated
   position-independent addresses go into a reg.  This is REG if non
   zero, otherwise we allocate register(s) as necessary.  */

#define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
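/* SMALL_INT accepts exactly the signed 16-bit range: for X = 0x7fff,
   0x7fff + 0x8000 == 0xffff < 0x10000; for X = 0x8000 the sum is
   exactly 0x10000 and the test fails; for X = -0x8000 the unsigned
   sum wraps to 0 and the test passes.  */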
rtx
rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
					rtx reg)
{
  rtx base, offset;

  if (reg == NULL && ! reload_in_progress && ! reload_completed)
    reg = gen_reg_rtx (Pmode);

  if (GET_CODE (orig) == CONST)
    {
      rtx reg_temp;

      if (GET_CODE (XEXP (orig, 0)) == PLUS
	  && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
	return orig;

      gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);

      /* Use a different reg for the intermediate value, as
	 it will be marked UNCHANGING.  */
      reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
      base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
						     Pmode, reg_temp);
      offset =
	rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
						Pmode, reg_temp);

      if (GET_CODE (offset) == CONST_INT)
	{
	  if (SMALL_INT (offset))
	    return plus_constant (base, INTVAL (offset));
	  else if (! reload_in_progress && ! reload_completed)
	    offset = force_reg (Pmode, offset);
	  else
	    {
	      rtx mem = force_const_mem (Pmode, orig);
	      return machopic_legitimize_pic_address (mem, Pmode, reg);
	    }
	}
      return gen_rtx_PLUS (Pmode, base, offset);
    }

  /* Fall back on generic machopic code.  */
  return machopic_legitimize_pic_address (orig, mode, reg);
}
/* Output a .machine directive for the Darwin assembler, and call
   the generic start_file routine.  */

static void
rs6000_darwin_file_start (void)
{
  static const struct
  {
    const char *arg;
    const char *name;
    int if_set;
  } mapping[] = {
    { "ppc64", "ppc64", MASK_64BIT },
    { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
    { "power4", "ppc970", 0 },
    { "G5", "ppc970", 0 },
    { "7450", "ppc7450", 0 },
    { "7400", "ppc7400", MASK_ALTIVEC },
    { "G4", "ppc7400", 0 },
    { "750", "ppc750", 0 },
    { "740", "ppc750", 0 },
    { "G3", "ppc750", 0 },
    { "604e", "ppc604e", 0 },
    { "604", "ppc604", 0 },
    { "603e", "ppc603", 0 },
    { "603", "ppc603", 0 },
    { "601", "ppc601", 0 },
    { NULL, "ppc", 0 } };
  const char *cpu_id = "";
  size_t i;

  rs6000_file_start ();
  darwin_file_start ();

  /* Determine the argument to -mcpu=.  Default to G3 if not specified.  */
  for (i = 0; i < ARRAY_SIZE (rs6000_select); i++)
    if (rs6000_select[i].set_arch_p && rs6000_select[i].string
	&& rs6000_select[i].string[0] != '\0')
      cpu_id = rs6000_select[i].string;

  /* Look through the mapping array.  Pick the first name that either
     matches the argument, has a bit set in IF_SET that is also set
     in the target flags, or has a NULL name.  */

  i = 0;
  while (mapping[i].arg != NULL
	 && strcmp (mapping[i].arg, cpu_id) != 0
	 && (mapping[i].if_set & target_flags) == 0)
    i++;

  fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
}
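/* For example, "-mcpu=G5" matches the "G5" entry above and emits
   "\t.machine ppc970".  With no -mcpu argument, cpu_id stays "" and
   the first entry whose IF_SET mask overlaps target_flags wins
   (e.g. MASK_ALTIVEC selecting the "7400" row), falling back to the
   NULL sentinel and plain "ppc" otherwise.  */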
#endif /* TARGET_MACHO */
#if TARGET_ELF
static int
rs6000_elf_reloc_rw_mask (void)
{
  if (flag_pic)
    return 3;
  else if (DEFAULT_ABI == ABI_AIX)
    return 1;
  else
    return 0;
}
/* Record an element in the table of global constructors.  SYMBOL is
   a SYMBOL_REF of the function to be called; PRIORITY is a number
   between 0 and MAX_INIT_PRIORITY.

   This differs from default_named_section_asm_out_constructor in
   that we have special handling for -mrelocatable.  */

static void
rs6000_elf_asm_out_constructor (rtx symbol, int priority)
{
  const char *section = ".ctors";
  char buf[16];

  if (priority != DEFAULT_INIT_PRIORITY)
    {
      sprintf (buf, ".ctors.%.5u",
	       /* Invert the numbering so the linker puts us in the proper
		  order; constructors are run from right to left, and the
		  linker sorts in increasing order.  */
	       MAX_INIT_PRIORITY - priority);
      section = buf;
    }

  switch_to_section (get_section (section, SECTION_WRITE, NULL));
  assemble_align (POINTER_SIZE);

  if (TARGET_RELOCATABLE)
    {
      fputs ("\t.long (", asm_out_file);
      output_addr_const (asm_out_file, symbol);
      fputs (")@fixup\n", asm_out_file);
    }
  else
    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}
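/* Worked example of the inversion: with MAX_INIT_PRIORITY == 65535,
   priority 65535 yields section ".ctors.00000" and priority 100
   yields ".ctors.65435"; the fixed-width %.5u keeps the lexicographic
   sort of section names identical to the numeric sort.  */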
static void
rs6000_elf_asm_out_destructor (rtx symbol, int priority)
{
  const char *section = ".dtors";
  char buf[16];

  if (priority != DEFAULT_INIT_PRIORITY)
    {
      sprintf (buf, ".dtors.%.5u",
	       /* Invert the numbering so the linker puts us in the proper
		  order; constructors are run from right to left, and the
		  linker sorts in increasing order.  */
	       MAX_INIT_PRIORITY - priority);
      section = buf;
    }

  switch_to_section (get_section (section, SECTION_WRITE, NULL));
  assemble_align (POINTER_SIZE);

  if (TARGET_RELOCATABLE)
    {
      fputs ("\t.long (", asm_out_file);
      output_addr_const (asm_out_file, symbol);
      fputs (")@fixup\n", asm_out_file);
    }
  else
    assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
}
void
rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
{
  if (TARGET_64BIT)
    {
      fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
      ASM_OUTPUT_LABEL (file, name);
      fputs (DOUBLE_INT_ASM_OP, file);
      rs6000_output_function_entry (file, name);
      fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
      if (DOT_SYMBOLS)
	{
	  fputs ("\t.size\t", file);
	  assemble_name (file, name);
	  fputs (",24\n\t.type\t.", file);
	  assemble_name (file, name);
	  fputs (",@function\n", file);
	  if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
	    {
	      fputs ("\t.globl\t.", file);
	      assemble_name (file, name);
	      putc ('\n', file);
	    }
	}
      else
	ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
      ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
      rs6000_output_function_entry (file, name);
      fputs (":\n", file);
      return;
    }

  if (TARGET_RELOCATABLE
      && !TARGET_SECURE_PLT
      && (get_pool_size () != 0 || crtl->profile)
      && uses_TOC ())
    {
      char buf[256];

      (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);

      ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
      fprintf (file, "\t.long ");
      assemble_name (file, buf);
      putc ('-', file);
      ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
      assemble_name (file, buf);
      putc ('\n', file);
    }

  ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
  ASM_DECLARE_RESULT (file, DECL_RESULT (decl));

  if (DEFAULT_ABI == ABI_AIX)
    {
      const char *desc_name, *orig_name;

      orig_name = (*targetm.strip_name_encoding) (name);
      desc_name = orig_name;
      while (*desc_name == '.')
	desc_name++;

      if (TREE_PUBLIC (decl))
	fprintf (file, "\t.globl %s\n", desc_name);

      fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
      fprintf (file, "%s:\n", desc_name);
      fprintf (file, "\t.long %s\n", orig_name);
      fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
      if (DEFAULT_ABI == ABI_AIX)
	fputs ("\t.long 0\n", file);
      fprintf (file, "\t.previous\n");
    }
  ASM_OUTPUT_LABEL (file, name);
}
static void
rs6000_elf_end_indicate_exec_stack (void)
{
  if (TARGET_32BIT)
    file_end_indicate_exec_stack ();
}
#endif
#if TARGET_XCOFF
static void
rs6000_xcoff_asm_output_anchor (rtx symbol)
{
  char buffer[100];

  sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
	   SYMBOL_REF_BLOCK_OFFSET (symbol));
  ASM_OUTPUT_DEF (asm_out_file, XSTR (symbol, 0), buffer);
}
static void
rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
{
  fputs (GLOBAL_ASM_OP, stream);
  RS6000_OUTPUT_BASENAME (stream, name);
  putc ('\n', stream);
}
/* A get_unnamed_decl callback, used for read-only sections.  PTR
   points to the section string variable.  */

static void
rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
	   *(const char *const *) directive,
	   XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

/* Likewise for read-write sections.  */

static void
rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
	   *(const char *const *) directive,
	   XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}
/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if (TARGET_MINIMAL_TOC)
    {
      /* toc_section is always selected at least once from
	 rs6000_xcoff_file_start, so this is guaranteed to
	 always be defined once and only once in each file.  */
      if (!toc_initialized)
	{
	  fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
	  fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
	  toc_initialized = 1;
	}
      fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
	       (TARGET_32BIT ? "" : ",3"));
    }
  else
    fputs ("\t.toc\n", asm_out_file);
}
/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_xcoff_asm_init_sections (void)
{
  read_only_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
			   &xcoff_read_only_section_name);

  private_data_section
    = get_unnamed_section (SECTION_WRITE,
			   rs6000_xcoff_output_readwrite_section_asm_op,
			   &xcoff_private_data_section_name);

  read_only_private_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
			   &xcoff_private_data_section_name);

  toc_section
    = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);

  readonly_data_section = read_only_data_section;
  exception_section = data_section;
}
static int
rs6000_xcoff_reloc_rw_mask (void)
{
  return 3;
}
static void
rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
				tree decl ATTRIBUTE_UNUSED)
{
  int smclass;
  static const char * const suffix[3] = { "PR", "RO", "RW" };

  if (flags & SECTION_CODE)
    smclass = 0;
  else if (flags & SECTION_WRITE)
    smclass = 2;
  else
    smclass = 1;

  fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
	   (flags & SECTION_CODE) ? "." : "",
	   name, suffix[smclass], flags & SECTION_ENTSIZE);
}
static section *
rs6000_xcoff_select_section (tree decl, int reloc,
			     unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (decl_readonly_section (decl, reloc))
    {
      if (TREE_PUBLIC (decl))
	return read_only_data_section;
      else
	return read_only_private_data_section;
    }
  else
    {
      if (TREE_PUBLIC (decl))
	return data_section;
      else
	return private_data_section;
    }
}
static void
rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;

  /* Use select_section for private and uninitialized data.  */
  if (!TREE_PUBLIC (decl)
      || DECL_COMMON (decl)
      || DECL_INITIAL (decl) == NULL_TREE
      || DECL_INITIAL (decl) == error_mark_node
      || (flag_zero_initialized_in_bss
	  && initializer_zerop (DECL_INITIAL (decl))))
    return;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = (*targetm.strip_name_encoding) (name);
  DECL_SECTION_NAME (decl) = build_string (strlen (name), name);
}
/* Select section for constant in constant pool.

   On RS/6000, all constants are in the private read-only data area.
   However, if this is being placed in the TOC it must be output as a
   toc entry.  */

static section *
rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
				 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return read_only_private_data_section;
}
/* Remove any trailing [DS] or the like from the symbol name.  */

static const char *
rs6000_xcoff_strip_name_encoding (const char *name)
{
  size_t len;
  if (*name == '*')
    name++;
  len = strlen (name);
  if (name[len - 1] == ']')
    return ggc_alloc_string (name, len - 4);
  else
    return name;
}
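/* E.g. "foo[DS]" (length 7) yields "foo": the len - 4 above drops
   the four trailing characters of the "[DS]" mapping-class suffix.  */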
/* Section attributes.  AIX is always PIC.  */

static unsigned int
rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int align;
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  /* Align to at least UNIT size.  */
  if (flags & SECTION_CODE)
    align = MIN_UNITS_PER_WORD;
  else
    /* Increase alignment of large objects if not already stricter.  */
    align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
		 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
		 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);

  return flags | (exact_log2 (align) & SECTION_ENTSIZE);
}
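/* E.g. a non-code decl aligned to 64 bits gives align == 8 bytes,
   and exact_log2 (8) == 3 is what lands in the SECTION_ENTSIZE bits;
   rs6000_xcoff_asm_named_section later prints that value as the
   ",3" alignment operand of the .csect directive.  */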
/* Output at beginning of assembler file.

   Initialize the section names for the RS/6000 at this point.

   Specify filename, including full path, to assembler.

   We want to go into the TOC section so at least one .toc will be emitted.
   Also, in order to output proper .bs/.es pairs, we need at least one static
   [RW] section emitted.

   Finally, declare mcount when profiling to make the assembler happy.  */
static void
rs6000_xcoff_file_start (void)
{
  rs6000_gen_section_name (&xcoff_bss_section_name,
			   main_input_filename, ".bss_");
  rs6000_gen_section_name (&xcoff_private_data_section_name,
			   main_input_filename, ".rw_");
  rs6000_gen_section_name (&xcoff_read_only_section_name,
			   main_input_filename, ".ro_");

  fputs ("\t.file\t", asm_out_file);
  output_quoted_string (asm_out_file, main_input_filename);
  fputc ('\n', asm_out_file);
  if (write_symbols != NO_DEBUG)
    switch_to_section (private_data_section);
  switch_to_section (text_section);
  if (profile_flag)
    fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
  rs6000_file_start ();
}
/* Output at end of assembler file.
   On the RS/6000, referencing data should automatically pull in text.  */

static void
rs6000_xcoff_file_end (void)
{
  switch_to_section (text_section);
  fputs ("_section_.text:\n", asm_out_file);
  switch_to_section (data_section);
  fputs (TARGET_32BIT
	 ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
	 asm_out_file);
}
#endif /* TARGET_XCOFF */
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
rs6000_rtx_costs (rtx x, int code, int outer_code, int *total,
		  bool speed)
{
  enum machine_mode mode = GET_MODE (x);

  switch (code)
    {
      /* On the RS/6000, if it is valid in the insn, it is free.  */
    case CONST_INT:
      if (((outer_code == SET
	    || outer_code == PLUS
	    || outer_code == MINUS)
	   && (satisfies_constraint_I (x)
	       || satisfies_constraint_L (x)))
	  || (outer_code == AND
	      && (satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))
		  || mask_operand (x, mode)
		  || (mode == DImode
		      && mask64_operand (x, DImode))))
	  || ((outer_code == IOR || outer_code == XOR)
	      && (satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))))
	  || outer_code == ASHIFT
	  || outer_code == ASHIFTRT
	  || outer_code == LSHIFTRT
	  || outer_code == ROTATE
	  || outer_code == ROTATERT
	  || outer_code == ZERO_EXTRACT
	  || (outer_code == MULT
	      && satisfies_constraint_I (x))
	  || ((outer_code == DIV || outer_code == UDIV
	       || outer_code == MOD || outer_code == UMOD)
	      && exact_log2 (INTVAL (x)) >= 0)
	  || (outer_code == COMPARE
	      && (satisfies_constraint_I (x)
		  || satisfies_constraint_K (x)))
	  || (outer_code == EQ
	      && (satisfies_constraint_I (x)
		  || satisfies_constraint_K (x)
		  || (mode == SImode
		      ? satisfies_constraint_L (x)
		      : satisfies_constraint_J (x))))
	  || (outer_code == GTU
	      && satisfies_constraint_I (x))
	  || (outer_code == LTU
	      && satisfies_constraint_P (x)))
	{
	  *total = 0;
	  return true;
	}
      else if ((outer_code == PLUS
		&& reg_or_add_cint_operand (x, VOIDmode))
	       || (outer_code == MINUS
		   && reg_or_sub_cint_operand (x, VOIDmode))
	       || ((outer_code == SET
		    || outer_code == IOR
		    || outer_code == XOR)
		   && (INTVAL (x)
		       & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      /* FALLTHRU */

    case CONST_DOUBLE:
      if (mode == DImode && code == CONST_DOUBLE)
	{
	  if ((outer_code == IOR || outer_code == XOR)
	      && CONST_DOUBLE_HIGH (x) == 0
	      && (CONST_DOUBLE_LOW (x)
		  & ~ (unsigned HOST_WIDE_INT) 0xffff) == 0)
	    {
	      *total = 0;
	      return true;
	    }
	  else if ((outer_code == AND && and64_2_operand (x, DImode))
		   || ((outer_code == SET
			|| outer_code == IOR
			|| outer_code == XOR)
		       && CONST_DOUBLE_HIGH (x) == 0))
	    {
	      *total = COSTS_N_INSNS (1);
	      return true;
	    }
	}
      /* FALLTHRU */

    case CONST:
    case HIGH:
    case SYMBOL_REF:
    case MEM:
      /* When optimizing for size, MEM should be slightly more expensive
	 than generating address, e.g., (plus (reg) (const)).
	 L1 cache latency is about two instructions.  */
      *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
      return true;

    case PLUS:
      if (mode == DFmode)
	{
	  if (GET_CODE (XEXP (x, 0)) == MULT)
	    {
	      /* FNMA accounted in outer NEG.  */
	      if (outer_code == NEG)
		*total = rs6000_cost->dmul - rs6000_cost->fp;
	      else
		*total = rs6000_cost->dmul;
	    }
	  else
	    *total = rs6000_cost->fp;
	}
      else if (mode == SFmode)
	{
	  /* FNMA accounted in outer NEG.  */
	  if (outer_code == NEG && GET_CODE (XEXP (x, 0)) == MULT)
	    *total = 0;
	  else
	    *total = rs6000_cost->fp;
	}
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case MINUS:
      if (mode == DFmode)
	{
	  if (GET_CODE (XEXP (x, 0)) == MULT
	      || GET_CODE (XEXP (x, 1)) == MULT)
	    {
	      /* FNMA accounted in outer NEG.  */
	      if (outer_code == NEG)
		*total = rs6000_cost->dmul - rs6000_cost->fp;
	      else
		*total = rs6000_cost->dmul;
	    }
	  else
	    *total = rs6000_cost->fp;
	}
      else if (mode == SFmode)
	{
	  /* FNMA accounted in outer NEG.  */
	  if (outer_code == NEG && GET_CODE (XEXP (x, 0)) == MULT)
	    *total = 0;
	  else
	    *total = rs6000_cost->fp;
	}
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && satisfies_constraint_I (XEXP (x, 1)))
	{
	  if (INTVAL (XEXP (x, 1)) >= -256
	      && INTVAL (XEXP (x, 1)) <= 255)
	    *total = rs6000_cost->mulsi_const9;
	  else
	    *total = rs6000_cost->mulsi_const;
	}
      /* FMA accounted in outer PLUS/MINUS.  */
      else if ((mode == DFmode || mode == SFmode)
	       && (outer_code == PLUS || outer_code == MINUS))
	*total = 0;
      else if (mode == DFmode)
	*total = rs6000_cost->dmul;
      else if (mode == SFmode)
	*total = rs6000_cost->fp;
      else if (mode == DImode)
	*total = rs6000_cost->muldi;
      else
	*total = rs6000_cost->mulsi;
      return false;

    case DIV:
    case MOD:
      if (FLOAT_MODE_P (mode))
	{
	  *total = mode == DFmode ? rs6000_cost->ddiv
				  : rs6000_cost->sdiv;
	  return false;
	}
      /* FALLTHRU */

    case UDIV:
    case UMOD:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
	  && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
	{
	  if (code == DIV || code == MOD)
	    /* Shift, addze */
	    *total = COSTS_N_INSNS (2);
	  else
	    /* Shift */
	    *total = COSTS_N_INSNS (1);
	}
      else
	{
	  if (GET_MODE (XEXP (x, 1)) == DImode)
	    *total = rs6000_cost->divdi;
	  else
	    *total = rs6000_cost->divsi;
	}
      /* Add in shift and subtract for MOD. */
      if (code == MOD || code == UMOD)
	*total += COSTS_N_INSNS (2);
      return false;

    case CTZ:
    case FFS:
      *total = COSTS_N_INSNS (4);
      return false;

    case POPCOUNT:
      *total = COSTS_N_INSNS (6);
      return false;

    case NOT:
      if (outer_code == AND || outer_code == IOR || outer_code == XOR)
	{
	  *total = 0;
	  return false;
	}
      /* FALLTHRU */

    case AND:
    case CLZ:
    case IOR:
    case XOR:
    case ZERO_EXTRACT:
    case SIGN_EXTRACT:
      *total = COSTS_N_INSNS (1);
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* Handle mul_highpart.  */
      if (outer_code == TRUNCATE
	  && GET_CODE (XEXP (x, 0)) == MULT)
	{
	  if (mode == DImode)
	    *total = rs6000_cost->muldi;
	  else
	    *total = rs6000_cost->mulsi;
	  return true;
	}
      else if (outer_code == AND)
	*total = 0;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (GET_CODE (XEXP (x, 0)) == MEM)
	*total = 0;
      else
	*total = COSTS_N_INSNS (1);
      return false;

    case COMPARE:
    case NEG:
    case ABS:
      if (!FLOAT_MODE_P (mode))
	{
	  *total = COSTS_N_INSNS (1);
	  return false;
	}
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = rs6000_cost->fp;
      return false;

    case FLOAT_EXTEND:
      if (mode == DFmode)
	*total = 0;
      else
	*total = rs6000_cost->fp;
      return false;

    case UNSPEC:
      switch (XINT (x, 1))
	{
	case UNSPEC_FRSP:
	  *total = rs6000_cost->fp;
	  return true;

	default:
	  break;
	}
      break;

    case CALL:
    case IF_THEN_ELSE:
      if (!speed)
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      else if (FLOAT_MODE_P (mode)
	       && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
	{
	  *total = rs6000_cost->fp;
	  return false;
	}
      break;

    case EQ:
    case GTU:
    case LTU:
      /* Carry bit requires mode == Pmode.
	 NEG or PLUS already counted so only add one.  */
      if (mode == Pmode
	  && (outer_code == NEG || outer_code == PLUS))
	{
	  *total = COSTS_N_INSNS (1);
	  return true;
	}
      if (outer_code == SET)
	{
	  if (XEXP (x, 1) == const0_rtx)
	    {
	      *total = COSTS_N_INSNS (2);
	      return true;
	    }
	  else if (mode == Pmode)
	    {
	      *total = COSTS_N_INSNS (3);
	      return false;
	    }
	}
      /* FALLTHRU */

    case GT:
    case LT:
    case UNORDERED:
      if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
	{
	  *total = COSTS_N_INSNS (2);
	  return true;
	}
      /* CC COMPARE.  */
      if (outer_code == COMPARE)
	{
	  *total = 0;
	  return true;
	}
      break;

    default:
      break;
    }

  return false;
}
/* A C expression returning the cost of moving data from a register of class
   CLASS1 to one of CLASS2.  */

int
rs6000_register_move_cost (enum machine_mode mode,
			   enum reg_class from, enum reg_class to)
{
  /* Moves from/to GENERAL_REGS.  */
  if (reg_classes_intersect_p (to, GENERAL_REGS)
      || reg_classes_intersect_p (from, GENERAL_REGS))
    {
      if (! reg_classes_intersect_p (to, GENERAL_REGS))
	from = to;

      if (from == FLOAT_REGS || from == ALTIVEC_REGS)
	return (rs6000_memory_move_cost (mode, from, 0)
		+ rs6000_memory_move_cost (mode, GENERAL_REGS, 0));

      /* It's more expensive to move CR_REGS than CR0_REGS because of the
	 shift.  */
      else if (from == CR_REGS)
	return 4;

      /* Power6 has slower LR/CTR moves so make them more expensive than
	 memory in order to bias spills to memory .*/
      else if (rs6000_cpu == PROCESSOR_POWER6
	       && reg_classes_intersect_p (from, LINK_OR_CTR_REGS))
	return 6 * hard_regno_nregs[0][mode];

      else
	/* A move will cost one instruction per GPR moved.  */
	return 2 * hard_regno_nregs[0][mode];
    }

  /* Moving between two similar registers is just one instruction.  */
  else if (reg_classes_intersect_p (to, from))
    return (mode == TFmode || mode == TDmode) ? 4 : 2;

  /* Everything else has to go through GENERAL_REGS.  */
  else
    return (rs6000_register_move_cost (mode, GENERAL_REGS, to)
	    + rs6000_register_move_cost (mode, from, GENERAL_REGS));
}
/* A C expressions returning the cost of moving data of MODE from a register
   to or from memory.  */

int
rs6000_memory_move_cost (enum machine_mode mode, enum reg_class rclass,
			 int in ATTRIBUTE_UNUSED)
{
  if (reg_classes_intersect_p (rclass, GENERAL_REGS))
    return 4 * hard_regno_nregs[0][mode];
  else if (reg_classes_intersect_p (rclass, FLOAT_REGS))
    return 4 * hard_regno_nregs[32][mode];
  else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
    return 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
  else
    return 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);
}
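/* Worked example (32-bit, hard float): a DFmode move between
   FLOAT_REGS and GENERAL_REGS costs
     rs6000_memory_move_cost (DFmode, FLOAT_REGS, 0)     == 4 * 1
   + rs6000_memory_move_cost (DFmode, GENERAL_REGS, 0)   == 4 * 2
   == 12, i.e. it is priced as a round trip through memory, since one
   8-byte FPR but two 4-byte GPRs are needed to hold the value.  */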
/* Returns a code for a target-specific builtin that implements
   reciprocal of the function, or NULL_TREE if not available.  */

static tree
rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
			   bool sqrt ATTRIBUTE_UNUSED)
{
  if (! (TARGET_RECIP && TARGET_PPC_GFXOPT && !optimize_size
	 && flag_finite_math_only && !flag_trapping_math
	 && flag_unsafe_math_optimizations))
    return NULL_TREE;

  if (md_fn)
    return NULL_TREE;
  else
    switch (fn)
      {
      case BUILT_IN_SQRTF:
	return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];

      default:
	return NULL_TREE;
      }
}
/* Newton-Raphson approximation of single-precision floating point divide n/d.
   Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swdivsf (rtx dst, rtx n, rtx d)
{
  rtx x0, e0, e1, y1, u0, v0, one;

  x0 = gen_reg_rtx (SFmode);
  e0 = gen_reg_rtx (SFmode);
  e1 = gen_reg_rtx (SFmode);
  y1 = gen_reg_rtx (SFmode);
  u0 = gen_reg_rtx (SFmode);
  v0 = gen_reg_rtx (SFmode);
  one = force_reg (SFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, SFmode));

  /* x0 = 1./d estimate */
  emit_insn (gen_rtx_SET (VOIDmode, x0,
			  gen_rtx_UNSPEC (SFmode, gen_rtvec (1, d),
					  UNSPEC_FRES)));
  /* e0 = 1. - d * x0 */
  emit_insn (gen_rtx_SET (VOIDmode, e0,
			  gen_rtx_MINUS (SFmode, one,
					 gen_rtx_MULT (SFmode, d, x0))));
  /* e1 = e0 + e0 * e0 */
  emit_insn (gen_rtx_SET (VOIDmode, e1,
			  gen_rtx_PLUS (SFmode,
					gen_rtx_MULT (SFmode, e0, e0), e0)));
  /* y1 = x0 + e1 * x0 */
  emit_insn (gen_rtx_SET (VOIDmode, y1,
			  gen_rtx_PLUS (SFmode,
					gen_rtx_MULT (SFmode, e1, x0), x0)));
  /* u0 = n * y1 */
  emit_insn (gen_rtx_SET (VOIDmode, u0,
			  gen_rtx_MULT (SFmode, n, y1)));
  /* v0 = n - d * u0 */
  emit_insn (gen_rtx_SET (VOIDmode, v0,
			  gen_rtx_MINUS (SFmode, n,
					 gen_rtx_MULT (SFmode, d, u0))));
  /* dst = u0 + v0 * y1 */
  emit_insn (gen_rtx_SET (VOIDmode, dst,
			  gen_rtx_PLUS (SFmode,
					gen_rtx_MULT (SFmode, v0, y1), u0)));
}
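/* Error analysis of the sequence above: if x0 = (1 - e)/d for some
   small e, then e0 == e, e1 == e + e*e, and
   y1 == x0 * (1 + e + e*e) == (1 - e*e*e)/d, so an fres estimate
   accurate to roughly one part in 256 (2**-8) is refined to roughly
   2**-24, i.e. full single precision; the final u0/v0 step then
   recovers the correctly rounded low bits of n/d.  */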
/* Newton-Raphson approximation of double-precision floating point divide n/d.
   Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swdivdf (rtx dst, rtx n, rtx d)
{
  rtx x0, e0, e1, e2, y1, y2, y3, u0, v0, one;

  x0 = gen_reg_rtx (DFmode);
  e0 = gen_reg_rtx (DFmode);
  e1 = gen_reg_rtx (DFmode);
  e2 = gen_reg_rtx (DFmode);
  y1 = gen_reg_rtx (DFmode);
  y2 = gen_reg_rtx (DFmode);
  y3 = gen_reg_rtx (DFmode);
  u0 = gen_reg_rtx (DFmode);
  v0 = gen_reg_rtx (DFmode);
  one = force_reg (DFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, DFmode));

  /* x0 = 1./d estimate */
  emit_insn (gen_rtx_SET (VOIDmode, x0,
			  gen_rtx_UNSPEC (DFmode, gen_rtvec (1, d),
					  UNSPEC_FRES)));
  /* e0 = 1. - d * x0 */
  emit_insn (gen_rtx_SET (VOIDmode, e0,
			  gen_rtx_MINUS (DFmode, one,
					 gen_rtx_MULT (DFmode, d, x0))));
  /* y1 = x0 + e0 * x0 */
  emit_insn (gen_rtx_SET (VOIDmode, y1,
			  gen_rtx_PLUS (DFmode,
					gen_rtx_MULT (DFmode, e0, x0), x0)));
  /* e1 = e0 * e0 */
  emit_insn (gen_rtx_SET (VOIDmode, e1,
			  gen_rtx_MULT (DFmode, e0, e0)));
  /* y2 = y1 + e1 * y1 */
  emit_insn (gen_rtx_SET (VOIDmode, y2,
			  gen_rtx_PLUS (DFmode,
					gen_rtx_MULT (DFmode, e1, y1), y1)));
  /* e2 = e1 * e1 */
  emit_insn (gen_rtx_SET (VOIDmode, e2,
			  gen_rtx_MULT (DFmode, e1, e1)));
  /* y3 = y2 + e2 * y2 */
  emit_insn (gen_rtx_SET (VOIDmode, y3,
			  gen_rtx_PLUS (DFmode,
					gen_rtx_MULT (DFmode, e2, y2), y2)));
  /* u0 = n * y3 */
  emit_insn (gen_rtx_SET (VOIDmode, u0,
			  gen_rtx_MULT (DFmode, n, y3)));
  /* v0 = n - d * u0 */
  emit_insn (gen_rtx_SET (VOIDmode, v0,
			  gen_rtx_MINUS (DFmode, n,
					 gen_rtx_MULT (DFmode, d, u0))));
  /* dst = u0 + v0 * y3 */
  emit_insn (gen_rtx_SET (VOIDmode, dst,
			  gen_rtx_PLUS (DFmode,
					gen_rtx_MULT (DFmode, v0, y3), u0)));
}
/* Newton-Raphson approximation of single-precision floating point rsqrt.
   Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swrsqrtsf (rtx dst, rtx src)
{
  rtx x0, x1, x2, y1, u0, u1, u2, v0, v1, v2, t0,
    half, one, halfthree, c1, cond, label;

  x0 = gen_reg_rtx (SFmode);
  x1 = gen_reg_rtx (SFmode);
  x2 = gen_reg_rtx (SFmode);
  y1 = gen_reg_rtx (SFmode);
  u0 = gen_reg_rtx (SFmode);
  u1 = gen_reg_rtx (SFmode);
  u2 = gen_reg_rtx (SFmode);
  v0 = gen_reg_rtx (SFmode);
  v1 = gen_reg_rtx (SFmode);
  v2 = gen_reg_rtx (SFmode);
  t0 = gen_reg_rtx (SFmode);
  halfthree = gen_reg_rtx (SFmode);
  cond = gen_rtx_REG (CCFPmode, CR1_REGNO);
  label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());

  /* check 0.0, 1.0, NaN, Inf by testing src * src = src */
  emit_insn (gen_rtx_SET (VOIDmode, t0,
			  gen_rtx_MULT (SFmode, src, src)));

  emit_insn (gen_rtx_SET (VOIDmode, cond,
			  gen_rtx_COMPARE (CCFPmode, t0, src)));
  c1 = gen_rtx_EQ (VOIDmode, cond, const0_rtx);
  emit_unlikely_jump (c1, label);

  half = force_reg (SFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconsthalf, SFmode));
  one = force_reg (SFmode, CONST_DOUBLE_FROM_REAL_VALUE (dconst1, SFmode));

  /* halfthree = 1.5 = 1.0 + 0.5 */
  emit_insn (gen_rtx_SET (VOIDmode, halfthree,
			  gen_rtx_PLUS (SFmode, one, half)));

  /* x0 = rsqrt estimate */
  emit_insn (gen_rtx_SET (VOIDmode, x0,
			  gen_rtx_UNSPEC (SFmode, gen_rtvec (1, src),
					  UNSPEC_RSQRT)));

  /* y1 = 0.5 * src = 1.5 * src - src -> fewer constants */
  emit_insn (gen_rtx_SET (VOIDmode, y1,
			  gen_rtx_MINUS (SFmode,
					 gen_rtx_MULT (SFmode, src, halfthree),
					 src)));

  /* x1 = x0 * (1.5 - y1 * (x0 * x0)) */
  emit_insn (gen_rtx_SET (VOIDmode, u0,
			  gen_rtx_MULT (SFmode, x0, x0)));
  emit_insn (gen_rtx_SET (VOIDmode, v0,
			  gen_rtx_MINUS (SFmode,
					 halfthree,
					 gen_rtx_MULT (SFmode, y1, u0))));
  emit_insn (gen_rtx_SET (VOIDmode, x1,
			  gen_rtx_MULT (SFmode, x0, v0)));

  /* x2 = x1 * (1.5 - y1 * (x1 * x1)) */
  emit_insn (gen_rtx_SET (VOIDmode, u1,
			  gen_rtx_MULT (SFmode, x1, x1)));
  emit_insn (gen_rtx_SET (VOIDmode, v1,
			  gen_rtx_MINUS (SFmode,
					 halfthree,
					 gen_rtx_MULT (SFmode, y1, u1))));
  emit_insn (gen_rtx_SET (VOIDmode, x2,
			  gen_rtx_MULT (SFmode, x1, v1)));

  /* dst = x2 * (1.5 - y1 * (x2 * x2)) */
  emit_insn (gen_rtx_SET (VOIDmode, u2,
			  gen_rtx_MULT (SFmode, x2, x2)));
  emit_insn (gen_rtx_SET (VOIDmode, v2,
			  gen_rtx_MINUS (SFmode,
					 halfthree,
					 gen_rtx_MULT (SFmode, y1, u2))));
  emit_insn (gen_rtx_SET (VOIDmode, dst,
			  gen_rtx_MULT (SFmode, x2, v2)));

  emit_label (XEXP (label, 0));
}
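/* The refinement above is Newton's method for f(x) = 1/(x*x) - src:
   x' = x * (1.5 - (0.5*src) * x * x).  y1 caches 0.5*src, computed
   as 1.5*src - src so that the halfthree constant is reused instead
   of loading 0.5 separately.  Each step roughly doubles the number
   of correct bits of the initial frsqrte estimate, so three steps
   comfortably exceed the 24 bits of single precision.  */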
/* Emit popcount intrinsic on TARGET_POPCNTB targets.  DST is the
   target, and SRC is the argument operand.  */

void
rs6000_emit_popcount (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx tmp1, tmp2;

  tmp1 = gen_reg_rtx (mode);

  if (mode == SImode)
    {
      emit_insn (gen_popcntbsi2 (tmp1, src));
      tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
			  NULL_RTX, 0);
      tmp2 = force_reg (SImode, tmp2);
      emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
    }
  else
    {
      emit_insn (gen_popcntbdi2 (tmp1, src));
      tmp2 = expand_mult (DImode, tmp1,
			  GEN_INT ((HOST_WIDE_INT)
				   0x01010101 << 32 | 0x01010101),
			  NULL_RTX, 0);
      tmp2 = force_reg (DImode, tmp2);
      emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
    }
}
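/* Worked example for the SImode path: popcntb computes a per-byte
   population count, so src == 0xF0F01010 gives tmp1 == 0x04040101.
   Multiplying by 0x01010101 sums all four byte counts into the top
   byte (0x04 + 0x04 + 0x01 + 0x01 == 0x0A), and the shift right by
   24 extracts it: dst == 10, the number of set bits in src.  */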
/* Emit parity intrinsic on TARGET_POPCNTB targets.  DST is the
   target, and SRC is the argument operand.  */

void
rs6000_emit_parity (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx tmp;

  tmp = gen_reg_rtx (mode);
  if (mode == SImode)
    {
      /* Is mult+shift >= shift+xor+shift+xor?  */
      if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
	{
	  rtx tmp1, tmp2, tmp3, tmp4;

	  tmp1 = gen_reg_rtx (SImode);
	  emit_insn (gen_popcntbsi2 (tmp1, src));

	  tmp2 = gen_reg_rtx (SImode);
	  emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
	  tmp3 = gen_reg_rtx (SImode);
	  emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));

	  tmp4 = gen_reg_rtx (SImode);
	  emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
	  emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
	}
      else
	rs6000_emit_popcount (tmp, src);
      emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
    }
  else
    {
      /* Is mult+shift >= shift+xor+shift+xor+shift+xor?  */
      if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
	{
	  rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;

	  tmp1 = gen_reg_rtx (DImode);
	  emit_insn (gen_popcntbdi2 (tmp1, src));

	  tmp2 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
	  tmp3 = gen_reg_rtx (DImode);
	  emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));

	  tmp4 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
	  tmp5 = gen_reg_rtx (DImode);
	  emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));

	  tmp6 = gen_reg_rtx (DImode);
	  emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
	  emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
	}
      else
	rs6000_emit_popcount (tmp, src);
      emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
    }
}
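/* The shift/xor sequence works because parity is preserved under
   folding: bit 0 of a xor of the byte counts equals the parity of
   their sum.  For SImode, xoring the counts shifted by 16 and then
   by 8 leaves bit 0 of the low byte equal to the parity of all 32
   source bits; the final "and" with 1 masks it out.  */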
/* Return an RTX representing where to find the function value of a
   function returning MODE.  */
static rtx
rs6000_complex_function_value (enum machine_mode mode)
{
  unsigned int regno;
  rtx r1, r2;
  enum machine_mode inner = GET_MODE_INNER (mode);
  unsigned int inner_bytes = GET_MODE_SIZE (inner);

  if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    regno = FP_ARG_RETURN;
  else
    {
      regno = GP_ARG_RETURN;

      /* 32-bit is OK since it'll go in r3/r4.  */
      if (TARGET_32BIT && inner_bytes >= 4)
	return gen_rtx_REG (mode, regno);
    }

  if (inner_bytes >= 8)
    return gen_rtx_REG (mode, regno);

  r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
			  const0_rtx);
  r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
			  GEN_INT (inner_bytes));
  return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
}
/* Define how to find the value returned by a function.
   VALTYPE is the data type of the value (as a tree).
   If the precise function being called is known, FUNC is its FUNCTION_DECL;
   otherwise, FUNC is 0.

   On the SPE, both FPs and vectors are returned in r3.

   On RS/6000 an integer value is in r3 and a floating-point value is in
   fp1, unless -msoft-float.  */

rtx
rs6000_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  unsigned int regno;

  /* Special handling for structs in darwin64.  */
  if (rs6000_darwin64_abi
      && TYPE_MODE (valtype) == BLKmode
      && TREE_CODE (valtype) == RECORD_TYPE
      && int_size_in_bytes (valtype) > 0)
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed as
	 an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, valtype, 1, true);
      if (valret)
	return valret;
      /* Otherwise fall through to standard ABI rules.  */
    }

  if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
    {
      /* Long long return value need be split in -mpowerpc64, 32bit ABI.  */
      return gen_rtx_PARALLEL (DImode,
			       gen_rtvec (2,
					  gen_rtx_EXPR_LIST (VOIDmode,
							     gen_rtx_REG (SImode, GP_ARG_RETURN),
							     const0_rtx),
					  gen_rtx_EXPR_LIST (VOIDmode,
							     gen_rtx_REG (SImode,
									  GP_ARG_RETURN + 1),
							     GEN_INT (4))));
    }
  if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
    {
      return gen_rtx_PARALLEL (DCmode,
			       gen_rtvec (4,
					  gen_rtx_EXPR_LIST (VOIDmode,
							     gen_rtx_REG (SImode, GP_ARG_RETURN),
							     const0_rtx),
					  gen_rtx_EXPR_LIST (VOIDmode,
							     gen_rtx_REG (SImode,
									  GP_ARG_RETURN + 1),
							     GEN_INT (4)),
					  gen_rtx_EXPR_LIST (VOIDmode,
							     gen_rtx_REG (SImode,
									  GP_ARG_RETURN + 2),
							     GEN_INT (8)),
					  gen_rtx_EXPR_LIST (VOIDmode,
							     gen_rtx_REG (SImode,
									  GP_ARG_RETURN + 3),
							     GEN_INT (12))));
    }

  mode = TYPE_MODE (valtype);
  if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
      || POINTER_TYPE_P (valtype))
    mode = TARGET_32BIT ? SImode : DImode;

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
	   && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
    regno = FP_ARG_RETURN;
  else if (TREE_CODE (valtype) == COMPLEX_TYPE
	   && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  else if (TREE_CODE (valtype) == VECTOR_TYPE
	   && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
	   && ALTIVEC_VECTOR_MODE (mode))
    regno = ALTIVEC_ARG_RETURN;
  else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
	   && (mode == DFmode || mode == DCmode
	       || mode == TFmode || mode == TCmode))
    return spe_build_register_parallel (mode, GP_ARG_RETURN);
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}
/* Define how to find the value returned by a library function
   assuming the value has mode MODE.  */
rtx
rs6000_libcall_value (enum machine_mode mode)
{
  unsigned int regno;

  if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
    {
      /* Long long return value need be split in -mpowerpc64, 32bit ABI.  */
      return gen_rtx_PARALLEL (DImode,
			       gen_rtvec (2,
					  gen_rtx_EXPR_LIST (VOIDmode,
							     gen_rtx_REG (SImode, GP_ARG_RETURN),
							     const0_rtx),
					  gen_rtx_EXPR_LIST (VOIDmode,
							     gen_rtx_REG (SImode,
									  GP_ARG_RETURN + 1),
							     GEN_INT (4))));
    }

  if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    /* _Decimal128 must use an even/odd register pair.  */
    regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
  else if (SCALAR_FLOAT_MODE_P (mode)
	   && TARGET_HARD_FLOAT && TARGET_FPRS
	   && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
    regno = FP_ARG_RETURN;
  else if (ALTIVEC_VECTOR_MODE (mode)
	   && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
    regno = ALTIVEC_ARG_RETURN;
  else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
    return rs6000_complex_function_value (mode);
  else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
	   && (mode == DFmode || mode == DCmode
	       || mode == TFmode || mode == TCmode))
    return spe_build_register_parallel (mode, GP_ARG_RETURN);
  else
    regno = GP_ARG_RETURN;

  return gen_rtx_REG (mode, regno);
}
/* Define the offset between two registers, FROM to be eliminated and its
   replacement TO, at the start of a routine.  */
HOST_WIDE_INT
rs6000_initial_elimination_offset (int from, int to)
{
  rs6000_stack_t *info = rs6000_stack_info ();
  HOST_WIDE_INT offset;

  if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? 0 : -info->total_size;
  else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    {
      offset = info->push_p ? 0 : -info->total_size;
      if (FRAME_GROWS_DOWNWARD)
	offset += info->fixed_size + info->vars_size + info->parm_size;
    }
  else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = FRAME_GROWS_DOWNWARD
	     ? info->fixed_size + info->vars_size + info->parm_size
	     : 0;
  else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
    offset = info->total_size;
  else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    offset = info->push_p ? info->total_size : 0;
  else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
    offset = 0;
  else
    gcc_unreachable ();

  return offset;
}
static rtx
rs6000_dwarf_register_span (rtx reg)
{
  rtx parts[8];
  int i, words;
  unsigned regno = REGNO (reg);
  enum machine_mode mode = GET_MODE (reg);

  if (TARGET_SPE
      && regno < 32
      && (SPE_VECTOR_MODE (GET_MODE (reg))
	  || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
	      && mode != SFmode && mode != SDmode && mode != SCmode)))
    ;
  else
    return NULL_RTX;

  regno = REGNO (reg);

  /* The duality of the SPE register size wreaks all kinds of havoc.
     This is a way of distinguishing r0 in 32-bits from r0 in
     64-bits.  */
  words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
  gcc_assert (words <= 4);
  for (i = 0; i < words; i++, regno++)
    {
      if (BYTES_BIG_ENDIAN)
	{
	  parts[2 * i] = gen_rtx_REG (SImode, regno + 1200);
	  parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
	}
      else
	{
	  parts[2 * i] = gen_rtx_REG (SImode, regno);
	  parts[2 * i + 1] = gen_rtx_REG (SImode, regno + 1200);
	}
    }

  return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
}
/* Fill in sizes for SPE register high parts in table used by unwinder.  */

static void
rs6000_init_dwarf_reg_sizes_extra (tree address)
{
  if (TARGET_SPE)
    {
      int i;
      enum machine_mode mode = TYPE_MODE (char_type_node);
      rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
      rtx mem = gen_rtx_MEM (BLKmode, addr);
      rtx value = gen_int_mode (4, mode);

      for (i = 1201; i < 1232; i++)
	{
	  int column = DWARF_REG_TO_UNWIND_COLUMN (i);
	  HOST_WIDE_INT offset
	    = DWARF_FRAME_REGNUM (column) * GET_MODE_SIZE (mode);

	  emit_move_insn (adjust_address (mem, mode, offset), value);
	}
    }
}
/* Map internal gcc register numbers to DWARF2 register numbers.  */

unsigned int
rs6000_dbx_register_number (unsigned int regno)
{
  if (regno <= 63 || write_symbols != DWARF2_DEBUG)
    return regno;
  if (regno == MQ_REGNO)
    return 100;
  if (regno == LR_REGNO)
    return 108;
  if (regno == CTR_REGNO)
    return 109;
  if (CR_REGNO_P (regno))
    return regno - CR0_REGNO + 86;
  if (regno == XER_REGNO)
    return 101;
  if (ALTIVEC_REGNO_P (regno))
    return regno - FIRST_ALTIVEC_REGNO + 1124;
  if (regno == VRSAVE_REGNO)
    return 356;
  if (regno == VSCR_REGNO)
    return 67;
  if (regno == SPE_ACC_REGNO)
    return 99;
  if (regno == SPEFSCR_REGNO)
    return 612;
  /* SPE high reg number.  We get these values of regno from
     rs6000_dwarf_register_span.  */
  gcc_assert (regno >= 1200 && regno < 1232);
  return regno;
}
/* target hook eh_return_filter_mode */
static enum machine_mode
rs6000_eh_return_filter_mode (void)
{
  return TARGET_32BIT ? SImode : word_mode;
}

/* Target hook for scalar_mode_supported_p.  */
static bool
rs6000_scalar_mode_supported_p (enum machine_mode mode)
{
  if (DECIMAL_FLOAT_MODE_P (mode))
    return true;
  else
    return default_scalar_mode_supported_p (mode);
}

/* Target hook for vector_mode_supported_p.  */
static bool
rs6000_vector_mode_supported_p (enum machine_mode mode)
{
  if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
    return true;

  if (TARGET_SPE && SPE_VECTOR_MODE (mode))
    return true;

  else if (TARGET_ALTIVEC && ALTIVEC_VECTOR_MODE (mode))
    return true;

  else
    return false;
}
/* Target hook for invalid_arg_for_unprototyped_fn. */
static const char *
invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
{
  return (!rs6000_darwin64_abi
	  && typelist == 0
	  && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
	  && (funcdecl == NULL_TREE
	      || (TREE_CODE (funcdecl) == FUNCTION_DECL
		  && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
	 ? N_("AltiVec argument passed to unprototyped function")
	 : NULL;
}
/* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
   setup by using __stack_chk_fail_local hidden function instead of
   calling __stack_chk_fail directly.  Otherwise it is better to call
   __stack_chk_fail directly.  */

static tree
rs6000_stack_protect_fail (void)
{
  return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
	 ? default_hidden_stack_protect_fail ()
	 : default_external_stack_protect_fail ();
}
void
rs6000_final_prescan_insn (rtx insn, rtx *operand ATTRIBUTE_UNUSED,
			   int num_operands ATTRIBUTE_UNUSED)
{
  if (rs6000_warn_cell_microcode)
    {
      const char *temp;
      int insn_code_number = recog_memoized (insn);
      location_t location = locator_location (INSN_LOCATOR (insn));

      /* Punt on insns we cannot recognize.  */
      if (insn_code_number < 0)
	return;

      temp = get_insn_template (insn_code_number, insn);

      if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
	warning_at (location, OPT_mwarn_cell_microcode,
		    "emitting microcode insn %s\t[%s] #%d",
		    temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
      else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
	warning_at (location, OPT_mwarn_cell_microcode,
		    "emitting conditional microcode insn %s\t[%s] #%d",
		    temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
    }
}
#include "gt-rs6000.h"