/* Subroutines used for code generation on IBM RS/6000.
   Copyright (C) 1991-2014 Free Software Foundation, Inc.
   Contributed by Richard Kenner (kenner@vlsi1.ultra.nyu.edu)

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "obstack.h"
#include "tree.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "calls.h"
#include "print-tree.h"
#include "varasm.h"
#include "expr.h"
#include "optabs.h"
#include "except.h"
#include "function.h"
#include "output.h"
#include "dbxout.h"
#include "basic-block.h"
#include "diagnostic-core.h"
#include "toplev.h"
#include "ggc.h"
#include "hashtab.h"
#include "tm_p.h"
#include "target.h"
#include "target-def.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "reload.h"
#include "cfgloop.h"
#include "sched-int.h"
#include "hash-table.h"
#include "vec.h"
#include "tree-ssa-alias.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimple-expr.h"
#include "is-a.h"
#include "gimple.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimple-walk.h"
#include "intl.h"
#include "params.h"
#include "tm-constrs.h"
#include "ira.h"
#include "opts.h"
#include "tree-vectorizer.h"
#include "dumpfile.h"
#include "cgraph.h"
#include "target-globals.h"
#include "builtins.h"
#include "context.h"
#include "tree-pass.h"
#if TARGET_XCOFF
#include "xcoffout.h"  /* get declarations of xcoff_*_section_name */
#endif
#if TARGET_MACHO
#include "gstab.h"  /* for N_SLINE */
#endif

#ifndef TARGET_NO_PROTOTYPE
#define TARGET_NO_PROTOTYPE 0
#endif

#define min(A,B)  ((A) < (B) ? (A) : (B))
#define max(A,B)  ((A) > (B) ? (A) : (B))

/* Structure used to define the rs6000 stack.  */
typedef struct rs6000_stack {
  int reload_completed;         /* stack info won't change from here on */
  int first_gp_reg_save;        /* first callee saved GP register used */
  int first_fp_reg_save;        /* first callee saved FP register used */
  int first_altivec_reg_save;   /* first callee saved AltiVec register used */
  int lr_save_p;                /* true if the link reg needs to be saved */
  int cr_save_p;                /* true if the CR reg needs to be saved */
  unsigned int vrsave_mask;     /* mask of vec registers to save */
  int push_p;                   /* true if we need to allocate stack space */
  int calls_p;                  /* true if the function makes any calls */
  int world_save_p;             /* true if we're saving *everything*:
                                   r13-r31, cr, f14-f31, vrsave, v20-v31 */
  enum rs6000_abi abi;          /* which ABI to use */
  int gp_save_offset;           /* offset to save GP regs from initial SP */
  int fp_save_offset;           /* offset to save FP regs from initial SP */
  int altivec_save_offset;      /* offset to save AltiVec regs from initial SP */
  int lr_save_offset;           /* offset to save LR from initial SP */
  int cr_save_offset;           /* offset to save CR from initial SP */
  int vrsave_save_offset;       /* offset to save VRSAVE from initial SP */
  int spe_gp_save_offset;       /* offset to save spe 64-bit gprs */
  int varargs_save_offset;      /* offset to save the varargs registers */
  int ehrd_offset;              /* offset to EH return data */
  int ehcr_offset;              /* offset to EH CR field data */
  int reg_size;                 /* register size (4 or 8) */
  HOST_WIDE_INT vars_size;      /* variable save area size */
  int parm_size;                /* outgoing parameter size */
  int save_size;                /* save area size */
  int fixed_size;               /* fixed size of stack frame */
  int gp_size;                  /* size of saved GP registers */
  int fp_size;                  /* size of saved FP registers */
  int altivec_size;             /* size of saved AltiVec registers */
  int cr_size;                  /* size to hold CR if not in save_size */
  int vrsave_size;              /* size to hold VRSAVE if not in save_size */
  int altivec_padding_size;     /* size of altivec alignment padding if
                                   not in save_size */
  int spe_gp_size;              /* size of 64-bit GPR save size for SPE */
  int spe_padding_size;
  HOST_WIDE_INT total_size;     /* total bytes allocated for stack */
  int spe_64bit_regs_used;
  int savres_strategy;
} rs6000_stack_t;

/* A C structure for machine-specific, per-function data.
   This is added to the cfun structure.  */
typedef struct GTY(()) machine_function
{
  /* Some local-dynamic symbol.  */
  const char *some_ld_name;
  /* Whether the instruction chain has been scanned already.  */
  int insn_chain_scanned_p;
  /* Flags if __builtin_return_address (n) with n >= 1 was used.  */
  int ra_needs_full_frame;
  /* Flags if __builtin_return_address (0) was used.  */
  int ra_need_lr;
  /* Cache lr_save_p after expansion of builtin_eh_return.  */
  int lr_save_state;
  /* Whether we need to save the TOC to the reserved stack location in the
     function prologue.  */
  bool save_toc_in_prologue;
  /* Offset from virtual_stack_vars_rtx to the start of the ABI_V4
     varargs save area.  */
  HOST_WIDE_INT varargs_save_offset;
  /* Temporary stack slot to use for SDmode copies.  This slot is
     64-bits wide and is allocated early enough so that the offset
     does not overflow the 16-bit load/store offset field.  */
  rtx sdmode_stack_slot;
  /* Flag if r2 setup is needed with ELFv2 ABI.  */
  bool r2_setup_needed;
} machine_function;

/* Support targetm.vectorize.builtin_mask_for_load.  */
static GTY(()) tree altivec_builtin_mask_for_load;

/* Set to nonzero once AIX common-mode calls have been defined.  */
static GTY(()) int common_mode_defined;

/* Label number of the label created for -mrelocatable, which we call so
   we can get the address of the GOT section.  */
static int rs6000_pic_labelno;

#ifdef USING_ELFOS_H
/* Counter for labels which are to be placed in .fixup.  */
int fixuplabelno = 0;
#endif

/* Whether to use variant of AIX ABI for PowerPC64 Linux.  */
int dot_symbols;

/* Specify the machine mode that pointers have.  After generation of rtl, the
   compiler makes no further distinction between pointers and any other objects
   of this machine mode.  The type is unsigned since not all things that
   include rs6000.h also include machmode.h.  */
unsigned rs6000_pmode;

/* Width in bits of a pointer.  */
unsigned rs6000_pointer_size;

#ifdef HAVE_AS_GNU_ATTRIBUTE
/* Flag whether floating point values have been passed/returned.  */
static bool rs6000_passes_float;
/* Flag whether vector values have been passed/returned.  */
static bool rs6000_passes_vector;
/* Flag whether small (<= 8 byte) structures have been returned.  */
static bool rs6000_returns_struct;
#endif

/* Value is TRUE if register/mode pair is acceptable.  */
bool rs6000_hard_regno_mode_ok_p[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Maximum number of registers needed for a given register class and mode.  */
unsigned char rs6000_class_max_nregs[NUM_MACHINE_MODES][LIM_REG_CLASSES];

/* How many registers are needed for a given register and mode.  */
unsigned char rs6000_hard_regno_nregs[NUM_MACHINE_MODES][FIRST_PSEUDO_REGISTER];

/* Map register number to register class.  */
enum reg_class rs6000_regno_regclass[FIRST_PSEUDO_REGISTER];

static int dbg_cost_ctrl;

/* Built-in types.  */
tree rs6000_builtin_types[RS6000_BTI_MAX];
tree rs6000_builtin_decls[RS6000_BUILTIN_COUNT];

/* Flag to say the TOC is initialized.  */
int toc_initialized;
char toc_label_name[10];

/* Cached value of rs6000_variable_issue.  This is cached in
   rs6000_variable_issue hook and returned from rs6000_sched_reorder2.  */
static short cached_can_issue_more;

static GTY(()) section *read_only_data_section;
static GTY(()) section *private_data_section;
static GTY(()) section *tls_data_section;
static GTY(()) section *tls_private_data_section;
static GTY(()) section *read_only_private_data_section;
static GTY(()) section *sdata2_section;
static GTY(()) section *toc_section;

struct builtin_description
{
  const HOST_WIDE_INT mask;
  const enum insn_code icode;
  const char *const name;
  const enum rs6000_builtins code;
};

/* Describe the vector unit used for modes.  */
enum rs6000_vector rs6000_vector_unit[NUM_MACHINE_MODES];
enum rs6000_vector rs6000_vector_mem[NUM_MACHINE_MODES];

/* Register classes for various constraints that are based on the target
   switches.  */
enum reg_class rs6000_constraints[RS6000_CONSTRAINT_MAX];

/* Describe the alignment of a vector.  */
int rs6000_vector_align[NUM_MACHINE_MODES];

/* Map selected modes to types for builtins.  */
static GTY(()) tree builtin_mode_to_type[MAX_MACHINE_MODE][2];

/* What modes to automatically generate reciprocal divide estimate (fre) and
   reciprocal sqrt (frsqrte) for.  */
unsigned char rs6000_recip_bits[MAX_MACHINE_MODE];

/* Masks to determine which reciprocal estimate instructions to generate
   automatically.  */
enum rs6000_recip_mask {
  RECIP_SF_DIV          = 0x001,        /* Use divide estimate.  */
  RECIP_DF_DIV          = 0x002,
  RECIP_V4SF_DIV        = 0x004,
  RECIP_V2DF_DIV        = 0x008,

  RECIP_SF_RSQRT        = 0x010,        /* Use reciprocal sqrt estimate.  */
  RECIP_DF_RSQRT        = 0x020,
  RECIP_V4SF_RSQRT      = 0x040,
  RECIP_V2DF_RSQRT      = 0x080,

  /* Various combinations of flags for -mrecip=xxx.  */
  RECIP_NONE            = 0,
  RECIP_ALL             = (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
                           | RECIP_V2DF_DIV | RECIP_SF_RSQRT | RECIP_DF_RSQRT
                           | RECIP_V4SF_RSQRT | RECIP_V2DF_RSQRT),

  RECIP_HIGH_PRECISION  = RECIP_ALL,

  /* On low precision machines like the power5, don't enable double precision
     reciprocal square root estimate, since it isn't accurate enough.  */
  RECIP_LOW_PRECISION   = (RECIP_ALL & ~(RECIP_DF_RSQRT | RECIP_V2DF_RSQRT))
};

/* -mrecip options.  */
static struct
{
  const char *string;           /* option name */
  unsigned int mask;            /* mask bits to set */
} recip_options[] = {
  { "all",      RECIP_ALL },
  { "none",     RECIP_NONE },
  { "div",      (RECIP_SF_DIV | RECIP_DF_DIV | RECIP_V4SF_DIV
                 | RECIP_V2DF_DIV) },
  { "divf",     (RECIP_SF_DIV | RECIP_V4SF_DIV) },
  { "divd",     (RECIP_DF_DIV | RECIP_V2DF_DIV) },
  { "rsqrt",    (RECIP_SF_RSQRT | RECIP_DF_RSQRT | RECIP_V4SF_RSQRT
                 | RECIP_V2DF_RSQRT) },
  { "rsqrtf",   (RECIP_SF_RSQRT | RECIP_V4SF_RSQRT) },
  { "rsqrtd",   (RECIP_DF_RSQRT | RECIP_V2DF_RSQRT) },
};
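
/* Illustrative example (not used below): a command-line option such as
   -mrecip=divf,rsqrtd would OR together the "divf" and "rsqrtd" masks
   from this table, i.e. RECIP_SF_DIV | RECIP_V4SF_DIV | RECIP_DF_RSQRT
   | RECIP_V2DF_RSQRT, enabling single precision divide estimates plus
   double precision reciprocal square root estimates.  */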

/* Pointer to function (in rs6000-c.c) that can define or undefine target
   macros that have changed.  Languages that don't support the preprocessor
   don't link in rs6000-c.c, so we can't call it directly.  */
void (*rs6000_target_modify_macros_ptr) (bool, HOST_WIDE_INT, HOST_WIDE_INT);

/* Simplify register classes into simpler classifications.  We assume
   GPR_REG_TYPE - FPR_REG_TYPE are ordered so that we can use a simple range
   check for standard register classes (gpr/floating/altivec/vsx) and
   floating/vector classes (float/altivec/vsx).  */

enum rs6000_reg_type {
  NO_REG_TYPE,
  PSEUDO_REG_TYPE,
  GPR_REG_TYPE,
  VSX_REG_TYPE,
  ALTIVEC_REG_TYPE,
  FPR_REG_TYPE,
  SPR_REG_TYPE,
  CR_REG_TYPE,
  SPE_ACC_TYPE,
  SPEFSCR_REG_TYPE
};

/* Map register class to register type.  */
static enum rs6000_reg_type reg_class_to_reg_type[N_REG_CLASSES];

/* First/last register type for the 'normal' register types (i.e. general
   purpose, floating point, altivec, and VSX registers).  */
#define IS_STD_REG_TYPE(RTYPE) IN_RANGE(RTYPE, GPR_REG_TYPE, FPR_REG_TYPE)

#define IS_FP_VECT_REG_TYPE(RTYPE) IN_RANGE(RTYPE, VSX_REG_TYPE, FPR_REG_TYPE)
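
/* Example of the range checks above: with the enum ordering GPR_REG_TYPE
   < VSX_REG_TYPE < ALTIVEC_REG_TYPE < FPR_REG_TYPE, both
   IS_STD_REG_TYPE (ALTIVEC_REG_TYPE) and IS_FP_VECT_REG_TYPE
   (ALTIVEC_REG_TYPE) are true, while IS_FP_VECT_REG_TYPE (GPR_REG_TYPE)
   is false.  */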


/* Register classes we care about in secondary reload or when checking for a
   legitimate address.  We only need to worry about GPR, FPR, and Altivec
   registers here, along with an ANY field that is the OR of the 3 register
   classes.  */

enum rs6000_reload_reg_type {
  RELOAD_REG_GPR,               /* General purpose registers.  */
  RELOAD_REG_FPR,               /* Traditional floating point regs.  */
  RELOAD_REG_VMX,               /* Altivec (VMX) registers.  */
  RELOAD_REG_ANY,               /* OR of GPR, FPR, Altivec masks.  */
  N_RELOAD_REG
};

/* For setting up register classes, loop through the 3 register classes mapping
   into real registers, and skip the ANY class, which is just an OR of the
   bits.  */
#define FIRST_RELOAD_REG_CLASS  RELOAD_REG_GPR
#define LAST_RELOAD_REG_CLASS   RELOAD_REG_VMX

/* Map reload register type to a register in the register class.  */
struct reload_reg_map_type {
  const char *name;             /* Register class name.  */
  int reg;                      /* Register in the register class.  */
};

static const struct reload_reg_map_type reload_reg_map[N_RELOAD_REG] = {
  { "Gpr",      FIRST_GPR_REGNO },      /* RELOAD_REG_GPR.  */
  { "Fpr",      FIRST_FPR_REGNO },      /* RELOAD_REG_FPR.  */
  { "VMX",      FIRST_ALTIVEC_REGNO },  /* RELOAD_REG_VMX.  */
  { "Any",      -1 },                   /* RELOAD_REG_ANY.  */
};

/* Mask bits for each register class, indexed per mode.  Historically the
   compiler has been more restrictive about which types can do PRE_MODIFY
   instead of PRE_INC and PRE_DEC, so keep track of separate bits for these
   two.  */
typedef unsigned char addr_mask_type;

#define RELOAD_REG_VALID        0x01    /* Mode valid in register.  */
#define RELOAD_REG_MULTIPLE     0x02    /* Mode takes multiple registers.  */
#define RELOAD_REG_INDEXED      0x04    /* Reg+reg addressing.  */
#define RELOAD_REG_OFFSET       0x08    /* Reg+offset addressing.  */
#define RELOAD_REG_PRE_INCDEC   0x10    /* PRE_INC/PRE_DEC valid.  */
#define RELOAD_REG_PRE_MODIFY   0x20    /* PRE_MODIFY valid.  */
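
/* Worked example of the mask bits (illustrative values only): a mode that
   is valid in a register with reg+reg, reg+offset and PRE_INC/PRE_DEC
   addressing would have an addr_mask of RELOAD_REG_VALID
   | RELOAD_REG_INDEXED | RELOAD_REG_OFFSET | RELOAD_REG_PRE_INCDEC,
   i.e. 0x1d.  */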

/* For each mode, the reload insns needed and the masks of the valid
   addressing modes for each register type.  */
struct rs6000_reg_addr {
  enum insn_code reload_load;           /* INSN to reload for loading.  */
  enum insn_code reload_store;          /* INSN to reload for storing.  */
  enum insn_code reload_fpr_gpr;        /* INSN to move from FPR to GPR.  */
  enum insn_code reload_gpr_vsx;        /* INSN to move from GPR to VSX.  */
  enum insn_code reload_vsx_gpr;        /* INSN to move from VSX to GPR.  */
  addr_mask_type addr_mask[(int)N_RELOAD_REG]; /* Valid address masks.  */
  bool scalar_in_vmx_p;                 /* Scalar value can go in VMX.  */
};

static struct rs6000_reg_addr reg_addr[NUM_MACHINE_MODES];

/* Helper function to say whether a mode supports PRE_INC or PRE_DEC.  */
static inline bool
mode_supports_pre_incdec_p (enum machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_INCDEC)
          != 0);
}

/* Helper function to say whether a mode supports PRE_MODIFY.  */
static inline bool
mode_supports_pre_modify_p (enum machine_mode mode)
{
  return ((reg_addr[mode].addr_mask[RELOAD_REG_ANY] & RELOAD_REG_PRE_MODIFY)
          != 0);
}
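
/* Sketch of how these helpers are intended to be used (a hypothetical
   caller, not part of this file's logic):

     if (mode_supports_pre_incdec_p (mode))
       ...use a PRE_INC/PRE_DEC address (update-form load/store)...
     else if (mode_supports_pre_modify_p (mode))
       ...use a PRE_MODIFY address instead...
     else
       ...fall back to a plain offset or indexed address...  */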

\f
/* Target cpu costs.  */

struct processor_costs {
  const int mulsi;          /* cost of SImode multiplication.  */
  const int mulsi_const;    /* cost of SImode multiplication by constant.  */
  const int mulsi_const9;   /* cost of SImode mult by short constant.  */
  const int muldi;          /* cost of DImode multiplication.  */
  const int divsi;          /* cost of SImode division.  */
  const int divdi;          /* cost of DImode division.  */
  const int fp;             /* cost of simple SFmode and DFmode insns.  */
  const int dmul;           /* cost of DFmode multiplication (and fmadd).  */
  const int sdiv;           /* cost of SFmode division (fdivs).  */
  const int ddiv;           /* cost of DFmode division (fdiv).  */
  const int cache_line_size;    /* cache line size in bytes.  */
  const int l1_cache_size;      /* size of l1 cache, in kilobytes.  */
  const int l2_cache_size;      /* size of l2 cache, in kilobytes.  */
  const int simultaneous_prefetches; /* number of parallel prefetch
                                        operations.  */
};

const struct processor_costs *rs6000_cost;

/* Processor costs (relative to an add).  */

/* Instruction size costs on 32-bit processors.  */
static const
struct processor_costs size32_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  32,                   /* cache line size */
  0,                    /* l1 cache */
  0,                    /* l2 cache */
  0,                    /* streams */
};

/* Instruction size costs on 64-bit processors.  */
static const
struct processor_costs size64_cost = {
  COSTS_N_INSNS (1),    /* mulsi */
  COSTS_N_INSNS (1),    /* mulsi_const */
  COSTS_N_INSNS (1),    /* mulsi_const9 */
  COSTS_N_INSNS (1),    /* muldi */
  COSTS_N_INSNS (1),    /* divsi */
  COSTS_N_INSNS (1),    /* divdi */
  COSTS_N_INSNS (1),    /* fp */
  COSTS_N_INSNS (1),    /* dmul */
  COSTS_N_INSNS (1),    /* sdiv */
  COSTS_N_INSNS (1),    /* ddiv */
  128,                  /* cache line size */
  0,                    /* l1 cache */
  0,                    /* l2 cache */
  0,                    /* streams */
};

/* Instruction costs on RS64A processors.  */
static const
struct processor_costs rs64a_cost = {
  COSTS_N_INSNS (20),   /* mulsi */
  COSTS_N_INSNS (12),   /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (34),   /* muldi */
  COSTS_N_INSNS (65),   /* divsi */
  COSTS_N_INSNS (67),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (31),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  128,                  /* cache line size */
  128,                  /* l1 cache */
  2048,                 /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on MPCCORE processors.  */
static const
struct processor_costs mpccore_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (6),    /* divsi */
  COSTS_N_INSNS (6),    /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (10),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  32,                   /* cache line size */
  4,                    /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC403 processors.  */
static const
struct processor_costs ppc403_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (33),   /* divsi */
  COSTS_N_INSNS (33),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,                   /* cache line size */
  4,                    /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC405 processors.  */
static const
struct processor_costs ppc405_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (35),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (11),   /* fp */
  COSTS_N_INSNS (11),   /* dmul */
  COSTS_N_INSNS (11),   /* sdiv */
  COSTS_N_INSNS (11),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC440 processors.  */
static const
struct processor_costs ppc440_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (34),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC476 processors.  */
static const
struct processor_costs ppc476_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (11),   /* divsi */
  COSTS_N_INSNS (11),   /* divdi */
  COSTS_N_INSNS (6),    /* fp */
  COSTS_N_INSNS (6),    /* dmul */
  COSTS_N_INSNS (19),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* l1 cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC601 processors.  */
static const
struct processor_costs ppc601_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (36),   /* divsi */
  COSTS_N_INSNS (36),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC603 processors.  */
static const
struct processor_costs ppc603_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (37),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  8,                    /* l1 cache */
  64,                   /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC604 processors.  */
static const
struct processor_costs ppc604_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC604e processors.  */
static const
struct processor_costs ppc604e_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (20),   /* divsi */
  COSTS_N_INSNS (20),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC620 processors.  */
static const
struct processor_costs ppc620_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (32),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC630 processors.  */
static const
struct processor_costs ppc630_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (7),    /* muldi */
  COSTS_N_INSNS (21),   /* divsi */
  COSTS_N_INSNS (37),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (21),   /* ddiv */
  128,                  /* cache line size */
  64,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on Cell processor.  */
/* COSTS_N_INSNS (1) ~ one add.  */
static const
struct processor_costs ppccell_cost = {
  COSTS_N_INSNS (9/2)+2,    /* mulsi */
  COSTS_N_INSNS (6/2),      /* mulsi_const */
  COSTS_N_INSNS (6/2),      /* mulsi_const9 */
  COSTS_N_INSNS (15/2)+2,   /* muldi */
  COSTS_N_INSNS (38/2),     /* divsi */
  COSTS_N_INSNS (70/2),     /* divdi */
  COSTS_N_INSNS (10/2),     /* fp */
  COSTS_N_INSNS (10/2),     /* dmul */
  COSTS_N_INSNS (74/2),     /* sdiv */
  COSTS_N_INSNS (74/2),     /* ddiv */
  128,                      /* cache line size */
  32,                       /* l1 cache */
  512,                      /* l2 cache */
  6,                        /* streams */
};
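
/* Note on the Cell entries above: the division is C integer division, so
   COSTS_N_INSNS (9/2)+2 is COSTS_N_INSNS (4)+2.  Assuming the usual
   definition of COSTS_N_INSNS (N) as (N) * 4, that evaluates to 18,
   i.e. roughly four and a half adds plus a small fixed overhead.  */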

/* Instruction costs on PPC750 and PPC7400 processors.  */
static const
struct processor_costs ppc750_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (17),   /* divsi */
  COSTS_N_INSNS (17),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (31),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC7450 processors.  */
static const
struct processor_costs ppc7450_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (23),   /* divsi */
  COSTS_N_INSNS (23),   /* divdi */
  COSTS_N_INSNS (5),    /* fp */
  COSTS_N_INSNS (5),    /* dmul */
  COSTS_N_INSNS (21),   /* sdiv */
  COSTS_N_INSNS (35),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  1,                    /* streams */
};

/* Instruction costs on PPC8540 processors.  */
static const
struct processor_costs ppc8540_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (29),   /* sdiv */
  COSTS_N_INSNS (29),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  1,                    /* prefetch streams */
};

/* Instruction costs on E300C2 and E300C3 cores.  */
static const
struct processor_costs ppce300c2c3_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (19),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (4),    /* dmul */
  COSTS_N_INSNS (18),   /* sdiv */
  COSTS_N_INSNS (33),   /* ddiv */
  32,                   /* cache line size */
  16,                   /* l1 cache */
  16,                   /* l2 cache */
  1,                    /* prefetch streams */
};

/* Instruction costs on PPCE500MC processors.  */
static const
struct processor_costs ppce500mc_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (8),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
};

/* Instruction costs on PPCE500MC64 processors.  */
static const
struct processor_costs ppce500mc64_cost = {
  COSTS_N_INSNS (4),    /* mulsi */
  COSTS_N_INSNS (4),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (4),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
};

/* Instruction costs on PPCE5500 processors.  */
static const
struct processor_costs ppce5500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
};

/* Instruction costs on PPCE6500 processors.  */
static const
struct processor_costs ppce6500_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (4),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (14),   /* divsi */
  COSTS_N_INSNS (14),   /* divdi */
  COSTS_N_INSNS (7),    /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (36),   /* sdiv */
  COSTS_N_INSNS (66),   /* ddiv */
  64,                   /* cache line size */
  32,                   /* l1 cache */
  128,                  /* l2 cache */
  1,                    /* prefetch streams */
};

/* Instruction costs on AppliedMicro Titan processors.  */
static const
struct processor_costs titan_cost = {
  COSTS_N_INSNS (5),    /* mulsi */
  COSTS_N_INSNS (5),    /* mulsi_const */
  COSTS_N_INSNS (5),    /* mulsi_const9 */
  COSTS_N_INSNS (5),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (18),   /* divdi */
  COSTS_N_INSNS (10),   /* fp */
  COSTS_N_INSNS (10),   /* dmul */
  COSTS_N_INSNS (46),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  32,                   /* cache line size */
  32,                   /* l1 cache */
  512,                  /* l2 cache */
  1,                    /* prefetch streams */
};

/* Instruction costs on POWER4 and POWER5 processors.  */
static const
struct processor_costs power4_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (4),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (17),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  1024,                 /* l2 cache */
  8,                    /* prefetch streams */
};

/* Instruction costs on POWER6 processors.  */
static const
struct processor_costs power6_cost = {
  COSTS_N_INSNS (8),    /* mulsi */
  COSTS_N_INSNS (8),    /* mulsi_const */
  COSTS_N_INSNS (8),    /* mulsi_const9 */
  COSTS_N_INSNS (8),    /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,                  /* cache line size */
  64,                   /* l1 cache */
  2048,                 /* l2 cache */
  16,                   /* prefetch streams */
};

/* Instruction costs on POWER7 processors.  */
static const
struct processor_costs power7_cost = {
  COSTS_N_INSNS (2),    /* mulsi */
  COSTS_N_INSNS (2),    /* mulsi_const */
  COSTS_N_INSNS (2),    /* mulsi_const9 */
  COSTS_N_INSNS (2),    /* muldi */
  COSTS_N_INSNS (18),   /* divsi */
  COSTS_N_INSNS (34),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (13),   /* sdiv */
  COSTS_N_INSNS (16),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  12,                   /* prefetch streams */
};

/* Instruction costs on POWER8 processors.  */
static const
struct processor_costs power8_cost = {
  COSTS_N_INSNS (3),    /* mulsi */
  COSTS_N_INSNS (3),    /* mulsi_const */
  COSTS_N_INSNS (3),    /* mulsi_const9 */
  COSTS_N_INSNS (3),    /* muldi */
  COSTS_N_INSNS (19),   /* divsi */
  COSTS_N_INSNS (35),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (14),   /* sdiv */
  COSTS_N_INSNS (17),   /* ddiv */
  128,                  /* cache line size */
  32,                   /* l1 cache */
  256,                  /* l2 cache */
  12,                   /* prefetch streams */
};

/* Instruction costs on POWER A2 processors.  */
static const
struct processor_costs ppca2_cost = {
  COSTS_N_INSNS (16),   /* mulsi */
  COSTS_N_INSNS (16),   /* mulsi_const */
  COSTS_N_INSNS (16),   /* mulsi_const9 */
  COSTS_N_INSNS (16),   /* muldi */
  COSTS_N_INSNS (22),   /* divsi */
  COSTS_N_INSNS (28),   /* divdi */
  COSTS_N_INSNS (3),    /* fp */
  COSTS_N_INSNS (3),    /* dmul */
  COSTS_N_INSNS (59),   /* sdiv */
  COSTS_N_INSNS (72),   /* ddiv */
  64,                   /* cache line size */
  16,                   /* l1 cache */
  2048,                 /* l2 cache */
  16,                   /* prefetch streams */
};

\f
/* Table that classifies rs6000 builtin functions (pure, const, etc.).  */
#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

#define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

#define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE) \
  { NAME, ICODE, MASK, ATTR },

struct rs6000_builtin_info_type {
  const char *name;
  const enum insn_code icode;
  const HOST_WIDE_INT mask;
  const unsigned attr;
};

static const struct rs6000_builtin_info_type rs6000_builtin_info[] =
{
#include "rs6000-builtin.def"
};
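
/* For illustration, a hypothetical rs6000-builtin.def entry such as

     RS6000_BUILTIN_2 (FOO, "foo", MASK_BITS, ATTR_BITS, CODE_FOR_foo)

   expands under the definitions above to the initializer

     { "foo", CODE_FOR_foo, MASK_BITS, ATTR_BITS },

   so rs6000_builtin_info gets one entry per builtin, in .def order.  */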

#undef RS6000_BUILTIN_1
#undef RS6000_BUILTIN_2
#undef RS6000_BUILTIN_3
#undef RS6000_BUILTIN_A
#undef RS6000_BUILTIN_D
#undef RS6000_BUILTIN_E
#undef RS6000_BUILTIN_H
#undef RS6000_BUILTIN_P
#undef RS6000_BUILTIN_Q
#undef RS6000_BUILTIN_S
#undef RS6000_BUILTIN_X

/* Support for -mveclibabi=<xxx> to control which vector library to use.  */
static tree (*rs6000_veclib_handler) (tree, tree, tree);

\f
static bool rs6000_debug_legitimate_address_p (enum machine_mode, rtx, bool);
static bool spe_func_has_64bit_regs_p (void);
static struct machine_function * rs6000_init_machine_status (void);
static int rs6000_ra_ever_killed (void);
static tree rs6000_handle_longcall_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_altivec_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_handle_struct_attribute (tree *, tree, tree, int, bool *);
static tree rs6000_builtin_vectorized_libmass (tree, tree, tree);
static void rs6000_emit_set_long_const (rtx, HOST_WIDE_INT);
static int rs6000_memory_move_cost (enum machine_mode, reg_class_t, bool);
static bool rs6000_debug_rtx_costs (rtx, int, int, int, int *, bool);
static int rs6000_debug_address_cost (rtx, enum machine_mode, addr_space_t,
                                      bool);
static int rs6000_debug_adjust_cost (rtx_insn *, rtx, rtx_insn *, int);
static bool is_microcoded_insn (rtx);
static bool is_nonpipeline_insn (rtx);
static bool is_cracked_insn (rtx);
static bool is_load_insn (rtx, rtx *);
static bool is_store_insn (rtx, rtx *);
static bool set_to_load_agen (rtx, rtx);
static bool insn_terminates_group_p (rtx, enum group_termination);
static bool insn_must_be_first_in_group (rtx);
static bool insn_must_be_last_in_group (rtx);
static void altivec_init_builtins (void);
static tree builtin_function_type (enum machine_mode, enum machine_mode,
                                   enum machine_mode, enum machine_mode,
                                   enum rs6000_builtins, const char *name);
static void rs6000_common_init_builtins (void);
static void paired_init_builtins (void);
static rtx paired_expand_predicate_builtin (enum insn_code, tree, rtx);
static void spe_init_builtins (void);
static void htm_init_builtins (void);
static rtx spe_expand_predicate_builtin (enum insn_code, tree, rtx);
static rtx spe_expand_evsel_builtin (enum insn_code, tree, rtx);
static int rs6000_emit_int_cmove (rtx, rtx, rtx, rtx);
static rs6000_stack_t *rs6000_stack_info (void);
static void is_altivec_return_reg (rtx, void *);
int easy_vector_constant (rtx, enum machine_mode);
static rtx rs6000_debug_legitimize_address (rtx, rtx, enum machine_mode);
static rtx rs6000_legitimize_tls_address (rtx, enum tls_model);
static int rs6000_get_some_local_dynamic_name_1 (rtx *, void *);
static rtx rs6000_darwin64_record_arg (CUMULATIVE_ARGS *, const_tree,
                                       bool, bool);
#if TARGET_MACHO
static void macho_branch_islands (void);
#endif
static rtx rs6000_legitimize_reload_address (rtx, enum machine_mode, int, int,
                                             int, int *);
static rtx rs6000_debug_legitimize_reload_address (rtx, enum machine_mode, int,
                                                   int, int, int *);
static bool rs6000_mode_dependent_address (const_rtx);
static bool rs6000_debug_mode_dependent_address (const_rtx);
static enum reg_class rs6000_secondary_reload_class (enum reg_class,
                                                     enum machine_mode, rtx);
static enum reg_class rs6000_debug_secondary_reload_class (enum reg_class,
                                                           enum machine_mode,
                                                           rtx);
static enum reg_class rs6000_preferred_reload_class (rtx, enum reg_class);
static enum reg_class rs6000_debug_preferred_reload_class (rtx,
                                                           enum reg_class);
static bool rs6000_secondary_memory_needed (enum reg_class, enum reg_class,
                                            enum machine_mode);
static bool rs6000_debug_secondary_memory_needed (enum reg_class,
                                                  enum reg_class,
                                                  enum machine_mode);
static bool rs6000_cannot_change_mode_class (enum machine_mode,
                                             enum machine_mode,
                                             enum reg_class);
static bool rs6000_debug_cannot_change_mode_class (enum machine_mode,
                                                   enum machine_mode,
                                                   enum reg_class);
static bool rs6000_save_toc_in_prologue_p (void);

rtx (*rs6000_legitimize_reload_address_ptr) (rtx, enum machine_mode, int, int,
                                             int, int *)
  = rs6000_legitimize_reload_address;

static bool (*rs6000_mode_dependent_address_ptr) (const_rtx)
  = rs6000_mode_dependent_address;

enum reg_class (*rs6000_secondary_reload_class_ptr) (enum reg_class,
                                                     enum machine_mode, rtx)
  = rs6000_secondary_reload_class;

enum reg_class (*rs6000_preferred_reload_class_ptr) (rtx, enum reg_class)
  = rs6000_preferred_reload_class;

bool (*rs6000_secondary_memory_needed_ptr) (enum reg_class, enum reg_class,
                                            enum machine_mode)
  = rs6000_secondary_memory_needed;

bool (*rs6000_cannot_change_mode_class_ptr) (enum machine_mode,
                                             enum machine_mode,
                                             enum reg_class)
  = rs6000_cannot_change_mode_class;

const int INSN_NOT_AVAILABLE = -1;

static void rs6000_print_isa_options (FILE *, int, const char *,
                                      HOST_WIDE_INT);
static void rs6000_print_builtin_options (FILE *, int, const char *,
                                          HOST_WIDE_INT);

static enum rs6000_reg_type register_to_reg_type (rtx, bool *);
static bool rs6000_secondary_reload_move (enum rs6000_reg_type,
                                          enum rs6000_reg_type,
                                          enum machine_mode,
                                          secondary_reload_info *,
                                          bool);
rtl_opt_pass *make_pass_analyze_swaps (gcc::context*);

/* Hash table stuff for keeping track of TOC entries.  */

struct GTY(()) toc_hash_struct
{
  /* `key' will satisfy CONSTANT_P; in fact, it will satisfy
     ASM_OUTPUT_SPECIAL_POOL_ENTRY_P.  */
  rtx key;
  enum machine_mode key_mode;
  int labelno;
};

static GTY ((param_is (struct toc_hash_struct))) htab_t toc_hash_table;

/* Hash table to keep track of the argument types for builtin functions.  */

struct GTY(()) builtin_hash_struct
{
  tree type;
  enum machine_mode mode[4];    /* return value + 3 arguments.  */
  unsigned char uns_p[4];       /* and whether the types are unsigned.  */
};

static GTY ((param_is (struct builtin_hash_struct))) htab_t builtin_hash_table;

\f
/* Default register names.  */
char rs6000_reg_names[][8] =
{
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "mq", "lr", "ctr", "ap",
  "0", "1", "2", "3", "4", "5", "6", "7",
  "ca",
  /* AltiVec registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr",
  /* SPE High registers.  */
  "0", "1", "2", "3", "4", "5", "6", "7",
  "8", "9", "10", "11", "12", "13", "14", "15",
  "16", "17", "18", "19", "20", "21", "22", "23",
  "24", "25", "26", "27", "28", "29", "30", "31"
};

#ifdef TARGET_REGNAMES
static const char alt_reg_names[][8] =
{
  "%r0", "%r1", "%r2", "%r3", "%r4", "%r5", "%r6", "%r7",
  "%r8", "%r9", "%r10", "%r11", "%r12", "%r13", "%r14", "%r15",
  "%r16", "%r17", "%r18", "%r19", "%r20", "%r21", "%r22", "%r23",
  "%r24", "%r25", "%r26", "%r27", "%r28", "%r29", "%r30", "%r31",
  "%f0", "%f1", "%f2", "%f3", "%f4", "%f5", "%f6", "%f7",
  "%f8", "%f9", "%f10", "%f11", "%f12", "%f13", "%f14", "%f15",
  "%f16", "%f17", "%f18", "%f19", "%f20", "%f21", "%f22", "%f23",
  "%f24", "%f25", "%f26", "%f27", "%f28", "%f29", "%f30", "%f31",
  "mq", "lr", "ctr", "ap",
  "%cr0", "%cr1", "%cr2", "%cr3", "%cr4", "%cr5", "%cr6", "%cr7",
  "ca",
  /* AltiVec registers.  */
  "%v0", "%v1", "%v2", "%v3", "%v4", "%v5", "%v6", "%v7",
  "%v8", "%v9", "%v10", "%v11", "%v12", "%v13", "%v14", "%v15",
  "%v16", "%v17", "%v18", "%v19", "%v20", "%v21", "%v22", "%v23",
  "%v24", "%v25", "%v26", "%v27", "%v28", "%v29", "%v30", "%v31",
  "vrsave", "vscr",
  /* SPE registers.  */
  "spe_acc", "spefscr",
  /* Soft frame pointer.  */
  "sfp",
  /* HTM SPR registers.  */
  "tfhar", "tfiar", "texasr",
  /* SPE High registers.  */
  "%rh0", "%rh1", "%rh2", "%rh3", "%rh4", "%rh5", "%rh6", "%rh7",
  "%rh8", "%rh9", "%rh10", "%rh11", "%rh12", "%rh13", "%rh14", "%rh15",
  "%rh16", "%rh17", "%rh18", "%rh19", "%rh20", "%rh21", "%rh22", "%rh23",
  "%rh24", "%rh25", "%rh26", "%rh27", "%rh28", "%rh29", "%rh30", "%rh31"
};
#endif

/* Table of valid machine attributes.  */

static const struct attribute_spec rs6000_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
       affects_type_identity } */
  { "altivec",   1, 1, false, true,  false, rs6000_handle_altivec_attribute,
    false },
  { "longcall",  0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "shortcall", 0, 0, false, true,  true,  rs6000_handle_longcall_attribute,
    false },
  { "ms_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
  { "gcc_struct", 0, 0, false, false, false, rs6000_handle_struct_attribute,
    false },
#ifdef SUBTARGET_ATTRIBUTE_TABLE
  SUBTARGET_ATTRIBUTE_TABLE,
#endif
  { NULL,        0, 0, false, false, false, NULL, false }
};
\f
#ifndef TARGET_PROFILE_KERNEL
#define TARGET_PROFILE_KERNEL 0
#endif

/* The VRSAVE bitmask puts bit %v0 as the most significant bit.  */
#define ALTIVEC_REG_BIT(REGNO) (0x80000000 >> ((REGNO) - FIRST_ALTIVEC_REGNO))
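/* Example: ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO) is 0x80000000 (%v0 in
   the most significant bit), and ALTIVEC_REG_BIT (FIRST_ALTIVEC_REGNO + 31)
   is 0x00000001 (%v31 in the least significant bit).  */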
1297 \f
1298 /* Initialize the GCC target structure. */
1299 #undef TARGET_ATTRIBUTE_TABLE
1300 #define TARGET_ATTRIBUTE_TABLE rs6000_attribute_table
1301 #undef TARGET_SET_DEFAULT_TYPE_ATTRIBUTES
1302 #define TARGET_SET_DEFAULT_TYPE_ATTRIBUTES rs6000_set_default_type_attributes
1303 #undef TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P
1304 #define TARGET_ATTRIBUTE_TAKES_IDENTIFIER_P rs6000_attribute_takes_identifier_p
1305
1306 #undef TARGET_ASM_ALIGNED_DI_OP
1307 #define TARGET_ASM_ALIGNED_DI_OP DOUBLE_INT_ASM_OP
1308
1309 /* Default unaligned ops are only provided for ELF. Find the ops needed
1310 for non-ELF systems. */
1311 #ifndef OBJECT_FORMAT_ELF
1312 #if TARGET_XCOFF
1313 /* For XCOFF. rs6000_assemble_integer will handle unaligned DIs on
1314 64-bit targets. */
1315 #undef TARGET_ASM_UNALIGNED_HI_OP
1316 #define TARGET_ASM_UNALIGNED_HI_OP "\t.vbyte\t2,"
1317 #undef TARGET_ASM_UNALIGNED_SI_OP
1318 #define TARGET_ASM_UNALIGNED_SI_OP "\t.vbyte\t4,"
1319 #undef TARGET_ASM_UNALIGNED_DI_OP
1320 #define TARGET_ASM_UNALIGNED_DI_OP "\t.vbyte\t8,"
1321 #else
1322 /* For Darwin. */
1323 #undef TARGET_ASM_UNALIGNED_HI_OP
1324 #define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
1325 #undef TARGET_ASM_UNALIGNED_SI_OP
1326 #define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
1327 #undef TARGET_ASM_UNALIGNED_DI_OP
1328 #define TARGET_ASM_UNALIGNED_DI_OP "\t.quad\t"
1329 #undef TARGET_ASM_ALIGNED_DI_OP
1330 #define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
1331 #endif
1332 #endif
1333
1334 /* This hook deals with fixups for relocatable code and DI-mode objects
1335 in 64-bit code. */
1336 #undef TARGET_ASM_INTEGER
1337 #define TARGET_ASM_INTEGER rs6000_assemble_integer
1338
1339 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
1340 #undef TARGET_ASM_ASSEMBLE_VISIBILITY
1341 #define TARGET_ASM_ASSEMBLE_VISIBILITY rs6000_assemble_visibility
1342 #endif
1343
1344 #undef TARGET_SET_UP_BY_PROLOGUE
1345 #define TARGET_SET_UP_BY_PROLOGUE rs6000_set_up_by_prologue
1346
1347 #undef TARGET_HAVE_TLS
1348 #define TARGET_HAVE_TLS HAVE_AS_TLS
1349
1350 #undef TARGET_CANNOT_FORCE_CONST_MEM
1351 #define TARGET_CANNOT_FORCE_CONST_MEM rs6000_cannot_force_const_mem
1352
1353 #undef TARGET_DELEGITIMIZE_ADDRESS
1354 #define TARGET_DELEGITIMIZE_ADDRESS rs6000_delegitimize_address
1355
1356 #undef TARGET_CONST_NOT_OK_FOR_DEBUG_P
1357 #define TARGET_CONST_NOT_OK_FOR_DEBUG_P rs6000_const_not_ok_for_debug_p
1358
1359 #undef TARGET_ASM_FUNCTION_PROLOGUE
1360 #define TARGET_ASM_FUNCTION_PROLOGUE rs6000_output_function_prologue
1361 #undef TARGET_ASM_FUNCTION_EPILOGUE
1362 #define TARGET_ASM_FUNCTION_EPILOGUE rs6000_output_function_epilogue
1363
1364 #undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
1365 #define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA rs6000_output_addr_const_extra
1366
1367 #undef TARGET_LEGITIMIZE_ADDRESS
1368 #define TARGET_LEGITIMIZE_ADDRESS rs6000_legitimize_address
1369
1370 #undef TARGET_SCHED_VARIABLE_ISSUE
1371 #define TARGET_SCHED_VARIABLE_ISSUE rs6000_variable_issue
1372
1373 #undef TARGET_SCHED_ISSUE_RATE
1374 #define TARGET_SCHED_ISSUE_RATE rs6000_issue_rate
1375 #undef TARGET_SCHED_ADJUST_COST
1376 #define TARGET_SCHED_ADJUST_COST rs6000_adjust_cost
1377 #undef TARGET_SCHED_ADJUST_PRIORITY
1378 #define TARGET_SCHED_ADJUST_PRIORITY rs6000_adjust_priority
1379 #undef TARGET_SCHED_IS_COSTLY_DEPENDENCE
1380 #define TARGET_SCHED_IS_COSTLY_DEPENDENCE rs6000_is_costly_dependence
1381 #undef TARGET_SCHED_INIT
1382 #define TARGET_SCHED_INIT rs6000_sched_init
1383 #undef TARGET_SCHED_FINISH
1384 #define TARGET_SCHED_FINISH rs6000_sched_finish
1385 #undef TARGET_SCHED_REORDER
1386 #define TARGET_SCHED_REORDER rs6000_sched_reorder
1387 #undef TARGET_SCHED_REORDER2
1388 #define TARGET_SCHED_REORDER2 rs6000_sched_reorder2
1389
1390 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1391 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD rs6000_use_sched_lookahead
1392
1393 #undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD
1394 #define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD_GUARD rs6000_use_sched_lookahead_guard
1395
1396 #undef TARGET_SCHED_ALLOC_SCHED_CONTEXT
1397 #define TARGET_SCHED_ALLOC_SCHED_CONTEXT rs6000_alloc_sched_context
1398 #undef TARGET_SCHED_INIT_SCHED_CONTEXT
1399 #define TARGET_SCHED_INIT_SCHED_CONTEXT rs6000_init_sched_context
1400 #undef TARGET_SCHED_SET_SCHED_CONTEXT
1401 #define TARGET_SCHED_SET_SCHED_CONTEXT rs6000_set_sched_context
1402 #undef TARGET_SCHED_FREE_SCHED_CONTEXT
1403 #define TARGET_SCHED_FREE_SCHED_CONTEXT rs6000_free_sched_context
1404
1405 #undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
1406 #define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD rs6000_builtin_mask_for_load
1407 #undef TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT
1408 #define TARGET_VECTORIZE_SUPPORT_VECTOR_MISALIGNMENT \
1409 rs6000_builtin_support_vector_misalignment
1410 #undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
1411 #define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE rs6000_vector_alignment_reachable
1412 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
1413 #define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST \
1414 rs6000_builtin_vectorization_cost
1415 #undef TARGET_VECTORIZE_PREFERRED_SIMD_MODE
1416 #define TARGET_VECTORIZE_PREFERRED_SIMD_MODE \
1417 rs6000_preferred_simd_mode
1418 #undef TARGET_VECTORIZE_INIT_COST
1419 #define TARGET_VECTORIZE_INIT_COST rs6000_init_cost
1420 #undef TARGET_VECTORIZE_ADD_STMT_COST
1421 #define TARGET_VECTORIZE_ADD_STMT_COST rs6000_add_stmt_cost
1422 #undef TARGET_VECTORIZE_FINISH_COST
1423 #define TARGET_VECTORIZE_FINISH_COST rs6000_finish_cost
1424 #undef TARGET_VECTORIZE_DESTROY_COST_DATA
1425 #define TARGET_VECTORIZE_DESTROY_COST_DATA rs6000_destroy_cost_data
1426
1427 #undef TARGET_INIT_BUILTINS
1428 #define TARGET_INIT_BUILTINS rs6000_init_builtins
1429 #undef TARGET_BUILTIN_DECL
1430 #define TARGET_BUILTIN_DECL rs6000_builtin_decl
1431
1432 #undef TARGET_EXPAND_BUILTIN
1433 #define TARGET_EXPAND_BUILTIN rs6000_expand_builtin
1434
1435 #undef TARGET_MANGLE_TYPE
1436 #define TARGET_MANGLE_TYPE rs6000_mangle_type
1437
1438 #undef TARGET_INIT_LIBFUNCS
1439 #define TARGET_INIT_LIBFUNCS rs6000_init_libfuncs
1440
1441 #if TARGET_MACHO
1442 #undef TARGET_BINDS_LOCAL_P
1443 #define TARGET_BINDS_LOCAL_P darwin_binds_local_p
1444 #endif
1445
1446 #undef TARGET_MS_BITFIELD_LAYOUT_P
1447 #define TARGET_MS_BITFIELD_LAYOUT_P rs6000_ms_bitfield_layout_p
1448
1449 #undef TARGET_ASM_OUTPUT_MI_THUNK
1450 #define TARGET_ASM_OUTPUT_MI_THUNK rs6000_output_mi_thunk
1451
1452 #undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1453 #define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
1454
1455 #undef TARGET_FUNCTION_OK_FOR_SIBCALL
1456 #define TARGET_FUNCTION_OK_FOR_SIBCALL rs6000_function_ok_for_sibcall
1457
1458 #undef TARGET_REGISTER_MOVE_COST
1459 #define TARGET_REGISTER_MOVE_COST rs6000_register_move_cost
1460 #undef TARGET_MEMORY_MOVE_COST
1461 #define TARGET_MEMORY_MOVE_COST rs6000_memory_move_cost
1462 #undef TARGET_RTX_COSTS
1463 #define TARGET_RTX_COSTS rs6000_rtx_costs
1464 #undef TARGET_ADDRESS_COST
1465 #define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
1466
1467 #undef TARGET_DWARF_REGISTER_SPAN
1468 #define TARGET_DWARF_REGISTER_SPAN rs6000_dwarf_register_span
1469
1470 #undef TARGET_INIT_DWARF_REG_SIZES_EXTRA
1471 #define TARGET_INIT_DWARF_REG_SIZES_EXTRA rs6000_init_dwarf_reg_sizes_extra
1472
1473 #undef TARGET_MEMBER_TYPE_FORCES_BLK
1474 #define TARGET_MEMBER_TYPE_FORCES_BLK rs6000_member_type_forces_blk
1475
1476 /* On rs6000, function arguments are promoted, as are function return
1477 values. */
1478 #undef TARGET_PROMOTE_FUNCTION_MODE
1479 #define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
1480
1481 #undef TARGET_RETURN_IN_MEMORY
1482 #define TARGET_RETURN_IN_MEMORY rs6000_return_in_memory
1483
1484 #undef TARGET_RETURN_IN_MSB
1485 #define TARGET_RETURN_IN_MSB rs6000_return_in_msb
1486
1487 #undef TARGET_SETUP_INCOMING_VARARGS
1488 #define TARGET_SETUP_INCOMING_VARARGS setup_incoming_varargs
1489
1490 /* Always strict argument naming on rs6000. */
1491 #undef TARGET_STRICT_ARGUMENT_NAMING
1492 #define TARGET_STRICT_ARGUMENT_NAMING hook_bool_CUMULATIVE_ARGS_true
1493 #undef TARGET_PRETEND_OUTGOING_VARARGS_NAMED
1494 #define TARGET_PRETEND_OUTGOING_VARARGS_NAMED hook_bool_CUMULATIVE_ARGS_true
1495 #undef TARGET_SPLIT_COMPLEX_ARG
1496 #define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
1497 #undef TARGET_MUST_PASS_IN_STACK
1498 #define TARGET_MUST_PASS_IN_STACK rs6000_must_pass_in_stack
1499 #undef TARGET_PASS_BY_REFERENCE
1500 #define TARGET_PASS_BY_REFERENCE rs6000_pass_by_reference
1501 #undef TARGET_ARG_PARTIAL_BYTES
1502 #define TARGET_ARG_PARTIAL_BYTES rs6000_arg_partial_bytes
1503 #undef TARGET_FUNCTION_ARG_ADVANCE
1504 #define TARGET_FUNCTION_ARG_ADVANCE rs6000_function_arg_advance
1505 #undef TARGET_FUNCTION_ARG
1506 #define TARGET_FUNCTION_ARG rs6000_function_arg
1507 #undef TARGET_FUNCTION_ARG_BOUNDARY
1508 #define TARGET_FUNCTION_ARG_BOUNDARY rs6000_function_arg_boundary
1509
1510 #undef TARGET_BUILD_BUILTIN_VA_LIST
1511 #define TARGET_BUILD_BUILTIN_VA_LIST rs6000_build_builtin_va_list
1512
1513 #undef TARGET_EXPAND_BUILTIN_VA_START
1514 #define TARGET_EXPAND_BUILTIN_VA_START rs6000_va_start
1515
1516 #undef TARGET_GIMPLIFY_VA_ARG_EXPR
1517 #define TARGET_GIMPLIFY_VA_ARG_EXPR rs6000_gimplify_va_arg
1518
1519 #undef TARGET_EH_RETURN_FILTER_MODE
1520 #define TARGET_EH_RETURN_FILTER_MODE rs6000_eh_return_filter_mode
1521
1522 #undef TARGET_SCALAR_MODE_SUPPORTED_P
1523 #define TARGET_SCALAR_MODE_SUPPORTED_P rs6000_scalar_mode_supported_p
1524
1525 #undef TARGET_VECTOR_MODE_SUPPORTED_P
1526 #define TARGET_VECTOR_MODE_SUPPORTED_P rs6000_vector_mode_supported_p
1527
1528 #undef TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN
1529 #define TARGET_INVALID_ARG_FOR_UNPROTOTYPED_FN invalid_arg_for_unprototyped_fn
1530
1531 #undef TARGET_ASM_LOOP_ALIGN_MAX_SKIP
1532 #define TARGET_ASM_LOOP_ALIGN_MAX_SKIP rs6000_loop_align_max_skip
1533
1534 #undef TARGET_OPTION_OVERRIDE
1535 #define TARGET_OPTION_OVERRIDE rs6000_option_override
1536
1537 #undef TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION
1538 #define TARGET_VECTORIZE_BUILTIN_VECTORIZED_FUNCTION \
1539 rs6000_builtin_vectorized_function
1540
1541 #if !TARGET_MACHO
1542 #undef TARGET_STACK_PROTECT_FAIL
1543 #define TARGET_STACK_PROTECT_FAIL rs6000_stack_protect_fail
1544 #endif
1545
1546 /* MPC604EUM 3.5.2 Weak Consistency between Multiple Processors
1547 The PowerPC architecture requires only weak consistency among
1548 processors--that is, memory accesses between processors need not be
1549 sequentially consistent and memory accesses among processors can occur
1550 in any order. The ability to order memory accesses weakly provides
1551 opportunities for more efficient use of the system bus. Unless a
1552 dependency exists, the 604e allows read operations to precede store
1553 operations. */
1554 #undef TARGET_RELAXED_ORDERING
1555 #define TARGET_RELAXED_ORDERING true
1556
1557 #ifdef HAVE_AS_TLS
1558 #undef TARGET_ASM_OUTPUT_DWARF_DTPREL
1559 #define TARGET_ASM_OUTPUT_DWARF_DTPREL rs6000_output_dwarf_dtprel
1560 #endif
1561
1562 /* Use a 32-bit anchor range. This leads to sequences like:
1563
1564 addis tmp,anchor,high
1565 add dest,tmp,low
1566
1567 where tmp itself acts as an anchor, and can be shared between
1568 accesses to the same 64k page. */
1569 #undef TARGET_MIN_ANCHOR_OFFSET
1570 #define TARGET_MIN_ANCHOR_OFFSET -0x7fffffff - 1
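/* Note: the minimum is spelled -0x7fffffff - 1 rather than -0x80000000
   because the literal 0x80000000 does not fit in a 32-bit signed int and
   would be treated as an unsigned constant; the subtraction keeps the
   value signed. */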
1571 #undef TARGET_MAX_ANCHOR_OFFSET
1572 #define TARGET_MAX_ANCHOR_OFFSET 0x7fffffff
1573 #undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1574 #define TARGET_USE_BLOCKS_FOR_CONSTANT_P rs6000_use_blocks_for_constant_p
1575 #undef TARGET_USE_BLOCKS_FOR_DECL_P
1576 #define TARGET_USE_BLOCKS_FOR_DECL_P rs6000_use_blocks_for_decl_p
1577
1578 #undef TARGET_BUILTIN_RECIPROCAL
1579 #define TARGET_BUILTIN_RECIPROCAL rs6000_builtin_reciprocal
1580
1581 #undef TARGET_EXPAND_TO_RTL_HOOK
1582 #define TARGET_EXPAND_TO_RTL_HOOK rs6000_alloc_sdmode_stack_slot
1583
1584 #undef TARGET_INSTANTIATE_DECLS
1585 #define TARGET_INSTANTIATE_DECLS rs6000_instantiate_decls
1586
1587 #undef TARGET_SECONDARY_RELOAD
1588 #define TARGET_SECONDARY_RELOAD rs6000_secondary_reload
1589
1590 #undef TARGET_LEGITIMATE_ADDRESS_P
1591 #define TARGET_LEGITIMATE_ADDRESS_P rs6000_legitimate_address_p
1592
1593 #undef TARGET_MODE_DEPENDENT_ADDRESS_P
1594 #define TARGET_MODE_DEPENDENT_ADDRESS_P rs6000_mode_dependent_address_p
1595
1596 #undef TARGET_LRA_P
1597 #define TARGET_LRA_P rs6000_lra_p
1598
1599 #undef TARGET_CAN_ELIMINATE
1600 #define TARGET_CAN_ELIMINATE rs6000_can_eliminate
1601
1602 #undef TARGET_CONDITIONAL_REGISTER_USAGE
1603 #define TARGET_CONDITIONAL_REGISTER_USAGE rs6000_conditional_register_usage
1604
1605 #undef TARGET_TRAMPOLINE_INIT
1606 #define TARGET_TRAMPOLINE_INIT rs6000_trampoline_init
1607
1608 #undef TARGET_FUNCTION_VALUE
1609 #define TARGET_FUNCTION_VALUE rs6000_function_value
1610
1611 #undef TARGET_OPTION_VALID_ATTRIBUTE_P
1612 #define TARGET_OPTION_VALID_ATTRIBUTE_P rs6000_valid_attribute_p
1613
1614 #undef TARGET_OPTION_SAVE
1615 #define TARGET_OPTION_SAVE rs6000_function_specific_save
1616
1617 #undef TARGET_OPTION_RESTORE
1618 #define TARGET_OPTION_RESTORE rs6000_function_specific_restore
1619
1620 #undef TARGET_OPTION_PRINT
1621 #define TARGET_OPTION_PRINT rs6000_function_specific_print
1622
1623 #undef TARGET_CAN_INLINE_P
1624 #define TARGET_CAN_INLINE_P rs6000_can_inline_p
1625
1626 #undef TARGET_SET_CURRENT_FUNCTION
1627 #define TARGET_SET_CURRENT_FUNCTION rs6000_set_current_function
1628
1629 #undef TARGET_LEGITIMATE_CONSTANT_P
1630 #define TARGET_LEGITIMATE_CONSTANT_P rs6000_legitimate_constant_p
1631
1632 #undef TARGET_VECTORIZE_VEC_PERM_CONST_OK
1633 #define TARGET_VECTORIZE_VEC_PERM_CONST_OK rs6000_vectorize_vec_perm_const_ok
1634
1635 #undef TARGET_CAN_USE_DOLOOP_P
1636 #define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
1637 \f
1638
1639 /* Processor table. */
1640 struct rs6000_ptt
1641 {
1642 const char *const name; /* Canonical processor name. */
1643 const enum processor_type processor; /* Processor type enum value. */
1644 const HOST_WIDE_INT target_enable; /* Target flags to enable. */
1645 };
1646
1647 static struct rs6000_ptt const processor_target_table[] =
1648 {
1649 #define RS6000_CPU(NAME, CPU, FLAGS) { NAME, CPU, FLAGS },
1650 #include "rs6000-cpus.def"
1651 #undef RS6000_CPU
1652 };
1653
1654 /* Look up a processor name for -mcpu=xxx and -mtune=xxx. Return -1 if the
1655 name is invalid. */
1656
1657 static int
1658 rs6000_cpu_name_lookup (const char *name)
1659 {
1660 size_t i;
1661
1662 if (name != NULL)
1663 {
1664 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
1665 if (! strcmp (name, processor_target_table[i].name))
1666 return (int)i;
1667 }
1668
1669 return -1;
1670 }
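/* Illustrative use: rs6000_cpu_name_lookup ("power7") returns the index of
   the matching entry generated from rs6000-cpus.def, while an unknown name
   or a NULL pointer yields -1. */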
1671
1672 \f
1673 /* Return number of consecutive hard regs needed starting at reg REGNO
1674 to hold something of mode MODE.
1675 This is ordinarily the length in words of a value of mode MODE
1676 but can be less for certain modes in special long registers.
1677
1678 For the SPE, GPRs are 64 bits but only 32 bits are visible in
1679 scalar instructions. The upper 32 bits are only available to the
1680 SIMD instructions.
1681
1682 POWER and PowerPC GPRs hold 32 bits worth;
1683 PowerPC64 GPRs and FPRs hold 64 bits worth. */
1684
1685 static int
1686 rs6000_hard_regno_nregs_internal (int regno, enum machine_mode mode)
1687 {
1688 unsigned HOST_WIDE_INT reg_size;
1689
1690 /* TF/TD modes are special in that they always take 2 registers. */
1691 if (FP_REGNO_P (regno))
1692 reg_size = ((VECTOR_MEM_VSX_P (mode) && mode != TDmode && mode != TFmode)
1693 ? UNITS_PER_VSX_WORD
1694 : UNITS_PER_FP_WORD);
1695
1696 else if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1697 reg_size = UNITS_PER_SPE_WORD;
1698
1699 else if (ALTIVEC_REGNO_P (regno))
1700 reg_size = UNITS_PER_ALTIVEC_WORD;
1701
1702 /* The value returned for SCmode in the E500 double case is 2 for
1703 ABI compatibility; storing an SCmode value in a single register
1704 would require function_arg and rs6000_spe_function_arg to handle
1705 SCmode so as to pass the value correctly in a pair of
1706 registers. */
1707 else if (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode) && mode != SCmode
1708 && !DECIMAL_FLOAT_MODE_P (mode))
1709 reg_size = UNITS_PER_FP_WORD;
1710
1711 else
1712 reg_size = UNITS_PER_WORD;
1713
1714 return (GET_MODE_SIZE (mode) + reg_size - 1) / reg_size;
1715 }
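/* The return expression above is a ceiling division. For example, assuming
   UNITS_PER_FP_WORD == 8, TFmode (16 bytes) in an FPR needs
   (16 + 8 - 1) / 8 == 2 registers, while DFmode (8 bytes) needs
   (8 + 8 - 1) / 8 == 1. */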
1716
1717 /* Value is 1 if hard register REGNO can hold a value of machine-mode
1718 MODE. */
1719 static int
1720 rs6000_hard_regno_mode_ok (int regno, enum machine_mode mode)
1721 {
1722 int last_regno = regno + rs6000_hard_regno_nregs[mode][regno] - 1;
1723
1724 /* PTImode can only go in GPRs. Quad word memory operations require
1725 even/odd register pairs (e.g., r10/r11), and we use PTImode where we
1726 need to deal with quad word memory operations. Don't allow quad words
1727 in the argument or frame pointer registers, just registers 0..31. */
1728 if (mode == PTImode)
1729 return (IN_RANGE (regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1730 && IN_RANGE (last_regno, FIRST_GPR_REGNO, LAST_GPR_REGNO)
1731 && ((regno & 1) == 0));
1732
1733 /* The VSX registers that overlap the FPR registers are wider than on
1734 non-VSX implementations. Don't allow an item to be split between a FP
1735 register and an Altivec register. Allow TImode in all VSX registers if
1736 the user asked for it. */
1737 if (TARGET_VSX && VSX_REGNO_P (regno)
1738 && (VECTOR_MEM_VSX_P (mode)
1739 || reg_addr[mode].scalar_in_vmx_p
1740 || (TARGET_VSX_TIMODE && mode == TImode)
1741 || (TARGET_VADDUQM && mode == V1TImode)))
1742 {
1743 if (FP_REGNO_P (regno))
1744 return FP_REGNO_P (last_regno);
1745
1746 if (ALTIVEC_REGNO_P (regno))
1747 {
1748 if (GET_MODE_SIZE (mode) != 16 && !reg_addr[mode].scalar_in_vmx_p)
1749 return 0;
1750
1751 return ALTIVEC_REGNO_P (last_regno);
1752 }
1753 }
1754
1755 /* The GPRs can hold any mode, but values bigger than one register
1756 cannot go past R31. */
1757 if (INT_REGNO_P (regno))
1758 return INT_REGNO_P (last_regno);
1759
1760 /* The float registers (except for VSX vector modes) can only hold floating
1761 modes and DImode. */
1762 if (FP_REGNO_P (regno))
1763 {
1764 if (SCALAR_FLOAT_MODE_P (mode)
1765 && (mode != TDmode || (regno % 2) == 0)
1766 && FP_REGNO_P (last_regno))
1767 return 1;
1768
1769 if (GET_MODE_CLASS (mode) == MODE_INT
1770 && GET_MODE_SIZE (mode) == UNITS_PER_FP_WORD)
1771 return 1;
1772
1773 if (PAIRED_SIMD_REGNO_P (regno) && TARGET_PAIRED_FLOAT
1774 && PAIRED_VECTOR_MODE (mode))
1775 return 1;
1776
1777 return 0;
1778 }
1779
1780 /* The CR register can only hold CC modes. */
1781 if (CR_REGNO_P (regno))
1782 return GET_MODE_CLASS (mode) == MODE_CC;
1783
1784 if (CA_REGNO_P (regno))
1785 return mode == BImode;
1786
1787 /* AltiVec modes can go only in AltiVec registers. */
1788 if (ALTIVEC_REGNO_P (regno))
1789 return (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode)
1790 || mode == V1TImode);
1791
1792 /* ...but GPRs can hold SIMD data on the SPE in one register. */
1793 if (SPE_SIMD_REGNO_P (regno) && TARGET_SPE && SPE_VECTOR_MODE (mode))
1794 return 1;
1795
1796 /* We cannot put non-VSX TImode or PTImode anywhere except the general
1797 registers, and the value must fit within the register set. */
1798
1799 return GET_MODE_SIZE (mode) <= UNITS_PER_WORD;
1800 }
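/* Illustrative consequences of the checks above: TDmode is accepted in the
   FPRs only at an even register number, CCmode is accepted only in a CR
   field, and a multi-register integer mode in the GPRs is rejected as soon
   as last_regno would fall outside the GPR file. */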
1801
1802 /* Print interesting facts about registers. */
1803 static void
1804 rs6000_debug_reg_print (int first_regno, int last_regno, const char *reg_name)
1805 {
1806 int r, m;
1807
1808 for (r = first_regno; r <= last_regno; ++r)
1809 {
1810 const char *comma = "";
1811 int len;
1812
1813 if (first_regno == last_regno)
1814 fprintf (stderr, "%s:\t", reg_name);
1815 else
1816 fprintf (stderr, "%s%d:\t", reg_name, r - first_regno);
1817
1818 len = 8;
1819 for (m = 0; m < NUM_MACHINE_MODES; ++m)
1820 if (rs6000_hard_regno_mode_ok_p[m][r] && rs6000_hard_regno_nregs[m][r])
1821 {
1822 if (len > 70)
1823 {
1824 fprintf (stderr, ",\n\t");
1825 len = 8;
1826 comma = "";
1827 }
1828
1829 if (rs6000_hard_regno_nregs[m][r] > 1)
1830 len += fprintf (stderr, "%s%s/%d", comma, GET_MODE_NAME (m),
1831 rs6000_hard_regno_nregs[m][r]);
1832 else
1833 len += fprintf (stderr, "%s%s", comma, GET_MODE_NAME (m));
1834
1835 comma = ", ";
1836 }
1837
1838 if (call_used_regs[r])
1839 {
1840 if (len > 70)
1841 {
1842 fprintf (stderr, ",\n\t");
1843 len = 8;
1844 comma = "";
1845 }
1846
1847 len += fprintf (stderr, "%s%s", comma, "call-used");
1848 comma = ", ";
1849 }
1850
1851 if (fixed_regs[r])
1852 {
1853 if (len > 70)
1854 {
1855 fprintf (stderr, ",\n\t");
1856 len = 8;
1857 comma = "";
1858 }
1859
1860 len += fprintf (stderr, "%s%s", comma, "fixed");
1861 comma = ", ";
1862 }
1863
1864 if (len > 70)
1865 {
1866 fprintf (stderr, ",\n\t");
1867 comma = "";
1868 }
1869
1870 len += fprintf (stderr, "%sreg-class = %s", comma,
1871 reg_class_names[(int)rs6000_regno_regclass[r]]);
1872 comma = ", ";
1873
1874 if (len > 70)
1875 {
1876 fprintf (stderr, ",\n\t");
1877 comma = "";
1878 }
1879
1880 fprintf (stderr, "%sregno = %d\n", comma, r);
1881 }
1882 }
1883
1884 static const char *
1885 rs6000_debug_vector_unit (enum rs6000_vector v)
1886 {
1887 const char *ret;
1888
1889 switch (v)
1890 {
1891 case VECTOR_NONE: ret = "none"; break;
1892 case VECTOR_ALTIVEC: ret = "altivec"; break;
1893 case VECTOR_VSX: ret = "vsx"; break;
1894 case VECTOR_P8_VECTOR: ret = "p8_vector"; break;
1895 case VECTOR_PAIRED: ret = "paired"; break;
1896 case VECTOR_SPE: ret = "spe"; break;
1897 case VECTOR_OTHER: ret = "other"; break;
1898 default: ret = "unknown"; break;
1899 }
1900
1901 return ret;
1902 }
1903
1904 /* Print the address masks in a human readable fashion. */
1905 DEBUG_FUNCTION void
1906 rs6000_debug_print_mode (ssize_t m)
1907 {
1908 ssize_t rc;
1909
1910 fprintf (stderr, "Mode: %-5s", GET_MODE_NAME (m));
1911 for (rc = 0; rc < N_RELOAD_REG; rc++)
1912 {
1913 addr_mask_type mask = reg_addr[m].addr_mask[rc];
1914 fprintf (stderr,
1915 " %s: %c%c%c%c%c%c",
1916 reload_reg_map[rc].name,
1917 (mask & RELOAD_REG_VALID) != 0 ? 'v' : ' ',
1918 (mask & RELOAD_REG_MULTIPLE) != 0 ? 'm' : ' ',
1919 (mask & RELOAD_REG_INDEXED) != 0 ? 'i' : ' ',
1920 (mask & RELOAD_REG_OFFSET) != 0 ? 'o' : ' ',
1921 (mask & RELOAD_REG_PRE_INCDEC) != 0 ? '+' : ' ',
1922 (mask & RELOAD_REG_PRE_MODIFY) != 0 ? '+' : ' ');
1923 }
1924
1925 if (rs6000_vector_unit[m] != VECTOR_NONE
1926 || rs6000_vector_mem[m] != VECTOR_NONE
1927 || (reg_addr[m].reload_store != CODE_FOR_nothing)
1928 || (reg_addr[m].reload_load != CODE_FOR_nothing)
1929 || reg_addr[m].scalar_in_vmx_p)
1930 {
1931 fprintf (stderr,
1932 " Vector-arith=%-10s Vector-mem=%-10s Reload=%c%c Upper=%c",
1933 rs6000_debug_vector_unit (rs6000_vector_unit[m]),
1934 rs6000_debug_vector_unit (rs6000_vector_mem[m]),
1935 (reg_addr[m].reload_store != CODE_FOR_nothing) ? 's' : '*',
1936 (reg_addr[m].reload_load != CODE_FOR_nothing) ? 'l' : '*',
1937 (reg_addr[m].scalar_in_vmx_p) ? 'y' : 'n');
1938 }
1939
1940 fputs ("\n", stderr);
1941 }
1942
1943 #define DEBUG_FMT_ID "%-32s= "
1944 #define DEBUG_FMT_D DEBUG_FMT_ID "%d\n"
1945 #define DEBUG_FMT_WX DEBUG_FMT_ID "%#.12" HOST_WIDE_INT_PRINT "x: "
1946 #define DEBUG_FMT_S DEBUG_FMT_ID "%s\n"
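/* Via string literal concatenation, DEBUG_FMT_D expands to "%-32s= %d\n"
   and DEBUG_FMT_S to "%-32s= %s\n", so every -mdebug=reg line shares the
   same 32-column label field. */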
1947
1948 /* Print various interesting information with -mdebug=reg. */
1949 static void
1950 rs6000_debug_reg_global (void)
1951 {
1952 static const char *const tf[2] = { "false", "true" };
1953 const char *nl = (const char *)0;
1954 int m;
1955 size_t m1, m2, v;
1956 char costly_num[20];
1957 char nop_num[20];
1958 char flags_buffer[40];
1959 const char *costly_str;
1960 const char *nop_str;
1961 const char *trace_str;
1962 const char *abi_str;
1963 const char *cmodel_str;
1964 struct cl_target_option cl_opts;
1965
1966 /* Modes we want tieable information on. */
1967 static const enum machine_mode print_tieable_modes[] = {
1968 QImode,
1969 HImode,
1970 SImode,
1971 DImode,
1972 TImode,
1973 PTImode,
1974 SFmode,
1975 DFmode,
1976 TFmode,
1977 SDmode,
1978 DDmode,
1979 TDmode,
1980 V8QImode,
1981 V4HImode,
1982 V2SImode,
1983 V16QImode,
1984 V8HImode,
1985 V4SImode,
1986 V2DImode,
1987 V1TImode,
1988 V32QImode,
1989 V16HImode,
1990 V8SImode,
1991 V4DImode,
1992 V2TImode,
1993 V2SFmode,
1994 V4SFmode,
1995 V2DFmode,
1996 V8SFmode,
1997 V4DFmode,
1998 CCmode,
1999 CCUNSmode,
2000 CCEQmode,
2001 };
2002
2003 /* Virtual regs we are interested in. */
2004 static const struct {
2005 int regno; /* register number. */
2006 const char *name; /* register name. */
2007 } virtual_regs[] = {
2008 { STACK_POINTER_REGNUM, "stack pointer:" },
2009 { TOC_REGNUM, "toc: " },
2010 { STATIC_CHAIN_REGNUM, "static chain: " },
2011 { RS6000_PIC_OFFSET_TABLE_REGNUM, "pic offset: " },
2012 { HARD_FRAME_POINTER_REGNUM, "hard frame: " },
2013 { ARG_POINTER_REGNUM, "arg pointer: " },
2014 { FRAME_POINTER_REGNUM, "frame pointer:" },
2015 { FIRST_PSEUDO_REGISTER, "first pseudo: " },
2016 { FIRST_VIRTUAL_REGISTER, "first virtual:" },
2017 { VIRTUAL_INCOMING_ARGS_REGNUM, "incoming_args:" },
2018 { VIRTUAL_STACK_VARS_REGNUM, "stack_vars: " },
2019 { VIRTUAL_STACK_DYNAMIC_REGNUM, "stack_dynamic:" },
2020 { VIRTUAL_OUTGOING_ARGS_REGNUM, "outgoing_args:" },
2021 { VIRTUAL_CFA_REGNUM, "cfa (frame): " },
2022 { VIRTUAL_PREFERRED_STACK_BOUNDARY_REGNUM, "stack boundary:" },
2023 { LAST_VIRTUAL_REGISTER, "last virtual: " },
2024 };
2025
2026 fputs ("\nHard register information:\n", stderr);
2027 rs6000_debug_reg_print (FIRST_GPR_REGNO, LAST_GPR_REGNO, "gr");
2028 rs6000_debug_reg_print (FIRST_FPR_REGNO, LAST_FPR_REGNO, "fp");
2029 rs6000_debug_reg_print (FIRST_ALTIVEC_REGNO,
2030 LAST_ALTIVEC_REGNO,
2031 "vs");
2032 rs6000_debug_reg_print (LR_REGNO, LR_REGNO, "lr");
2033 rs6000_debug_reg_print (CTR_REGNO, CTR_REGNO, "ctr");
2034 rs6000_debug_reg_print (CR0_REGNO, CR7_REGNO, "cr");
2035 rs6000_debug_reg_print (CA_REGNO, CA_REGNO, "ca");
2036 rs6000_debug_reg_print (VRSAVE_REGNO, VRSAVE_REGNO, "vrsave");
2037 rs6000_debug_reg_print (VSCR_REGNO, VSCR_REGNO, "vscr");
2038 rs6000_debug_reg_print (SPE_ACC_REGNO, SPE_ACC_REGNO, "spe_a");
2039 rs6000_debug_reg_print (SPEFSCR_REGNO, SPEFSCR_REGNO, "spe_f");
2040
2041 fputs ("\nVirtual/stack/frame registers:\n", stderr);
2042 for (v = 0; v < ARRAY_SIZE (virtual_regs); v++)
2043 fprintf (stderr, "%s regno = %3d\n", virtual_regs[v].name, virtual_regs[v].regno);
2044
2045 fprintf (stderr,
2046 "\n"
2047 "d reg_class = %s\n"
2048 "f reg_class = %s\n"
2049 "v reg_class = %s\n"
2050 "wa reg_class = %s\n"
2051 "wd reg_class = %s\n"
2052 "wf reg_class = %s\n"
2053 "wg reg_class = %s\n"
2054 "wh reg_class = %s\n"
2055 "wi reg_class = %s\n"
2056 "wj reg_class = %s\n"
2057 "wk reg_class = %s\n"
2058 "wl reg_class = %s\n"
2059 "wm reg_class = %s\n"
2060 "wr reg_class = %s\n"
2061 "ws reg_class = %s\n"
2062 "wt reg_class = %s\n"
2063 "wu reg_class = %s\n"
2064 "wv reg_class = %s\n"
2065 "ww reg_class = %s\n"
2066 "wx reg_class = %s\n"
2067 "wy reg_class = %s\n"
2068 "wz reg_class = %s\n"
2069 "\n",
2070 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_d]],
2071 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_f]],
2072 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_v]],
2073 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wa]],
2074 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wd]],
2075 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wf]],
2076 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wg]],
2077 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wh]],
2078 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wi]],
2079 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wj]],
2080 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wk]],
2081 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wl]],
2082 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wm]],
2083 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wr]],
2084 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ws]],
2085 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wt]],
2086 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wu]],
2087 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wv]],
2088 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_ww]],
2089 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wx]],
2090 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wy]],
2091 reg_class_names[rs6000_constraints[RS6000_CONSTRAINT_wz]]);
2092
2093 nl = "\n";
2094 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2095 rs6000_debug_print_mode (m);
2096
2097 fputs ("\n", stderr);
2098
2099 for (m1 = 0; m1 < ARRAY_SIZE (print_tieable_modes); m1++)
2100 {
2101 enum machine_mode mode1 = print_tieable_modes[m1];
2102 bool first_time = true;
2103
2104 nl = (const char *)0;
2105 for (m2 = 0; m2 < ARRAY_SIZE (print_tieable_modes); m2++)
2106 {
2107 enum machine_mode mode2 = print_tieable_modes[m2];
2108 if (mode1 != mode2 && MODES_TIEABLE_P (mode1, mode2))
2109 {
2110 if (first_time)
2111 {
2112 fprintf (stderr, "Tieable modes %s:", GET_MODE_NAME (mode1));
2113 nl = "\n";
2114 first_time = false;
2115 }
2116
2117 fprintf (stderr, " %s", GET_MODE_NAME (mode2));
2118 }
2119 }
2120
2121 if (!first_time)
2122 fputs ("\n", stderr);
2123 }
2124
2125 if (nl)
2126 fputs (nl, stderr);
2127
2128 if (rs6000_recip_control)
2129 {
2130 fprintf (stderr, "\nReciprocal mask = 0x%x\n", rs6000_recip_control);
2131
2132 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2133 if (rs6000_recip_bits[m])
2134 {
2135 fprintf (stderr,
2136 "Reciprocal estimate mode: %-5s divide: %s rsqrt: %s\n",
2137 GET_MODE_NAME (m),
2138 (RS6000_RECIP_AUTO_RE_P (m)
2139 ? "auto"
2140 : (RS6000_RECIP_HAVE_RE_P (m) ? "have" : "none")),
2141 (RS6000_RECIP_AUTO_RSQRTE_P (m)
2142 ? "auto"
2143 : (RS6000_RECIP_HAVE_RSQRTE_P (m) ? "have" : "none")));
2144 }
2145
2146 fputs ("\n", stderr);
2147 }
2148
2149 if (rs6000_cpu_index >= 0)
2150 {
2151 const char *name = processor_target_table[rs6000_cpu_index].name;
2152 HOST_WIDE_INT flags
2153 = processor_target_table[rs6000_cpu_index].target_enable;
2154
2155 sprintf (flags_buffer, "-mcpu=%s flags", name);
2156 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2157 }
2158 else
2159 fprintf (stderr, DEBUG_FMT_S, "cpu", "<none>");
2160
2161 if (rs6000_tune_index >= 0)
2162 {
2163 const char *name = processor_target_table[rs6000_tune_index].name;
2164 HOST_WIDE_INT flags
2165 = processor_target_table[rs6000_tune_index].target_enable;
2166
2167 sprintf (flags_buffer, "-mtune=%s flags", name);
2168 rs6000_print_isa_options (stderr, 0, flags_buffer, flags);
2169 }
2170 else
2171 fprintf (stderr, DEBUG_FMT_S, "tune", "<none>");
2172
2173 cl_target_option_save (&cl_opts, &global_options);
2174 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags",
2175 rs6000_isa_flags);
2176
2177 rs6000_print_isa_options (stderr, 0, "rs6000_isa_flags_explicit",
2178 rs6000_isa_flags_explicit);
2179
2180 rs6000_print_builtin_options (stderr, 0, "rs6000_builtin_mask",
2181 rs6000_builtin_mask);
2182
2183 rs6000_print_isa_options (stderr, 0, "TARGET_DEFAULT", TARGET_DEFAULT);
2184
2185 fprintf (stderr, DEBUG_FMT_S, "--with-cpu default",
2186 OPTION_TARGET_CPU_DEFAULT ? OPTION_TARGET_CPU_DEFAULT : "<none>");
2187
2188 switch (rs6000_sched_costly_dep)
2189 {
2190 case max_dep_latency:
2191 costly_str = "max_dep_latency";
2192 break;
2193
2194 case no_dep_costly:
2195 costly_str = "no_dep_costly";
2196 break;
2197
2198 case all_deps_costly:
2199 costly_str = "all_deps_costly";
2200 break;
2201
2202 case true_store_to_load_dep_costly:
2203 costly_str = "true_store_to_load_dep_costly";
2204 break;
2205
2206 case store_to_load_dep_costly:
2207 costly_str = "store_to_load_dep_costly";
2208 break;
2209
2210 default:
2211 costly_str = costly_num;
2212 sprintf (costly_num, "%d", (int)rs6000_sched_costly_dep);
2213 break;
2214 }
2215
2216 fprintf (stderr, DEBUG_FMT_S, "sched_costly_dep", costly_str);
2217
2218 switch (rs6000_sched_insert_nops)
2219 {
2220 case sched_finish_regroup_exact:
2221 nop_str = "sched_finish_regroup_exact";
2222 break;
2223
2224 case sched_finish_pad_groups:
2225 nop_str = "sched_finish_pad_groups";
2226 break;
2227
2228 case sched_finish_none:
2229 nop_str = "sched_finish_none";
2230 break;
2231
2232 default:
2233 nop_str = nop_num;
2234 sprintf (nop_num, "%d", (int)rs6000_sched_insert_nops);
2235 break;
2236 }
2237
2238 fprintf (stderr, DEBUG_FMT_S, "sched_insert_nops", nop_str);
2239
2240 switch (rs6000_sdata)
2241 {
2242 default:
2243 case SDATA_NONE:
2244 break;
2245
2246 case SDATA_DATA:
2247 fprintf (stderr, DEBUG_FMT_S, "sdata", "data");
2248 break;
2249
2250 case SDATA_SYSV:
2251 fprintf (stderr, DEBUG_FMT_S, "sdata", "sysv");
2252 break;
2253
2254 case SDATA_EABI:
2255 fprintf (stderr, DEBUG_FMT_S, "sdata", "eabi");
2256 break;
2257
2258 }
2259
2260 switch (rs6000_traceback)
2261 {
2262 case traceback_default: trace_str = "default"; break;
2263 case traceback_none: trace_str = "none"; break;
2264 case traceback_part: trace_str = "part"; break;
2265 case traceback_full: trace_str = "full"; break;
2266 default: trace_str = "unknown"; break;
2267 }
2268
2269 fprintf (stderr, DEBUG_FMT_S, "traceback", trace_str);
2270
2271 switch (rs6000_current_cmodel)
2272 {
2273 case CMODEL_SMALL: cmodel_str = "small"; break;
2274 case CMODEL_MEDIUM: cmodel_str = "medium"; break;
2275 case CMODEL_LARGE: cmodel_str = "large"; break;
2276 default: cmodel_str = "unknown"; break;
2277 }
2278
2279 fprintf (stderr, DEBUG_FMT_S, "cmodel", cmodel_str);
2280
2281 switch (rs6000_current_abi)
2282 {
2283 case ABI_NONE: abi_str = "none"; break;
2284 case ABI_AIX: abi_str = "aix"; break;
2285 case ABI_ELFv2: abi_str = "ELFv2"; break;
2286 case ABI_V4: abi_str = "V4"; break;
2287 case ABI_DARWIN: abi_str = "darwin"; break;
2288 default: abi_str = "unknown"; break;
2289 }
2290
2291 fprintf (stderr, DEBUG_FMT_S, "abi", abi_str);
2292
2293 if (rs6000_altivec_abi)
2294 fprintf (stderr, DEBUG_FMT_S, "altivec_abi", "true");
2295
2296 if (rs6000_spe_abi)
2297 fprintf (stderr, DEBUG_FMT_S, "spe_abi", "true");
2298
2299 if (rs6000_darwin64_abi)
2300 fprintf (stderr, DEBUG_FMT_S, "darwin64_abi", "true");
2301
2302 if (rs6000_float_gprs)
2303 fprintf (stderr, DEBUG_FMT_S, "float_gprs", "true");
2304
2305 fprintf (stderr, DEBUG_FMT_S, "fprs",
2306 (TARGET_FPRS ? "true" : "false"));
2307
2308 fprintf (stderr, DEBUG_FMT_S, "single_float",
2309 (TARGET_SINGLE_FLOAT ? "true" : "false"));
2310
2311 fprintf (stderr, DEBUG_FMT_S, "double_float",
2312 (TARGET_DOUBLE_FLOAT ? "true" : "false"));
2313
2314 fprintf (stderr, DEBUG_FMT_S, "soft_float",
2315 (TARGET_SOFT_FLOAT ? "true" : "false"));
2316
2317 fprintf (stderr, DEBUG_FMT_S, "e500_single",
2318 (TARGET_E500_SINGLE ? "true" : "false"));
2319
2320 fprintf (stderr, DEBUG_FMT_S, "e500_double",
2321 (TARGET_E500_DOUBLE ? "true" : "false"));
2322
2323 if (TARGET_LINK_STACK)
2324 fprintf (stderr, DEBUG_FMT_S, "link_stack", "true");
2325
2326 if (targetm.lra_p ())
2327 fprintf (stderr, DEBUG_FMT_S, "lra", "true");
2328
2329 if (TARGET_P8_FUSION)
2330 fprintf (stderr, DEBUG_FMT_S, "p8 fusion",
2331 (TARGET_P8_FUSION_SIGN) ? "zero+sign" : "zero");
2332
2333 fprintf (stderr, DEBUG_FMT_S, "plt-format",
2334 TARGET_SECURE_PLT ? "secure" : "bss");
2335 fprintf (stderr, DEBUG_FMT_S, "struct-return",
2336 aix_struct_return ? "aix" : "sysv");
2337 fprintf (stderr, DEBUG_FMT_S, "always_hint", tf[!!rs6000_always_hint]);
2338 fprintf (stderr, DEBUG_FMT_S, "sched_groups", tf[!!rs6000_sched_groups]);
2339 fprintf (stderr, DEBUG_FMT_S, "align_branch",
2340 tf[!!rs6000_align_branch_targets]);
2341 fprintf (stderr, DEBUG_FMT_D, "tls_size", rs6000_tls_size);
2342 fprintf (stderr, DEBUG_FMT_D, "long_double_size",
2343 rs6000_long_double_type_size);
2344 fprintf (stderr, DEBUG_FMT_D, "sched_restricted_insns_priority",
2345 (int)rs6000_sched_restricted_insns_priority);
2346 fprintf (stderr, DEBUG_FMT_D, "Number of standard builtins",
2347 (int)END_BUILTINS);
2348 fprintf (stderr, DEBUG_FMT_D, "Number of rs6000 builtins",
2349 (int)RS6000_BUILTIN_COUNT);
2350
2351 if (TARGET_VSX)
2352 fprintf (stderr, DEBUG_FMT_D, "VSX easy 64-bit scalar element",
2353 (int)VECTOR_ELEMENT_SCALAR_64BIT);
2354 }
2355
2356 \f
2357 /* Update the addr mask bits in reg_addr to help secondary reload and the
2358 legitimate address support figure out the appropriate addressing to
2359 use. */
2360
2361 static void
2362 rs6000_setup_reg_addr_masks (void)
2363 {
2364 ssize_t rc, reg, m, nregs;
2365 addr_mask_type any_addr_mask, addr_mask;
2366
2367 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2368 {
2369 enum machine_mode m2 = (enum machine_mode)m;
2370
2371 /* SDmode is special in that we want to access it only via REG+REG
2372 addressing on power7 and above, since we want to use the LFIWZX and
2373 STFIWZX instructions to load it. */
2374 bool indexed_only_p = (m == SDmode && TARGET_NO_SDMODE_STACK);
2375
2376 any_addr_mask = 0;
2377 for (rc = FIRST_RELOAD_REG_CLASS; rc <= LAST_RELOAD_REG_CLASS; rc++)
2378 {
2379 addr_mask = 0;
2380 reg = reload_reg_map[rc].reg;
2381
2382 /* Can mode values go in the GPR/FPR/Altivec registers? */
2383 if (reg >= 0 && rs6000_hard_regno_mode_ok_p[m][reg])
2384 {
2385 nregs = rs6000_hard_regno_nregs[m][reg];
2386 addr_mask |= RELOAD_REG_VALID;
2387
2388 /* Indicate if the mode takes more than 1 physical register. If
2389 it takes a single register, indicate it can do REG+REG
2390 addressing. */
2391 if (nregs > 1 || m == BLKmode)
2392 addr_mask |= RELOAD_REG_MULTIPLE;
2393 else
2394 addr_mask |= RELOAD_REG_INDEXED;
2395
2396 /* Figure out if we can do PRE_INC, PRE_DEC, or PRE_MODIFY
2397 addressing. Restrict addressing on SPE for 64-bit types
2398 because of the SUBREG hackery used to address 64-bit floats in
2399 '32-bit' GPRs. To simplify secondary reload, don't allow
2400 update forms on scalar floating point types that can go in the
2401 upper registers. */
2402
2403 if (TARGET_UPDATE
2404 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR)
2405 && GET_MODE_SIZE (m2) <= 8
2406 && !VECTOR_MODE_P (m2)
2407 && !COMPLEX_MODE_P (m2)
2408 && !indexed_only_p
2409 && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (m2) == 8)
2410 && !reg_addr[m2].scalar_in_vmx_p)
2411 {
2412 addr_mask |= RELOAD_REG_PRE_INCDEC;
2413
2414 /* PRE_MODIFY is more restricted than PRE_INC/PRE_DEC in that
2415 we don't allow PRE_MODIFY for some multi-register
2416 operations. */
2417 switch (m)
2418 {
2419 default:
2420 addr_mask |= RELOAD_REG_PRE_MODIFY;
2421 break;
2422
2423 case DImode:
2424 if (TARGET_POWERPC64)
2425 addr_mask |= RELOAD_REG_PRE_MODIFY;
2426 break;
2427
2428 case DFmode:
2429 case DDmode:
2430 if (TARGET_DF_INSN)
2431 addr_mask |= RELOAD_REG_PRE_MODIFY;
2432 break;
2433 }
2434 }
2435 }
2436
2437 /* GPR and FPR registers can do REG+OFFSET addressing, except
2438 possibly for SDmode. */
2439 if ((addr_mask != 0) && !indexed_only_p
2440 && (rc == RELOAD_REG_GPR || rc == RELOAD_REG_FPR))
2441 addr_mask |= RELOAD_REG_OFFSET;
2442
2443 reg_addr[m].addr_mask[rc] = addr_mask;
2444 any_addr_mask |= addr_mask;
2445 }
2446
2447 reg_addr[m].addr_mask[RELOAD_REG_ANY] = any_addr_mask;
2448 }
2449 }
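/* Illustrative result, assuming a 64-bit PowerPC with TARGET_UPDATE: the
   GPR entry for DImode ends up with RELOAD_REG_VALID, RELOAD_REG_INDEXED,
   RELOAD_REG_OFFSET, RELOAD_REG_PRE_INCDEC and RELOAD_REG_PRE_MODIFY all
   set, since DImode fits in a single 64-bit GPR. */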
2450
2451 \f
2452 /* Initialize the various global tables that are based on register size. */
2453 static void
2454 rs6000_init_hard_regno_mode_ok (bool global_init_p)
2455 {
2456 ssize_t r, m, c;
2457 int align64;
2458 int align32;
2459
2460 /* Precalculate REGNO_REG_CLASS. */
2461 rs6000_regno_regclass[0] = GENERAL_REGS;
2462 for (r = 1; r < 32; ++r)
2463 rs6000_regno_regclass[r] = BASE_REGS;
2464
2465 for (r = 32; r < 64; ++r)
2466 rs6000_regno_regclass[r] = FLOAT_REGS;
2467
2468 for (r = 64; r < FIRST_PSEUDO_REGISTER; ++r)
2469 rs6000_regno_regclass[r] = NO_REGS;
2470
2471 for (r = FIRST_ALTIVEC_REGNO; r <= LAST_ALTIVEC_REGNO; ++r)
2472 rs6000_regno_regclass[r] = ALTIVEC_REGS;
2473
2474 rs6000_regno_regclass[CR0_REGNO] = CR0_REGS;
2475 for (r = CR1_REGNO; r <= CR7_REGNO; ++r)
2476 rs6000_regno_regclass[r] = CR_REGS;
2477
2478 rs6000_regno_regclass[LR_REGNO] = LINK_REGS;
2479 rs6000_regno_regclass[CTR_REGNO] = CTR_REGS;
2480 rs6000_regno_regclass[CA_REGNO] = CA_REGS;
2481 rs6000_regno_regclass[VRSAVE_REGNO] = VRSAVE_REGS;
2482 rs6000_regno_regclass[VSCR_REGNO] = VRSAVE_REGS;
2483 rs6000_regno_regclass[SPE_ACC_REGNO] = SPE_ACC_REGS;
2484 rs6000_regno_regclass[SPEFSCR_REGNO] = SPEFSCR_REGS;
2485 rs6000_regno_regclass[TFHAR_REGNO] = SPR_REGS;
2486 rs6000_regno_regclass[TFIAR_REGNO] = SPR_REGS;
2487 rs6000_regno_regclass[TEXASR_REGNO] = SPR_REGS;
2488 rs6000_regno_regclass[ARG_POINTER_REGNUM] = BASE_REGS;
2489 rs6000_regno_regclass[FRAME_POINTER_REGNUM] = BASE_REGS;
2490
2491 /* Precalculate the mapping from register class to the simpler reload
2492 register class. We don't need all of the register classes that are
2493 combinations of different classes, just the simple ones that have constraint letters. */
2494 for (c = 0; c < N_REG_CLASSES; c++)
2495 reg_class_to_reg_type[c] = NO_REG_TYPE;
2496
2497 reg_class_to_reg_type[(int)GENERAL_REGS] = GPR_REG_TYPE;
2498 reg_class_to_reg_type[(int)BASE_REGS] = GPR_REG_TYPE;
2499 reg_class_to_reg_type[(int)VSX_REGS] = VSX_REG_TYPE;
2500 reg_class_to_reg_type[(int)VRSAVE_REGS] = SPR_REG_TYPE;
2501 reg_class_to_reg_type[(int)VSCR_REGS] = SPR_REG_TYPE;
2502 reg_class_to_reg_type[(int)LINK_REGS] = SPR_REG_TYPE;
2503 reg_class_to_reg_type[(int)CTR_REGS] = SPR_REG_TYPE;
2504 reg_class_to_reg_type[(int)LINK_OR_CTR_REGS] = SPR_REG_TYPE;
2505 reg_class_to_reg_type[(int)CR_REGS] = CR_REG_TYPE;
2506 reg_class_to_reg_type[(int)CR0_REGS] = CR_REG_TYPE;
2507 reg_class_to_reg_type[(int)SPE_ACC_REGS] = SPE_ACC_TYPE;
2508 reg_class_to_reg_type[(int)SPEFSCR_REGS] = SPEFSCR_REG_TYPE;
2509
2510 if (TARGET_VSX)
2511 {
2512 reg_class_to_reg_type[(int)FLOAT_REGS] = VSX_REG_TYPE;
2513 reg_class_to_reg_type[(int)ALTIVEC_REGS] = VSX_REG_TYPE;
2514 }
2515 else
2516 {
2517 reg_class_to_reg_type[(int)FLOAT_REGS] = FPR_REG_TYPE;
2518 reg_class_to_reg_type[(int)ALTIVEC_REGS] = ALTIVEC_REG_TYPE;
2519 }
2520
2521 /* Precalculate the valid memory formats as well as the vector information;
2522 this must be set up before the rs6000_hard_regno_nregs_internal calls
2523 below. */
2524 gcc_assert ((int)VECTOR_NONE == 0);
2525 memset ((void *) &rs6000_vector_unit[0], '\0', sizeof (rs6000_vector_unit));
2526 memset ((void *) &rs6000_vector_mem[0], '\0', sizeof (rs6000_vector_unit));
2527
2528 gcc_assert ((int)CODE_FOR_nothing == 0);
2529 memset ((void *) &reg_addr[0], '\0', sizeof (reg_addr));
2530
2531 gcc_assert ((int)NO_REGS == 0);
2532 memset ((void *) &rs6000_constraints[0], '\0', sizeof (rs6000_constraints));
2533
2534 /* The VSX hardware allows native alignment for vectors, but we control whether
2535 the compiler believes it can use native alignment or must still use 128-bit alignment. */
2536 if (TARGET_VSX && !TARGET_VSX_ALIGN_128)
2537 {
2538 align64 = 64;
2539 align32 = 32;
2540 }
2541 else
2542 {
2543 align64 = 128;
2544 align32 = 128;
2545 }
2546
2547 /* V2DF mode, VSX only. */
2548 if (TARGET_VSX)
2549 {
2550 rs6000_vector_unit[V2DFmode] = VECTOR_VSX;
2551 rs6000_vector_mem[V2DFmode] = VECTOR_VSX;
2552 rs6000_vector_align[V2DFmode] = align64;
2553 }
2554
2555 /* V4SF mode, either VSX or Altivec. */
2556 if (TARGET_VSX)
2557 {
2558 rs6000_vector_unit[V4SFmode] = VECTOR_VSX;
2559 rs6000_vector_mem[V4SFmode] = VECTOR_VSX;
2560 rs6000_vector_align[V4SFmode] = align32;
2561 }
2562 else if (TARGET_ALTIVEC)
2563 {
2564 rs6000_vector_unit[V4SFmode] = VECTOR_ALTIVEC;
2565 rs6000_vector_mem[V4SFmode] = VECTOR_ALTIVEC;
2566 rs6000_vector_align[V4SFmode] = align32;
2567 }
2568
2569 /* V16QImode, V8HImode, V4SImode are Altivec only, but possibly do VSX loads
2570 and stores. */
2571 if (TARGET_ALTIVEC)
2572 {
2573 rs6000_vector_unit[V4SImode] = VECTOR_ALTIVEC;
2574 rs6000_vector_unit[V8HImode] = VECTOR_ALTIVEC;
2575 rs6000_vector_unit[V16QImode] = VECTOR_ALTIVEC;
2576 rs6000_vector_align[V4SImode] = align32;
2577 rs6000_vector_align[V8HImode] = align32;
2578 rs6000_vector_align[V16QImode] = align32;
2579
2580 if (TARGET_VSX)
2581 {
2582 rs6000_vector_mem[V4SImode] = VECTOR_VSX;
2583 rs6000_vector_mem[V8HImode] = VECTOR_VSX;
2584 rs6000_vector_mem[V16QImode] = VECTOR_VSX;
2585 }
2586 else
2587 {
2588 rs6000_vector_mem[V4SImode] = VECTOR_ALTIVEC;
2589 rs6000_vector_mem[V8HImode] = VECTOR_ALTIVEC;
2590 rs6000_vector_mem[V16QImode] = VECTOR_ALTIVEC;
2591 }
2592 }
2593
2594 /* V2DImode, full mode depends on ISA 2.07 vector mode. Allow under VSX to
2595 do insert/splat/extract. Altivec doesn't have 64-bit integer support. */
2596 if (TARGET_VSX)
2597 {
2598 rs6000_vector_mem[V2DImode] = VECTOR_VSX;
2599 rs6000_vector_unit[V2DImode]
2600 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
2601 rs6000_vector_align[V2DImode] = align64;
2602
2603 rs6000_vector_mem[V1TImode] = VECTOR_VSX;
2604 rs6000_vector_unit[V1TImode]
2605 = (TARGET_P8_VECTOR) ? VECTOR_P8_VECTOR : VECTOR_NONE;
2606 rs6000_vector_align[V1TImode] = 128;
2607 }
2608
2609 /* DFmode, see if we want to use the VSX unit. */
2610 if (TARGET_VSX && TARGET_VSX_SCALAR_DOUBLE)
2611 {
2612 rs6000_vector_unit[DFmode] = VECTOR_VSX;
2613 rs6000_vector_mem[DFmode]
2614 = (TARGET_UPPER_REGS_DF ? VECTOR_VSX : VECTOR_NONE);
2615 rs6000_vector_align[DFmode] = align64;
2616 }
2617
2618 /* Allow TImode in VSX registers and set the VSX memory macros. */
2619 if (TARGET_VSX && TARGET_VSX_TIMODE)
2620 {
2621 rs6000_vector_mem[TImode] = VECTOR_VSX;
2622 rs6000_vector_align[TImode] = align64;
2623 }
2624
2625 /* TODO add SPE and paired floating point vector support. */
2626
2627 /* Register class constraints for the constraints that depend on compile
2628 switches. When the VSX code was added, different constraints were added
2629 based on the type (DFmode, V2DFmode, V4SFmode). For the vector types, all
2630 of the VSX registers are used. The register classes for scalar floating
2631 point types are set based on whether we allow that type into the upper
2632 (Altivec) registers. GCC has register classes to target the Altivec
2633 registers for load/store operations, to select using a VSX memory
2634 operation instead of the traditional floating point operation. The
2635 constraints are:
2636
2637 d - Register class to use with traditional DFmode instructions.
2638 f - Register class to use with traditional SFmode instructions.
2639 v - Altivec register.
2640 wa - Any VSX register.
2641 wc - Reserved to represent individual CR bits (used in LLVM).
2642 wd - Preferred register class for V2DFmode.
2643 wf - Preferred register class for V4SFmode.
2644 wg - Float register for power6x move insns.
2645 wh - FP register for direct move instructions.
2646 wi - FP or VSX register to hold 64-bit integers for VSX insns.
2647 wj - FP or VSX register to hold 64-bit integers for direct moves.
2648 wk - FP or VSX register to hold 64-bit doubles for direct moves.
2649 wl - Float register if we can do 32-bit signed int loads.
2650 wm - VSX register for ISA 2.07 direct move operations.
2651 wn - always NO_REGS.
2652 wr - GPR if 64-bit mode is permitted.
2653 ws - Register class to do ISA 2.06 DF operations.
2654 wt - VSX register for TImode in VSX registers.
2655 wu - Altivec register for ISA 2.07 VSX SF/SI load/stores.
2656 wv - Altivec register for ISA 2.06 VSX DF/DI load/stores.
2657 ww - Register class to do SF conversions in with VSX operations.
2658 wx - Float register if we can do 32-bit int stores.
2659 wy - Register class to do ISA 2.07 SF operations.
2660 wz - Float register if we can do 32-bit unsigned int loads. */
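/* These letters are referenced from operand alternatives in the machine
   description; as an illustrative (not verbatim) example, a DFmode
   alternative spelled "ws" accepts the full VSX register set only when the
   code below has put VSX_REGS into RS6000_CONSTRAINT_ws. */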
2661
2662 if (TARGET_HARD_FLOAT && TARGET_FPRS)
2663 rs6000_constraints[RS6000_CONSTRAINT_f] = FLOAT_REGS; /* SFmode */
2664
2665 if (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
2666 rs6000_constraints[RS6000_CONSTRAINT_d] = FLOAT_REGS; /* DFmode */
2667
2668 if (TARGET_VSX)
2669 {
2670 rs6000_constraints[RS6000_CONSTRAINT_wa] = VSX_REGS;
2671 rs6000_constraints[RS6000_CONSTRAINT_wd] = VSX_REGS; /* V2DFmode */
2672 rs6000_constraints[RS6000_CONSTRAINT_wf] = VSX_REGS; /* V4SFmode */
2673 rs6000_constraints[RS6000_CONSTRAINT_wi] = FLOAT_REGS; /* DImode */
2674
2675 if (TARGET_VSX_TIMODE)
2676 rs6000_constraints[RS6000_CONSTRAINT_wt] = VSX_REGS; /* TImode */
2677
2678 if (TARGET_UPPER_REGS_DF) /* DFmode */
2679 {
2680 rs6000_constraints[RS6000_CONSTRAINT_ws] = VSX_REGS;
2681 rs6000_constraints[RS6000_CONSTRAINT_wv] = ALTIVEC_REGS;
2682 }
2683 else
2684 rs6000_constraints[RS6000_CONSTRAINT_ws] = FLOAT_REGS;
2685 }
2686
2687 /* Add conditional constraints based on various options, to allow us to
2688 collapse multiple insn patterns. */
2689 if (TARGET_ALTIVEC)
2690 rs6000_constraints[RS6000_CONSTRAINT_v] = ALTIVEC_REGS;
2691
2692 if (TARGET_MFPGPR) /* DFmode */
2693 rs6000_constraints[RS6000_CONSTRAINT_wg] = FLOAT_REGS;
2694
2695 if (TARGET_LFIWAX)
2696 rs6000_constraints[RS6000_CONSTRAINT_wl] = FLOAT_REGS; /* DImode */
2697
2698 if (TARGET_DIRECT_MOVE)
2699 {
2700 rs6000_constraints[RS6000_CONSTRAINT_wh] = FLOAT_REGS;
2701 rs6000_constraints[RS6000_CONSTRAINT_wj] /* DImode */
2702 = rs6000_constraints[RS6000_CONSTRAINT_wi];
2703 rs6000_constraints[RS6000_CONSTRAINT_wk] /* DFmode */
2704 = rs6000_constraints[RS6000_CONSTRAINT_ws];
2705 rs6000_constraints[RS6000_CONSTRAINT_wm] = VSX_REGS;
2706 }
2707
2708 if (TARGET_POWERPC64)
2709 rs6000_constraints[RS6000_CONSTRAINT_wr] = GENERAL_REGS;
2710
2711 if (TARGET_P8_VECTOR && TARGET_UPPER_REGS_SF) /* SFmode */
2712 {
2713 rs6000_constraints[RS6000_CONSTRAINT_wu] = ALTIVEC_REGS;
2714 rs6000_constraints[RS6000_CONSTRAINT_wy] = VSX_REGS;
2715 rs6000_constraints[RS6000_CONSTRAINT_ww] = VSX_REGS;
2716 }
2717 else if (TARGET_P8_VECTOR)
2718 {
2719 rs6000_constraints[RS6000_CONSTRAINT_wy] = FLOAT_REGS;
2720 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
2721 }
2722 else if (TARGET_VSX)
2723 rs6000_constraints[RS6000_CONSTRAINT_ww] = FLOAT_REGS;
2724
2725 if (TARGET_STFIWX)
2726 rs6000_constraints[RS6000_CONSTRAINT_wx] = FLOAT_REGS; /* DImode */
2727
2728 if (TARGET_LFIWZX)
2729 rs6000_constraints[RS6000_CONSTRAINT_wz] = FLOAT_REGS; /* DImode */
2730
2731 /* Set up the reload helper and direct move functions. */
2732 if (TARGET_VSX || TARGET_ALTIVEC)
2733 {
2734 if (TARGET_64BIT)
2735 {
2736 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_di_store;
2737 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_di_load;
2738 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_di_store;
2739 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_di_load;
2740 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_di_store;
2741 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_di_load;
2742 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_di_store;
2743 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_di_load;
2744 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_di_store;
2745 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_di_load;
2746 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_di_store;
2747 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_di_load;
2748 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_di_store;
2749 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_di_load;
2750 if (TARGET_VSX && TARGET_UPPER_REGS_DF)
2751 {
2752 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_di_store;
2753 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_di_load;
2754 reg_addr[DFmode].scalar_in_vmx_p = true;
2755 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_di_store;
2756 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_di_load;
2757 }
2758 if (TARGET_P8_VECTOR)
2759 {
2760 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_di_store;
2761 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_di_load;
2762 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_di_store;
2763 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_di_load;
2764 if (TARGET_UPPER_REGS_SF)
2765 reg_addr[SFmode].scalar_in_vmx_p = true;
2766 }
2767 if (TARGET_VSX_TIMODE)
2768 {
2769 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_di_store;
2770 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_di_load;
2771 }
2772 if (TARGET_DIRECT_MOVE)
2773 {
2774 if (TARGET_POWERPC64)
2775 {
2776 reg_addr[TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxti;
2777 reg_addr[V1TImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv1ti;
2778 reg_addr[V2DFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2df;
2779 reg_addr[V2DImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv2di;
2780 reg_addr[V4SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4sf;
2781 reg_addr[V4SImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv4si;
2782 reg_addr[V8HImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv8hi;
2783 reg_addr[V16QImode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxv16qi;
2784 reg_addr[SFmode].reload_gpr_vsx = CODE_FOR_reload_gpr_from_vsxsf;
2785
2786 reg_addr[TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprti;
2787 reg_addr[V1TImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv1ti;
2788 reg_addr[V2DFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2df;
2789 reg_addr[V2DImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv2di;
2790 reg_addr[V4SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4sf;
2791 reg_addr[V4SImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv4si;
2792 reg_addr[V8HImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv8hi;
2793 reg_addr[V16QImode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprv16qi;
2794 reg_addr[SFmode].reload_vsx_gpr = CODE_FOR_reload_vsx_from_gprsf;
2795 }
2796 else
2797 {
2798 reg_addr[DImode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdi;
2799 reg_addr[DDmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdd;
2800 reg_addr[DFmode].reload_fpr_gpr = CODE_FOR_reload_fpr_from_gprdf;
2801 }
2802 }
2803 }
2804 else
2805 {
2806 reg_addr[V16QImode].reload_store = CODE_FOR_reload_v16qi_si_store;
2807 reg_addr[V16QImode].reload_load = CODE_FOR_reload_v16qi_si_load;
2808 reg_addr[V8HImode].reload_store = CODE_FOR_reload_v8hi_si_store;
2809 reg_addr[V8HImode].reload_load = CODE_FOR_reload_v8hi_si_load;
2810 reg_addr[V4SImode].reload_store = CODE_FOR_reload_v4si_si_store;
2811 reg_addr[V4SImode].reload_load = CODE_FOR_reload_v4si_si_load;
2812 reg_addr[V2DImode].reload_store = CODE_FOR_reload_v2di_si_store;
2813 reg_addr[V2DImode].reload_load = CODE_FOR_reload_v2di_si_load;
2814 reg_addr[V1TImode].reload_store = CODE_FOR_reload_v1ti_si_store;
2815 reg_addr[V1TImode].reload_load = CODE_FOR_reload_v1ti_si_load;
2816 reg_addr[V4SFmode].reload_store = CODE_FOR_reload_v4sf_si_store;
2817 reg_addr[V4SFmode].reload_load = CODE_FOR_reload_v4sf_si_load;
2818 reg_addr[V2DFmode].reload_store = CODE_FOR_reload_v2df_si_store;
2819 reg_addr[V2DFmode].reload_load = CODE_FOR_reload_v2df_si_load;
2820 if (TARGET_VSX && TARGET_UPPER_REGS_DF)
2821 {
2822 reg_addr[DFmode].reload_store = CODE_FOR_reload_df_si_store;
2823 reg_addr[DFmode].reload_load = CODE_FOR_reload_df_si_load;
2824 reg_addr[DFmode].scalar_in_vmx_p = true;
2825 reg_addr[DDmode].reload_store = CODE_FOR_reload_dd_si_store;
2826 reg_addr[DDmode].reload_load = CODE_FOR_reload_dd_si_load;
2827 }
2828 if (TARGET_P8_VECTOR)
2829 {
2830 reg_addr[SFmode].reload_store = CODE_FOR_reload_sf_si_store;
2831 reg_addr[SFmode].reload_load = CODE_FOR_reload_sf_si_load;
2832 reg_addr[SDmode].reload_store = CODE_FOR_reload_sd_si_store;
2833 reg_addr[SDmode].reload_load = CODE_FOR_reload_sd_si_load;
2834 if (TARGET_UPPER_REGS_SF)
2835 reg_addr[SFmode].scalar_in_vmx_p = true;
2836 }
2837 if (TARGET_VSX_TIMODE)
2838 {
2839 reg_addr[TImode].reload_store = CODE_FOR_reload_ti_si_store;
2840 reg_addr[TImode].reload_load = CODE_FOR_reload_ti_si_load;
2841 }
2842 }
2843 }
2844
2845 /* Precalculate HARD_REGNO_NREGS. */
2846 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2847 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2848 rs6000_hard_regno_nregs[m][r]
2849 = rs6000_hard_regno_nregs_internal (r, (enum machine_mode)m);
2850
2851 /* Precalculate HARD_REGNO_MODE_OK. */
2852 for (r = 0; r < FIRST_PSEUDO_REGISTER; ++r)
2853 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2854 if (rs6000_hard_regno_mode_ok (r, (enum machine_mode)m))
2855 rs6000_hard_regno_mode_ok_p[m][r] = true;
2856
2857 /* Precalculate CLASS_MAX_NREGS sizes. */
2858 for (c = 0; c < LIM_REG_CLASSES; ++c)
2859 {
2860 int reg_size;
2861
2862 if (TARGET_VSX && VSX_REG_CLASS_P (c))
2863 reg_size = UNITS_PER_VSX_WORD;
2864
2865 else if (c == ALTIVEC_REGS)
2866 reg_size = UNITS_PER_ALTIVEC_WORD;
2867
2868 else if (c == FLOAT_REGS)
2869 reg_size = UNITS_PER_FP_WORD;
2870
2871 else
2872 reg_size = UNITS_PER_WORD;
2873
2874 for (m = 0; m < NUM_MACHINE_MODES; ++m)
2875 {
2876 enum machine_mode m2 = (enum machine_mode)m;
2877 int reg_size2 = reg_size;
2878
2879 /* TFmode/TDmode always takes 2 registers, even in VSX. */
2880 if (TARGET_VSX && VSX_REG_CLASS_P (c)
2881 && (m == TDmode || m == TFmode))
2882 reg_size2 = UNITS_PER_FP_WORD;
2883
2884 rs6000_class_max_nregs[m][c]
2885 = (GET_MODE_SIZE (m2) + reg_size2 - 1) / reg_size2;
2886 }
2887 }
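/* Worked example of the computation above: for VSX_REGS, reg_size is
   UNITS_PER_VSX_WORD (16), so V2DFmode (16 bytes) needs a single register,
   while TFmode is forced back to reg_size2 == UNITS_PER_FP_WORD (8) and
   therefore takes (16 + 8 - 1) / 8 == 2 registers. */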
2888
2889 if (TARGET_E500_DOUBLE)
2890 rs6000_class_max_nregs[DFmode][GENERAL_REGS] = 1;
2891
2892 /* Calculate the modes for which we automatically generate code that uses
2893 the reciprocal divide and square root instructions. In the future, we could
2894 possibly generate the instructions even if the user did not specify
2895 -mrecip. The older machines' double precision reciprocal sqrt estimate is
2896 not accurate enough. */
2897 memset (rs6000_recip_bits, 0, sizeof (rs6000_recip_bits));
2898 if (TARGET_FRES)
2899 rs6000_recip_bits[SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2900 if (TARGET_FRE)
2901 rs6000_recip_bits[DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2902 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2903 rs6000_recip_bits[V4SFmode] = RS6000_RECIP_MASK_HAVE_RE;
2904 if (VECTOR_UNIT_VSX_P (V2DFmode))
2905 rs6000_recip_bits[V2DFmode] = RS6000_RECIP_MASK_HAVE_RE;
2906
2907 if (TARGET_FRSQRTES)
2908 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2909 if (TARGET_FRSQRTE)
2910 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2911 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode))
2912 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2913 if (VECTOR_UNIT_VSX_P (V2DFmode))
2914 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_HAVE_RSQRTE;
2915
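/* For example, a CPU with both TARGET_FRES and TARGET_FRSQRTES reaches
   this point with rs6000_recip_bits[SFmode] equal to
   RS6000_RECIP_MASK_HAVE_RE | RS6000_RECIP_MASK_HAVE_RSQRTE; the AUTO
   bits are only added below when -mrecip permits it. */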
2916 if (rs6000_recip_control)
2917 {
2918 if (!flag_finite_math_only)
2919 warning (0, "-mrecip requires -ffinite-math or -ffast-math");
2920 if (flag_trapping_math)
2921 warning (0, "-mrecip requires -fno-trapping-math or -ffast-math");
2922 if (!flag_reciprocal_math)
2923 warning (0, "-mrecip requires -freciprocal-math or -ffast-math");
2924 if (flag_finite_math_only && !flag_trapping_math && flag_reciprocal_math)
2925 {
2926 if (RS6000_RECIP_HAVE_RE_P (SFmode)
2927 && (rs6000_recip_control & RECIP_SF_DIV) != 0)
2928 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2929
2930 if (RS6000_RECIP_HAVE_RE_P (DFmode)
2931 && (rs6000_recip_control & RECIP_DF_DIV) != 0)
2932 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2933
2934 if (RS6000_RECIP_HAVE_RE_P (V4SFmode)
2935 && (rs6000_recip_control & RECIP_V4SF_DIV) != 0)
2936 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2937
2938 if (RS6000_RECIP_HAVE_RE_P (V2DFmode)
2939 && (rs6000_recip_control & RECIP_V2DF_DIV) != 0)
2940 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RE;
2941
2942 if (RS6000_RECIP_HAVE_RSQRTE_P (SFmode)
2943 && (rs6000_recip_control & RECIP_SF_RSQRT) != 0)
2944 rs6000_recip_bits[SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2945
2946 if (RS6000_RECIP_HAVE_RSQRTE_P (DFmode)
2947 && (rs6000_recip_control & RECIP_DF_RSQRT) != 0)
2948 rs6000_recip_bits[DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2949
2950 if (RS6000_RECIP_HAVE_RSQRTE_P (V4SFmode)
2951 && (rs6000_recip_control & RECIP_V4SF_RSQRT) != 0)
2952 rs6000_recip_bits[V4SFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2953
2954 if (RS6000_RECIP_HAVE_RSQRTE_P (V2DFmode)
2955 && (rs6000_recip_control & RECIP_V2DF_RSQRT) != 0)
2956 rs6000_recip_bits[V2DFmode] |= RS6000_RECIP_MASK_AUTO_RSQRTE;
2957 }
2958 }
2959
2960 /* Update the addr mask bits in reg_addr to help secondary reload and the
2961 legitimate address support figure out the appropriate addressing to
2962 use. */
2963 rs6000_setup_reg_addr_masks ();
2964
2965 if (global_init_p || TARGET_DEBUG_TARGET)
2966 {
2967 if (TARGET_DEBUG_REG)
2968 rs6000_debug_reg_global ();
2969
2970 if (TARGET_DEBUG_COST || TARGET_DEBUG_REG)
2971 fprintf (stderr,
2972 "SImode variable mult cost = %d\n"
2973 "SImode constant mult cost = %d\n"
2974 "SImode short constant mult cost = %d\n"
2975 "DImode multipliciation cost = %d\n"
2976 "SImode division cost = %d\n"
2977 "DImode division cost = %d\n"
2978 "Simple fp operation cost = %d\n"
2979 "DFmode multiplication cost = %d\n"
2980 "SFmode division cost = %d\n"
2981 "DFmode division cost = %d\n"
2982 "cache line size = %d\n"
2983 "l1 cache size = %d\n"
2984 "l2 cache size = %d\n"
2985 "simultaneous prefetches = %d\n"
2986 "\n",
2987 rs6000_cost->mulsi,
2988 rs6000_cost->mulsi_const,
2989 rs6000_cost->mulsi_const9,
2990 rs6000_cost->muldi,
2991 rs6000_cost->divsi,
2992 rs6000_cost->divdi,
2993 rs6000_cost->fp,
2994 rs6000_cost->dmul,
2995 rs6000_cost->sdiv,
2996 rs6000_cost->ddiv,
2997 rs6000_cost->cache_line_size,
2998 rs6000_cost->l1_cache_size,
2999 rs6000_cost->l2_cache_size,
3000 rs6000_cost->simultaneous_prefetches);
3001 }
3002 }
3003
3004 #if TARGET_MACHO
3005 /* The Darwin version of SUBTARGET_OVERRIDE_OPTIONS. */
3006
3007 static void
3008 darwin_rs6000_override_options (void)
3009 {
3010 /* The Darwin ABI always includes AltiVec; it can't be (validly) turned
3011 off. */
3012 rs6000_altivec_abi = 1;
3013 TARGET_ALTIVEC_VRSAVE = 1;
3014 rs6000_current_abi = ABI_DARWIN;
3015
3016 if (DEFAULT_ABI == ABI_DARWIN
3017 && TARGET_64BIT)
3018 darwin_one_byte_bool = 1;
3019
3020 if (TARGET_64BIT && ! TARGET_POWERPC64)
3021 {
3022 rs6000_isa_flags |= OPTION_MASK_POWERPC64;
3023 warning (0, "-m64 requires PowerPC64 architecture, enabling");
3024 }
3025 if (flag_mkernel)
3026 {
3027 rs6000_default_long_calls = 1;
3028 rs6000_isa_flags |= OPTION_MASK_SOFT_FLOAT;
3029 }
3030
3031 /* Make -m64 imply -maltivec. Darwin's 64-bit ABI includes
3032 Altivec. */
3033 if (!flag_mkernel && !flag_apple_kext
3034 && TARGET_64BIT
3035 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC))
3036 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3037
3038 /* Unless the user (not the configurer) has explicitly overridden
3039 it with -mcpu=G3 or -mno-altivec, 10.5+ targets default to the
3040 G4 unless targeting the kernel. */
3041 if (!flag_mkernel
3042 && !flag_apple_kext
3043 && strverscmp (darwin_macosx_version_min, "10.5") >= 0
3044 && ! (rs6000_isa_flags_explicit & OPTION_MASK_ALTIVEC)
3045 && ! global_options_set.x_rs6000_cpu_index)
3046 {
3047 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3048 }
3049 }
3050 #endif
3051
3052 /* If not otherwise specified by a target, make 'long double' equivalent to
3053 'double'. */
3054
3055 #ifndef RS6000_DEFAULT_LONG_DOUBLE_SIZE
3056 #define RS6000_DEFAULT_LONG_DOUBLE_SIZE 64
3057 #endif
3058
3059 /* Return the builtin mask of the various options used that could affect which
3060 builtins were used. In the past we used target_flags, but we've run out of
3061 bits, and some options like SPE and PAIRED are no longer in
3062 target_flags. */
3063
3064 HOST_WIDE_INT
3065 rs6000_builtin_mask_calculate (void)
3066 {
3067 return (((TARGET_ALTIVEC) ? RS6000_BTM_ALTIVEC : 0)
3068 | ((TARGET_VSX) ? RS6000_BTM_VSX : 0)
3069 | ((TARGET_SPE) ? RS6000_BTM_SPE : 0)
3070 | ((TARGET_PAIRED_FLOAT) ? RS6000_BTM_PAIRED : 0)
3071 | ((TARGET_FRE) ? RS6000_BTM_FRE : 0)
3072 | ((TARGET_FRES) ? RS6000_BTM_FRES : 0)
3073 | ((TARGET_FRSQRTE) ? RS6000_BTM_FRSQRTE : 0)
3074 | ((TARGET_FRSQRTES) ? RS6000_BTM_FRSQRTES : 0)
3075 | ((TARGET_POPCNTD) ? RS6000_BTM_POPCNTD : 0)
3076 | ((rs6000_cpu == PROCESSOR_CELL) ? RS6000_BTM_CELL : 0)
3077 | ((TARGET_P8_VECTOR) ? RS6000_BTM_P8_VECTOR : 0)
3078 | ((TARGET_CRYPTO) ? RS6000_BTM_CRYPTO : 0)
3079 | ((TARGET_HTM) ? RS6000_BTM_HTM : 0)
3080 | ((TARGET_DFP) ? RS6000_BTM_DFP : 0)
3081 | ((TARGET_HARD_FLOAT) ? RS6000_BTM_HARD_FLOAT : 0)
3082 | ((TARGET_LONG_DOUBLE_128) ? RS6000_BTM_LDBL128 : 0));
3083 }
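/* A sketch of how the mask composes, assuming a power8-class target: with
   Altivec, VSX, the ISA 2.07 vector unit and crypto enabled, the return
   value includes RS6000_BTM_ALTIVEC | RS6000_BTM_VSX | RS6000_BTM_P8_VECTOR
   | RS6000_BTM_CRYPTO, plus whatever else the remaining flags select. */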
3084
3085 /* Override command line options. Mostly we process the processor type and
3086 sometimes adjust other TARGET_ options. */
3087
3088 static bool
3089 rs6000_option_override_internal (bool global_init_p)
3090 {
3091 bool ret = true;
3092 bool have_cpu = false;
3093
3094 /* The default cpu requested at configure time, if any. */
3095 const char *implicit_cpu = OPTION_TARGET_CPU_DEFAULT;
3096
3097 HOST_WIDE_INT set_masks;
3098 int cpu_index;
3099 int tune_index;
3100 struct cl_target_option *main_target_opt
3101 = ((global_init_p || target_option_default_node == NULL)
3102 ? NULL : TREE_TARGET_OPTION (target_option_default_node));
3103
3104 /* Remember the explicit arguments. */
3105 if (global_init_p)
3106 rs6000_isa_flags_explicit = global_options_set.x_rs6000_isa_flags;
3107
3108 /* On 64-bit Darwin, power alignment is ABI-incompatible with some C
3109 library functions, so warn about it. The flag may be useful for
3110 performance studies from time to time though, so don't disable it
3111 entirely. */
3112 if (global_options_set.x_rs6000_alignment_flags
3113 && rs6000_alignment_flags == MASK_ALIGN_POWER
3114 && DEFAULT_ABI == ABI_DARWIN
3115 && TARGET_64BIT)
3116 warning (0, "-malign-power is not supported for 64-bit Darwin;"
3117 " it is incompatible with the installed C and C++ libraries");
3118
3119 /* Numerous experiments show that IRA-based loop pressure
3120 calculation works better for RTL loop invariant motion on targets
3121 with enough (>= 32) registers. It is an expensive optimization,
3122 so it is enabled only when optimizing for peak performance. */
3123 if (optimize >= 3 && global_init_p
3124 && !global_options_set.x_flag_ira_loop_pressure)
3125 flag_ira_loop_pressure = 1;
3126
3127 /* Set the pointer size. */
3128 if (TARGET_64BIT)
3129 {
3130 rs6000_pmode = (int)DImode;
3131 rs6000_pointer_size = 64;
3132 }
3133 else
3134 {
3135 rs6000_pmode = (int)SImode;
3136 rs6000_pointer_size = 32;
3137 }
3138
3139 /* Some OSs don't support saving the high part of 64-bit registers on context
3140 switch. Other OSs don't support saving Altivec registers. On those OSs,
3141 we don't touch the OPTION_MASK_POWERPC64 or OPTION_MASK_ALTIVEC settings;
3142 if the user wants either, the user must explicitly specify them and we
3143 won't interfere with the user's specification. */
3144
3145 set_masks = POWERPC_MASKS;
3146 #ifdef OS_MISSING_POWERPC64
3147 if (OS_MISSING_POWERPC64)
3148 set_masks &= ~OPTION_MASK_POWERPC64;
3149 #endif
3150 #ifdef OS_MISSING_ALTIVEC
3151 if (OS_MISSING_ALTIVEC)
3152 set_masks &= ~(OPTION_MASK_ALTIVEC | OPTION_MASK_VSX);
3153 #endif
3154
3155 /* Don't let the processor default override options that were given explicitly. */
3156 set_masks &= ~rs6000_isa_flags_explicit;
3157
3158 /* Process the -mcpu=<xxx> and -mtune=<xxx> argument. If the user changed
3159 the cpu in a target attribute or pragma, but did not specify a tuning
3160 option, use the cpu for the tuning option rather than the option specified
3161 with -mtune on the command line. Process a '--with-cpu' configuration
3162 request as an implicit --cpu. */
3163 if (rs6000_cpu_index >= 0)
3164 {
3165 cpu_index = rs6000_cpu_index;
3166 have_cpu = true;
3167 }
3168 else if (main_target_opt != NULL && main_target_opt->x_rs6000_cpu_index >= 0)
3169 {
3170 rs6000_cpu_index = cpu_index = main_target_opt->x_rs6000_cpu_index;
3171 have_cpu = true;
3172 }
3173 else if (implicit_cpu)
3174 {
3175 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (implicit_cpu);
3176 have_cpu = true;
3177 }
3178 else
3179 {
3180 const char *default_cpu = (TARGET_POWERPC64 ? "powerpc64" : "powerpc");
3181 rs6000_cpu_index = cpu_index = rs6000_cpu_name_lookup (default_cpu);
3182 have_cpu = false;
3183 }
3184
3185 gcc_assert (cpu_index >= 0);
3186
3187 /* If we have a cpu, either from an explicit -mcpu=<xxx> or because the
3188 compiler was configured with --with-cpu=<xxx>, replace all of the ISA bits
3189 with those from the cpu, except for options that were explicitly set. If
3190 we don't have a cpu, do not override the target bits set in
3191 TARGET_DEFAULT. */
3192 if (have_cpu)
3193 {
3194 rs6000_isa_flags &= ~set_masks;
3195 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3196 & set_masks);
3197 }
3198 else
3199 rs6000_isa_flags |= (processor_target_table[cpu_index].target_enable
3200 & ~rs6000_isa_flags_explicit);
3201
3202 /* If no -mcpu=<xxx>, inherit any default options that were cleared via
3203 POWERPC_MASKS. Originally, TARGET_DEFAULT was used to initialize
3204 target_flags via the TARGET_DEFAULT_TARGET_FLAGS hook; now that we use
3205 rs6000_isa_flags, the initialization must be done here. */
3206 if (!have_cpu)
3207 rs6000_isa_flags |= (TARGET_DEFAULT & ~rs6000_isa_flags_explicit);
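
/* For example, "-mcpu=power8 -mno-vsx" works as follows: the explicit
   -mno-vsx was removed from set_masks above, so when the power8 entry of
   processor_target_table is ORed in, its VSX bit is filtered out and the
   user's -mno-vsx setting is preserved.  */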
3208
3209 if (rs6000_tune_index >= 0)
3210 tune_index = rs6000_tune_index;
3211 else if (have_cpu)
3212 rs6000_tune_index = tune_index = cpu_index;
3213 else
3214 {
3215 size_t i;
3216 enum processor_type tune_proc
3217 = (TARGET_POWERPC64 ? PROCESSOR_DEFAULT64 : PROCESSOR_DEFAULT);
3218
3219 tune_index = -1;
3220 for (i = 0; i < ARRAY_SIZE (processor_target_table); i++)
3221 if (processor_target_table[i].processor == tune_proc)
3222 {
3223 rs6000_tune_index = tune_index = i;
3224 break;
3225 }
3226 }
3227
3228 gcc_assert (tune_index >= 0);
3229 rs6000_cpu = processor_target_table[tune_index].processor;
3230
3231 /* Pick defaults for SPE-related control flags. Do this early so that
3232 the TARGET_ macros reflect the chosen cpu as soon as possible. */
3233 {
3234 int spe_capable_cpu =
3235 (rs6000_cpu == PROCESSOR_PPC8540
3236 || rs6000_cpu == PROCESSOR_PPC8548);
3237
3238 if (!global_options_set.x_rs6000_spe_abi)
3239 rs6000_spe_abi = spe_capable_cpu;
3240
3241 if (!global_options_set.x_rs6000_spe)
3242 rs6000_spe = spe_capable_cpu;
3243
3244 if (!global_options_set.x_rs6000_float_gprs)
3245 rs6000_float_gprs =
3246 (rs6000_cpu == PROCESSOR_PPC8540 ? 1
3247 : rs6000_cpu == PROCESSOR_PPC8548 ? 2
3248 : 0);
3249 }
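
/* In the encoding above, rs6000_float_gprs of 1 selects single-precision
   floats in GPRs (the e500v1 core in the 8540) and 2 selects single and
   double precision (the e500v2 core in the 8548); 0 means none.  */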
3250
3251 if (global_options_set.x_rs6000_spe_abi
3252 && rs6000_spe_abi
3253 && !TARGET_SPE_ABI)
3254 error ("not configured for SPE ABI");
3255
3256 if (global_options_set.x_rs6000_spe
3257 && rs6000_spe
3258 && !TARGET_SPE)
3259 error ("not configured for SPE instruction set");
3260
3261 if (main_target_opt != NULL
3262 && ((main_target_opt->x_rs6000_spe_abi != rs6000_spe_abi)
3263 || (main_target_opt->x_rs6000_spe != rs6000_spe)
3264 || (main_target_opt->x_rs6000_float_gprs != rs6000_float_gprs)))
3265 error ("target attribute or pragma changes SPE ABI");
3266
3267 if (rs6000_cpu == PROCESSOR_PPCE300C2 || rs6000_cpu == PROCESSOR_PPCE300C3
3268 || rs6000_cpu == PROCESSOR_PPCE500MC || rs6000_cpu == PROCESSOR_PPCE500MC64
3269 || rs6000_cpu == PROCESSOR_PPCE5500)
3270 {
3271 if (TARGET_ALTIVEC)
3272 error ("AltiVec not supported in this target");
3273 if (TARGET_SPE)
3274 error ("SPE not supported in this target");
3275 }
3276 if (rs6000_cpu == PROCESSOR_PPCE6500)
3277 {
3278 if (TARGET_SPE)
3279 error ("SPE not supported in this target");
3280 }
3281
3282 /* Disable Cell microcode if we are optimizing for the Cell
3283 and not optimizing for size. */
3284 if (rs6000_gen_cell_microcode == -1)
3285 rs6000_gen_cell_microcode = !(rs6000_cpu == PROCESSOR_CELL
3286 && !optimize_size);
3287
3288 /* If we are optimizing big endian systems for space and it's OK to
3289 use instructions that would be microcoded on the Cell, use the
3290 load/store multiple and string instructions. */
3291 if (BYTES_BIG_ENDIAN && optimize_size && rs6000_gen_cell_microcode)
3292 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & (OPTION_MASK_MULTIPLE
3293 | OPTION_MASK_STRING);
3294
3295 /* Don't allow -mmultiple or -mstring on little endian systems
3296 unless the cpu is a 750, because the hardware doesn't support the
3297 instructions used in little endian mode, and they cause an alignment
3298 trap. The 750 does not cause an alignment trap (except when the
3299 target is unaligned). */
3300
3301 if (!BYTES_BIG_ENDIAN && rs6000_cpu != PROCESSOR_PPC750)
3302 {
3303 if (TARGET_MULTIPLE)
3304 {
3305 rs6000_isa_flags &= ~OPTION_MASK_MULTIPLE;
3306 if ((rs6000_isa_flags_explicit & OPTION_MASK_MULTIPLE) != 0)
3307 warning (0, "-mmultiple is not supported on little endian systems");
3308 }
3309
3310 if (TARGET_STRING)
3311 {
3312 rs6000_isa_flags &= ~OPTION_MASK_STRING;
3313 if ((rs6000_isa_flags_explicit & OPTION_MASK_STRING) != 0)
3314 warning (0, "-mstring is not supported on little endian systems");
3315 }
3316 }
3317
3318 /* If little-endian, default to -mstrict-align on older processors.
3319 Testing for htm matches power8 and later. */
3320 if (!BYTES_BIG_ENDIAN
3321 && !(processor_target_table[tune_index].target_enable & OPTION_MASK_HTM))
3322 rs6000_isa_flags |= ~rs6000_isa_flags_explicit & OPTION_MASK_STRICT_ALIGN;
3323
3324 /* -maltivec={le,be} implies -maltivec. */
3325 if (rs6000_altivec_element_order != 0)
3326 rs6000_isa_flags |= OPTION_MASK_ALTIVEC;
3327
3328 /* Disallow -maltivec=le in big endian mode for now. This is not
3329 known to be useful for anyone. */
3330 if (BYTES_BIG_ENDIAN && rs6000_altivec_element_order == 1)
3331 {
3332 warning (0, N_("-maltivec=le not allowed for big-endian targets"));
3333 rs6000_altivec_element_order = 0;
3334 }
3335
3336 /* Add some warnings for VSX. */
3337 if (TARGET_VSX)
3338 {
3339 const char *msg = NULL;
3340 if (!TARGET_HARD_FLOAT || !TARGET_FPRS
3341 || !TARGET_SINGLE_FLOAT || !TARGET_DOUBLE_FLOAT)
3342 {
3343 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3344 msg = N_("-mvsx requires hardware floating point");
3345 else
3346 {
3347 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3348 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3349 }
3350 }
3351 else if (TARGET_PAIRED_FLOAT)
3352 msg = N_("-mvsx and -mpaired are incompatible");
3353 else if (TARGET_AVOID_XFORM > 0)
3354 msg = N_("-mvsx needs indexed addressing");
3355 else if (!TARGET_ALTIVEC && (rs6000_isa_flags_explicit
3356 & OPTION_MASK_ALTIVEC))
3357 {
3358 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX)
3359 msg = N_("-mvsx and -mno-altivec are incompatible");
3360 else
3361 msg = N_("-mno-altivec disables vsx");
3362 }
3363
3364 if (msg)
3365 {
3366 warning (0, msg);
3367 rs6000_isa_flags &= ~ OPTION_MASK_VSX;
3368 rs6000_isa_flags_explicit |= OPTION_MASK_VSX;
3369 }
3370 }
3371
3372 /* If hard-float/altivec/vsx were explicitly turned off then don't allow
3373 the -mcpu setting to enable options that conflict. */
3374 if ((!TARGET_HARD_FLOAT || !TARGET_ALTIVEC || !TARGET_VSX)
3375 && (rs6000_isa_flags_explicit & (OPTION_MASK_SOFT_FLOAT
3376 | OPTION_MASK_ALTIVEC
3377 | OPTION_MASK_VSX)) != 0)
3378 rs6000_isa_flags &= ~((OPTION_MASK_P8_VECTOR | OPTION_MASK_CRYPTO
3379 | OPTION_MASK_DIRECT_MOVE)
3380 & ~rs6000_isa_flags_explicit);
3381
3382 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3383 rs6000_print_isa_options (stderr, 0, "before defaults", rs6000_isa_flags);
3384
3385 /* For the newer switches (vsx, dfp, etc.) set some of the older options,
3386 unless the user explicitly used -mno-<option> to disable the code. */
3387 if (TARGET_P8_VECTOR || TARGET_DIRECT_MOVE || TARGET_CRYPTO)
3388 rs6000_isa_flags |= (ISA_2_7_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3389 else if (TARGET_VSX)
3390 rs6000_isa_flags |= (ISA_2_6_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3391 else if (TARGET_POPCNTD)
3392 rs6000_isa_flags |= (ISA_2_6_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
3393 else if (TARGET_DFP)
3394 rs6000_isa_flags |= (ISA_2_5_MASKS_SERVER & ~rs6000_isa_flags_explicit);
3395 else if (TARGET_CMPB)
3396 rs6000_isa_flags |= (ISA_2_5_MASKS_EMBEDDED & ~rs6000_isa_flags_explicit);
3397 else if (TARGET_FPRND)
3398 rs6000_isa_flags |= (ISA_2_4_MASKS & ~rs6000_isa_flags_explicit);
3399 else if (TARGET_POPCNTB)
3400 rs6000_isa_flags |= (ISA_2_2_MASKS & ~rs6000_isa_flags_explicit);
3401 else if (TARGET_ALTIVEC)
3402 rs6000_isa_flags |= (OPTION_MASK_PPC_GFXOPT & ~rs6000_isa_flags_explicit);
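
/* For example, an explicit -mvsx with no -mcpu falls into the ISA 2.6
   server case above, which also turns on the features of the earlier
   ISAs (such as Altivec and popcntd), except for any option the user
   explicitly disabled with -mno-<option>.  */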
3403
3404 if (TARGET_CRYPTO && !TARGET_ALTIVEC)
3405 {
3406 if (rs6000_isa_flags_explicit & OPTION_MASK_CRYPTO)
3407 error ("-mcrypto requires -maltivec");
3408 rs6000_isa_flags &= ~OPTION_MASK_CRYPTO;
3409 }
3410
3411 if (TARGET_DIRECT_MOVE && !TARGET_VSX)
3412 {
3413 if (rs6000_isa_flags_explicit & OPTION_MASK_DIRECT_MOVE)
3414 error ("-mdirect-move requires -mvsx");
3415 rs6000_isa_flags &= ~OPTION_MASK_DIRECT_MOVE;
3416 }
3417
3418 if (TARGET_P8_VECTOR && !TARGET_ALTIVEC)
3419 {
3420 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3421 error ("-mpower8-vector requires -maltivec");
3422 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3423 }
3424
3425 if (TARGET_P8_VECTOR && !TARGET_VSX)
3426 {
3427 if (rs6000_isa_flags_explicit & OPTION_MASK_P8_VECTOR)
3428 error ("-mpower8-vector requires -mvsx");
3429 rs6000_isa_flags &= ~OPTION_MASK_P8_VECTOR;
3430 }
3431
3432 if (TARGET_VSX_TIMODE && !TARGET_VSX)
3433 {
3434 if (rs6000_isa_flags_explicit & OPTION_MASK_VSX_TIMODE)
3435 error ("-mvsx-timode requires -mvsx");
3436 rs6000_isa_flags &= ~OPTION_MASK_VSX_TIMODE;
3437 }
3438
3439 if (TARGET_DFP && !TARGET_HARD_FLOAT)
3440 {
3441 if (rs6000_isa_flags_explicit & OPTION_MASK_DFP)
3442 error ("-mhard-dfp requires -mhard-float");
3443 rs6000_isa_flags &= ~OPTION_MASK_DFP;
3444 }
3445
3446 /* The quad memory instructions only work in 64-bit mode. In 32-bit mode,
3447 silently turn off quad memory mode. */
3448 if ((TARGET_QUAD_MEMORY || TARGET_QUAD_MEMORY_ATOMIC) && !TARGET_POWERPC64)
3449 {
3450 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
3451 warning (0, N_("-mquad-memory requires 64-bit mode"));
3452
3453 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
3454 warning (0, N_("-mquad-memory-atomic requires 64-bit mode"));
3455
3456 rs6000_isa_flags &= ~(OPTION_MASK_QUAD_MEMORY
3457 | OPTION_MASK_QUAD_MEMORY_ATOMIC);
3458 }
3459
3460 /* Non-atomic quad memory loads/stores are disabled for little endian, since
3461 the words are reversed, but atomic operations can still be done by
3462 swapping the words. */
3463 if (TARGET_QUAD_MEMORY && !WORDS_BIG_ENDIAN)
3464 {
3465 if ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY) != 0)
3466 warning (0, N_("-mquad-memory is not available in little endian mode"));
3467
3468 rs6000_isa_flags &= ~OPTION_MASK_QUAD_MEMORY;
3469 }
3470
3471 /* Assume that if the user asked for normal quad memory instructions, they
3472 want the atomic versions as well, unless they explicitly told us not to
3473 use quad word atomic instructions. */
3474 if (TARGET_QUAD_MEMORY
3475 && !TARGET_QUAD_MEMORY_ATOMIC
3476 && ((rs6000_isa_flags_explicit & OPTION_MASK_QUAD_MEMORY_ATOMIC) == 0))
3477 rs6000_isa_flags |= OPTION_MASK_QUAD_MEMORY_ATOMIC;
3478
3479 /* Enable power8 fusion if we are tuning for power8, even if we aren't
3480 generating power8 instructions. */
3481 if (!(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION))
3482 rs6000_isa_flags |= (processor_target_table[tune_index].target_enable
3483 & OPTION_MASK_P8_FUSION);
3484
3485 /* Power8 does not fuse sign-extended loads with the addis. If we are
3486 optimizing at high levels for speed, convert a sign-extended load into a
3487 zero-extending load and an explicit sign extension. */
3488 if (TARGET_P8_FUSION
3489 && !(rs6000_isa_flags_explicit & OPTION_MASK_P8_FUSION_SIGN)
3490 && optimize_function_for_speed_p (cfun)
3491 && optimize >= 3)
3492 rs6000_isa_flags |= OPTION_MASK_P8_FUSION_SIGN;
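
/* For instance, instead of an addis/lwa pair (which does not fuse), this
   would emit addis/lwz (which fuses) followed by a separate extsw.  */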
3493
3494 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3495 rs6000_print_isa_options (stderr, 0, "after defaults", rs6000_isa_flags);
3496
3497 /* E500mc does "better" if we inline more aggressively. Respect the
3498 user's opinion, though. */
3499 if (rs6000_block_move_inline_limit == 0
3500 && (rs6000_cpu == PROCESSOR_PPCE500MC
3501 || rs6000_cpu == PROCESSOR_PPCE500MC64
3502 || rs6000_cpu == PROCESSOR_PPCE5500
3503 || rs6000_cpu == PROCESSOR_PPCE6500))
3504 rs6000_block_move_inline_limit = 128;
3505
3506 /* store_one_arg depends on expand_block_move to handle at least the
3507 size of reg_parm_stack_space. */
3508 if (rs6000_block_move_inline_limit < (TARGET_POWERPC64 ? 64 : 32))
3509 rs6000_block_move_inline_limit = (TARGET_POWERPC64 ? 64 : 32);
3510
3511 if (global_init_p)
3512 {
3513 /* If the appropriate debug option is enabled, replace the target hooks
3514 with debug versions that call the real version and then print
3515 debugging information. */
3516 if (TARGET_DEBUG_COST)
3517 {
3518 targetm.rtx_costs = rs6000_debug_rtx_costs;
3519 targetm.address_cost = rs6000_debug_address_cost;
3520 targetm.sched.adjust_cost = rs6000_debug_adjust_cost;
3521 }
3522
3523 if (TARGET_DEBUG_ADDR)
3524 {
3525 targetm.legitimate_address_p = rs6000_debug_legitimate_address_p;
3526 targetm.legitimize_address = rs6000_debug_legitimize_address;
3527 rs6000_secondary_reload_class_ptr
3528 = rs6000_debug_secondary_reload_class;
3529 rs6000_secondary_memory_needed_ptr
3530 = rs6000_debug_secondary_memory_needed;
3531 rs6000_cannot_change_mode_class_ptr
3532 = rs6000_debug_cannot_change_mode_class;
3533 rs6000_preferred_reload_class_ptr
3534 = rs6000_debug_preferred_reload_class;
3535 rs6000_legitimize_reload_address_ptr
3536 = rs6000_debug_legitimize_reload_address;
3537 rs6000_mode_dependent_address_ptr
3538 = rs6000_debug_mode_dependent_address;
3539 }
3540
3541 if (rs6000_veclibabi_name)
3542 {
3543 if (strcmp (rs6000_veclibabi_name, "mass") == 0)
3544 rs6000_veclib_handler = rs6000_builtin_vectorized_libmass;
3545 else
3546 {
3547 error ("unknown vectorization library ABI type (%s) for "
3548 "-mveclibabi= switch", rs6000_veclibabi_name);
3549 ret = false;
3550 }
3551 }
3552 }
3553
3554 if (!global_options_set.x_rs6000_long_double_type_size)
3555 {
3556 if (main_target_opt != NULL
3557 && (main_target_opt->x_rs6000_long_double_type_size
3558 != RS6000_DEFAULT_LONG_DOUBLE_SIZE))
3559 error ("target attribute or pragma changes long double size");
3560 else
3561 rs6000_long_double_type_size = RS6000_DEFAULT_LONG_DOUBLE_SIZE;
3562 }
3563
3564 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
3565 if (!global_options_set.x_rs6000_ieeequad)
3566 rs6000_ieeequad = 1;
3567 #endif
3568
3569 /* Disable VSX and Altivec silently if the user switched cpus to power7 in a
3570 target attribute or pragma which automatically enables both options,
3571 unless the altivec ABI was set. The altivec ABI is on by default for
3572 64-bit, but not for 32-bit. */
3573 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
3574 rs6000_isa_flags &= ~((OPTION_MASK_VSX | OPTION_MASK_ALTIVEC)
3575 & ~rs6000_isa_flags_explicit);
3576
3577 /* Enable Altivec ABI for AIX -maltivec. */
3578 if (TARGET_XCOFF && (TARGET_ALTIVEC || TARGET_VSX))
3579 {
3580 if (main_target_opt != NULL && !main_target_opt->x_rs6000_altivec_abi)
3581 error ("target attribute or pragma changes AltiVec ABI");
3582 else
3583 rs6000_altivec_abi = 1;
3584 }
3585
3586 /* The AltiVec ABI is the default for PowerPC-64 GNU/Linux. For
3587 PowerPC-32 GNU/Linux, -maltivec implies the AltiVec ABI. It can
3588 be explicitly overridden in either case. */
3589 if (TARGET_ELF)
3590 {
3591 if (!global_options_set.x_rs6000_altivec_abi
3592 && (TARGET_64BIT || TARGET_ALTIVEC || TARGET_VSX))
3593 {
3594 if (main_target_opt != NULL
3595 && !main_target_opt->x_rs6000_altivec_abi)
3596 error ("target attribute or pragma changes AltiVec ABI");
3597 else
3598 rs6000_altivec_abi = 1;
3599 }
3600 }
3601
3602 /* Set the Darwin64 ABI as the default for 64-bit Darwin.
3603 So far, the only darwin64 targets are also Mach-O. */
3604 if (TARGET_MACHO
3605 && DEFAULT_ABI == ABI_DARWIN
3606 && TARGET_64BIT)
3607 {
3608 if (main_target_opt != NULL && !main_target_opt->x_rs6000_darwin64_abi)
3609 error ("target attribute or pragma changes darwin64 ABI");
3610 else
3611 {
3612 rs6000_darwin64_abi = 1;
3613 /* Default to natural alignment, for better performance. */
3614 rs6000_alignment_flags = MASK_ALIGN_NATURAL;
3615 }
3616 }
3617
3618 /* Place FP constants in the constant pool instead of the TOC
3619 if section anchors are enabled. */
3620 if (flag_section_anchors
3621 && !global_options_set.x_TARGET_NO_FP_IN_TOC)
3622 TARGET_NO_FP_IN_TOC = 1;
3623
3624 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3625 rs6000_print_isa_options (stderr, 0, "before subtarget", rs6000_isa_flags);
3626
3627 #ifdef SUBTARGET_OVERRIDE_OPTIONS
3628 SUBTARGET_OVERRIDE_OPTIONS;
3629 #endif
3630 #ifdef SUBSUBTARGET_OVERRIDE_OPTIONS
3631 SUBSUBTARGET_OVERRIDE_OPTIONS;
3632 #endif
3633 #ifdef SUB3TARGET_OVERRIDE_OPTIONS
3634 SUB3TARGET_OVERRIDE_OPTIONS;
3635 #endif
3636
3637 if (TARGET_DEBUG_REG || TARGET_DEBUG_TARGET)
3638 rs6000_print_isa_options (stderr, 0, "after subtarget", rs6000_isa_flags);
3639
3640 /* For the E500 family of cores, reset the single/double FP flags to let us
3641 check that they remain constant across attributes or pragmas. Also,
3642 clear a possible request for string instructions, which are not supported
3643 and which we might have silently enabled above for -Os.
3644 
3645 For other families, clear ISEL in case it was set implicitly. */
3647
3648 switch (rs6000_cpu)
3649 {
3650 case PROCESSOR_PPC8540:
3651 case PROCESSOR_PPC8548:
3652 case PROCESSOR_PPCE500MC:
3653 case PROCESSOR_PPCE500MC64:
3654 case PROCESSOR_PPCE5500:
3655 case PROCESSOR_PPCE6500:
3656
3657 rs6000_single_float = TARGET_E500_SINGLE || TARGET_E500_DOUBLE;
3658 rs6000_double_float = TARGET_E500_DOUBLE;
3659
3660 rs6000_isa_flags &= ~OPTION_MASK_STRING;
3661
3662 break;
3663
3664 default:
3665
3666 if (have_cpu && !(rs6000_isa_flags_explicit & OPTION_MASK_ISEL))
3667 rs6000_isa_flags &= ~OPTION_MASK_ISEL;
3668
3669 break;
3670 }
3671
3672 if (main_target_opt)
3673 {
3674 if (main_target_opt->x_rs6000_single_float != rs6000_single_float)
3675 error ("target attribute or pragma changes single precision floating "
3676 "point");
3677 if (main_target_opt->x_rs6000_double_float != rs6000_double_float)
3678 error ("target attribute or pragma changes double precision floating "
3679 "point");
3680 }
3681
3682 /* Detect invalid option combinations with E500. */
3683 CHECK_E500_OPTIONS;
3684
3685 rs6000_always_hint = (rs6000_cpu != PROCESSOR_POWER4
3686 && rs6000_cpu != PROCESSOR_POWER5
3687 && rs6000_cpu != PROCESSOR_POWER6
3688 && rs6000_cpu != PROCESSOR_POWER7
3689 && rs6000_cpu != PROCESSOR_POWER8
3690 && rs6000_cpu != PROCESSOR_PPCA2
3691 && rs6000_cpu != PROCESSOR_CELL
3692 && rs6000_cpu != PROCESSOR_PPC476);
3693 rs6000_sched_groups = (rs6000_cpu == PROCESSOR_POWER4
3694 || rs6000_cpu == PROCESSOR_POWER5
3695 || rs6000_cpu == PROCESSOR_POWER7
3696 || rs6000_cpu == PROCESSOR_POWER8);
3697 rs6000_align_branch_targets = (rs6000_cpu == PROCESSOR_POWER4
3698 || rs6000_cpu == PROCESSOR_POWER5
3699 || rs6000_cpu == PROCESSOR_POWER6
3700 || rs6000_cpu == PROCESSOR_POWER7
3701 || rs6000_cpu == PROCESSOR_POWER8
3702 || rs6000_cpu == PROCESSOR_PPCE500MC
3703 || rs6000_cpu == PROCESSOR_PPCE500MC64
3704 || rs6000_cpu == PROCESSOR_PPCE5500
3705 || rs6000_cpu == PROCESSOR_PPCE6500);
3706
3707 /* Allow debug switches to override the above settings. These are set to -1
3708 in rs6000.opt to indicate the user hasn't directly set the switch. */
3709 if (TARGET_ALWAYS_HINT >= 0)
3710 rs6000_always_hint = TARGET_ALWAYS_HINT;
3711
3712 if (TARGET_SCHED_GROUPS >= 0)
3713 rs6000_sched_groups = TARGET_SCHED_GROUPS;
3714
3715 if (TARGET_ALIGN_BRANCH_TARGETS >= 0)
3716 rs6000_align_branch_targets = TARGET_ALIGN_BRANCH_TARGETS;
3717
3718 rs6000_sched_restricted_insns_priority
3719 = (rs6000_sched_groups ? 1 : 0);
3720
3721 /* Handle -msched-costly-dep option. */
3722 rs6000_sched_costly_dep
3723 = (rs6000_sched_groups ? true_store_to_load_dep_costly : no_dep_costly);
3724
3725 if (rs6000_sched_costly_dep_str)
3726 {
3727 if (! strcmp (rs6000_sched_costly_dep_str, "no"))
3728 rs6000_sched_costly_dep = no_dep_costly;
3729 else if (! strcmp (rs6000_sched_costly_dep_str, "all"))
3730 rs6000_sched_costly_dep = all_deps_costly;
3731 else if (! strcmp (rs6000_sched_costly_dep_str, "true_store_to_load"))
3732 rs6000_sched_costly_dep = true_store_to_load_dep_costly;
3733 else if (! strcmp (rs6000_sched_costly_dep_str, "store_to_load"))
3734 rs6000_sched_costly_dep = store_to_load_dep_costly;
3735 else
3736 rs6000_sched_costly_dep = ((enum rs6000_dependence_cost)
3737 atoi (rs6000_sched_costly_dep_str));
3738 }
3739
3740 /* Handle -minsert-sched-nops option. */
3741 rs6000_sched_insert_nops
3742 = (rs6000_sched_groups ? sched_finish_regroup_exact : sched_finish_none);
3743
3744 if (rs6000_sched_insert_nops_str)
3745 {
3746 if (! strcmp (rs6000_sched_insert_nops_str, "no"))
3747 rs6000_sched_insert_nops = sched_finish_none;
3748 else if (! strcmp (rs6000_sched_insert_nops_str, "pad"))
3749 rs6000_sched_insert_nops = sched_finish_pad_groups;
3750 else if (! strcmp (rs6000_sched_insert_nops_str, "regroup_exact"))
3751 rs6000_sched_insert_nops = sched_finish_regroup_exact;
3752 else
3753 rs6000_sched_insert_nops = ((enum rs6000_nop_insertion)
3754 atoi (rs6000_sched_insert_nops_str));
3755 }
3756
3757 if (global_init_p)
3758 {
3759 #ifdef TARGET_REGNAMES
3760 /* If the user desires alternate register names, copy in the
3761 alternate names now. */
3762 if (TARGET_REGNAMES)
3763 memcpy (rs6000_reg_names, alt_reg_names, sizeof (rs6000_reg_names));
3764 #endif
3765
3766 /* Set aix_struct_return last, after the ABI is determined.
3767 If -maix-struct-return or -msvr4-struct-return was explicitly
3768 used, don't override with the ABI default. */
3769 if (!global_options_set.x_aix_struct_return)
3770 aix_struct_return = (DEFAULT_ABI != ABI_V4 || DRAFT_V4_STRUCT_RET);
3771
3772 #if 0
3773 /* IBM XL compiler defaults to unsigned bitfields. */
3774 if (TARGET_XL_COMPAT)
3775 flag_signed_bitfields = 0;
3776 #endif
3777
3778 if (TARGET_LONG_DOUBLE_128 && !TARGET_IEEEQUAD)
3779 REAL_MODE_FORMAT (TFmode) = &ibm_extended_format;
3780
3781 if (TARGET_TOC)
3782 ASM_GENERATE_INTERNAL_LABEL (toc_label_name, "LCTOC", 1);
3783
3784 /* We can only guarantee the availability of DI pseudo-ops when
3785 assembling for 64-bit targets. */
3786 if (!TARGET_64BIT)
3787 {
3788 targetm.asm_out.aligned_op.di = NULL;
3789 targetm.asm_out.unaligned_op.di = NULL;
3790 }
3791
3792
3793 /* Set branch target alignment, if not optimizing for size. */
3794 if (!optimize_size)
3795 {
3796 /* Cell wants to be aligned to 8 bytes for dual issue; Titan wants to be
3797 aligned to 8 bytes to avoid misprediction by the branch predictor. */
3798 if (rs6000_cpu == PROCESSOR_TITAN
3799 || rs6000_cpu == PROCESSOR_CELL)
3800 {
3801 if (align_functions <= 0)
3802 align_functions = 8;
3803 if (align_jumps <= 0)
3804 align_jumps = 8;
3805 if (align_loops <= 0)
3806 align_loops = 8;
3807 }
3808 if (rs6000_align_branch_targets)
3809 {
3810 if (align_functions <= 0)
3811 align_functions = 16;
3812 if (align_jumps <= 0)
3813 align_jumps = 16;
3814 if (align_loops <= 0)
3815 {
3816 can_override_loop_align = 1;
3817 align_loops = 16;
3818 }
3819 }
3820 if (align_jumps_max_skip <= 0)
3821 align_jumps_max_skip = 15;
3822 if (align_loops_max_skip <= 0)
3823 align_loops_max_skip = 15;
3824 }
3825
3826 /* Arrange to save and restore machine status around nested functions. */
3827 init_machine_status = rs6000_init_machine_status;
3828
3829 /* We should always be splitting complex arguments, but we can't break
3830 Linux and Darwin ABIs at the moment. For now, only AIX is fixed. */
3831 if (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN)
3832 targetm.calls.split_complex_arg = NULL;
3833 }
3834
3835 /* Initialize rs6000_cost with the appropriate target costs. */
3836 if (optimize_size)
3837 rs6000_cost = TARGET_POWERPC64 ? &size64_cost : &size32_cost;
3838 else
3839 switch (rs6000_cpu)
3840 {
3841 case PROCESSOR_RS64A:
3842 rs6000_cost = &rs64a_cost;
3843 break;
3844
3845 case PROCESSOR_MPCCORE:
3846 rs6000_cost = &mpccore_cost;
3847 break;
3848
3849 case PROCESSOR_PPC403:
3850 rs6000_cost = &ppc403_cost;
3851 break;
3852
3853 case PROCESSOR_PPC405:
3854 rs6000_cost = &ppc405_cost;
3855 break;
3856
3857 case PROCESSOR_PPC440:
3858 rs6000_cost = &ppc440_cost;
3859 break;
3860
3861 case PROCESSOR_PPC476:
3862 rs6000_cost = &ppc476_cost;
3863 break;
3864
3865 case PROCESSOR_PPC601:
3866 rs6000_cost = &ppc601_cost;
3867 break;
3868
3869 case PROCESSOR_PPC603:
3870 rs6000_cost = &ppc603_cost;
3871 break;
3872
3873 case PROCESSOR_PPC604:
3874 rs6000_cost = &ppc604_cost;
3875 break;
3876
3877 case PROCESSOR_PPC604e:
3878 rs6000_cost = &ppc604e_cost;
3879 break;
3880
3881 case PROCESSOR_PPC620:
3882 rs6000_cost = &ppc620_cost;
3883 break;
3884
3885 case PROCESSOR_PPC630:
3886 rs6000_cost = &ppc630_cost;
3887 break;
3888
3889 case PROCESSOR_CELL:
3890 rs6000_cost = &ppccell_cost;
3891 break;
3892
3893 case PROCESSOR_PPC750:
3894 case PROCESSOR_PPC7400:
3895 rs6000_cost = &ppc750_cost;
3896 break;
3897
3898 case PROCESSOR_PPC7450:
3899 rs6000_cost = &ppc7450_cost;
3900 break;
3901
3902 case PROCESSOR_PPC8540:
3903 case PROCESSOR_PPC8548:
3904 rs6000_cost = &ppc8540_cost;
3905 break;
3906
3907 case PROCESSOR_PPCE300C2:
3908 case PROCESSOR_PPCE300C3:
3909 rs6000_cost = &ppce300c2c3_cost;
3910 break;
3911
3912 case PROCESSOR_PPCE500MC:
3913 rs6000_cost = &ppce500mc_cost;
3914 break;
3915
3916 case PROCESSOR_PPCE500MC64:
3917 rs6000_cost = &ppce500mc64_cost;
3918 break;
3919
3920 case PROCESSOR_PPCE5500:
3921 rs6000_cost = &ppce5500_cost;
3922 break;
3923
3924 case PROCESSOR_PPCE6500:
3925 rs6000_cost = &ppce6500_cost;
3926 break;
3927
3928 case PROCESSOR_TITAN:
3929 rs6000_cost = &titan_cost;
3930 break;
3931
3932 case PROCESSOR_POWER4:
3933 case PROCESSOR_POWER5:
3934 rs6000_cost = &power4_cost;
3935 break;
3936
3937 case PROCESSOR_POWER6:
3938 rs6000_cost = &power6_cost;
3939 break;
3940
3941 case PROCESSOR_POWER7:
3942 rs6000_cost = &power7_cost;
3943 break;
3944
3945 case PROCESSOR_POWER8:
3946 rs6000_cost = &power8_cost;
3947 break;
3948
3949 case PROCESSOR_PPCA2:
3950 rs6000_cost = &ppca2_cost;
3951 break;
3952
3953 default:
3954 gcc_unreachable ();
3955 }
3956
3957 if (global_init_p)
3958 {
3959 maybe_set_param_value (PARAM_SIMULTANEOUS_PREFETCHES,
3960 rs6000_cost->simultaneous_prefetches,
3961 global_options.x_param_values,
3962 global_options_set.x_param_values);
3963 maybe_set_param_value (PARAM_L1_CACHE_SIZE, rs6000_cost->l1_cache_size,
3964 global_options.x_param_values,
3965 global_options_set.x_param_values);
3966 maybe_set_param_value (PARAM_L1_CACHE_LINE_SIZE,
3967 rs6000_cost->cache_line_size,
3968 global_options.x_param_values,
3969 global_options_set.x_param_values);
3970 maybe_set_param_value (PARAM_L2_CACHE_SIZE, rs6000_cost->l2_cache_size,
3971 global_options.x_param_values,
3972 global_options_set.x_param_values);
3973
3974 /* Increase loop peeling limits based on performance analysis. */
3975 maybe_set_param_value (PARAM_MAX_PEELED_INSNS, 400,
3976 global_options.x_param_values,
3977 global_options_set.x_param_values);
3978 maybe_set_param_value (PARAM_MAX_COMPLETELY_PEELED_INSNS, 400,
3979 global_options.x_param_values,
3980 global_options_set.x_param_values);
3981
3982 /* If using typedef char *va_list, signal that
3983 __builtin_va_start (&ap, 0) can be optimized to
3984 ap = __builtin_next_arg (0). */
3985 if (DEFAULT_ABI != ABI_V4)
3986 targetm.expand_builtin_va_start = NULL;
3987 }
3988
3989 /* Set up single/double float flags.
3990 If TARGET_HARD_FLOAT is set, but neither single nor double is set,
3991 then set both flags. */
3992 if (TARGET_HARD_FLOAT && TARGET_FPRS
3993 && rs6000_single_float == 0 && rs6000_double_float == 0)
3994 rs6000_single_float = rs6000_double_float = 1;
3995
3996 /* If not explicitly specified via option, decide whether to generate indexed
3997 load/store instructions. */
3998 if (TARGET_AVOID_XFORM == -1)
3999 /* Avoid indexed addressing when targeting Power6 in order to avoid the
4000 DERAT mispredict penalty. However, the LVE and STVE altivec instructions
4001 need indexed accesses, and the type used is the scalar type of the element
4002 being loaded or stored. */
4003 TARGET_AVOID_XFORM = (rs6000_cpu == PROCESSOR_POWER6 && TARGET_CMPB
4004 && !TARGET_ALTIVEC);
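
/* In other words, on Power6 prefer offset addressing such as "ld r3,8(r9)"
   over indexed (X-form) addressing such as "ldx r3,r9,r10", except that
   the Altivec element accesses have no offset form.  */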
4005
4006 /* Set the -mrecip options. */
4007 if (rs6000_recip_name)
4008 {
4009 char *p = ASTRDUP (rs6000_recip_name);
4010 char *q;
4011 unsigned int mask, i;
4012 bool invert;
4013
4014 while ((q = strtok (p, ",")) != NULL)
4015 {
4016 p = NULL;
4017 if (*q == '!')
4018 {
4019 invert = true;
4020 q++;
4021 }
4022 else
4023 invert = false;
4024
4025 if (!strcmp (q, "default"))
4026 mask = ((TARGET_RECIP_PRECISION)
4027 ? RECIP_HIGH_PRECISION : RECIP_LOW_PRECISION);
4028 else
4029 {
4030 for (i = 0; i < ARRAY_SIZE (recip_options); i++)
4031 if (!strcmp (q, recip_options[i].string))
4032 {
4033 mask = recip_options[i].mask;
4034 break;
4035 }
4036
4037 if (i == ARRAY_SIZE (recip_options))
4038 {
4039 error ("unknown option for -mrecip=%s", q);
4040 invert = false;
4041 mask = 0;
4042 ret = false;
4043 }
4044 }
4045
4046 if (invert)
4047 rs6000_recip_control &= ~mask;
4048 else
4049 rs6000_recip_control |= mask;
4050 }
4051 }
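
/* So a request such as -mrecip=all,!rsqrtd (using the names from the
   recip_options table) would enable every reciprocal estimate except the
   double-precision reciprocal square root, via the invert flag above.  */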
4052
4053 /* Set the builtin mask from the various options that could affect which
4054 builtins are available. In the past we used target_flags, but we've run
4055 out of bits, and some options like SPE and PAIRED are no longer in
4056 target_flags. */
4057 rs6000_builtin_mask = rs6000_builtin_mask_calculate ();
4058 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
4059 {
4060 fprintf (stderr,
4061 "new builtin mask = " HOST_WIDE_INT_PRINT_HEX ", ",
4062 rs6000_builtin_mask);
4063 rs6000_print_builtin_options (stderr, 0, NULL, rs6000_builtin_mask);
4064 }
4065
4066 /* Initialize all of the registers. */
4067 rs6000_init_hard_regno_mode_ok (global_init_p);
4068
4069 /* Save the initial options in case the user uses function-specific options. */
4070 if (global_init_p)
4071 target_option_default_node = target_option_current_node
4072 = build_target_option_node (&global_options);
4073
4074 /* If not explicitly specified via option, decide whether to generate the
4075 extra blr's required to preserve the link stack on some cpus (e.g., 476). */
4076 if (TARGET_LINK_STACK == -1)
4077 SET_TARGET_LINK_STACK (rs6000_cpu == PROCESSOR_PPC476 && flag_pic);
4078
4079 return ret;
4080 }
4081
4082 /* Implement TARGET_OPTION_OVERRIDE. On the RS/6000 this is used to
4083 define the target cpu type. */
4084
4085 static void
4086 rs6000_option_override (void)
4087 {
4088 (void) rs6000_option_override_internal (true);
4089
4090 /* Register machine-specific passes. This needs to be done at start-up.
4091 It's convenient to do it here (like i386 does). */
4092 opt_pass *pass_analyze_swaps = make_pass_analyze_swaps (g);
4093
4094 static struct register_pass_info analyze_swaps_info
4095 = { pass_analyze_swaps, "cse1", 1, PASS_POS_INSERT_BEFORE };
4096
4097 register_pass (&analyze_swaps_info);
4098 }
4099
4100 \f
4101 /* Implement targetm.vectorize.builtin_mask_for_load. */
4102 static tree
4103 rs6000_builtin_mask_for_load (void)
4104 {
4105 if (TARGET_ALTIVEC || TARGET_VSX)
4106 return altivec_builtin_mask_for_load;
4107 else
4108 return 0;
4109 }
4110
4111 /* Implement LOOP_ALIGN. */
4112 int
4113 rs6000_loop_align (rtx label)
4114 {
4115 basic_block bb;
4116 int ninsns;
4117
4118 /* Don't override loop alignment if -falign-loops was specified. */
4119 if (!can_override_loop_align)
4120 return align_loops_log;
4121
4122 bb = BLOCK_FOR_INSN (label);
4123 ninsns = num_loop_insns (bb->loop_father);
4124 
4125 /* Align small loops to 32 bytes to fit in an icache sector; otherwise return the default. */
4126 if (ninsns > 4 && ninsns <= 8
4127 && (rs6000_cpu == PROCESSOR_POWER4
4128 || rs6000_cpu == PROCESSOR_POWER5
4129 || rs6000_cpu == PROCESSOR_POWER6
4130 || rs6000_cpu == PROCESSOR_POWER7
4131 || rs6000_cpu == PROCESSOR_POWER8))
4132 return 5;
4133 else
4134 return align_loops_log;
4135 }
4136
4137 /* Implement TARGET_LOOP_ALIGN_MAX_SKIP. */
4138 static int
4139 rs6000_loop_align_max_skip (rtx label)
4140 {
4141 return (1 << rs6000_loop_align (label)) - 1;
4142 }
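
/* E.g., when rs6000_loop_align returns 5 (a 32-byte boundary), up to
   31 bytes of padding may be emitted to reach the alignment.  */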
4143
4144 /* Return true iff a data reference of TYPE can reach vector alignment (16)
4145 after applying N iterations. This routine does not determine
4146 how many iterations are required to reach the desired alignment. */
4147
4148 static bool
4149 rs6000_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
4150 {
4151 if (is_packed)
4152 return false;
4153
4154 if (TARGET_32BIT)
4155 {
4156 if (rs6000_alignment_flags == MASK_ALIGN_NATURAL)
4157 return true;
4158
4159 if (rs6000_alignment_flags == MASK_ALIGN_POWER)
4160 return true;
4161
4162 return false;
4163 }
4164 else
4165 {
4166 if (TARGET_MACHO)
4167 return false;
4168
4169 /* Assume that all other types are naturally aligned. CHECKME! */
4170 return true;
4171 }
4172 }
4173
4174 /* Return true if the vector misalignment factor is supported by the
4175 target. */
4176 static bool
4177 rs6000_builtin_support_vector_misalignment (enum machine_mode mode,
4178 const_tree type,
4179 int misalignment,
4180 bool is_packed)
4181 {
4182 if (TARGET_VSX)
4183 {
4184 /* Return false if the movmisalign pattern is not supported for this mode. */
4185 if (optab_handler (movmisalign_optab, mode) == CODE_FOR_nothing)
4186 return false;
4187
4188 if (misalignment == -1)
4189 {
4190 /* Misalignment factor is unknown at compile time but we know
4191 it's word aligned. */
4192 if (rs6000_vector_alignment_reachable (type, is_packed))
4193 {
4194 int element_size = TREE_INT_CST_LOW (TYPE_SIZE (type));
4195
4196 if (element_size == 64 || element_size == 32)
4197 return true;
4198 }
4199
4200 return false;
4201 }
4202
4203 /* VSX supports word-aligned vectors. */
4204 if (misalignment % 4 == 0)
4205 return true;
4206 }
4207 return false;
4208 }
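
/* E.g., a known misalignment of 4 or 8 bytes passes the misalignment % 4
   test above, so with VSX the vectorizer can use a movmisalign access
   instead of peeling the loop for alignment.  */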
4209
4210 /* Implement targetm.vectorize.builtin_vectorization_cost. */
4211 static int
4212 rs6000_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
4213 tree vectype, int misalign)
4214 {
4215 unsigned elements;
4216 tree elem_type;
4217
4218 switch (type_of_cost)
4219 {
4220 case scalar_stmt:
4221 case scalar_load:
4222 case scalar_store:
4223 case vector_stmt:
4224 case vector_load:
4225 case vector_store:
4226 case vec_to_scalar:
4227 case scalar_to_vec:
4228 case cond_branch_not_taken:
4229 return 1;
4230
4231 case vec_perm:
4232 if (TARGET_VSX)
4233 return 3;
4234 else
4235 return 1;
4236
4237 case vec_promote_demote:
4238 if (TARGET_VSX)
4239 return 4;
4240 else
4241 return 1;
4242
4243 case cond_branch_taken:
4244 return 3;
4245
4246 case unaligned_load:
4247 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
4248 {
4249 elements = TYPE_VECTOR_SUBPARTS (vectype);
4250 if (elements == 2)
4251 /* Double word aligned. */
4252 return 2;
4253
4254 if (elements == 4)
4255 {
4256 switch (misalign)
4257 {
4258 case 8:
4259 /* Double word aligned. */
4260 return 2;
4261
4262 case -1:
4263 /* Unknown misalignment. */
4264 case 4:
4265 case 12:
4266 /* Word aligned. */
4267 return 22;
4268
4269 default:
4270 gcc_unreachable ();
4271 }
4272 }
4273 }
4274
4275 if (TARGET_ALTIVEC)
4276 /* Misaligned loads are not supported. */
4277 gcc_unreachable ();
4278
4279 return 2;
4280
4281 case unaligned_store:
4282 if (TARGET_VSX && TARGET_ALLOW_MOVMISALIGN)
4283 {
4284 elements = TYPE_VECTOR_SUBPARTS (vectype);
4285 if (elements == 2)
4286 /* Double word aligned. */
4287 return 2;
4288
4289 if (elements == 4)
4290 {
4291 switch (misalign)
4292 {
4293 case 8:
4294 /* Double word aligned. */
4295 return 2;
4296
4297 case -1:
4298 /* Unknown misalignment. */
4299 case 4:
4300 case 12:
4301 /* Word aligned. */
4302 return 23;
4303
4304 default:
4305 gcc_unreachable ();
4306 }
4307 }
4308 }
4309
4310 if (TARGET_ALTIVEC)
4311 /* Misaligned stores are not supported. */
4312 gcc_unreachable ();
4313
4314 return 2;
4315
4316 case vec_construct:
4317 elements = TYPE_VECTOR_SUBPARTS (vectype);
4318 elem_type = TREE_TYPE (vectype);
4319 /* 32-bit floats loaded into registers are stored as double
4320 precision, so we need n/2 converts in addition to the usual
4321 n/2 merges to construct a vector of single-precision floats from them. */
4322 if (SCALAR_FLOAT_TYPE_P (elem_type)
4323 && TYPE_PRECISION (elem_type) == 32)
4324 return elements + 1;
4325 else
4326 return elements / 2 + 1;
4327
4328 default:
4329 gcc_unreachable ();
4330 }
4331 }
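
/* Illustrative reading of the costs above: a word-aligned (misalign 4)
   V4SF unaligned_load costs 22 while an aligned vector_load costs 1, so
   the model strongly discourages vectorizing loops that are dominated by
   misaligned accesses.  */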
4332
4333 /* Implement targetm.vectorize.preferred_simd_mode. */
4334
4335 static enum machine_mode
4336 rs6000_preferred_simd_mode (enum machine_mode mode)
4337 {
4338 if (TARGET_VSX)
4339 switch (mode)
4340 {
4341 case DFmode:
4342 return V2DFmode;
4343 default:;
4344 }
4345 if (TARGET_ALTIVEC || TARGET_VSX)
4346 switch (mode)
4347 {
4348 case SFmode:
4349 return V4SFmode;
4350 case TImode:
4351 return V1TImode;
4352 case DImode:
4353 return V2DImode;
4354 case SImode:
4355 return V4SImode;
4356 case HImode:
4357 return V8HImode;
4358 case QImode:
4359 return V16QImode;
4360 default:;
4361 }
4362 if (TARGET_SPE)
4363 switch (mode)
4364 {
4365 case SFmode:
4366 return V2SFmode;
4367 case SImode:
4368 return V2SImode;
4369 default:;
4370 }
4371 if (TARGET_PAIRED_FLOAT
4372 && mode == SFmode)
4373 return V2SFmode;
4374 return word_mode;
4375 }
4376
4377 typedef struct _rs6000_cost_data
4378 {
4379 struct loop *loop_info;
4380 unsigned cost[3];
4381 } rs6000_cost_data;
4382
4383 /* Test for likely overcommitment of vector hardware resources. If a
4384 loop iteration is relatively large, and too large a percentage of
4385 instructions in the loop are vectorized, the cost model may not
4386 adequately reflect delays from unavailable vector resources.
4387 Penalize the loop body cost for this case. */
4388
4389 static void
4390 rs6000_density_test (rs6000_cost_data *data)
4391 {
4392 const int DENSITY_PCT_THRESHOLD = 85;
4393 const int DENSITY_SIZE_THRESHOLD = 70;
4394 const int DENSITY_PENALTY = 10;
4395 struct loop *loop = data->loop_info;
4396 basic_block *bbs = get_loop_body (loop);
4397 int nbbs = loop->num_nodes;
4398 int vec_cost = data->cost[vect_body], not_vec_cost = 0;
4399 int i, density_pct;
4400
4401 for (i = 0; i < nbbs; i++)
4402 {
4403 basic_block bb = bbs[i];
4404 gimple_stmt_iterator gsi;
4405
4406 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
4407 {
4408 gimple stmt = gsi_stmt (gsi);
4409 stmt_vec_info stmt_info = vinfo_for_stmt (stmt);
4410
4411 if (!STMT_VINFO_RELEVANT_P (stmt_info)
4412 && !STMT_VINFO_IN_PATTERN_P (stmt_info))
4413 not_vec_cost++;
4414 }
4415 }
4416
4417 free (bbs);
4418 density_pct = (vec_cost * 100) / (vec_cost + not_vec_cost);
4419
4420 if (density_pct > DENSITY_PCT_THRESHOLD
4421 && vec_cost + not_vec_cost > DENSITY_SIZE_THRESHOLD)
4422 {
4423 data->cost[vect_body] = vec_cost * (100 + DENSITY_PENALTY) / 100;
4424 if (dump_enabled_p ())
4425 dump_printf_loc (MSG_NOTE, vect_location,
4426 "density %d%%, cost %d exceeds threshold, penalizing "
4427 "loop body cost by %d%%", density_pct,
4428 vec_cost + not_vec_cost, DENSITY_PENALTY);
4429 }
4430 }
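
/* Worked example: with vec_cost = 90 and not_vec_cost = 10, density_pct
   is 90 and the body size is 100; both thresholds are exceeded, so the
   vectorized body cost is raised to 90 * 110 / 100 = 99.  */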
4431
4432 /* Implement targetm.vectorize.init_cost. */
4433
4434 static void *
4435 rs6000_init_cost (struct loop *loop_info)
4436 {
4437 rs6000_cost_data *data = XNEW (struct _rs6000_cost_data);
4438 data->loop_info = loop_info;
4439 data->cost[vect_prologue] = 0;
4440 data->cost[vect_body] = 0;
4441 data->cost[vect_epilogue] = 0;
4442 return data;
4443 }
4444
4445 /* Implement targetm.vectorize.add_stmt_cost. */
4446
4447 static unsigned
4448 rs6000_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
4449 struct _stmt_vec_info *stmt_info, int misalign,
4450 enum vect_cost_model_location where)
4451 {
4452 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
4453 unsigned retval = 0;
4454
4455 if (flag_vect_cost_model)
4456 {
4457 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
4458 int stmt_cost = rs6000_builtin_vectorization_cost (kind, vectype,
4459 misalign);
4460 /* Statements in an inner loop relative to the loop being
4461 vectorized are weighted more heavily. The value here is
4462 arbitrary and could potentially be improved with analysis. */
4463 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
4464 count *= 50; /* FIXME. */
4465
4466 retval = (unsigned) (count * stmt_cost);
4467 cost_data->cost[where] += retval;
4468 }
4469
4470 return retval;
4471 }
4472
4473 /* Implement targetm.vectorize.finish_cost. */
4474
4475 static void
4476 rs6000_finish_cost (void *data, unsigned *prologue_cost,
4477 unsigned *body_cost, unsigned *epilogue_cost)
4478 {
4479 rs6000_cost_data *cost_data = (rs6000_cost_data*) data;
4480
4481 if (cost_data->loop_info)
4482 rs6000_density_test (cost_data);
4483
4484 *prologue_cost = cost_data->cost[vect_prologue];
4485 *body_cost = cost_data->cost[vect_body];
4486 *epilogue_cost = cost_data->cost[vect_epilogue];
4487 }
4488
4489 /* Implement targetm.vectorize.destroy_cost_data. */
4490
4491 static void
4492 rs6000_destroy_cost_data (void *data)
4493 {
4494 free (data);
4495 }
4496
4497 /* Handler for the Mathematical Acceleration Subsystem (mass) interface to a
4498 library with vectorized intrinsics. */
4499
4500 static tree
4501 rs6000_builtin_vectorized_libmass (tree fndecl, tree type_out, tree type_in)
4502 {
4503 char name[32];
4504 const char *suffix = NULL;
4505 tree fntype, new_fndecl, bdecl = NULL_TREE;
4506 int n_args = 1;
4507 const char *bname;
4508 enum machine_mode el_mode, in_mode;
4509 int n, in_n;
4510
4511 /* Libmass is suitable for unsafe math only, as it does not correctly
4512 support parts of IEEE, such as denormals, with the required precision.
4513 Only support it if we have VSX, so we can use the SIMD d2 or f4 functions.
4514 XXX: Add variable length support. */
4515 if (!flag_unsafe_math_optimizations || !TARGET_VSX)
4516 return NULL_TREE;
4517
4518 el_mode = TYPE_MODE (TREE_TYPE (type_out));
4519 n = TYPE_VECTOR_SUBPARTS (type_out);
4520 in_mode = TYPE_MODE (TREE_TYPE (type_in));
4521 in_n = TYPE_VECTOR_SUBPARTS (type_in);
4522 if (el_mode != in_mode
4523 || n != in_n)
4524 return NULL_TREE;
4525
4526 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
4527 {
4528 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
4529 switch (fn)
4530 {
4531 case BUILT_IN_ATAN2:
4532 case BUILT_IN_HYPOT:
4533 case BUILT_IN_POW:
4534 n_args = 2;
4535 /* fall through */
4536
4537 case BUILT_IN_ACOS:
4538 case BUILT_IN_ACOSH:
4539 case BUILT_IN_ASIN:
4540 case BUILT_IN_ASINH:
4541 case BUILT_IN_ATAN:
4542 case BUILT_IN_ATANH:
4543 case BUILT_IN_CBRT:
4544 case BUILT_IN_COS:
4545 case BUILT_IN_COSH:
4546 case BUILT_IN_ERF:
4547 case BUILT_IN_ERFC:
4548 case BUILT_IN_EXP2:
4549 case BUILT_IN_EXP:
4550 case BUILT_IN_EXPM1:
4551 case BUILT_IN_LGAMMA:
4552 case BUILT_IN_LOG10:
4553 case BUILT_IN_LOG1P:
4554 case BUILT_IN_LOG2:
4555 case BUILT_IN_LOG:
4556 case BUILT_IN_SIN:
4557 case BUILT_IN_SINH:
4558 case BUILT_IN_SQRT:
4559 case BUILT_IN_TAN:
4560 case BUILT_IN_TANH:
4561 bdecl = builtin_decl_implicit (fn);
4562 suffix = "d2"; /* pow -> powd2 */
4563 if (el_mode != DFmode
4564 || n != 2
4565 || !bdecl)
4566 return NULL_TREE;
4567 break;
4568
4569 case BUILT_IN_ATAN2F:
4570 case BUILT_IN_HYPOTF:
4571 case BUILT_IN_POWF:
4572 n_args = 2;
4573 /* fall through */
4574
4575 case BUILT_IN_ACOSF:
4576 case BUILT_IN_ACOSHF:
4577 case BUILT_IN_ASINF:
4578 case BUILT_IN_ASINHF:
4579 case BUILT_IN_ATANF:
4580 case BUILT_IN_ATANHF:
4581 case BUILT_IN_CBRTF:
4582 case BUILT_IN_COSF:
4583 case BUILT_IN_COSHF:
4584 case BUILT_IN_ERFF:
4585 case BUILT_IN_ERFCF:
4586 case BUILT_IN_EXP2F:
4587 case BUILT_IN_EXPF:
4588 case BUILT_IN_EXPM1F:
4589 case BUILT_IN_LGAMMAF:
4590 case BUILT_IN_LOG10F:
4591 case BUILT_IN_LOG1PF:
4592 case BUILT_IN_LOG2F:
4593 case BUILT_IN_LOGF:
4594 case BUILT_IN_SINF:
4595 case BUILT_IN_SINHF:
4596 case BUILT_IN_SQRTF:
4597 case BUILT_IN_TANF:
4598 case BUILT_IN_TANHF:
4599 bdecl = builtin_decl_implicit (fn);
4600 suffix = "4"; /* powf -> powf4 */
4601 if (el_mode != SFmode
4602 || n != 4
4603 || !bdecl)
4604 return NULL_TREE;
4605 break;
4606
4607 default:
4608 return NULL_TREE;
4609 }
4610 }
4611 else
4612 return NULL_TREE;
4613
4614 gcc_assert (suffix != NULL);
4615 bname = IDENTIFIER_POINTER (DECL_NAME (bdecl));
4616 if (!bname)
4617 return NULL_TREE;
4618
4619 strcpy (name, bname + sizeof ("__builtin_") - 1);
4620 strcat (name, suffix);
4621
4622 if (n_args == 1)
4623 fntype = build_function_type_list (type_out, type_in, NULL);
4624 else if (n_args == 2)
4625 fntype = build_function_type_list (type_out, type_in, type_in, NULL);
4626 else
4627 gcc_unreachable ();
4628
4629 /* Build a function declaration for the vectorized function. */
4630 new_fndecl = build_decl (BUILTINS_LOCATION,
4631 FUNCTION_DECL, get_identifier (name), fntype);
4632 TREE_PUBLIC (new_fndecl) = 1;
4633 DECL_EXTERNAL (new_fndecl) = 1;
4634 DECL_IS_NOVOPS (new_fndecl) = 1;
4635 TREE_READONLY (new_fndecl) = 1;
4636
4637 return new_fndecl;
4638 }
4639
4640 /* Return a function decl for a vectorized version of the builtin function
4641 FNDECL, with result vector type TYPE_OUT and input vector type TYPE_IN,
4642 or NULL_TREE if it is not available. */
4643
4644 static tree
4645 rs6000_builtin_vectorized_function (tree fndecl, tree type_out,
4646 tree type_in)
4647 {
4648 enum machine_mode in_mode, out_mode;
4649 int in_n, out_n;
4650
4651 if (TARGET_DEBUG_BUILTIN)
4652 fprintf (stderr, "rs6000_builtin_vectorized_function (%s, %s, %s)\n",
4653 IDENTIFIER_POINTER (DECL_NAME (fndecl)),
4654 GET_MODE_NAME (TYPE_MODE (type_out)),
4655 GET_MODE_NAME (TYPE_MODE (type_in)));
4656
4657 if (TREE_CODE (type_out) != VECTOR_TYPE
4658 || TREE_CODE (type_in) != VECTOR_TYPE
4659 || !TARGET_VECTORIZE_BUILTINS)
4660 return NULL_TREE;
4661
4662 out_mode = TYPE_MODE (TREE_TYPE (type_out));
4663 out_n = TYPE_VECTOR_SUBPARTS (type_out);
4664 in_mode = TYPE_MODE (TREE_TYPE (type_in));
4665 in_n = TYPE_VECTOR_SUBPARTS (type_in);
4666
4667 if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
4668 {
4669 enum built_in_function fn = DECL_FUNCTION_CODE (fndecl);
4670 switch (fn)
4671 {
4672 case BUILT_IN_CLZIMAX:
4673 case BUILT_IN_CLZLL:
4674 case BUILT_IN_CLZL:
4675 case BUILT_IN_CLZ:
4676 if (TARGET_P8_VECTOR && in_mode == out_mode && out_n == in_n)
4677 {
4678 if (out_mode == QImode && out_n == 16)
4679 return rs6000_builtin_decls[P8V_BUILTIN_VCLZB];
4680 else if (out_mode == HImode && out_n == 8)
4681 return rs6000_builtin_decls[P8V_BUILTIN_VCLZH];
4682 else if (out_mode == SImode && out_n == 4)
4683 return rs6000_builtin_decls[P8V_BUILTIN_VCLZW];
4684 else if (out_mode == DImode && out_n == 2)
4685 return rs6000_builtin_decls[P8V_BUILTIN_VCLZD];
4686 }
4687 break;
4688 case BUILT_IN_COPYSIGN:
4689 if (VECTOR_UNIT_VSX_P (V2DFmode)
4690 && out_mode == DFmode && out_n == 2
4691 && in_mode == DFmode && in_n == 2)
4692 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNDP];
4693 break;
4694 case BUILT_IN_COPYSIGNF:
4695 if (out_mode != SFmode || out_n != 4
4696 || in_mode != SFmode || in_n != 4)
4697 break;
4698 if (VECTOR_UNIT_VSX_P (V4SFmode))
4699 return rs6000_builtin_decls[VSX_BUILTIN_CPSGNSP];
4700 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4701 return rs6000_builtin_decls[ALTIVEC_BUILTIN_COPYSIGN_V4SF];
4702 break;
4703 case BUILT_IN_POPCOUNTIMAX:
4704 case BUILT_IN_POPCOUNTLL:
4705 case BUILT_IN_POPCOUNTL:
4706 case BUILT_IN_POPCOUNT:
4707 if (TARGET_P8_VECTOR && in_mode == out_mode && out_n == in_n)
4708 {
4709 if (out_mode == QImode && out_n == 16)
4710 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTB];
4711 else if (out_mode == HImode && out_n == 8)
4712 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTH];
4713 else if (out_mode == SImode && out_n == 4)
4714 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTW];
4715 else if (out_mode == DImode && out_n == 2)
4716 return rs6000_builtin_decls[P8V_BUILTIN_VPOPCNTD];
4717 }
4718 break;
4719 case BUILT_IN_SQRT:
4720 if (VECTOR_UNIT_VSX_P (V2DFmode)
4721 && out_mode == DFmode && out_n == 2
4722 && in_mode == DFmode && in_n == 2)
4723 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTDP];
4724 break;
4725 case BUILT_IN_SQRTF:
4726 if (VECTOR_UNIT_VSX_P (V4SFmode)
4727 && out_mode == SFmode && out_n == 4
4728 && in_mode == SFmode && in_n == 4)
4729 return rs6000_builtin_decls[VSX_BUILTIN_XVSQRTSP];
4730 break;
4731 case BUILT_IN_CEIL:
4732 if (VECTOR_UNIT_VSX_P (V2DFmode)
4733 && out_mode == DFmode && out_n == 2
4734 && in_mode == DFmode && in_n == 2)
4735 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIP];
4736 break;
4737 case BUILT_IN_CEILF:
4738 if (out_mode != SFmode || out_n != 4
4739 || in_mode != SFmode || in_n != 4)
4740 break;
4741 if (VECTOR_UNIT_VSX_P (V4SFmode))
4742 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIP];
4743 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4744 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIP];
4745 break;
4746 case BUILT_IN_FLOOR:
4747 if (VECTOR_UNIT_VSX_P (V2DFmode)
4748 && out_mode == DFmode && out_n == 2
4749 && in_mode == DFmode && in_n == 2)
4750 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIM];
4751 break;
4752 case BUILT_IN_FLOORF:
4753 if (out_mode != SFmode || out_n != 4
4754 || in_mode != SFmode || in_n != 4)
4755 break;
4756 if (VECTOR_UNIT_VSX_P (V4SFmode))
4757 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIM];
4758 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4759 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIM];
4760 break;
4761 case BUILT_IN_FMA:
4762 if (VECTOR_UNIT_VSX_P (V2DFmode)
4763 && out_mode == DFmode && out_n == 2
4764 && in_mode == DFmode && in_n == 2)
4765 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDDP];
4766 break;
4767 case BUILT_IN_FMAF:
4768 if (VECTOR_UNIT_VSX_P (V4SFmode)
4769 && out_mode == SFmode && out_n == 4
4770 && in_mode == SFmode && in_n == 4)
4771 return rs6000_builtin_decls[VSX_BUILTIN_XVMADDSP];
4772 else if (VECTOR_UNIT_ALTIVEC_P (V4SFmode)
4773 && out_mode == SFmode && out_n == 4
4774 && in_mode == SFmode && in_n == 4)
4775 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VMADDFP];
4776 break;
4777 case BUILT_IN_TRUNC:
4778 if (VECTOR_UNIT_VSX_P (V2DFmode)
4779 && out_mode == DFmode && out_n == 2
4780 && in_mode == DFmode && in_n == 2)
4781 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIZ];
4782 break;
4783 case BUILT_IN_TRUNCF:
4784 if (out_mode != SFmode || out_n != 4
4785 || in_mode != SFmode || in_n != 4)
4786 break;
4787 if (VECTOR_UNIT_VSX_P (V4SFmode))
4788 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIZ];
4789 if (VECTOR_UNIT_ALTIVEC_P (V4SFmode))
4790 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRFIZ];
4791 break;
4792 case BUILT_IN_NEARBYINT:
4793 if (VECTOR_UNIT_VSX_P (V2DFmode)
4794 && flag_unsafe_math_optimizations
4795 && out_mode == DFmode && out_n == 2
4796 && in_mode == DFmode && in_n == 2)
4797 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPI];
4798 break;
4799 case BUILT_IN_NEARBYINTF:
4800 if (VECTOR_UNIT_VSX_P (V4SFmode)
4801 && flag_unsafe_math_optimizations
4802 && out_mode == SFmode && out_n == 4
4803 && in_mode == SFmode && in_n == 4)
4804 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPI];
4805 break;
4806 case BUILT_IN_RINT:
4807 if (VECTOR_UNIT_VSX_P (V2DFmode)
4808 && !flag_trapping_math
4809 && out_mode == DFmode && out_n == 2
4810 && in_mode == DFmode && in_n == 2)
4811 return rs6000_builtin_decls[VSX_BUILTIN_XVRDPIC];
4812 break;
4813 case BUILT_IN_RINTF:
4814 if (VECTOR_UNIT_VSX_P (V4SFmode)
4815 && !flag_trapping_math
4816 && out_mode == SFmode && out_n == 4
4817 && in_mode == SFmode && in_n == 4)
4818 return rs6000_builtin_decls[VSX_BUILTIN_XVRSPIC];
4819 break;
4820 default:
4821 break;
4822 }
4823 }
4824
4825 else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_MD)
4826 {
4827 enum rs6000_builtins fn
4828 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
4829 switch (fn)
4830 {
4831 case RS6000_BUILTIN_RSQRTF:
4832 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
4833 && out_mode == SFmode && out_n == 4
4834 && in_mode == SFmode && in_n == 4)
4835 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRSQRTFP];
4836 break;
4837 case RS6000_BUILTIN_RSQRT:
4838 if (VECTOR_UNIT_VSX_P (V2DFmode)
4839 && out_mode == DFmode && out_n == 2
4840 && in_mode == DFmode && in_n == 2)
4841 return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];
4842 break;
4843 case RS6000_BUILTIN_RECIPF:
4844 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (V4SFmode)
4845 && out_mode == SFmode && out_n == 4
4846 && in_mode == SFmode && in_n == 4)
4847 return rs6000_builtin_decls[ALTIVEC_BUILTIN_VRECIPFP];
4848 break;
4849 case RS6000_BUILTIN_RECIP:
4850 if (VECTOR_UNIT_VSX_P (V2DFmode)
4851 && out_mode == DFmode && out_n == 2
4852 && in_mode == DFmode && in_n == 2)
4853 return rs6000_builtin_decls[VSX_BUILTIN_RECIP_V2DF];
4854 break;
4855 default:
4856 break;
4857 }
4858 }
4859
4860 /* Generate calls to libmass if appropriate. */
4861 if (rs6000_veclib_handler)
4862 return rs6000_veclib_handler (fndecl, type_out, type_in);
4863
4864 return NULL_TREE;
4865 }
4866 \f
4867 /* Default CPU string for rs6000*_file_start functions. */
4868 static const char *rs6000_default_cpu;
4869
4870 /* Do anything needed at the start of the asm file. */
4871
4872 static void
4873 rs6000_file_start (void)
4874 {
4875 char buffer[80];
4876 const char *start = buffer;
4877 FILE *file = asm_out_file;
4878
4879 rs6000_default_cpu = TARGET_CPU_DEFAULT;
4880
4881 default_file_start ();
4882
4883 if (flag_verbose_asm)
4884 {
4885 sprintf (buffer, "\n%s rs6000/powerpc options:", ASM_COMMENT_START);
4886
4887 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
4888 {
4889 fprintf (file, "%s --with-cpu=%s", start, rs6000_default_cpu);
4890 start = "";
4891 }
4892
4893 if (global_options_set.x_rs6000_cpu_index)
4894 {
4895 fprintf (file, "%s -mcpu=%s", start,
4896 processor_target_table[rs6000_cpu_index].name);
4897 start = "";
4898 }
4899
4900 if (global_options_set.x_rs6000_tune_index)
4901 {
4902 fprintf (file, "%s -mtune=%s", start,
4903 processor_target_table[rs6000_tune_index].name);
4904 start = "";
4905 }
4906
4907 if (PPC405_ERRATUM77)
4908 {
4909 fprintf (file, "%s PPC405CR_ERRATUM77", start);
4910 start = "";
4911 }
4912
4913 #ifdef USING_ELFOS_H
4914 switch (rs6000_sdata)
4915 {
4916 case SDATA_NONE: fprintf (file, "%s -msdata=none", start); start = ""; break;
4917 case SDATA_DATA: fprintf (file, "%s -msdata=data", start); start = ""; break;
4918 case SDATA_SYSV: fprintf (file, "%s -msdata=sysv", start); start = ""; break;
4919 case SDATA_EABI: fprintf (file, "%s -msdata=eabi", start); start = ""; break;
4920 }
4921
4922 if (rs6000_sdata && g_switch_value)
4923 {
4924 fprintf (file, "%s -G %d", start,
4925 g_switch_value);
4926 start = "";
4927 }
4928 #endif
4929
4930 if (*start == '\0')
4931 putc ('\n', file);
4932 }
4933
4934 if (DEFAULT_ABI == ABI_ELFv2)
4935 fprintf (file, "\t.abiversion 2\n");
4936
4937 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2
4938 || (TARGET_ELF && flag_pic == 2))
4939 {
4940 switch_to_section (toc_section);
4941 switch_to_section (text_section);
4942 }
4943 }
4944
4945 \f
4946 /* Return nonzero if this function is known to have a null epilogue. */
4947
4948 int
4949 direct_return (void)
4950 {
4951 if (reload_completed)
4952 {
4953 rs6000_stack_t *info = rs6000_stack_info ();
4954
4955 if (info->first_gp_reg_save == 32
4956 && info->first_fp_reg_save == 64
4957 && info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
4958 && ! info->lr_save_p
4959 && ! info->cr_save_p
4960 && info->vrsave_mask == 0
4961 && ! info->push_p)
4962 return 1;
4963 }
4964
4965 return 0;
4966 }
4967
4968 /* Return the number of instructions it takes to form a constant in an
4969 integer register. */
4970
4971 int
4972 num_insns_constant_wide (HOST_WIDE_INT value)
4973 {
4974 /* signed constant loadable with addi */
4975 if ((unsigned HOST_WIDE_INT) (value + 0x8000) < 0x10000)
4976 return 1;
4977
4978 /* constant loadable with addis */
4979 else if ((value & 0xffff) == 0
4980 && (value >> 31 == -1 || value >> 31 == 0))
4981 return 1;
4982
4983 else if (TARGET_POWERPC64)
4984 {
4985 HOST_WIDE_INT low = ((value & 0xffffffff) ^ 0x80000000) - 0x80000000;
4986 HOST_WIDE_INT high = value >> 31;
4987
4988 if (high == 0 || high == -1)
4989 return 2;
4990
4991 high >>= 1;
4992
4993 if (low == 0)
4994 return num_insns_constant_wide (high) + 1;
4995 else if (high == 0)
4996 return num_insns_constant_wide (low) + 1;
4997 else
4998 return (num_insns_constant_wide (high)
4999 + num_insns_constant_wide (low) + 1);
5000 }
5001
5002 else
5003 return 2;
5004 }
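/* For example (illustrative note, not from the original source): forming
   0x100000000 on a 64-bit target gives high == 2 before the shift, then
   high >>= 1 leaves 1 and low == 0, so the count is
   num_insns_constant_wide (1) + 1 == 2, the extra insn being the shift
   that moves the high half into place (e.g. li followed by sldi). */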
5005
5006 int
5007 num_insns_constant (rtx op, enum machine_mode mode)
5008 {
5009 HOST_WIDE_INT low, high;
5010
5011 switch (GET_CODE (op))
5012 {
5013 case CONST_INT:
5014 if ((INTVAL (op) >> 31) != 0 && (INTVAL (op) >> 31) != -1
5015 && mask64_operand (op, mode))
5016 return 2;
5017 else
5018 return num_insns_constant_wide (INTVAL (op));
5019
5020 case CONST_WIDE_INT:
5021 {
5022 int i;
5023 int ins = CONST_WIDE_INT_NUNITS (op) - 1;
5024 for (i = 0; i < CONST_WIDE_INT_NUNITS (op); i++)
5025 ins += num_insns_constant_wide (CONST_WIDE_INT_ELT (op, i));
5026 return ins;
5027 }
5028
5029 case CONST_DOUBLE:
5030 if (mode == SFmode || mode == SDmode)
5031 {
5032 long l;
5033 REAL_VALUE_TYPE rv;
5034
5035 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
5036 if (DECIMAL_FLOAT_MODE_P (mode))
5037 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
5038 else
5039 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
5040 return num_insns_constant_wide ((HOST_WIDE_INT) l);
5041 }
5042
5043 long l[2];
5044 REAL_VALUE_TYPE rv;
5045
5046 REAL_VALUE_FROM_CONST_DOUBLE (rv, op);
5047 if (DECIMAL_FLOAT_MODE_P (mode))
5048 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, l);
5049 else
5050 REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
5051 high = l[WORDS_BIG_ENDIAN == 0];
5052 low = l[WORDS_BIG_ENDIAN != 0];
5053
5054 if (TARGET_32BIT)
5055 return (num_insns_constant_wide (low)
5056 + num_insns_constant_wide (high));
5057 else
5058 {
5059 if ((high == 0 && low >= 0)
5060 || (high == -1 && low < 0))
5061 return num_insns_constant_wide (low);
5062
5063 else if (mask64_operand (op, mode))
5064 return 2;
5065
5066 else if (low == 0)
5067 return num_insns_constant_wide (high) + 1;
5068
5069 else
5070 return (num_insns_constant_wide (high)
5071 + num_insns_constant_wide (low) + 1);
5072 }
5073
5074 default:
5075 gcc_unreachable ();
5076 }
5077 }
5078
5079 /* Interpret element ELT of the CONST_VECTOR OP as an integer value.
5080 If the mode of OP is MODE_VECTOR_INT, this simply returns the
5081 corresponding element of the vector, but for V4SFmode and V2SFmode,
5082 the corresponding "float" is interpreted as an SImode integer. */
5083
5084 HOST_WIDE_INT
5085 const_vector_elt_as_int (rtx op, unsigned int elt)
5086 {
5087 rtx tmp;
5088
5089 /* We can't handle V2DImode and V2DFmode vector constants here yet. */
5090 gcc_assert (GET_MODE (op) != V2DImode
5091 && GET_MODE (op) != V2DFmode);
5092
5093 tmp = CONST_VECTOR_ELT (op, elt);
5094 if (GET_MODE (op) == V4SFmode
5095 || GET_MODE (op) == V2SFmode)
5096 tmp = gen_lowpart (SImode, tmp);
5097 return INTVAL (tmp);
5098 }
5099
5100 /* Return true if OP can be synthesized with a particular vspltisb, vspltish
5101 or vspltisw instruction. OP is a CONST_VECTOR. Which instruction is used
5102 depends on STEP and COPIES, one of which will be 1. If COPIES > 1,
5103 all items are set to the same value and contain COPIES replicas of the
5104 vsplt's operand; if STEP > 1, one in STEP elements is set to the vsplt's
5105 operand and the others are set to the value of the operand's msb. */
5106
5107 static bool
5108 vspltis_constant (rtx op, unsigned step, unsigned copies)
5109 {
5110 enum machine_mode mode = GET_MODE (op);
5111 enum machine_mode inner = GET_MODE_INNER (mode);
5112
5113 unsigned i;
5114 unsigned nunits;
5115 unsigned bitsize;
5116 unsigned mask;
5117
5118 HOST_WIDE_INT val;
5119 HOST_WIDE_INT splat_val;
5120 HOST_WIDE_INT msb_val;
5121
5122 if (mode == V2DImode || mode == V2DFmode || mode == V1TImode)
5123 return false;
5124
5125 nunits = GET_MODE_NUNITS (mode);
5126 bitsize = GET_MODE_BITSIZE (inner);
5127 mask = GET_MODE_MASK (inner);
5128
5129 val = const_vector_elt_as_int (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5130 splat_val = val;
5131 msb_val = val >= 0 ? 0 : -1;
5132
5133 /* Construct the value to be splatted, if possible. If not, return 0. */
5134 for (i = 2; i <= copies; i *= 2)
5135 {
5136 HOST_WIDE_INT small_val;
5137 bitsize /= 2;
5138 small_val = splat_val >> bitsize;
5139 mask >>= bitsize;
5140 if (splat_val != ((small_val << bitsize) | (small_val & mask)))
5141 return false;
5142 splat_val = small_val;
5143 }
5144
5145 /* Check if SPLAT_VAL can really be the operand of a vspltis[bhw]. */
5146 if (EASY_VECTOR_15 (splat_val))
5147 ;
5148
5149 /* Also check if we can splat, and then add the result to itself. Do so if
5150 the value is positive, or if the splat instruction is using OP's mode;
5151 for splat_val < 0, the splat and the add should use the same mode. */
5152 else if (EASY_VECTOR_15_ADD_SELF (splat_val)
5153 && (splat_val >= 0 || (step == 1 && copies == 1)))
5154 ;
5155
5156 /* Also check if we are loading up the most significant bit, which can be
5157 done by loading up -1 and shifting the value left by -1. */
5158 else if (EASY_VECTOR_MSB (splat_val, inner))
5159 ;
5160
5161 else
5162 return false;
5163
5164 /* Check if VAL is present in every STEP-th element, and the
5165 other elements are filled with its most significant bit. */
5166 for (i = 1; i < nunits; ++i)
5167 {
5168 HOST_WIDE_INT desired_val;
5169 unsigned elt = BYTES_BIG_ENDIAN ? nunits - 1 - i : i;
5170 if ((i & (step - 1)) == 0)
5171 desired_val = val;
5172 else
5173 desired_val = msb_val;
5174
5175 if (desired_val != const_vector_elt_as_int (op, elt))
5176 return false;
5177 }
5178
5179 return true;
5180 }
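/* Illustrative examples (reader's notes): a V4SImode constant whose every
   element is 0x00050005 matches with COPIES == 2, since each 32-bit
   element holds two copies of the 16-bit splat value and can be generated
   with vspltish 5.  A V16QImode constant equal to the result of
   vspltisw 5 matches with STEP == 4: every fourth byte is 5 and the
   intervening bytes hold the sign extension (0 here). */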
5181
5182
5183 /* Return true if OP is of the given MODE and can be synthesized
5184 with a vspltisb, vspltish or vspltisw. */
5185
5186 bool
5187 easy_altivec_constant (rtx op, enum machine_mode mode)
5188 {
5189 unsigned step, copies;
5190
5191 if (mode == VOIDmode)
5192 mode = GET_MODE (op);
5193 else if (mode != GET_MODE (op))
5194 return false;
5195
5196 /* V2DI/V2DF were added with VSX. Only allow 0 and all 1's as easy
5197 constants. */
5198 if (mode == V2DFmode)
5199 return zero_constant (op, mode);
5200
5201 else if (mode == V2DImode)
5202 {
5203 if (GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
5204 || GET_CODE (CONST_VECTOR_ELT (op, 1)) != CONST_INT)
5205 return false;
5206
5207 if (zero_constant (op, mode))
5208 return true;
5209
5210 if (INTVAL (CONST_VECTOR_ELT (op, 0)) == -1
5211 && INTVAL (CONST_VECTOR_ELT (op, 1)) == -1)
5212 return true;
5213
5214 return false;
5215 }
5216
5217 /* V1TImode is a special container for TImode. Ignore for now. */
5218 else if (mode == V1TImode)
5219 return false;
5220
5221 /* Start with a vspltisw. */
5222 step = GET_MODE_NUNITS (mode) / 4;
5223 copies = 1;
5224
5225 if (vspltis_constant (op, step, copies))
5226 return true;
5227
5228 /* Then try with a vspltish. */
5229 if (step == 1)
5230 copies <<= 1;
5231 else
5232 step >>= 1;
5233
5234 if (vspltis_constant (op, step, copies))
5235 return true;
5236
5237 /* And finally a vspltisb. */
5238 if (step == 1)
5239 copies <<= 1;
5240 else
5241 step >>= 1;
5242
5243 if (vspltis_constant (op, step, copies))
5244 return true;
5245
5246 return false;
5247 }
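/* Reader's summary: the three attempts above walk the same lattice for
   every mode, treating the constant first as 32-bit elements (vspltisw),
   then 16-bit (vspltish), then 8-bit (vspltisb).  For V16QImode this
   varies STEP (4, 2, 1); for V4SImode it varies COPIES (1, 2, 4)
   instead. */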
5248
5249 /* Generate a VEC_DUPLICATE representing a vspltis[bhw] instruction whose
5250 result is OP. Abort if it is not possible. */
5251
5252 rtx
5253 gen_easy_altivec_constant (rtx op)
5254 {
5255 enum machine_mode mode = GET_MODE (op);
5256 int nunits = GET_MODE_NUNITS (mode);
5257 rtx val = CONST_VECTOR_ELT (op, BYTES_BIG_ENDIAN ? nunits - 1 : 0);
5258 unsigned step = nunits / 4;
5259 unsigned copies = 1;
5260
5261 /* Start with a vspltisw. */
5262 if (vspltis_constant (op, step, copies))
5263 return gen_rtx_VEC_DUPLICATE (V4SImode, gen_lowpart (SImode, val));
5264
5265 /* Then try with a vspltish. */
5266 if (step == 1)
5267 copies <<= 1;
5268 else
5269 step >>= 1;
5270
5271 if (vspltis_constant (op, step, copies))
5272 return gen_rtx_VEC_DUPLICATE (V8HImode, gen_lowpart (HImode, val));
5273
5274 /* And finally a vspltisb. */
5275 if (step == 1)
5276 copies <<= 1;
5277 else
5278 step >>= 1;
5279
5280 if (vspltis_constant (op, step, copies))
5281 return gen_rtx_VEC_DUPLICATE (V16QImode, gen_lowpart (QImode, val));
5282
5283 gcc_unreachable ();
5284 }
5285
5286 const char *
5287 output_vec_const_move (rtx *operands)
5288 {
5289 int cst, cst2;
5290 enum machine_mode mode;
5291 rtx dest, vec;
5292
5293 dest = operands[0];
5294 vec = operands[1];
5295 mode = GET_MODE (dest);
5296
5297 if (TARGET_VSX)
5298 {
5299 if (zero_constant (vec, mode))
5300 return "xxlxor %x0,%x0,%x0";
5301
5302 if ((mode == V2DImode || mode == V1TImode)
5303 && INTVAL (CONST_VECTOR_ELT (vec, 0)) == -1
5304 && INTVAL (CONST_VECTOR_ELT (vec, 1)) == -1)
5305 return "vspltisw %0,-1";
5306 }
5307
5308 if (TARGET_ALTIVEC)
5309 {
5310 rtx splat_vec;
5311 if (zero_constant (vec, mode))
5312 return "vxor %0,%0,%0";
5313
5314 splat_vec = gen_easy_altivec_constant (vec);
5315 gcc_assert (GET_CODE (splat_vec) == VEC_DUPLICATE);
5316 operands[1] = XEXP (splat_vec, 0);
5317 if (!EASY_VECTOR_15 (INTVAL (operands[1])))
5318 return "#";
5319
5320 switch (GET_MODE (splat_vec))
5321 {
5322 case V4SImode:
5323 return "vspltisw %0,%1";
5324
5325 case V8HImode:
5326 return "vspltish %0,%1";
5327
5328 case V16QImode:
5329 return "vspltisb %0,%1";
5330
5331 default:
5332 gcc_unreachable ();
5333 }
5334 }
5335
5336 gcc_assert (TARGET_SPE);
5337
5338 /* Vector constant 0 is handled as a splitter of V2SI, and in the
5339 pattern of V1DI, V4HI, and V2SF.
5340
5341 FIXME: We should probably return # and add post reload
5342 splitters for these, but this way is so easy ;-). */
5343 cst = INTVAL (CONST_VECTOR_ELT (vec, 0));
5344 cst2 = INTVAL (CONST_VECTOR_ELT (vec, 1));
5345 operands[1] = CONST_VECTOR_ELT (vec, 0);
5346 operands[2] = CONST_VECTOR_ELT (vec, 1);
5347 if (cst == cst2)
5348 return "li %0,%1\n\tevmergelo %0,%0,%0";
5349 else if (WORDS_BIG_ENDIAN)
5350 return "li %0,%1\n\tevmergelo %0,%0,%0\n\tli %0,%2";
5351 else
5352 return "li %0,%2\n\tevmergelo %0,%0,%0\n\tli %0,%1";
5353 }
5354
5355 /* Initialize TARGET of vector PAIRED to VALS. */
5356
5357 void
5358 paired_expand_vector_init (rtx target, rtx vals)
5359 {
5360 enum machine_mode mode = GET_MODE (target);
5361 int n_elts = GET_MODE_NUNITS (mode);
5362 int n_var = 0;
5363 rtx x, new_rtx, tmp, constant_op, op1, op2;
5364 int i;
5365
5366 for (i = 0; i < n_elts; ++i)
5367 {
5368 x = XVECEXP (vals, 0, i);
5369 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
5370 ++n_var;
5371 }
5372 if (n_var == 0)
5373 {
5374 /* Load from constant pool. */
5375 emit_move_insn (target, gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
5376 return;
5377 }
5378
5379 if (n_var == 2)
5380 {
5381 /* The vector is initialized only with non-constants. */
5382 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, XVECEXP (vals, 0, 0),
5383 XVECEXP (vals, 0, 1));
5384
5385 emit_move_insn (target, new_rtx);
5386 return;
5387 }
5388
5389 /* One field is non-constant and the other one is a constant. Load the
5390 constant from the constant pool and use the ps_merge instruction to
5391 construct the whole vector. */
5392 op1 = XVECEXP (vals, 0, 0);
5393 op2 = XVECEXP (vals, 0, 1);
5394
5395 constant_op = (CONSTANT_P (op1)) ? op1 : op2;
5396
5397 tmp = gen_reg_rtx (GET_MODE (constant_op));
5398 emit_move_insn (tmp, constant_op);
5399
5400 if (CONSTANT_P (op1))
5401 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, tmp, op2);
5402 else
5403 new_rtx = gen_rtx_VEC_CONCAT (V2SFmode, op1, tmp);
5404
5405 emit_move_insn (target, new_rtx);
5406 }
5407
5408 void
5409 paired_expand_vector_move (rtx operands[])
5410 {
5411 rtx op0 = operands[0], op1 = operands[1];
5412
5413 emit_move_insn (op0, op1);
5414 }
5415
5416 /* Emit vector compare for code RCODE. DEST is destination, OP1 and
5417 OP2 are two VEC_COND_EXPR operands, CC_OP0 and CC_OP1 are the two
5418 operands for the relation operation COND. This is a recursive
5419 function. */
5420
5421 static void
5422 paired_emit_vector_compare (enum rtx_code rcode,
5423 rtx dest, rtx op0, rtx op1,
5424 rtx cc_op0, rtx cc_op1)
5425 {
5426 rtx tmp = gen_reg_rtx (V2SFmode);
5427 rtx tmp1, max, min;
5428
5429 gcc_assert (TARGET_PAIRED_FLOAT);
5430 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
5431
5432 switch (rcode)
5433 {
5434 case LT:
5435 case LTU:
5436 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
5437 return;
5438 case GE:
5439 case GEU:
5440 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
5441 emit_insn (gen_selv2sf4 (dest, tmp, op0, op1, CONST0_RTX (SFmode)));
5442 return;
5443 case LE:
5444 case LEU:
5445 paired_emit_vector_compare (GE, dest, op0, op1, cc_op1, cc_op0);
5446 return;
5447 case GT:
5448 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
5449 return;
5450 case EQ:
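/* Reader's note: equality is computed below via min/max, since
   min (a, b) - max (a, b) >= 0 holds exactly when a == b (ignoring
   NaNs), so the final selv2sf4 picks OP0 precisely on equality. */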
5451 tmp1 = gen_reg_rtx (V2SFmode);
5452 max = gen_reg_rtx (V2SFmode);
5453 min = gen_reg_rtx (V2SFmode);
5455
5456 emit_insn (gen_subv2sf3 (tmp, cc_op0, cc_op1));
5457 emit_insn (gen_selv2sf4
5458 (max, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
5459 emit_insn (gen_subv2sf3 (tmp, cc_op1, cc_op0));
5460 emit_insn (gen_selv2sf4
5461 (min, tmp, cc_op0, cc_op1, CONST0_RTX (SFmode)));
5462 emit_insn (gen_subv2sf3 (tmp1, min, max));
5463 emit_insn (gen_selv2sf4 (dest, tmp1, op0, op1, CONST0_RTX (SFmode)));
5464 return;
5465 case NE:
5466 paired_emit_vector_compare (EQ, dest, op1, op0, cc_op0, cc_op1);
5467 return;
5468 case UNLE:
5469 paired_emit_vector_compare (LE, dest, op1, op0, cc_op0, cc_op1);
5470 return;
5471 case UNLT:
5472 paired_emit_vector_compare (LT, dest, op1, op0, cc_op0, cc_op1);
5473 return;
5474 case UNGE:
5475 paired_emit_vector_compare (GE, dest, op1, op0, cc_op0, cc_op1);
5476 return;
5477 case UNGT:
5478 paired_emit_vector_compare (GT, dest, op1, op0, cc_op0, cc_op1);
5479 return;
5480 default:
5481 gcc_unreachable ();
5482 }
5483
5484 return;
5485 }
5486
5487 /* Emit vector conditional expression.
5488 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
5489 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
5490
5491 int
5492 paired_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
5493 rtx cond, rtx cc_op0, rtx cc_op1)
5494 {
5495 enum rtx_code rcode = GET_CODE (cond);
5496
5497 if (!TARGET_PAIRED_FLOAT)
5498 return 0;
5499
5500 paired_emit_vector_compare (rcode, dest, op1, op2, cc_op0, cc_op1);
5501
5502 return 1;
5503 }
5504
5505 /* Initialize vector TARGET to VALS. */
5506
5507 void
5508 rs6000_expand_vector_init (rtx target, rtx vals)
5509 {
5510 enum machine_mode mode = GET_MODE (target);
5511 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5512 int n_elts = GET_MODE_NUNITS (mode);
5513 int n_var = 0, one_var = -1;
5514 bool all_same = true, all_const_zero = true;
5515 rtx x, mem;
5516 int i;
5517
5518 for (i = 0; i < n_elts; ++i)
5519 {
5520 x = XVECEXP (vals, 0, i);
5521 if (!(CONST_SCALAR_INT_P (x) || CONST_DOUBLE_P (x) || CONST_FIXED_P (x)))
5522 ++n_var, one_var = i;
5523 else if (x != CONST0_RTX (inner_mode))
5524 all_const_zero = false;
5525
5526 if (i > 0 && !rtx_equal_p (x, XVECEXP (vals, 0, 0)))
5527 all_same = false;
5528 }
5529
5530 if (n_var == 0)
5531 {
5532 rtx const_vec = gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0));
5533 bool int_vector_p = (GET_MODE_CLASS (mode) == MODE_VECTOR_INT);
5534 if ((int_vector_p || TARGET_VSX) && all_const_zero)
5535 {
5536 /* Zero register. */
5537 emit_insn (gen_rtx_SET (VOIDmode, target,
5538 gen_rtx_XOR (mode, target, target)));
5539 return;
5540 }
5541 else if (int_vector_p && easy_vector_constant (const_vec, mode))
5542 {
5543 /* Splat immediate. */
5544 emit_insn (gen_rtx_SET (VOIDmode, target, const_vec));
5545 return;
5546 }
5547 else
5548 {
5549 /* Load from constant pool. */
5550 emit_move_insn (target, const_vec);
5551 return;
5552 }
5553 }
5554
5555 /* Double word values on VSX can use xxpermdi or lxvdsx. */
5556 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
5557 {
5558 rtx op0 = XVECEXP (vals, 0, 0);
5559 rtx op1 = XVECEXP (vals, 0, 1);
5560 if (all_same)
5561 {
5562 if (!MEM_P (op0) && !REG_P (op0))
5563 op0 = force_reg (inner_mode, op0);
5564 if (mode == V2DFmode)
5565 emit_insn (gen_vsx_splat_v2df (target, op0));
5566 else
5567 emit_insn (gen_vsx_splat_v2di (target, op0));
5568 }
5569 else
5570 {
5571 op0 = force_reg (inner_mode, op0);
5572 op1 = force_reg (inner_mode, op1);
5573 if (mode == V2DFmode)
5574 emit_insn (gen_vsx_concat_v2df (target, op0, op1));
5575 else
5576 emit_insn (gen_vsx_concat_v2di (target, op0, op1));
5577 }
5578 return;
5579 }
5580
5581 /* With single precision floating point on VSX, we know that internally
5582 single precision is actually represented as a double, so either make two
5583 V2DF vectors and convert these vectors to single precision, or do one
5584 conversion and splat the result to the other elements. */
5585 if (mode == V4SFmode && VECTOR_MEM_VSX_P (mode))
5586 {
5587 if (all_same)
5588 {
5589 rtx freg = gen_reg_rtx (V4SFmode);
5590 rtx sreg = force_reg (SFmode, XVECEXP (vals, 0, 0));
5591 rtx cvt = ((TARGET_XSCVDPSPN)
5592 ? gen_vsx_xscvdpspn_scalar (freg, sreg)
5593 : gen_vsx_xscvdpsp_scalar (freg, sreg));
5594
5595 emit_insn (cvt);
5596 emit_insn (gen_vsx_xxspltw_v4sf_direct (target, freg, const0_rtx));
5597 }
5598 else
5599 {
5600 rtx dbl_even = gen_reg_rtx (V2DFmode);
5601 rtx dbl_odd = gen_reg_rtx (V2DFmode);
5602 rtx flt_even = gen_reg_rtx (V4SFmode);
5603 rtx flt_odd = gen_reg_rtx (V4SFmode);
5604 rtx op0 = force_reg (SFmode, XVECEXP (vals, 0, 0));
5605 rtx op1 = force_reg (SFmode, XVECEXP (vals, 0, 1));
5606 rtx op2 = force_reg (SFmode, XVECEXP (vals, 0, 2));
5607 rtx op3 = force_reg (SFmode, XVECEXP (vals, 0, 3));
5608
5609 emit_insn (gen_vsx_concat_v2sf (dbl_even, op0, op1));
5610 emit_insn (gen_vsx_concat_v2sf (dbl_odd, op2, op3));
5611 emit_insn (gen_vsx_xvcvdpsp (flt_even, dbl_even));
5612 emit_insn (gen_vsx_xvcvdpsp (flt_odd, dbl_odd));
5613 rs6000_expand_extract_even (target, flt_even, flt_odd);
5614 }
5615 return;
5616 }
5617
5618 /* Store value to stack temp. Load vector element. Splat. However, splat
5619 of 64-bit items is not supported on Altivec. */
5620 if (all_same && GET_MODE_SIZE (inner_mode) <= 4)
5621 {
5622 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
5623 emit_move_insn (adjust_address_nv (mem, inner_mode, 0),
5624 XVECEXP (vals, 0, 0));
5625 x = gen_rtx_UNSPEC (VOIDmode,
5626 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
5627 emit_insn (gen_rtx_PARALLEL (VOIDmode,
5628 gen_rtvec (2,
5629 gen_rtx_SET (VOIDmode,
5630 target, mem),
5631 x)));
5632 x = gen_rtx_VEC_SELECT (inner_mode, target,
5633 gen_rtx_PARALLEL (VOIDmode,
5634 gen_rtvec (1, const0_rtx)));
5635 emit_insn (gen_rtx_SET (VOIDmode, target,
5636 gen_rtx_VEC_DUPLICATE (mode, x)));
5637 return;
5638 }
5639
5640 /* One field is non-constant. Load constant then overwrite
5641 varying field. */
5642 if (n_var == 1)
5643 {
5644 rtx copy = copy_rtx (vals);
5645
5646 /* Load constant part of vector, substitute neighboring value for
5647 varying element. */
5648 XVECEXP (copy, 0, one_var) = XVECEXP (vals, 0, (one_var + 1) % n_elts);
5649 rs6000_expand_vector_init (target, copy);
5650
5651 /* Insert variable. */
5652 rs6000_expand_vector_set (target, XVECEXP (vals, 0, one_var), one_var);
5653 return;
5654 }
5655
5656 /* Construct the vector in memory one field at a time
5657 and load the whole vector. */
5658 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
5659 for (i = 0; i < n_elts; i++)
5660 emit_move_insn (adjust_address_nv (mem, inner_mode,
5661 i * GET_MODE_SIZE (inner_mode)),
5662 XVECEXP (vals, 0, i));
5663 emit_move_insn (target, mem);
5664 }
5665
5666 /* Set field ELT of TARGET to VAL. */
5667
5668 void
5669 rs6000_expand_vector_set (rtx target, rtx val, int elt)
5670 {
5671 enum machine_mode mode = GET_MODE (target);
5672 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5673 rtx reg = gen_reg_rtx (mode);
5674 rtx mask, mem, x;
5675 int width = GET_MODE_SIZE (inner_mode);
5676 int i;
5677
5678 if (VECTOR_MEM_VSX_P (mode) && (mode == V2DFmode || mode == V2DImode))
5679 {
5680 rtx (*set_func) (rtx, rtx, rtx, rtx)
5681 = ((mode == V2DFmode) ? gen_vsx_set_v2df : gen_vsx_set_v2di);
5682 emit_insn (set_func (target, target, val, GEN_INT (elt)));
5683 return;
5684 }
5685
5686 /* Simplify setting single element vectors like V1TImode. */
5687 if (GET_MODE_SIZE (mode) == GET_MODE_SIZE (inner_mode) && elt == 0)
5688 {
5689 emit_move_insn (target, gen_lowpart (mode, val));
5690 return;
5691 }
5692
5693 /* Load single variable value. */
5694 mem = assign_stack_temp (mode, GET_MODE_SIZE (inner_mode));
5695 emit_move_insn (adjust_address_nv (mem, inner_mode, 0), val);
5696 x = gen_rtx_UNSPEC (VOIDmode,
5697 gen_rtvec (1, const0_rtx), UNSPEC_LVE);
5698 emit_insn (gen_rtx_PARALLEL (VOIDmode,
5699 gen_rtvec (2,
5700 gen_rtx_SET (VOIDmode,
5701 reg, mem),
5702 x)));
5703
5704 /* Linear sequence. */
5705 mask = gen_rtx_PARALLEL (V16QImode, rtvec_alloc (16));
5706 for (i = 0; i < 16; ++i)
5707 XVECEXP (mask, 0, i) = GEN_INT (i);
5708
5709 /* Set permute mask to insert element into target. */
5710 for (i = 0; i < width; ++i)
5711 XVECEXP (mask, 0, elt*width + i)
5712 = GEN_INT (i + 0x10);
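/* Reader's note: in a vperm control vector, byte indices 0-15 select
   from the first input (the original TARGET) and 16-31 select from the
   second (REG), so this mask passes TARGET through unchanged except for
   the WIDTH bytes of element ELT, which are taken from the first WIDTH
   bytes of REG.  This is the big-endian view; the little-endian path
   below inverts the selector and swaps the vperm inputs. */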
5713 x = gen_rtx_CONST_VECTOR (V16QImode, XVEC (mask, 0));
5714
5715 if (BYTES_BIG_ENDIAN)
5716 x = gen_rtx_UNSPEC (mode,
5717 gen_rtvec (3, target, reg,
5718 force_reg (V16QImode, x)),
5719 UNSPEC_VPERM);
5720 else
5721 {
5722 /* Invert selector. We prefer to generate VNAND on P8 so
5723 that future fusion opportunities can kick in, but must
5724 generate VNOR elsewhere. */
5725 rtx notx = gen_rtx_NOT (V16QImode, force_reg (V16QImode, x));
5726 rtx iorx = (TARGET_P8_VECTOR
5727 ? gen_rtx_IOR (V16QImode, notx, notx)
5728 : gen_rtx_AND (V16QImode, notx, notx));
5729 rtx tmp = gen_reg_rtx (V16QImode);
5730 emit_insn (gen_rtx_SET (VOIDmode, tmp, iorx));
5731
5732 /* Permute with operands reversed and adjusted selector. */
5733 x = gen_rtx_UNSPEC (mode, gen_rtvec (3, reg, target, tmp),
5734 UNSPEC_VPERM);
5735 }
5736
5737 emit_insn (gen_rtx_SET (VOIDmode, target, x));
5738 }
5739
5740 /* Extract field ELT from VEC into TARGET. */
5741
5742 void
5743 rs6000_expand_vector_extract (rtx target, rtx vec, int elt)
5744 {
5745 enum machine_mode mode = GET_MODE (vec);
5746 enum machine_mode inner_mode = GET_MODE_INNER (mode);
5747 rtx mem;
5748
5749 if (VECTOR_MEM_VSX_P (mode))
5750 {
5751 switch (mode)
5752 {
5753 default:
5754 break;
5755 case V1TImode:
5756 gcc_assert (elt == 0 && inner_mode == TImode);
5757 emit_move_insn (target, gen_lowpart (TImode, vec));
5758 break;
5759 case V2DFmode:
5760 emit_insn (gen_vsx_extract_v2df (target, vec, GEN_INT (elt)));
5761 return;
5762 case V2DImode:
5763 emit_insn (gen_vsx_extract_v2di (target, vec, GEN_INT (elt)));
5764 return;
5765 case V4SFmode:
5766 emit_insn (gen_vsx_extract_v4sf (target, vec, GEN_INT (elt)));
5767 return;
5768 }
5769 }
5770
5771 /* Allocate mode-sized buffer. */
5772 mem = assign_stack_temp (mode, GET_MODE_SIZE (mode));
5773
5774 emit_move_insn (mem, vec);
5775
5776 /* Add offset to field within buffer matching vector element. */
5777 mem = adjust_address_nv (mem, inner_mode, elt * GET_MODE_SIZE (inner_mode));
5778
5779 emit_move_insn (target, adjust_address_nv (mem, inner_mode, 0));
5780 }
5781
5782 /* Generates shifts and masks for a pair of rldicl or rldicr insns to
5783 implement ANDing by the mask IN. */
5784 void
5785 build_mask64_2_operands (rtx in, rtx *out)
5786 {
5787 unsigned HOST_WIDE_INT c, lsb, m1, m2;
5788 int shift;
5789
5790 gcc_assert (GET_CODE (in) == CONST_INT);
5791
5792 c = INTVAL (in);
5793 if (c & 1)
5794 {
5795 /* Assume c initially something like 0x00fff000000fffff. The idea
5796 is to rotate the word so that the middle ^^^^^^ group of zeros
5797 is at the MS end and can be cleared with an rldicl mask. We then
5798 rotate back and clear off the MS ^^ group of zeros with a
5799 second rldicl. */
5800 c = ~c; /* c == 0xff000ffffff00000 */
5801 lsb = c & -c; /* lsb == 0x0000000000100000 */
5802 m1 = -lsb; /* m1 == 0xfffffffffff00000 */
5803 c = ~c; /* c == 0x00fff000000fffff */
5804 c &= -lsb; /* c == 0x00fff00000000000 */
5805 lsb = c & -c; /* lsb == 0x0000100000000000 */
5806 c = ~c; /* c == 0xff000fffffffffff */
5807 c &= -lsb; /* c == 0xff00000000000000 */
5808 shift = 0;
5809 while ((lsb >>= 1) != 0)
5810 shift++; /* shift == 44 on exit from loop */
5811 m1 <<= 64 - shift; /* m1 == 0xffffff0000000000 */
5812 m1 = ~m1; /* m1 == 0x000000ffffffffff */
5813 m2 = ~c; /* m2 == 0x00ffffffffffffff */
5814 }
5815 else
5816 {
5817 /* Assume c initially something like 0xff000f0000000000. The idea
5818 is to rotate the word so that the ^^^ middle group of zeros
5819 is at the LS end and can be cleared with an rldicr mask. We then
5820 rotate back and clear off the LS group of ^^^^^^^^^^ zeros with
5821 a second rldicr. */
5822 lsb = c & -c; /* lsb == 0x0000010000000000 */
5823 m2 = -lsb; /* m2 == 0xffffff0000000000 */
5824 c = ~c; /* c == 0x00fff0ffffffffff */
5825 c &= -lsb; /* c == 0x00fff00000000000 */
5826 lsb = c & -c; /* lsb == 0x0000100000000000 */
5827 c = ~c; /* c == 0xff000fffffffffff */
5828 c &= -lsb; /* c == 0xff00000000000000 */
5829 shift = 0;
5830 while ((lsb >>= 1) != 0)
5831 shift++; /* shift == 44 on exit from loop */
5832 m1 = ~c; /* m1 == 0x00ffffffffffffff */
5833 m1 >>= shift; /* m1 == 0x0000000000000fff */
5834 m1 = ~m1; /* m1 == 0xfffffffffffff000 */
5835 }
5836
5837 /* Note that when we only have two 0->1 and 1->0 transitions, one of the
5838 masks will be all 1's. We are guaranteed more than one transition. */
5839 out[0] = GEN_INT (64 - shift);
5840 out[1] = GEN_INT (m1);
5841 out[2] = GEN_INT (shift);
5842 out[3] = GEN_INT (m2);
5843 }
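/* Reader's summary: OUT[0] and OUT[1] are the rotate count and mask for
   the first rldicl/rldicr, OUT[2] and OUT[3] for the second.  Rotating
   by OUT[0], ANDing with OUT[1], rotating back by OUT[2] and ANDing
   with OUT[3] is equivalent to ANDing with IN. */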
5844
5845 /* Return TRUE if OP is an invalid SUBREG operation on the e500. */
5846
5847 bool
5848 invalid_e500_subreg (rtx op, enum machine_mode mode)
5849 {
5850 if (TARGET_E500_DOUBLE)
5851 {
5852 /* Reject (subreg:SI (reg:DF)); likewise with subreg:DI or
5853 subreg:TI and reg:TF. Decimal float modes are like integer
5854 modes (only low part of each register used) for this
5855 purpose. */
5856 if (GET_CODE (op) == SUBREG
5857 && (mode == SImode || mode == DImode || mode == TImode
5858 || mode == DDmode || mode == TDmode || mode == PTImode)
5859 && REG_P (SUBREG_REG (op))
5860 && (GET_MODE (SUBREG_REG (op)) == DFmode
5861 || GET_MODE (SUBREG_REG (op)) == TFmode))
5862 return true;
5863
5864 /* Reject (subreg:DF (reg:DI)); likewise with subreg:TF and
5865 reg:TI. */
5866 if (GET_CODE (op) == SUBREG
5867 && (mode == DFmode || mode == TFmode)
5868 && REG_P (SUBREG_REG (op))
5869 && (GET_MODE (SUBREG_REG (op)) == DImode
5870 || GET_MODE (SUBREG_REG (op)) == TImode
5871 || GET_MODE (SUBREG_REG (op)) == PTImode
5872 || GET_MODE (SUBREG_REG (op)) == DDmode
5873 || GET_MODE (SUBREG_REG (op)) == TDmode))
5874 return true;
5875 }
5876
5877 if (TARGET_SPE
5878 && GET_CODE (op) == SUBREG
5879 && mode == SImode
5880 && REG_P (SUBREG_REG (op))
5881 && SPE_VECTOR_MODE (GET_MODE (SUBREG_REG (op))))
5882 return true;
5883
5884 return false;
5885 }
5886
5887 /* Return alignment of TYPE. Existing alignment is ALIGN. HOW
5888 selects whether the alignment is ABI-mandated, optional, or
5889 both ABI-mandated and optional alignment. */
5890
5891 unsigned int
5892 rs6000_data_alignment (tree type, unsigned int align, enum data_align how)
5893 {
5894 if (how != align_opt)
5895 {
5896 if (TREE_CODE (type) == VECTOR_TYPE)
5897 {
5898 if ((TARGET_SPE && SPE_VECTOR_MODE (TYPE_MODE (type)))
5899 || (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (TYPE_MODE (type))))
5900 {
5901 if (align < 64)
5902 align = 64;
5903 }
5904 else if (align < 128)
5905 align = 128;
5906 }
5907 else if (TARGET_E500_DOUBLE
5908 && TREE_CODE (type) == REAL_TYPE
5909 && TYPE_MODE (type) == DFmode)
5910 {
5911 if (align < 64)
5912 align = 64;
5913 }
5914 }
5915
5916 if (how != align_abi)
5917 {
5918 if (TREE_CODE (type) == ARRAY_TYPE
5919 && TYPE_MODE (TREE_TYPE (type)) == QImode)
5920 {
5921 if (align < BITS_PER_WORD)
5922 align = BITS_PER_WORD;
5923 }
5924 }
5925
5926 return align;
5927 }
5928
5929 /* Previous GCC releases forced all vector types to have 16-byte alignment. */
5930
5931 bool
5932 rs6000_special_adjust_field_align_p (tree field, unsigned int computed)
5933 {
5934 if (TARGET_ALTIVEC && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
5935 {
5936 if (computed != 128)
5937 {
5938 static bool warned;
5939 if (!warned && warn_psabi)
5940 {
5941 warned = true;
5942 inform (input_location,
5943 "the layout of aggregates containing vectors with"
5944 " %d-byte alignment has changed in GCC 4.10",
5945 computed / BITS_PER_UNIT);
5946 }
5947 }
5948 /* In current GCC there is no special case. */
5949 return false;
5950 }
5951
5952 return false;
5953 }
5954
5955 /* AIX increases natural record alignment to doubleword if the first
5956 field is an FP double while the FP fields remain word aligned. */
5957
5958 unsigned int
5959 rs6000_special_round_type_align (tree type, unsigned int computed,
5960 unsigned int specified)
5961 {
5962 unsigned int align = MAX (computed, specified);
5963 tree field = TYPE_FIELDS (type);
5964
5965 /* Skip all non field decls */
5966 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
5967 field = DECL_CHAIN (field);
5968
5969 if (field != NULL && field != type)
5970 {
5971 type = TREE_TYPE (field);
5972 while (TREE_CODE (type) == ARRAY_TYPE)
5973 type = TREE_TYPE (type);
5974
5975 if (type != error_mark_node && TYPE_MODE (type) == DFmode)
5976 align = MAX (align, 64);
5977 }
5978
5979 return align;
5980 }
5981
5982 /* Darwin increases record alignment to the natural alignment of
5983 the first field. */
5984
5985 unsigned int
5986 darwin_rs6000_special_round_type_align (tree type, unsigned int computed,
5987 unsigned int specified)
5988 {
5989 unsigned int align = MAX (computed, specified);
5990
5991 if (TYPE_PACKED (type))
5992 return align;
5993
5994 /* Find the first field, looking down into aggregates. */
5995 do {
5996 tree field = TYPE_FIELDS (type);
5997 /* Skip all non field decls */
5998 while (field != NULL && TREE_CODE (field) != FIELD_DECL)
5999 field = DECL_CHAIN (field);
6000 if (! field)
6001 break;
6002 /* A packed field does not contribute any extra alignment. */
6003 if (DECL_PACKED (field))
6004 return align;
6005 type = TREE_TYPE (field);
6006 while (TREE_CODE (type) == ARRAY_TYPE)
6007 type = TREE_TYPE (type);
6008 } while (AGGREGATE_TYPE_P (type));
6009
6010 if (! AGGREGATE_TYPE_P (type) && type != error_mark_node)
6011 align = MAX (align, TYPE_ALIGN (type));
6012
6013 return align;
6014 }
6015
6016 /* Return 1 for an operand in small memory on V.4/eabi. */
6017
6018 int
6019 small_data_operand (rtx op ATTRIBUTE_UNUSED,
6020 enum machine_mode mode ATTRIBUTE_UNUSED)
6021 {
6022 #if TARGET_ELF
6023 rtx sym_ref;
6024
6025 if (rs6000_sdata == SDATA_NONE || rs6000_sdata == SDATA_DATA)
6026 return 0;
6027
6028 if (DEFAULT_ABI != ABI_V4)
6029 return 0;
6030
6031 /* Vector and float memory instructions have a limited offset on the
6032 SPE, so using a vector or float variable directly as an operand is
6033 not useful. */
6034 if (TARGET_SPE
6035 && (SPE_VECTOR_MODE (mode) || FLOAT_MODE_P (mode)))
6036 return 0;
6037
6038 if (GET_CODE (op) == SYMBOL_REF)
6039 sym_ref = op;
6040
6041 else if (GET_CODE (op) != CONST
6042 || GET_CODE (XEXP (op, 0)) != PLUS
6043 || GET_CODE (XEXP (XEXP (op, 0), 0)) != SYMBOL_REF
6044 || GET_CODE (XEXP (XEXP (op, 0), 1)) != CONST_INT)
6045 return 0;
6046
6047 else
6048 {
6049 rtx sum = XEXP (op, 0);
6050 HOST_WIDE_INT summand;
6051
6052 /* We have to be careful here, because it is the referenced address
6053 that must be 32k from _SDA_BASE_, not just the symbol. */
6054 summand = INTVAL (XEXP (sum, 1));
6055 if (summand < 0 || summand > g_switch_value)
6056 return 0;
6057
6058 sym_ref = XEXP (sum, 0);
6059 }
6060
6061 return SYMBOL_REF_SMALL_P (sym_ref);
6062 #else
6063 return 0;
6064 #endif
6065 }
6066
6067 /* Return true if either operand is a general purpose register. */
6068
6069 bool
6070 gpr_or_gpr_p (rtx op0, rtx op1)
6071 {
6072 return ((REG_P (op0) && INT_REGNO_P (REGNO (op0)))
6073 || (REG_P (op1) && INT_REGNO_P (REGNO (op1))));
6074 }
6075
6076 /* Return true if this is a move direct operation between GPR registers and
6077 floating point/VSX registers. */
6078
6079 bool
6080 direct_move_p (rtx op0, rtx op1)
6081 {
6082 int regno0, regno1;
6083
6084 if (!REG_P (op0) || !REG_P (op1))
6085 return false;
6086
6087 if (!TARGET_DIRECT_MOVE && !TARGET_MFPGPR)
6088 return false;
6089
6090 regno0 = REGNO (op0);
6091 regno1 = REGNO (op1);
6092 if (regno0 >= FIRST_PSEUDO_REGISTER || regno1 >= FIRST_PSEUDO_REGISTER)
6093 return false;
6094
6095 if (INT_REGNO_P (regno0))
6096 return (TARGET_DIRECT_MOVE) ? VSX_REGNO_P (regno1) : FP_REGNO_P (regno1);
6097
6098 else if (INT_REGNO_P (regno1))
6099 {
6100 if (TARGET_MFPGPR && FP_REGNO_P (regno0))
6101 return true;
6102
6103 else if (TARGET_DIRECT_MOVE && VSX_REGNO_P (regno0))
6104 return true;
6105 }
6106
6107 return false;
6108 }
6109
6110 /* Return true if this is a load or store quad operation. This function does
6111 not handle the atomic quad memory instructions. */
6112
6113 bool
6114 quad_load_store_p (rtx op0, rtx op1)
6115 {
6116 bool ret;
6117
6118 if (!TARGET_QUAD_MEMORY)
6119 ret = false;
6120
6121 else if (REG_P (op0) && MEM_P (op1))
6122 ret = (quad_int_reg_operand (op0, GET_MODE (op0))
6123 && quad_memory_operand (op1, GET_MODE (op1))
6124 && !reg_overlap_mentioned_p (op0, op1));
6125
6126 else if (MEM_P (op0) && REG_P (op1))
6127 ret = (quad_memory_operand (op0, GET_MODE (op0))
6128 && quad_int_reg_operand (op1, GET_MODE (op1)));
6129
6130 else
6131 ret = false;
6132
6133 if (TARGET_DEBUG_ADDR)
6134 {
6135 fprintf (stderr, "\n========== quad_load_store, return %s\n",
6136 ret ? "true" : "false");
6137 debug_rtx (gen_rtx_SET (VOIDmode, op0, op1));
6138 }
6139
6140 return ret;
6141 }
6142
6143 /* Given an address, return a constant offset term if one exists. */
6144
6145 static rtx
6146 address_offset (rtx op)
6147 {
6148 if (GET_CODE (op) == PRE_INC
6149 || GET_CODE (op) == PRE_DEC)
6150 op = XEXP (op, 0);
6151 else if (GET_CODE (op) == PRE_MODIFY
6152 || GET_CODE (op) == LO_SUM)
6153 op = XEXP (op, 1);
6154
6155 if (GET_CODE (op) == CONST)
6156 op = XEXP (op, 0);
6157
6158 if (GET_CODE (op) == PLUS)
6159 op = XEXP (op, 1);
6160
6161 if (CONST_INT_P (op))
6162 return op;
6163
6164 return NULL_RTX;
6165 }
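/* For example: address_offset of (plus (reg) (const_int 16)) is
   (const_int 16), address_offset of (lo_sum (reg) (const (plus
   (symbol_ref) (const_int 8)))) is (const_int 8), and a plain (reg)
   yields NULL_RTX. */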
6166
6167 /* Return true if the MEM operand is a memory operand suitable for use
6168 with a (full width, possibly multiple) gpr load/store. On
6169 powerpc64 this means the offset must be divisible by 4.
6170 Implements 'Y' constraint.
6171
6172 Accept direct, indexed, offset, lo_sum and tocref. Since this is
6173 a constraint function we know the operand has satisfied a suitable
6174 memory predicate. Also accept some odd rtl generated by reload
6175 (see rs6000_legitimize_reload_address for various forms). It is
6176 important that reload rtl be accepted by appropriate constraints
6177 but not by the operand predicate.
6178
6179 Offsetting a lo_sum should not be allowed, except where we know by
6180 alignment that a 32k boundary is not crossed, but see the ???
6181 comment in rs6000_legitimize_reload_address. Note that by
6182 "offsetting" here we mean a further offset to access parts of the
6183 MEM. It's fine to have a lo_sum where the inner address is offset
6184 from a sym, since the same sym+offset will appear in the high part
6185 of the address calculation. */
6186
6187 bool
6188 mem_operand_gpr (rtx op, enum machine_mode mode)
6189 {
6190 unsigned HOST_WIDE_INT offset;
6191 int extra;
6192 rtx addr = XEXP (op, 0);
6193
6194 op = address_offset (addr);
6195 if (op == NULL_RTX)
6196 return true;
6197
6198 offset = INTVAL (op);
6199 if (TARGET_POWERPC64 && (offset & 3) != 0)
6200 return false;
6201
6202 extra = GET_MODE_SIZE (mode) - UNITS_PER_WORD;
6203 if (extra < 0)
6204 extra = 0;
6205
6206 if (GET_CODE (addr) == LO_SUM)
6207 /* For lo_sum addresses, we must allow any offset except one that
6208 causes a wrap, so test only the low 16 bits. */
6209 offset = ((offset & 0xffff) ^ 0x8000) - 0x8000;
6210
6211 return offset + 0x8000 < 0x10000u - extra;
6212 }
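/* For example: a DImode access with 32-bit gprs is done as two word
   loads/stores at OFFSET and OFFSET + 4, so EXTRA == 4 and the test
   above accepts offsets up to 0x7ffb; the second access at 0x7fff still
   fits in a 16-bit displacement. */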
6213 \f
6214 /* Subroutines of rs6000_legitimize_address and rs6000_legitimate_address_p. */
6215
6216 static bool
6217 reg_offset_addressing_ok_p (enum machine_mode mode)
6218 {
6219 switch (mode)
6220 {
6221 case V16QImode:
6222 case V8HImode:
6223 case V4SFmode:
6224 case V4SImode:
6225 case V2DFmode:
6226 case V2DImode:
6227 case V1TImode:
6228 case TImode:
6229 /* AltiVec/VSX vector modes. Only reg+reg addressing is valid. While
6230 TImode is not a vector mode, if we want to use the VSX registers to
6231 move it around, we need to restrict ourselves to reg+reg
6232 addressing. */
6233 if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
6234 return false;
6235 break;
6236
6237 case V4HImode:
6238 case V2SImode:
6239 case V1DImode:
6240 case V2SFmode:
6241 /* Paired vector modes. Only reg+reg addressing is valid. */
6242 if (TARGET_PAIRED_FLOAT)
6243 return false;
6244 break;
6245
6246 case SDmode:
6247 /* If we can do direct load/stores of SDmode, restrict it to reg+reg
6248 addressing for the LFIWZX and STFIWX instructions. */
6249 if (TARGET_NO_SDMODE_STACK)
6250 return false;
6251 break;
6252
6253 default:
6254 break;
6255 }
6256
6257 return true;
6258 }
6259
6260 static bool
6261 virtual_stack_registers_memory_p (rtx op)
6262 {
6263 int regnum;
6264
6265 if (GET_CODE (op) == REG)
6266 regnum = REGNO (op);
6267
6268 else if (GET_CODE (op) == PLUS
6269 && GET_CODE (XEXP (op, 0)) == REG
6270 && GET_CODE (XEXP (op, 1)) == CONST_INT)
6271 regnum = REGNO (XEXP (op, 0));
6272
6273 else
6274 return false;
6275
6276 return (regnum >= FIRST_VIRTUAL_REGISTER
6277 && regnum <= LAST_VIRTUAL_POINTER_REGISTER);
6278 }
6279
6280 /* Return true if a MODE sized memory accesses to OP plus OFFSET
6281 is known to not straddle a 32k boundary. */
6282
6283 static bool
6284 offsettable_ok_by_alignment (rtx op, HOST_WIDE_INT offset,
6285 enum machine_mode mode)
6286 {
6287 tree decl, type;
6288 unsigned HOST_WIDE_INT dsize, dalign, lsb, mask;
6289
6290 if (GET_CODE (op) != SYMBOL_REF)
6291 return false;
6292
6293 dsize = GET_MODE_SIZE (mode);
6294 decl = SYMBOL_REF_DECL (op);
6295 if (!decl)
6296 {
6297 if (dsize == 0)
6298 return false;
6299
6300 /* -fsection-anchors loses the original SYMBOL_REF_DECL when
6301 replacing memory addresses with an anchor plus offset. We
6302 could find the decl by rummaging around in the block->objects
6303 VEC for the given offset but that seems like too much work. */
6304 dalign = BITS_PER_UNIT;
6305 if (SYMBOL_REF_HAS_BLOCK_INFO_P (op)
6306 && SYMBOL_REF_ANCHOR_P (op)
6307 && SYMBOL_REF_BLOCK (op) != NULL)
6308 {
6309 struct object_block *block = SYMBOL_REF_BLOCK (op);
6310
6311 dalign = block->alignment;
6312 offset += SYMBOL_REF_BLOCK_OFFSET (op);
6313 }
6314 else if (CONSTANT_POOL_ADDRESS_P (op))
6315 {
6316 /* It would be nice to have get_pool_align().. */
6317 enum machine_mode cmode = get_pool_mode (op);
6318
6319 dalign = GET_MODE_ALIGNMENT (cmode);
6320 }
6321 }
6322 else if (DECL_P (decl))
6323 {
6324 dalign = DECL_ALIGN (decl);
6325
6326 if (dsize == 0)
6327 {
6328 /* Allow BLKmode when the entire object is known to not
6329 cross a 32k boundary. */
6330 if (!DECL_SIZE_UNIT (decl))
6331 return false;
6332
6333 if (!tree_fits_uhwi_p (DECL_SIZE_UNIT (decl)))
6334 return false;
6335
6336 dsize = tree_to_uhwi (DECL_SIZE_UNIT (decl));
6337 if (dsize > 32768)
6338 return false;
6339
6340 return dalign / BITS_PER_UNIT >= dsize;
6341 }
6342 }
6343 else
6344 {
6345 type = TREE_TYPE (decl);
6346
6347 dalign = TYPE_ALIGN (type);
6348 if (CONSTANT_CLASS_P (decl))
6349 dalign = CONSTANT_ALIGNMENT (decl, dalign);
6350 else
6351 dalign = DATA_ALIGNMENT (decl, dalign);
6352
6353 if (dsize == 0)
6354 {
6355 /* BLKmode, check the entire object. */
6356 if (TREE_CODE (decl) == STRING_CST)
6357 dsize = TREE_STRING_LENGTH (decl);
6358 else if (TYPE_SIZE_UNIT (type)
6359 && tree_fits_uhwi_p (TYPE_SIZE_UNIT (type)))
6360 dsize = tree_to_uhwi (TYPE_SIZE_UNIT (type));
6361 else
6362 return false;
6363 if (dsize > 32768)
6364 return false;
6365
6366 return dalign / BITS_PER_UNIT >= dsize;
6367 }
6368 }
6369
6370 /* Find how many bits of the alignment we know for this access. */
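/* For example: with DALIGN == 128 (a 16-byte aligned decl) and
   OFFSET == 8, MASK starts as 15, LSB == 8, and MASK &= LSB - 1 leaves
   7, so DALIGN becomes 8: the access is known to be 8-byte aligned,
   which suffices for any DSIZE <= 8. */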
6371 mask = dalign / BITS_PER_UNIT - 1;
6372 lsb = offset & -offset;
6373 mask &= lsb - 1;
6374 dalign = mask + 1;
6375
6376 return dalign >= dsize;
6377 }
6378
6379 static bool
6380 constant_pool_expr_p (rtx op)
6381 {
6382 rtx base, offset;
6383
6384 split_const (op, &base, &offset);
6385 return (GET_CODE (base) == SYMBOL_REF
6386 && CONSTANT_POOL_ADDRESS_P (base)
6387 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (base), Pmode));
6388 }
6389
6390 static const_rtx tocrel_base, tocrel_offset;
6391
6392 /* Return true if OP is a toc pointer relative address (the output
6393 of create_TOC_reference). If STRICT, do not match high part or
6394 non-split -mcmodel=large/medium toc pointer relative addresses. */
6395
6396 bool
6397 toc_relative_expr_p (const_rtx op, bool strict)
6398 {
6399 if (!TARGET_TOC)
6400 return false;
6401
6402 if (TARGET_CMODEL != CMODEL_SMALL)
6403 {
6404 /* Only match the low part. */
6405 if (GET_CODE (op) == LO_SUM
6406 && REG_P (XEXP (op, 0))
6407 && INT_REG_OK_FOR_BASE_P (XEXP (op, 0), strict))
6408 op = XEXP (op, 1);
6409 else if (strict)
6410 return false;
6411 }
6412
6413 tocrel_base = op;
6414 tocrel_offset = const0_rtx;
6415 if (GET_CODE (op) == PLUS && add_cint_operand (XEXP (op, 1), GET_MODE (op)))
6416 {
6417 tocrel_base = XEXP (op, 0);
6418 tocrel_offset = XEXP (op, 1);
6419 }
6420
6421 return (GET_CODE (tocrel_base) == UNSPEC
6422 && XINT (tocrel_base, 1) == UNSPEC_TOCREL);
6423 }
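/* So, for instance, (plus (unspec [(symbol_ref) (reg 2)] UNSPEC_TOCREL)
   (const_int 8)) matches, leaving tocrel_base as the unspec and
   tocrel_offset as (const_int 8) for legitimate_constant_pool_address_p
   below. */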
6424
6425 /* Return true if X is a constant pool address, and also for cmodel=medium
6426 if X is a toc-relative address known to be offsettable within MODE. */
6427
6428 bool
6429 legitimate_constant_pool_address_p (const_rtx x, enum machine_mode mode,
6430 bool strict)
6431 {
6432 return (toc_relative_expr_p (x, strict)
6433 && (TARGET_CMODEL != CMODEL_MEDIUM
6434 || constant_pool_expr_p (XVECEXP (tocrel_base, 0, 0))
6435 || mode == QImode
6436 || offsettable_ok_by_alignment (XVECEXP (tocrel_base, 0, 0),
6437 INTVAL (tocrel_offset), mode)));
6438 }
6439
6440 static bool
6441 legitimate_small_data_p (enum machine_mode mode, rtx x)
6442 {
6443 return (DEFAULT_ABI == ABI_V4
6444 && !flag_pic && !TARGET_TOC
6445 && (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST)
6446 && small_data_operand (x, mode));
6447 }
6448
6449 /* SPE offset addressing is limited to 5 bits' worth of double words. */
6450 #define SPE_CONST_OFFSET_OK(x) (((x) & ~0xf8) == 0)
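/* That is, the offset must be a multiple of 8 in the range 0..248,
   which is what the SPE evldd/evstdd style encodings allow. */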
6451
6452 bool
6453 rs6000_legitimate_offset_address_p (enum machine_mode mode, rtx x,
6454 bool strict, bool worst_case)
6455 {
6456 unsigned HOST_WIDE_INT offset;
6457 unsigned int extra;
6458
6459 if (GET_CODE (x) != PLUS)
6460 return false;
6461 if (!REG_P (XEXP (x, 0)))
6462 return false;
6463 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
6464 return false;
6465 if (!reg_offset_addressing_ok_p (mode))
6466 return virtual_stack_registers_memory_p (x);
6467 if (legitimate_constant_pool_address_p (x, mode, strict || lra_in_progress))
6468 return true;
6469 if (GET_CODE (XEXP (x, 1)) != CONST_INT)
6470 return false;
6471
6472 offset = INTVAL (XEXP (x, 1));
6473 extra = 0;
6474 switch (mode)
6475 {
6476 case V4HImode:
6477 case V2SImode:
6478 case V1DImode:
6479 case V2SFmode:
6480 /* SPE vector modes. */
6481 return SPE_CONST_OFFSET_OK (offset);
6482
6483 case DFmode:
6484 case DDmode:
6485 case DImode:
6486 /* On e500v2, we may have:
6487
6488 (subreg:DF (mem:DI (plus (reg) (const_int))) 0).
6489
6490 Which gets addressed with evldd instructions. */
6491 if (TARGET_E500_DOUBLE)
6492 return SPE_CONST_OFFSET_OK (offset);
6493
6494 /* If we are using VSX scalar loads, restrict ourselves to reg+reg
6495 addressing. */
6496 if (VECTOR_MEM_VSX_P (mode))
6497 return false;
6498
6499 if (!worst_case)
6500 break;
6501 if (!TARGET_POWERPC64)
6502 extra = 4;
6503 else if (offset & 3)
6504 return false;
6505 break;
6506
6507 case TFmode:
6508 if (TARGET_E500_DOUBLE)
6509 return (SPE_CONST_OFFSET_OK (offset)
6510 && SPE_CONST_OFFSET_OK (offset + 8));
6511 /* fall through */
6512
6513 case TDmode:
6514 case TImode:
6515 case PTImode:
6516 extra = 8;
6517 if (!worst_case)
6518 break;
6519 if (!TARGET_POWERPC64)
6520 extra = 12;
6521 else if (offset & 3)
6522 return false;
6523 break;
6524
6525 default:
6526 break;
6527 }
6528
6529 offset += 0x8000;
6530 return offset < 0x10000 - extra;
6531 }
6532
6533 bool
6534 legitimate_indexed_address_p (rtx x, int strict)
6535 {
6536 rtx op0, op1;
6537
6538 if (GET_CODE (x) != PLUS)
6539 return false;
6540
6541 op0 = XEXP (x, 0);
6542 op1 = XEXP (x, 1);
6543
6544 /* Recognize the rtl generated by reload which we know will later be
6545 replaced with proper base and index regs. */
6546 if (!strict
6547 && reload_in_progress
6548 && (REG_P (op0) || GET_CODE (op0) == PLUS)
6549 && REG_P (op1))
6550 return true;
6551
6552 return (REG_P (op0) && REG_P (op1)
6553 && ((INT_REG_OK_FOR_BASE_P (op0, strict)
6554 && INT_REG_OK_FOR_INDEX_P (op1, strict))
6555 || (INT_REG_OK_FOR_BASE_P (op1, strict)
6556 && INT_REG_OK_FOR_INDEX_P (op0, strict))));
6557 }
6558
6559 bool
6560 avoiding_indexed_address_p (enum machine_mode mode)
6561 {
6562 /* Avoid indexed addressing for modes that have non-indexed
6563 load/store instruction forms. */
6564 return (TARGET_AVOID_XFORM && VECTOR_MEM_NONE_P (mode));
6565 }
6566
6567 bool
6568 legitimate_indirect_address_p (rtx x, int strict)
6569 {
6570 return GET_CODE (x) == REG && INT_REG_OK_FOR_BASE_P (x, strict);
6571 }
6572
6573 bool
6574 macho_lo_sum_memory_operand (rtx x, enum machine_mode mode)
6575 {
6576 if (!TARGET_MACHO || !flag_pic
6577 || mode != SImode || GET_CODE (x) != MEM)
6578 return false;
6579 x = XEXP (x, 0);
6580
6581 if (GET_CODE (x) != LO_SUM)
6582 return false;
6583 if (GET_CODE (XEXP (x, 0)) != REG)
6584 return false;
6585 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 0))
6586 return false;
6587 x = XEXP (x, 1);
6588
6589 return CONSTANT_P (x);
6590 }
6591
6592 static bool
6593 legitimate_lo_sum_address_p (enum machine_mode mode, rtx x, int strict)
6594 {
6595 if (GET_CODE (x) != LO_SUM)
6596 return false;
6597 if (GET_CODE (XEXP (x, 0)) != REG)
6598 return false;
6599 if (!INT_REG_OK_FOR_BASE_P (XEXP (x, 0), strict))
6600 return false;
6601 /* Restrict addressing for DI because of our SUBREG hackery. */
6602 if (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
6603 return false;
6604 x = XEXP (x, 1);
6605
6606 if (TARGET_ELF || TARGET_MACHO)
6607 {
6608 bool large_toc_ok;
6609
6610 if (DEFAULT_ABI == ABI_V4 && flag_pic)
6611 return false;
6612 /* LRA doesn't use LEGITIMIZE_RELOAD_ADDRESS, as that hook usually
6613 calls push_reload from the reload pass code. LEGITIMIZE_RELOAD_ADDRESS
6614 recognizes some LO_SUM addresses as valid although this
6615 function says the opposite. In most cases LRA can generate correct
6616 code for address reloads through its own transformations; it just
6617 cannot manage some LO_SUM cases. So we need to add code here,
6618 analogous to that in rs6000_legitimize_reload_address for
6619 LO_SUM, saying that some addresses are still valid. */
6620 large_toc_ok = (lra_in_progress && TARGET_CMODEL != CMODEL_SMALL
6621 && small_toc_ref (x, VOIDmode));
6622 if (TARGET_TOC && ! large_toc_ok)
6623 return false;
6624 if (GET_MODE_NUNITS (mode) != 1)
6625 return false;
6626 if (GET_MODE_SIZE (mode) > UNITS_PER_WORD
6627 && !(/* ??? Assume floating point reg based on mode? */
6628 TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT
6629 && (mode == DFmode || mode == DDmode)))
6630 return false;
6631
6632 return CONSTANT_P (x) || large_toc_ok;
6633 }
6634
6635 return false;
6636 }
6637
6638
6639 /* Try machine-dependent ways of modifying an illegitimate address
6640 to be legitimate. If we find one, return the new, valid address.
6641 This is used from only one place: `memory_address' in explow.c.
6642
6643 OLDX is the address as it was before break_out_memory_refs was
6644 called. In some cases it is useful to look at this to decide what
6645 needs to be done.
6646
6647 It is always safe for this function to do nothing. It exists to
6648 recognize opportunities to optimize the output.
6649
6650 On RS/6000, first check for the sum of a register with a constant
6651 integer that is out of range. If so, generate code to add the
6652 constant with the low-order 16 bits masked to the register and force
6653 this result into another register (this can be done with `cau').
6654 Then generate an address of REG+(CONST&0xffff), allowing for the
6655 possibility of bit 16 being a one.
6656
6657 Then check for the sum of a register and something not constant, try to
6658 load the other things into a register and return the sum. */
6659
6660 static rtx
6661 rs6000_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
6662 enum machine_mode mode)
6663 {
6664 unsigned int extra;
6665
6666 if (!reg_offset_addressing_ok_p (mode))
6667 {
6668 if (virtual_stack_registers_memory_p (x))
6669 return x;
6670
6671 /* In theory we should not be seeing addresses of the form reg+0,
6672 but just in case it is generated, optimize it away. */
6673 if (GET_CODE (x) == PLUS && XEXP (x, 1) == const0_rtx)
6674 return force_reg (Pmode, XEXP (x, 0));
6675
6676 /* For TImode with load/store quad, restrict addresses to just a single
6677 pointer, so it works with both GPRs and VSX registers. */
6678 /* Make sure both operands are registers. */
6679 else if (GET_CODE (x) == PLUS
6680 && (mode != TImode || !TARGET_QUAD_MEMORY))
6681 return gen_rtx_PLUS (Pmode,
6682 force_reg (Pmode, XEXP (x, 0)),
6683 force_reg (Pmode, XEXP (x, 1)));
6684 else
6685 return force_reg (Pmode, x);
6686 }
6687 if (GET_CODE (x) == SYMBOL_REF)
6688 {
6689 enum tls_model model = SYMBOL_REF_TLS_MODEL (x);
6690 if (model != 0)
6691 return rs6000_legitimize_tls_address (x, model);
6692 }
6693
6694 extra = 0;
6695 switch (mode)
6696 {
6697 case TFmode:
6698 case TDmode:
6699 case TImode:
6700 case PTImode:
6701 /* As in legitimate_offset_address_p we do not assume
6702 worst-case. The mode here is just a hint as to the registers
6703 used. A TImode is usually in gprs, but may actually be in
6704 fprs. Leave worst-case scenario for reload to handle via
6705 insn constraints. PTImode is only GPRs. */
6706 extra = 8;
6707 break;
6708 default:
6709 break;
6710 }
6711
6712 if (GET_CODE (x) == PLUS
6713 && GET_CODE (XEXP (x, 0)) == REG
6714 && GET_CODE (XEXP (x, 1)) == CONST_INT
6715 && ((unsigned HOST_WIDE_INT) (INTVAL (XEXP (x, 1)) + 0x8000)
6716 >= 0x10000 - extra)
6717 && !(SPE_VECTOR_MODE (mode)
6718 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)))
6719 {
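/* For example (reader's note): reg + 0x12345 splits into
   high_int == 0x10000 and low_int == 0x2345, i.e. an addis of 1 to the
   register followed by a 0x2345 displacement; the XOR/subtract below
   sign-adjusts the low part so values with bit 15 set round the high
   part up correctly. */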
6720 HOST_WIDE_INT high_int, low_int;
6721 rtx sum;
6722 low_int = ((INTVAL (XEXP (x, 1)) & 0xffff) ^ 0x8000) - 0x8000;
6723 if (low_int >= 0x8000 - extra)
6724 low_int = 0;
6725 high_int = INTVAL (XEXP (x, 1)) - low_int;
6726 sum = force_operand (gen_rtx_PLUS (Pmode, XEXP (x, 0),
6727 GEN_INT (high_int)), 0);
6728 return plus_constant (Pmode, sum, low_int);
6729 }
6730 else if (GET_CODE (x) == PLUS
6731 && GET_CODE (XEXP (x, 0)) == REG
6732 && GET_CODE (XEXP (x, 1)) != CONST_INT
6733 && GET_MODE_NUNITS (mode) == 1
6734 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
6735 || (/* ??? Assume floating point reg based on mode? */
6736 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6737 && (mode == DFmode || mode == DDmode)))
6738 && !avoiding_indexed_address_p (mode))
6739 {
6740 return gen_rtx_PLUS (Pmode, XEXP (x, 0),
6741 force_reg (Pmode, force_operand (XEXP (x, 1), 0)));
6742 }
6743 else if (SPE_VECTOR_MODE (mode)
6744 || (TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD))
6745 {
6746 if (mode == DImode)
6747 return x;
6748 /* We accept [reg + reg] and [reg + OFFSET]. */
6749
6750 if (GET_CODE (x) == PLUS)
6751 {
6752 rtx op1 = XEXP (x, 0);
6753 rtx op2 = XEXP (x, 1);
6754 rtx y;
6755
6756 op1 = force_reg (Pmode, op1);
6757
6758 if (GET_CODE (op2) != REG
6759 && (GET_CODE (op2) != CONST_INT
6760 || !SPE_CONST_OFFSET_OK (INTVAL (op2))
6761 || (GET_MODE_SIZE (mode) > 8
6762 && !SPE_CONST_OFFSET_OK (INTVAL (op2) + 8))))
6763 op2 = force_reg (Pmode, op2);
6764
6765 /* We can't always do [reg + reg] for these, because [reg +
6766 reg + offset] is not a legitimate addressing mode. */
6767 y = gen_rtx_PLUS (Pmode, op1, op2);
6768
6769 if ((GET_MODE_SIZE (mode) > 8 || mode == DDmode) && REG_P (op2))
6770 return force_reg (Pmode, y);
6771 else
6772 return y;
6773 }
6774
6775 return force_reg (Pmode, x);
6776 }
6777 else if ((TARGET_ELF
6778 #if TARGET_MACHO
6779 || !MACHO_DYNAMIC_NO_PIC_P
6780 #endif
6781 )
6782 && TARGET_32BIT
6783 && TARGET_NO_TOC
6784 && ! flag_pic
6785 && GET_CODE (x) != CONST_INT
6786 && GET_CODE (x) != CONST_WIDE_INT
6787 && GET_CODE (x) != CONST_DOUBLE
6788 && CONSTANT_P (x)
6789 && GET_MODE_NUNITS (mode) == 1
6790 && (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
6791 || (/* ??? Assume floating point reg based on mode? */
6792 (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
6793 && (mode == DFmode || mode == DDmode))))
6794 {
6795 rtx reg = gen_reg_rtx (Pmode);
6796 if (TARGET_ELF)
6797 emit_insn (gen_elf_high (reg, x));
6798 else
6799 emit_insn (gen_macho_high (reg, x));
6800 return gen_rtx_LO_SUM (Pmode, reg, x);
6801 }
6802 else if (TARGET_TOC
6803 && GET_CODE (x) == SYMBOL_REF
6804 && constant_pool_expr_p (x)
6805 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (x), Pmode))
6806 return create_TOC_reference (x, NULL_RTX);
6807 else
6808 return x;
6809 }
6810
6811 /* Debug version of rs6000_legitimize_address. */
6812 static rtx
6813 rs6000_debug_legitimize_address (rtx x, rtx oldx, enum machine_mode mode)
6814 {
6815 rtx ret;
6816 rtx_insn *insns;
6817
6818 start_sequence ();
6819 ret = rs6000_legitimize_address (x, oldx, mode);
6820 insns = get_insns ();
6821 end_sequence ();
6822
6823 if (ret != x)
6824 {
6825 fprintf (stderr,
6826 "\nrs6000_legitimize_address: mode %s, old code %s, "
6827 "new code %s, modified\n",
6828 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)),
6829 GET_RTX_NAME (GET_CODE (ret)));
6830
6831 fprintf (stderr, "Original address:\n");
6832 debug_rtx (x);
6833
6834 fprintf (stderr, "oldx:\n");
6835 debug_rtx (oldx);
6836
6837 fprintf (stderr, "New address:\n");
6838 debug_rtx (ret);
6839
6840 if (insns)
6841 {
6842 fprintf (stderr, "Insns added:\n");
6843 debug_rtx_list (insns, 20);
6844 }
6845 }
6846 else
6847 {
6848 fprintf (stderr,
6849 "\nrs6000_legitimize_address: mode %s, code %s, no change:\n",
6850 GET_MODE_NAME (mode), GET_RTX_NAME (GET_CODE (x)));
6851
6852 debug_rtx (x);
6853 }
6854
6855 if (insns)
6856 emit_insn (insns);
6857
6858 return ret;
6859 }
6860
6861 /* This is called from dwarf2out.c via TARGET_ASM_OUTPUT_DWARF_DTPREL.
6862 We need to emit DTP-relative relocations. */
6863
6864 static void rs6000_output_dwarf_dtprel (FILE *, int, rtx) ATTRIBUTE_UNUSED;
6865 static void
6866 rs6000_output_dwarf_dtprel (FILE *file, int size, rtx x)
6867 {
6868 switch (size)
6869 {
6870 case 4:
6871 fputs ("\t.long\t", file);
6872 break;
6873 case 8:
6874 fputs (DOUBLE_INT_ASM_OP, file);
6875 break;
6876 default:
6877 gcc_unreachable ();
6878 }
6879 output_addr_const (file, x);
6880 fputs ("@dtprel+0x8000", file);
6881 }
6882
6883 /* Return true if X is a symbol that refers to real (rather than emulated)
6884 TLS. */
6885
6886 static bool
6887 rs6000_real_tls_symbol_ref_p (rtx x)
6888 {
6889 return (GET_CODE (x) == SYMBOL_REF
6890 && SYMBOL_REF_TLS_MODEL (x) >= TLS_MODEL_REAL);
6891 }
6892
6893 /* In the name of slightly smaller debug output, and to cater to
6894 general assembler lossage, recognize various UNSPEC sequences
6895 and turn them back into a direct symbol reference. */
6896
6897 static rtx
6898 rs6000_delegitimize_address (rtx orig_x)
6899 {
6900 rtx x, y, offset;
6901
6902 orig_x = delegitimize_mem_from_attrs (orig_x);
6903 x = orig_x;
6904 if (MEM_P (x))
6905 x = XEXP (x, 0);
6906
6907 y = x;
6908 if (TARGET_CMODEL != CMODEL_SMALL
6909 && GET_CODE (y) == LO_SUM)
6910 y = XEXP (y, 1);
6911
6912 offset = NULL_RTX;
6913 if (GET_CODE (y) == PLUS
6914 && GET_MODE (y) == Pmode
6915 && CONST_INT_P (XEXP (y, 1)))
6916 {
6917 offset = XEXP (y, 1);
6918 y = XEXP (y, 0);
6919 }
6920
6921 if (GET_CODE (y) == UNSPEC
6922 && XINT (y, 1) == UNSPEC_TOCREL)
6923 {
6924 #ifdef ENABLE_CHECKING
6925 if (REG_P (XVECEXP (y, 0, 1))
6926 && REGNO (XVECEXP (y, 0, 1)) == TOC_REGISTER)
6927 {
6928 /* All good. */
6929 }
6930 else if (GET_CODE (XVECEXP (y, 0, 1)) == DEBUG_EXPR)
6931 {
6932 /* Weirdness alert. df_note_compute can replace r2 with a
6933 debug_expr when this unspec is in a debug_insn.
6934 Seen in gcc.dg/pr51957-1.c */
6935 }
6936 else
6937 {
6938 debug_rtx (orig_x);
6939 abort ();
6940 }
6941 #endif
6942 y = XVECEXP (y, 0, 0);
6943
6944 #ifdef HAVE_AS_TLS
6945 /* Do not associate thread-local symbols with the original
6946 constant pool symbol. */
6947 if (TARGET_XCOFF
6948 && GET_CODE (y) == SYMBOL_REF
6949 && CONSTANT_POOL_ADDRESS_P (y)
6950 && rs6000_real_tls_symbol_ref_p (get_pool_constant (y)))
6951 return orig_x;
6952 #endif
6953
6954 if (offset != NULL_RTX)
6955 y = gen_rtx_PLUS (Pmode, y, offset);
6956 if (!MEM_P (orig_x))
6957 return y;
6958 else
6959 return replace_equiv_address_nv (orig_x, y);
6960 }
6961
6962 if (TARGET_MACHO
6963 && GET_CODE (orig_x) == LO_SUM
6964 && GET_CODE (XEXP (orig_x, 1)) == CONST)
6965 {
6966 y = XEXP (XEXP (orig_x, 1), 0);
6967 if (GET_CODE (y) == UNSPEC
6968 && XINT (y, 1) == UNSPEC_MACHOPIC_OFFSET)
6969 return XVECEXP (y, 0, 0);
6970 }
6971
6972 return orig_x;
6973 }
6974
6975 /* Return true if X shouldn't be emitted into the debug info.
6976 The linker doesn't like .toc section references from
6977 .debug_* sections, so reject .toc section symbols. */
6978
6979 static bool
6980 rs6000_const_not_ok_for_debug_p (rtx x)
6981 {
6982 if (GET_CODE (x) == SYMBOL_REF
6983 && CONSTANT_POOL_ADDRESS_P (x))
6984 {
6985 rtx c = get_pool_constant (x);
6986 enum machine_mode cmode = get_pool_mode (x);
6987 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (c, cmode))
6988 return true;
6989 }
6990
6991 return false;
6992 }
6993
6994 /* Construct the SYMBOL_REF for the tls_get_addr function. */
6995
6996 static GTY(()) rtx rs6000_tls_symbol;
6997 static rtx
6998 rs6000_tls_get_addr (void)
6999 {
7000 if (!rs6000_tls_symbol)
7001 rs6000_tls_symbol = init_one_libfunc ("__tls_get_addr");
7002
7003 return rs6000_tls_symbol;
7004 }
7005
7006 /* Construct the SYMBOL_REF for TLS GOT references. */
7007
7008 static GTY(()) rtx rs6000_got_symbol;
7009 static rtx
7010 rs6000_got_sym (void)
7011 {
7012 if (!rs6000_got_symbol)
7013 {
7014 rs6000_got_symbol = gen_rtx_SYMBOL_REF (Pmode, "_GLOBAL_OFFSET_TABLE_");
7015 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_LOCAL;
7016 SYMBOL_REF_FLAGS (rs6000_got_symbol) |= SYMBOL_FLAG_EXTERNAL;
7017 }
7018
7019 return rs6000_got_symbol;
7020 }
7021
7022 /* AIX Thread-Local Address support. */
7023
7024 static rtx
7025 rs6000_legitimize_tls_address_aix (rtx addr, enum tls_model model)
7026 {
7027 rtx sym, mem, tocref, tlsreg, tmpreg, dest, tlsaddr;
7028 const char *name;
7029 char *tlsname;
7030
7031 name = XSTR (addr, 0);
7032 /* Append TLS CSECT qualifier, unless the symbol is already qualified
7033 or the symbol will be in the TLS private data section. */
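/* For example (illustrative): a TREE_PUBLIC symbol "foo" becomes
"foo[TL]", while a BSS-initialized one becomes "foo[UL]". */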
7034 if (name[strlen (name) - 1] != ']'
7035 && (TREE_PUBLIC (SYMBOL_REF_DECL (addr))
7036 || bss_initializer_p (SYMBOL_REF_DECL (addr))))
7037 {
7038 tlsname = XALLOCAVEC (char, strlen (name) + 5);
7039 strcpy (tlsname, name);
7040 strcat (tlsname,
7041 bss_initializer_p (SYMBOL_REF_DECL (addr)) ? "[UL]" : "[TL]");
7042 tlsaddr = copy_rtx (addr);
7043 XSTR (tlsaddr, 0) = ggc_strdup (tlsname);
7044 }
7045 else
7046 tlsaddr = addr;
7047
7048 /* Place addr into TOC constant pool. */
7049 sym = force_const_mem (GET_MODE (tlsaddr), tlsaddr);
7050
7051 /* Output the TOC entry and create the MEM referencing the value. */
7052 if (constant_pool_expr_p (XEXP (sym, 0))
7053 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (XEXP (sym, 0)), Pmode))
7054 {
7055 tocref = create_TOC_reference (XEXP (sym, 0), NULL_RTX);
7056 mem = gen_const_mem (Pmode, tocref);
7057 set_mem_alias_set (mem, get_TOC_alias_set ());
7058 }
7059 else
7060 return sym;
7061
7062 /* Use global-dynamic for local-dynamic. */
7063 if (model == TLS_MODEL_GLOBAL_DYNAMIC
7064 || model == TLS_MODEL_LOCAL_DYNAMIC)
7065 {
7066 /* Create new TOC reference for @m symbol. */
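/* (A sketch: TOC pool labels typically look like "*LC..n", so skipping
the three-character "*LC" prefix and prepending "*LCM" yields the
module-handle label "*LCM..n".) */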
7067 name = XSTR (XVECEXP (XEXP (mem, 0), 0, 0), 0);
7068 tlsname = XALLOCAVEC (char, strlen (name) + 2);
7069 strcpy (tlsname, "*LCM");
7070 strcat (tlsname, name + 3);
7071 rtx modaddr = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (tlsname));
7072 SYMBOL_REF_FLAGS (modaddr) |= SYMBOL_FLAG_LOCAL;
7073 tocref = create_TOC_reference (modaddr, NULL_RTX);
7074 rtx modmem = gen_const_mem (Pmode, tocref);
7075 set_mem_alias_set (modmem, get_TOC_alias_set ());
7076
7077 rtx modreg = gen_reg_rtx (Pmode);
7078 emit_insn (gen_rtx_SET (VOIDmode, modreg, modmem));
7079
7080 tmpreg = gen_reg_rtx (Pmode);
7081 emit_insn (gen_rtx_SET (VOIDmode, tmpreg, mem));
7082
7083 dest = gen_reg_rtx (Pmode);
7084 if (TARGET_32BIT)
7085 emit_insn (gen_tls_get_addrsi (dest, modreg, tmpreg));
7086 else
7087 emit_insn (gen_tls_get_addrdi (dest, modreg, tmpreg));
7088 return dest;
7089 }
7090 /* Obtain TLS pointer: 32-bit call or 64-bit GPR 13. */
7091 else if (TARGET_32BIT)
7092 {
7093 tlsreg = gen_reg_rtx (SImode);
7094 emit_insn (gen_tls_get_tpointer (tlsreg));
7095 }
7096 else
7097 tlsreg = gen_rtx_REG (DImode, 13);
7098
7099 /* Load the TOC value into temporary register. */
7100 tmpreg = gen_reg_rtx (Pmode);
7101 emit_insn (gen_rtx_SET (VOIDmode, tmpreg, mem));
7102 set_unique_reg_note (get_last_insn (), REG_EQUAL,
7103 gen_rtx_MINUS (Pmode, addr, tlsreg));
7104
7105 /* Add TOC symbol value to TLS pointer. */
7106 dest = force_reg (Pmode, gen_rtx_PLUS (Pmode, tmpreg, tlsreg));
7107
7108 return dest;
7109 }
7110
7111 /* ADDR contains a thread-local SYMBOL_REF. Generate code to compute
7112 this (thread-local) address. */
7113
7114 static rtx
7115 rs6000_legitimize_tls_address (rtx addr, enum tls_model model)
7116 {
7117 rtx dest, insn;
7118
7119 if (TARGET_XCOFF)
7120 return rs6000_legitimize_tls_address_aix (addr, model);
7121
7122 dest = gen_reg_rtx (Pmode);
7123 if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 16)
7124 {
7125 rtx tlsreg;
7126
7127 if (TARGET_64BIT)
7128 {
7129 tlsreg = gen_rtx_REG (Pmode, 13);
7130 insn = gen_tls_tprel_64 (dest, tlsreg, addr);
7131 }
7132 else
7133 {
7134 tlsreg = gen_rtx_REG (Pmode, 2);
7135 insn = gen_tls_tprel_32 (dest, tlsreg, addr);
7136 }
7137 emit_insn (insn);
7138 }
7139 else if (model == TLS_MODEL_LOCAL_EXEC && rs6000_tls_size == 32)
7140 {
7141 rtx tlsreg, tmp;
7142
7143 tmp = gen_reg_rtx (Pmode);
7144 if (TARGET_64BIT)
7145 {
7146 tlsreg = gen_rtx_REG (Pmode, 13);
7147 insn = gen_tls_tprel_ha_64 (tmp, tlsreg, addr);
7148 }
7149 else
7150 {
7151 tlsreg = gen_rtx_REG (Pmode, 2);
7152 insn = gen_tls_tprel_ha_32 (tmp, tlsreg, addr);
7153 }
7154 emit_insn (insn);
7155 if (TARGET_64BIT)
7156 insn = gen_tls_tprel_lo_64 (dest, tmp, addr);
7157 else
7158 insn = gen_tls_tprel_lo_32 (dest, tmp, addr);
7159 emit_insn (insn);
7160 }
7161 else
7162 {
7163 rtx r3, got, tga, tmp1, tmp2, call_insn;
7164
7165 /* We currently use relocations like @got@tlsgd for TLS, which
7166 means the linker will handle allocation of tls entries, placing
7167 them in the .got section. So use a pointer to the .got section,
7168 not one to secondary TOC sections used by 64-bit -mminimal-toc,
7169 or to secondary GOT sections used by 32-bit -fPIC. */
7170 if (TARGET_64BIT)
7171 got = gen_rtx_REG (Pmode, 2);
7172 else
7173 {
7174 if (flag_pic == 1)
7175 got = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
7176 else
7177 {
7178 rtx gsym = rs6000_got_sym ();
7179 got = gen_reg_rtx (Pmode);
7180 if (flag_pic == 0)
7181 rs6000_emit_move (got, gsym, Pmode);
7182 else
7183 {
7184 rtx mem, lab, last;
7185
7186 tmp1 = gen_reg_rtx (Pmode);
7187 tmp2 = gen_reg_rtx (Pmode);
7188 mem = gen_const_mem (Pmode, tmp1);
7189 lab = gen_label_rtx ();
7190 emit_insn (gen_load_toc_v4_PIC_1b (gsym, lab));
7191 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
7192 if (TARGET_LINK_STACK)
7193 emit_insn (gen_addsi3 (tmp1, tmp1, GEN_INT (4)));
7194 emit_move_insn (tmp2, mem);
7195 last = emit_insn (gen_addsi3 (got, tmp1, tmp2));
7196 set_unique_reg_note (last, REG_EQUAL, gsym);
7197 }
7198 }
7199 }
7200
7201 if (model == TLS_MODEL_GLOBAL_DYNAMIC)
7202 {
7203 tga = rs6000_tls_get_addr ();
7204 emit_library_call_value (tga, dest, LCT_CONST, Pmode,
7205 1, const0_rtx, Pmode);
7206
7207 r3 = gen_rtx_REG (Pmode, 3);
7208 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
7209 {
7210 if (TARGET_64BIT)
7211 insn = gen_tls_gd_aix64 (r3, got, addr, tga, const0_rtx);
7212 else
7213 insn = gen_tls_gd_aix32 (r3, got, addr, tga, const0_rtx);
7214 }
7215 else if (DEFAULT_ABI == ABI_V4)
7216 insn = gen_tls_gd_sysvsi (r3, got, addr, tga, const0_rtx);
7217 else
7218 gcc_unreachable ();
7219 call_insn = last_call_insn ();
7220 PATTERN (call_insn) = insn;
7221 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
7222 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
7223 pic_offset_table_rtx);
7224 }
7225 else if (model == TLS_MODEL_LOCAL_DYNAMIC)
7226 {
7227 tga = rs6000_tls_get_addr ();
7228 tmp1 = gen_reg_rtx (Pmode);
7229 emit_library_call_value (tga, tmp1, LCT_CONST, Pmode,
7230 1, const0_rtx, Pmode);
7231
7232 r3 = gen_rtx_REG (Pmode, 3);
7233 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
7234 {
7235 if (TARGET_64BIT)
7236 insn = gen_tls_ld_aix64 (r3, got, tga, const0_rtx);
7237 else
7238 insn = gen_tls_ld_aix32 (r3, got, tga, const0_rtx);
7239 }
7240 else if (DEFAULT_ABI == ABI_V4)
7241 insn = gen_tls_ld_sysvsi (r3, got, tga, const0_rtx);
7242 else
7243 gcc_unreachable ();
7244 call_insn = last_call_insn ();
7245 PATTERN (call_insn) = insn;
7246 if (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
7247 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn),
7248 pic_offset_table_rtx);
7249
7250 if (rs6000_tls_size == 16)
7251 {
7252 if (TARGET_64BIT)
7253 insn = gen_tls_dtprel_64 (dest, tmp1, addr);
7254 else
7255 insn = gen_tls_dtprel_32 (dest, tmp1, addr);
7256 }
7257 else if (rs6000_tls_size == 32)
7258 {
7259 tmp2 = gen_reg_rtx (Pmode);
7260 if (TARGET_64BIT)
7261 insn = gen_tls_dtprel_ha_64 (tmp2, tmp1, addr);
7262 else
7263 insn = gen_tls_dtprel_ha_32 (tmp2, tmp1, addr);
7264 emit_insn (insn);
7265 if (TARGET_64BIT)
7266 insn = gen_tls_dtprel_lo_64 (dest, tmp2, addr);
7267 else
7268 insn = gen_tls_dtprel_lo_32 (dest, tmp2, addr);
7269 }
7270 else
7271 {
7272 tmp2 = gen_reg_rtx (Pmode);
7273 if (TARGET_64BIT)
7274 insn = gen_tls_got_dtprel_64 (tmp2, got, addr);
7275 else
7276 insn = gen_tls_got_dtprel_32 (tmp2, got, addr);
7277 emit_insn (insn);
7278 insn = gen_rtx_SET (Pmode, dest,
7279 gen_rtx_PLUS (Pmode, tmp2, tmp1));
7280 }
7281 emit_insn (insn);
7282 }
7283 else
7284 {
7285 /* IE, or 64-bit offset LE. */
7286 tmp2 = gen_reg_rtx (Pmode);
7287 if (TARGET_64BIT)
7288 insn = gen_tls_got_tprel_64 (tmp2, got, addr);
7289 else
7290 insn = gen_tls_got_tprel_32 (tmp2, got, addr);
7291 emit_insn (insn);
7292 if (TARGET_64BIT)
7293 insn = gen_tls_tls_64 (dest, tmp2, addr);
7294 else
7295 insn = gen_tls_tls_32 (dest, tmp2, addr);
7296 emit_insn (insn);
7297 }
7298 }
7299
7300 return dest;
7301 }
7302
7303 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
7304
7305 static bool
7306 rs6000_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
7307 {
7308 if (GET_CODE (x) == HIGH
7309 && GET_CODE (XEXP (x, 0)) == UNSPEC)
7310 return true;
7311
7312 /* A TLS symbol in the TOC cannot contain a sum. */
7313 if (GET_CODE (x) == CONST
7314 && GET_CODE (XEXP (x, 0)) == PLUS
7315 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF
7316 && SYMBOL_REF_TLS_MODEL (XEXP (XEXP (x, 0), 0)) != 0)
7317 return true;
7318
7319 /* Do not place an ELF TLS symbol in the constant pool. */
7320 return TARGET_ELF && tls_referenced_p (x);
7321 }
7322
7323 /* Return true iff the given SYMBOL_REF refers to a constant pool entry
7324 that we have put in the TOC, or for cmodel=medium, if the SYMBOL_REF
7325 can be addressed relative to the toc pointer. */
7326
7327 static bool
7328 use_toc_relative_ref (rtx sym)
7329 {
7330 return ((constant_pool_expr_p (sym)
7331 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (get_pool_constant (sym),
7332 get_pool_mode (sym)))
7333 || (TARGET_CMODEL == CMODEL_MEDIUM
7334 && SYMBOL_REF_LOCAL_P (sym)));
7335 }
7336
7337 /* Our implementation of LEGITIMIZE_RELOAD_ADDRESS. Returns a value to
7338 replace the input X, or the original X if no replacement is called for.
7339 The output parameter *WIN is 1 if the calling macro should goto WIN,
7340 0 if it should not.
7341
7342 For RS/6000, we wish to handle large displacements off a base
7343 register by splitting the addend across an addi/addis and the mem insn.
7344 This cuts the number of extra insns needed from 3 to 1.
7345
7346 On Darwin, we use this to generate code for floating point constants.
7347 A movsf_low is generated so we wind up with 2 instructions rather than 3.
7348 The Darwin code is inside #if TARGET_MACHO because only then are the
7349 machopic_* functions defined. */
7350 static rtx
7351 rs6000_legitimize_reload_address (rtx x, enum machine_mode mode,
7352 int opnum, int type,
7353 int ind_levels ATTRIBUTE_UNUSED, int *win)
7354 {
7355 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
7356
7357 /* Nasty hack for vsx_splat_V2DF/V2DI load from mem, which takes a
7358 DFmode/DImode MEM. */
7359 if (reg_offset_p
7360 && opnum == 1
7361 && ((mode == DFmode && recog_data.operand_mode[0] == V2DFmode)
7362 || (mode == DImode && recog_data.operand_mode[0] == V2DImode)))
7363 reg_offset_p = false;
7364
7365 /* We must recognize output that we have already generated ourselves. */
7366 if (GET_CODE (x) == PLUS
7367 && GET_CODE (XEXP (x, 0)) == PLUS
7368 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
7369 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
7370 && GET_CODE (XEXP (x, 1)) == CONST_INT)
7371 {
7372 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7373 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
7374 opnum, (enum reload_type) type);
7375 *win = 1;
7376 return x;
7377 }
7378
7379 /* Likewise for (lo_sum (high ...) ...) output we have generated. */
7380 if (GET_CODE (x) == LO_SUM
7381 && GET_CODE (XEXP (x, 0)) == HIGH)
7382 {
7383 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7384 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7385 opnum, (enum reload_type) type);
7386 *win = 1;
7387 return x;
7388 }
7389
7390 #if TARGET_MACHO
7391 if (DEFAULT_ABI == ABI_DARWIN && flag_pic
7392 && GET_CODE (x) == LO_SUM
7393 && GET_CODE (XEXP (x, 0)) == PLUS
7394 && XEXP (XEXP (x, 0), 0) == pic_offset_table_rtx
7395 && GET_CODE (XEXP (XEXP (x, 0), 1)) == HIGH
7396 && XEXP (XEXP (XEXP (x, 0), 1), 0) == XEXP (x, 1)
7397 && machopic_operand_p (XEXP (x, 1)))
7398 {
7399 /* Result of previous invocation of this function on Darwin
7400 floating point constant. */
7401 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7402 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7403 opnum, (enum reload_type) type);
7404 *win = 1;
7405 return x;
7406 }
7407 #endif
7408
7409 if (TARGET_CMODEL != CMODEL_SMALL
7410 && reg_offset_p
7411 && small_toc_ref (x, VOIDmode))
7412 {
7413 rtx hi = gen_rtx_HIGH (Pmode, copy_rtx (x));
7414 x = gen_rtx_LO_SUM (Pmode, hi, x);
7415 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7416 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7417 opnum, (enum reload_type) type);
7418 *win = 1;
7419 return x;
7420 }
7421
7422 if (GET_CODE (x) == PLUS
7423 && GET_CODE (XEXP (x, 0)) == REG
7424 && REGNO (XEXP (x, 0)) < FIRST_PSEUDO_REGISTER
7425 && INT_REG_OK_FOR_BASE_P (XEXP (x, 0), 1)
7426 && GET_CODE (XEXP (x, 1)) == CONST_INT
7427 && reg_offset_p
7428 && !SPE_VECTOR_MODE (mode)
7429 && !(TARGET_E500_DOUBLE && GET_MODE_SIZE (mode) > UNITS_PER_WORD)
7430 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode)))
7431 {
7432 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
7433 HOST_WIDE_INT low = ((val & 0xffff) ^ 0x8000) - 0x8000;
7434 HOST_WIDE_INT high
7435 = (((val - low) & 0xffffffff) ^ 0x80000000) - 0x80000000;
7436
7437 /* Check for 32-bit overflow. */
7438 if (high + low != val)
7439 {
7440 *win = 0;
7441 return x;
7442 }
7443
7444 /* Reload the high part into a base reg; leave the low part
7445 in the mem directly. */
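/* For example (a sketch): val = 0x12345 splits into high = 0x10000
and low = 0x2345, so (reg + 0x12345) becomes
((reg + 0x10000) + 0x2345); the high part is materialized with an
addis into the base reg while the mem insn keeps the 16-bit low
displacement. */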
7446
7447 x = gen_rtx_PLUS (GET_MODE (x),
7448 gen_rtx_PLUS (GET_MODE (x), XEXP (x, 0),
7449 GEN_INT (high)),
7450 GEN_INT (low));
7451
7452 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7453 BASE_REG_CLASS, GET_MODE (x), VOIDmode, 0, 0,
7454 opnum, (enum reload_type) type);
7455 *win = 1;
7456 return x;
7457 }
7458
7459 if (GET_CODE (x) == SYMBOL_REF
7460 && reg_offset_p
7461 && (!VECTOR_MODE_P (mode) || VECTOR_MEM_NONE_P (mode))
7462 && !SPE_VECTOR_MODE (mode)
7463 #if TARGET_MACHO
7464 && DEFAULT_ABI == ABI_DARWIN
7465 && (flag_pic || MACHO_DYNAMIC_NO_PIC_P)
7466 && machopic_symbol_defined_p (x)
7467 #else
7468 && DEFAULT_ABI == ABI_V4
7469 && !flag_pic
7470 #endif
7471 /* Don't do this for TFmode or TDmode, since the result isn't offsettable.
7472 The same goes for DImode without 64-bit gprs and DFmode and DDmode
7473 without fprs.
7474 ??? Assume floating point reg based on mode? This assumption is
7475 violated by eg. powerpc-linux -m32 compile of gcc.dg/pr28796-2.c
7476 where reload ends up doing a DFmode load of a constant from
7477 mem using two gprs. Unfortunately, at this point reload
7478 hasn't yet selected regs so poking around in reload data
7479 won't help and even if we could figure out the regs reliably,
7480 we'd still want to allow this transformation when the mem is
7481 naturally aligned. Since we say the address is good here, we
7482 can't disable offsets from LO_SUMs in mem_operand_gpr.
7483 FIXME: Allow offset from lo_sum for other modes too, when
7484 mem is sufficiently aligned. */
7485 && mode != TFmode
7486 && mode != TDmode
7487 && (mode != TImode || !TARGET_VSX_TIMODE)
7488 && mode != PTImode
7489 && (mode != DImode || TARGET_POWERPC64)
7490 && ((mode != DFmode && mode != DDmode) || TARGET_POWERPC64
7491 || (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)))
7492 {
7493 #if TARGET_MACHO
7494 if (flag_pic)
7495 {
7496 rtx offset = machopic_gen_offset (x);
7497 x = gen_rtx_LO_SUM (GET_MODE (x),
7498 gen_rtx_PLUS (Pmode, pic_offset_table_rtx,
7499 gen_rtx_HIGH (Pmode, offset)), offset);
7500 }
7501 else
7502 #endif
7503 x = gen_rtx_LO_SUM (GET_MODE (x),
7504 gen_rtx_HIGH (Pmode, x), x);
7505
7506 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7507 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7508 opnum, (enum reload_type) type);
7509 *win = 1;
7510 return x;
7511 }
7512
7513 /* Reload an offset address wrapped by an AND that represents the
7514 masking of the lower bits. Strip the outer AND and let reload
7515 convert the offset address into an indirect address. For VSX,
7516 force reload to create the address with an AND in a separate
7517 register, because we can't guarantee an AltiVec register will
7518 be used. */
7519 if (VECTOR_MEM_ALTIVEC_P (mode)
7520 && GET_CODE (x) == AND
7521 && GET_CODE (XEXP (x, 0)) == PLUS
7522 && GET_CODE (XEXP (XEXP (x, 0), 0)) == REG
7523 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT
7524 && GET_CODE (XEXP (x, 1)) == CONST_INT
7525 && INTVAL (XEXP (x, 1)) == -16)
7526 {
7527 x = XEXP (x, 0);
7528 *win = 1;
7529 return x;
7530 }
7531
7532 if (TARGET_TOC
7533 && reg_offset_p
7534 && GET_CODE (x) == SYMBOL_REF
7535 && use_toc_relative_ref (x))
7536 {
7537 x = create_TOC_reference (x, NULL_RTX);
7538 if (TARGET_CMODEL != CMODEL_SMALL)
7539 push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
7540 BASE_REG_CLASS, Pmode, VOIDmode, 0, 0,
7541 opnum, (enum reload_type) type);
7542 *win = 1;
7543 return x;
7544 }
7545 *win = 0;
7546 return x;
7547 }
7548
7549 /* Debug version of rs6000_legitimize_reload_address. */
7550 static rtx
7551 rs6000_debug_legitimize_reload_address (rtx x, enum machine_mode mode,
7552 int opnum, int type,
7553 int ind_levels, int *win)
7554 {
7555 rtx ret = rs6000_legitimize_reload_address (x, mode, opnum, type,
7556 ind_levels, win);
7557 fprintf (stderr,
7558 "\nrs6000_legitimize_reload_address: mode = %s, opnum = %d, "
7559 "type = %d, ind_levels = %d, win = %d, original addr:\n",
7560 GET_MODE_NAME (mode), opnum, type, ind_levels, *win);
7561 debug_rtx (x);
7562
7563 if (x == ret)
7564 fprintf (stderr, "Same address returned\n");
7565 else if (!ret)
7566 fprintf (stderr, "NULL returned\n");
7567 else
7568 {
7569 fprintf (stderr, "New address:\n");
7570 debug_rtx (ret);
7571 }
7572
7573 return ret;
7574 }
7575
7576 /* TARGET_LEGITIMATE_ADDRESS_P recognizes an RTL expression
7577 that is a valid memory address for an instruction.
7578 The MODE argument is the machine mode for the MEM expression
7579 that wants to use this address.
7580
7581 On the RS/6000, there are four valid addresses: a SYMBOL_REF that
7582 refers to a constant pool entry of an address (or the sum of it
7583 plus a constant), a short (16-bit signed) constant plus a register,
7584 the sum of two registers, or a register indirect, possibly with an
7585 auto-increment. For DFmode, DDmode and DImode with a constant plus
7586 register, we must ensure that both words are addressable, or that on
7587 PowerPC64 the offset is word aligned.
7588
7589 For modes spanning multiple registers (DFmode and DDmode in 32-bit GPRs,
7590 32-bit DImode, TImode, TFmode, TDmode), indexed addressing cannot be used
7591 because adjacent memory cells are accessed by adding word-sized offsets
7592 during assembly output. */
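/* Roughly, in RTL (a sketch): (reg) or (pre_inc (reg)) for register
indirect, (plus (reg) (const_int 16)) for a 16-bit offset,
(plus (reg) (reg)) for indexed, and lo_sum/TOC forms for constant
pool SYMBOL_REFs. */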
7593 static bool
7594 rs6000_legitimate_address_p (enum machine_mode mode, rtx x, bool reg_ok_strict)
7595 {
7596 bool reg_offset_p = reg_offset_addressing_ok_p (mode);
7597
7598 /* If this is an unaligned stvx/ldvx type address, discard the outer AND. */
7599 if (VECTOR_MEM_ALTIVEC_P (mode)
7600 && GET_CODE (x) == AND
7601 && GET_CODE (XEXP (x, 1)) == CONST_INT
7602 && INTVAL (XEXP (x, 1)) == -16)
7603 x = XEXP (x, 0);
7604
7605 if (TARGET_ELF && RS6000_SYMBOL_REF_TLS_P (x))
7606 return 0;
7607 if (legitimate_indirect_address_p (x, reg_ok_strict))
7608 return 1;
7609 if (TARGET_UPDATE
7610 && (GET_CODE (x) == PRE_INC || GET_CODE (x) == PRE_DEC)
7611 && mode_supports_pre_incdec_p (mode)
7612 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict))
7613 return 1;
7614 if (virtual_stack_registers_memory_p (x))
7615 return 1;
7616 if (reg_offset_p && legitimate_small_data_p (mode, x))
7617 return 1;
7618 if (reg_offset_p
7619 && legitimate_constant_pool_address_p (x, mode,
7620 reg_ok_strict || lra_in_progress))
7621 return 1;
7622 /* For TImode, if we have load/store quad and TImode in VSX registers, only
7623 allow register indirect addresses. This will allow the values to go in
7624 either GPRs or VSX registers without reloading. The vector types would
7625 tend to go into VSX registers, so we allow REG+REG, while TImode seems
7626 somewhat split, in that some uses are GPR based, and some VSX based. */
7627 if (mode == TImode && TARGET_QUAD_MEMORY && TARGET_VSX_TIMODE)
7628 return 0;
7629 /* If not REG_OK_STRICT (before reload) let pass any stack offset. */
7630 if (! reg_ok_strict
7631 && reg_offset_p
7632 && GET_CODE (x) == PLUS
7633 && GET_CODE (XEXP (x, 0)) == REG
7634 && (XEXP (x, 0) == virtual_stack_vars_rtx
7635 || XEXP (x, 0) == arg_pointer_rtx)
7636 && GET_CODE (XEXP (x, 1)) == CONST_INT)
7637 return 1;
7638 if (rs6000_legitimate_offset_address_p (mode, x, reg_ok_strict, false))
7639 return 1;
7640 if (mode != TFmode
7641 && mode != TDmode
7642 && ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT)
7643 || TARGET_POWERPC64
7644 || (mode != DFmode && mode != DDmode)
7645 || (TARGET_E500_DOUBLE && mode != DDmode))
7646 && (TARGET_POWERPC64 || mode != DImode)
7647 && (mode != TImode || VECTOR_MEM_VSX_P (TImode))
7648 && mode != PTImode
7649 && !avoiding_indexed_address_p (mode)
7650 && legitimate_indexed_address_p (x, reg_ok_strict))
7651 return 1;
7652 if (TARGET_UPDATE && GET_CODE (x) == PRE_MODIFY
7653 && mode_supports_pre_modify_p (mode)
7654 && legitimate_indirect_address_p (XEXP (x, 0), reg_ok_strict)
7655 && (rs6000_legitimate_offset_address_p (mode, XEXP (x, 1),
7656 reg_ok_strict, false)
7657 || (!avoiding_indexed_address_p (mode)
7658 && legitimate_indexed_address_p (XEXP (x, 1), reg_ok_strict)))
7659 && rtx_equal_p (XEXP (XEXP (x, 1), 0), XEXP (x, 0)))
7660 return 1;
7661 if (reg_offset_p && legitimate_lo_sum_address_p (mode, x, reg_ok_strict))
7662 return 1;
7663 return 0;
7664 }
7665
7666 /* Debug version of rs6000_legitimate_address_p. */
7667 static bool
7668 rs6000_debug_legitimate_address_p (enum machine_mode mode, rtx x,
7669 bool reg_ok_strict)
7670 {
7671 bool ret = rs6000_legitimate_address_p (mode, x, reg_ok_strict);
7672 fprintf (stderr,
7673 "\nrs6000_legitimate_address_p: return = %s, mode = %s, "
7674 "strict = %d, reload = %s, code = %s\n",
7675 ret ? "true" : "false",
7676 GET_MODE_NAME (mode),
7677 reg_ok_strict,
7678 (reload_completed
7679 ? "after"
7680 : (reload_in_progress ? "progress" : "before")),
7681 GET_RTX_NAME (GET_CODE (x)));
7682 debug_rtx (x);
7683
7684 return ret;
7685 }
7686
7687 /* Implement TARGET_MODE_DEPENDENT_ADDRESS_P. */
7688
7689 static bool
7690 rs6000_mode_dependent_address_p (const_rtx addr,
7691 addr_space_t as ATTRIBUTE_UNUSED)
7692 {
7693 return rs6000_mode_dependent_address_ptr (addr);
7694 }
7695
7696 /* Go to LABEL if ADDR (a legitimate address expression)
7697 has an effect that depends on the machine mode it is used for.
7698
7699 On the RS/6000 this is true of any integral offset (since AltiVec
7700 and VSX modes don't allow them) and of pre-increment or decrement.
7701
7702 ??? Except that due to conceptual problems in offsettable_address_p
7703 we can't really report the problems of integral offsets. So leave
7704 this assuming that the adjustable offset must be valid for the
7705 sub-words of a TFmode operand, which is what we had before. */
7706
7707 static bool
7708 rs6000_mode_dependent_address (const_rtx addr)
7709 {
7710 switch (GET_CODE (addr))
7711 {
7712 case PLUS:
7713 /* Any offset from virtual_stack_vars_rtx and arg_pointer_rtx
7714 is considered a legitimate address before reload, so there
7715 are no offset restrictions in that case. Note that this
7716 condition is safe in strict mode because any address involving
7717 virtual_stack_vars_rtx or arg_pointer_rtx would already have
7718 been rejected as illegitimate. */
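/* The test below keeps headroom for the last word of a worst-case
16-byte (TFmode) operand: that word sits at offset + 8 with 64-bit
words (offset + 12 with 32-bit words), and the sum must still be a
valid 16-bit signed displacement. */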
7719 if (XEXP (addr, 0) != virtual_stack_vars_rtx
7720 && XEXP (addr, 0) != arg_pointer_rtx
7721 && GET_CODE (XEXP (addr, 1)) == CONST_INT)
7722 {
7723 unsigned HOST_WIDE_INT val = INTVAL (XEXP (addr, 1));
7724 return val + 0x8000 >= 0x10000 - (TARGET_POWERPC64 ? 8 : 12);
7725 }
7726 break;
7727
7728 case LO_SUM:
7729 /* Anything in the constant pool is sufficiently aligned that
7730 all bytes have the same high part address. */
7731 return !legitimate_constant_pool_address_p (addr, QImode, false);
7732
7733 /* Auto-increment cases are now treated generically in recog.c. */
7734 case PRE_MODIFY:
7735 return TARGET_UPDATE;
7736
7737 /* AND is only allowed in AltiVec loads. */
7738 case AND:
7739 return true;
7740
7741 default:
7742 break;
7743 }
7744
7745 return false;
7746 }
7747
7748 /* Debug version of rs6000_mode_dependent_address. */
7749 static bool
7750 rs6000_debug_mode_dependent_address (const_rtx addr)
7751 {
7752 bool ret = rs6000_mode_dependent_address (addr);
7753
7754 fprintf (stderr, "\nrs6000_mode_dependent_address: ret = %s\n",
7755 ret ? "true" : "false");
7756 debug_rtx (addr);
7757
7758 return ret;
7759 }
7760
7761 /* Implement FIND_BASE_TERM. */
7762
7763 rtx
7764 rs6000_find_base_term (rtx op)
7765 {
7766 rtx base;
7767
7768 base = op;
7769 if (GET_CODE (base) == CONST)
7770 base = XEXP (base, 0);
7771 if (GET_CODE (base) == PLUS)
7772 base = XEXP (base, 0);
7773 if (GET_CODE (base) == UNSPEC)
7774 switch (XINT (base, 1))
7775 {
7776 case UNSPEC_TOCREL:
7777 case UNSPEC_MACHOPIC_OFFSET:
7778 /* OP represents SYM [+ OFFSET] - ANCHOR. SYM is the base term
7779 for aliasing purposes. */
7780 return XVECEXP (base, 0, 0);
7781 }
7782
7783 return op;
7784 }
7785
7786 /* More elaborate version of recog's offsettable_memref_p predicate
7787 that works around the ??? note of rs6000_mode_dependent_address.
7788 In particular it accepts
7789
7790 (mem:DI (plus:SI (reg/f:SI 31 31) (const_int 32760 [0x7ff8])))
7791
7792 in 32-bit mode, which the recog predicate rejects. */
7793
7794 static bool
7795 rs6000_offsettable_memref_p (rtx op, enum machine_mode reg_mode)
7796 {
7797 bool worst_case;
7798
7799 if (!MEM_P (op))
7800 return false;
7801
7802 /* First mimic offsettable_memref_p. */
7803 if (offsettable_address_p (true, GET_MODE (op), XEXP (op, 0)))
7804 return true;
7805
7806 /* offsettable_address_p invokes rs6000_mode_dependent_address, but
7807 the latter predicate knows nothing about the mode of the memory
7808 reference and, therefore, assumes that it is the largest supported
7809 mode (TFmode). As a consequence, legitimate offsettable memory
7810 references are rejected. rs6000_legitimate_offset_address_p contains
7811 the correct logic for the PLUS case of rs6000_mode_dependent_address,
7812 at least with a little bit of help here given that we know the
7813 actual registers used. */
7814 worst_case = ((TARGET_POWERPC64 && GET_MODE_CLASS (reg_mode) == MODE_INT)
7815 || GET_MODE_SIZE (reg_mode) == 4);
7816 return rs6000_legitimate_offset_address_p (GET_MODE (op), XEXP (op, 0),
7817 true, worst_case);
7818 }
7819
7820 /* Change register usage conditional on target flags. */
7821 static void
7822 rs6000_conditional_register_usage (void)
7823 {
7824 int i;
7825
7826 if (TARGET_DEBUG_TARGET)
7827 fprintf (stderr, "rs6000_conditional_register_usage called\n");
7828
7829 /* Set MQ register fixed (already call_used) so that it will not be
7830 allocated. */
7831 fixed_regs[64] = 1;
7832
7833 /* 64-bit AIX and Linux reserve GPR13 for thread-private data. */
7834 if (TARGET_64BIT)
7835 fixed_regs[13] = call_used_regs[13]
7836 = call_really_used_regs[13] = 1;
7837
7838 /* Conditionally disable FPRs. */
7839 if (TARGET_SOFT_FLOAT || !TARGET_FPRS)
7840 for (i = 32; i < 64; i++)
7841 fixed_regs[i] = call_used_regs[i]
7842 = call_really_used_regs[i] = 1;
7843
7844 /* The TOC register is not killed across calls in a way that is
7845 visible to the compiler. */
7846 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
7847 call_really_used_regs[2] = 0;
7848
7849 if (DEFAULT_ABI == ABI_V4
7850 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
7851 && flag_pic == 2)
7852 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7853
7854 if (DEFAULT_ABI == ABI_V4
7855 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM
7856 && flag_pic == 1)
7857 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7858 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7859 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7860
7861 if (DEFAULT_ABI == ABI_DARWIN
7862 && PIC_OFFSET_TABLE_REGNUM != INVALID_REGNUM)
7863 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7864 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7865 = call_really_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7866
7867 if (TARGET_TOC && TARGET_MINIMAL_TOC)
7868 fixed_regs[RS6000_PIC_OFFSET_TABLE_REGNUM]
7869 = call_used_regs[RS6000_PIC_OFFSET_TABLE_REGNUM] = 1;
7870
7871 if (TARGET_SPE)
7872 {
7873 global_regs[SPEFSCR_REGNO] = 1;
7874 /* We used to use r14 as FIXED_SCRATCH to address SPE 64-bit
7875 registers in prologues and epilogues. We no longer use r14
7876 for FIXED_SCRATCH, but we're keeping r14 out of the allocation
7877 pool for link-compatibility with older versions of GCC. Once
7878 "old" code has died out, we can return r14 to the allocation
7879 pool. */
7880 fixed_regs[14]
7881 = call_used_regs[14]
7882 = call_really_used_regs[14] = 1;
7883 }
7884
7885 if (!TARGET_ALTIVEC && !TARGET_VSX)
7886 {
7887 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
7888 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
7889 call_really_used_regs[VRSAVE_REGNO] = 1;
7890 }
7891
7892 if (TARGET_ALTIVEC || TARGET_VSX)
7893 global_regs[VSCR_REGNO] = 1;
7894
7895 if (TARGET_ALTIVEC_ABI)
7896 {
7897 for (i = FIRST_ALTIVEC_REGNO; i < FIRST_ALTIVEC_REGNO + 20; ++i)
7898 call_used_regs[i] = call_really_used_regs[i] = 1;
7899
7900 /* AIX reserves VR20:31 in non-extended ABI mode. */
7901 if (TARGET_XCOFF)
7902 for (i = FIRST_ALTIVEC_REGNO + 20; i < FIRST_ALTIVEC_REGNO + 32; ++i)
7903 fixed_regs[i] = call_used_regs[i] = call_really_used_regs[i] = 1;
7904 }
7905 }
7906
7907 \f
7908 /* Output insns to set DEST equal to the constant SOURCE as a series of
7909 lis, ori and shl instructions and return TRUE. */
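/* For example (illustrative): SImode c = 0x12345678 becomes
lis tmp, 0x1234 ; ori dest, tmp, 0x5678. */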
7910
7911 bool
7912 rs6000_emit_set_const (rtx dest, rtx source)
7913 {
7914 enum machine_mode mode = GET_MODE (dest);
7915 rtx temp, set;
7916 rtx_insn *insn;
7917 HOST_WIDE_INT c;
7918
7919 gcc_checking_assert (CONST_INT_P (source));
7920 c = INTVAL (source);
7921 switch (mode)
7922 {
7923 case QImode:
7924 case HImode:
7925 emit_insn (gen_rtx_SET (VOIDmode, dest, source));
7926 return true;
7927
7928 case SImode:
7929 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (SImode);
7930
7931 emit_insn (gen_rtx_SET (VOIDmode, copy_rtx (temp),
7932 GEN_INT (c & ~(HOST_WIDE_INT) 0xffff)));
7933 emit_insn (gen_rtx_SET (VOIDmode, dest,
7934 gen_rtx_IOR (SImode, copy_rtx (temp),
7935 GEN_INT (c & 0xffff))));
7936 break;
7937
7938 case DImode:
7939 if (!TARGET_POWERPC64)
7940 {
7941 rtx hi, lo;
7942
7943 hi = operand_subword_force (copy_rtx (dest), WORDS_BIG_ENDIAN == 0,
7944 DImode);
7945 lo = operand_subword_force (dest, WORDS_BIG_ENDIAN != 0,
7946 DImode);
7947 emit_move_insn (hi, GEN_INT (c >> 32));
7948 c = ((c & 0xffffffff) ^ 0x80000000) - 0x80000000;
7949 emit_move_insn (lo, GEN_INT (c));
7950 }
7951 else
7952 rs6000_emit_set_long_const (dest, c);
7953 break;
7954
7955 default:
7956 gcc_unreachable ();
7957 }
7958
7959 insn = get_last_insn ();
7960 set = single_set (insn);
7961 if (! CONSTANT_P (SET_SRC (set)))
7962 set_unique_reg_note (insn, REG_EQUAL, GEN_INT (c));
7963
7964 return true;
7965 }
7966
7967 /* Subroutine of rs6000_emit_set_const, handling PowerPC64 DImode.
7968 Output insns to set DEST equal to the constant C as a series of
7969 lis, ori and shl instructions. */
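/* For example (a sketch): c = 0x123456789abcdef0 is built as
lis tmp, 0x1234 ; ori tmp, tmp, 0x5678 ; sldi tmp, tmp, 32 ;
oris tmp, tmp, 0x9abc ; ori dest, tmp, 0xdef0. */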
7970
7971 static void
7972 rs6000_emit_set_long_const (rtx dest, HOST_WIDE_INT c)
7973 {
7974 rtx temp;
7975 HOST_WIDE_INT ud1, ud2, ud3, ud4;
7976
7977 ud1 = c & 0xffff;
7978 c = c >> 16;
7979 ud2 = c & 0xffff;
7980 c = c >> 16;
7981 ud3 = c & 0xffff;
7982 c = c >> 16;
7983 ud4 = c & 0xffff;
7984
7985 if ((ud4 == 0xffff && ud3 == 0xffff && ud2 == 0xffff && (ud1 & 0x8000))
7986 || (ud4 == 0 && ud3 == 0 && ud2 == 0 && ! (ud1 & 0x8000)))
7987 emit_move_insn (dest, GEN_INT ((ud1 ^ 0x8000) - 0x8000));
7988
7989 else if ((ud4 == 0xffff && ud3 == 0xffff && (ud2 & 0x8000))
7990 || (ud4 == 0 && ud3 == 0 && ! (ud2 & 0x8000)))
7991 {
7992 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
7993
7994 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
7995 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
7996 if (ud1 != 0)
7997 emit_move_insn (dest,
7998 gen_rtx_IOR (DImode, copy_rtx (temp),
7999 GEN_INT (ud1)));
8000 }
8001 else if (ud3 == 0 && ud4 == 0)
8002 {
8003 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
8004
8005 gcc_assert (ud2 & 0x8000);
8006 emit_move_insn (copy_rtx (temp),
8007 GEN_INT (((ud2 << 16) ^ 0x80000000) - 0x80000000));
8008 if (ud1 != 0)
8009 emit_move_insn (copy_rtx (temp),
8010 gen_rtx_IOR (DImode, copy_rtx (temp),
8011 GEN_INT (ud1)));
8012 emit_move_insn (dest,
8013 gen_rtx_ZERO_EXTEND (DImode,
8014 gen_lowpart (SImode,
8015 copy_rtx (temp))));
8016 }
8017 else if ((ud4 == 0xffff && (ud3 & 0x8000))
8018 || (ud4 == 0 && ! (ud3 & 0x8000)))
8019 {
8020 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
8021
8022 emit_move_insn (copy_rtx (temp),
8023 GEN_INT (((ud3 << 16) ^ 0x80000000) - 0x80000000));
8024 if (ud2 != 0)
8025 emit_move_insn (copy_rtx (temp),
8026 gen_rtx_IOR (DImode, copy_rtx (temp),
8027 GEN_INT (ud2)));
8028 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
8029 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
8030 GEN_INT (16)));
8031 if (ud1 != 0)
8032 emit_move_insn (dest,
8033 gen_rtx_IOR (DImode, copy_rtx (temp),
8034 GEN_INT (ud1)));
8035 }
8036 else
8037 {
8038 temp = !can_create_pseudo_p () ? dest : gen_reg_rtx (DImode);
8039
8040 emit_move_insn (copy_rtx (temp),
8041 GEN_INT (((ud4 << 16) ^ 0x80000000) - 0x80000000));
8042 if (ud3 != 0)
8043 emit_move_insn (copy_rtx (temp),
8044 gen_rtx_IOR (DImode, copy_rtx (temp),
8045 GEN_INT (ud3)));
8046
8047 emit_move_insn (ud2 != 0 || ud1 != 0 ? copy_rtx (temp) : dest,
8048 gen_rtx_ASHIFT (DImode, copy_rtx (temp),
8049 GEN_INT (32)));
8050 if (ud2 != 0)
8051 emit_move_insn (ud1 != 0 ? copy_rtx (temp) : dest,
8052 gen_rtx_IOR (DImode, copy_rtx (temp),
8053 GEN_INT (ud2 << 16)));
8054 if (ud1 != 0)
8055 emit_move_insn (dest,
8056 gen_rtx_IOR (DImode, copy_rtx (temp),
8057 GEN_INT (ud1)));
8058 }
8059 }
8060
8061 /* Helper for the following. Get rid of [r+r] memory refs
8062 in cases where it won't work (TImode, TFmode, TDmode, PTImode). */
8063
8064 static void
8065 rs6000_eliminate_indexed_memrefs (rtx operands[2])
8066 {
8067 if (reload_in_progress)
8068 return;
8069
8070 if (GET_CODE (operands[0]) == MEM
8071 && GET_CODE (XEXP (operands[0], 0)) != REG
8072 && ! legitimate_constant_pool_address_p (XEXP (operands[0], 0),
8073 GET_MODE (operands[0]), false))
8074 operands[0]
8075 = replace_equiv_address (operands[0],
8076 copy_addr_to_reg (XEXP (operands[0], 0)));
8077
8078 if (GET_CODE (operands[1]) == MEM
8079 && GET_CODE (XEXP (operands[1], 0)) != REG
8080 && ! legitimate_constant_pool_address_p (XEXP (operands[1], 0),
8081 GET_MODE (operands[1]), false))
8082 operands[1]
8083 = replace_equiv_address (operands[1],
8084 copy_addr_to_reg (XEXP (operands[1], 0)));
8085 }
8086
8087 /* Generate a vector of constants to permute MODE for a little-endian
8088 storage operation by swapping the two halves of a vector. */
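/* For V4SImode, for example, this produces the selector { 2, 3, 0, 1 },
i.e. the two doubleword halves of the vector swapped. */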
8089 static rtvec
8090 rs6000_const_vec (enum machine_mode mode)
8091 {
8092 int i, subparts;
8093 rtvec v;
8094
8095 switch (mode)
8096 {
8097 case V1TImode:
8098 subparts = 1;
8099 break;
8100 case V2DFmode:
8101 case V2DImode:
8102 subparts = 2;
8103 break;
8104 case V4SFmode:
8105 case V4SImode:
8106 subparts = 4;
8107 break;
8108 case V8HImode:
8109 subparts = 8;
8110 break;
8111 case V16QImode:
8112 subparts = 16;
8113 break;
8114 default:
8115 gcc_unreachable ();
8116 }
8117
8118 v = rtvec_alloc (subparts);
8119
8120 for (i = 0; i < subparts / 2; ++i)
8121 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i + subparts / 2);
8122 for (i = subparts / 2; i < subparts; ++i)
8123 RTVEC_ELT (v, i) = gen_rtx_CONST_INT (DImode, i - subparts / 2);
8124
8125 return v;
8126 }
8127
8128 /* Generate a permute rtx that represents an lxvd2x, stxvd2x, or xxpermdi
8129 for a VSX load or store operation. */
8130 rtx
8131 rs6000_gen_le_vsx_permute (rtx source, enum machine_mode mode)
8132 {
8133 rtx par = gen_rtx_PARALLEL (VOIDmode, rs6000_const_vec (mode));
8134 return gen_rtx_VEC_SELECT (mode, source, par);
8135 }
8136
8137 /* Emit a little-endian load from vector memory location SOURCE to VSX
8138 register DEST in mode MODE. The load is done with two permuting
8139 insns that represent an lxvd2x and xxpermdi. */
8140 void
8141 rs6000_emit_le_vsx_load (rtx dest, rtx source, enum machine_mode mode)
8142 {
8143 rtx tmp, permute_mem, permute_reg;
8144
8145 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
8146 V1TImode). */
8147 if (mode == TImode || mode == V1TImode)
8148 {
8149 mode = V2DImode;
8150 dest = gen_lowpart (V2DImode, dest);
8151 source = adjust_address (source, V2DImode, 0);
8152 }
8153
8154 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (dest) : dest;
8155 permute_mem = rs6000_gen_le_vsx_permute (source, mode);
8156 permute_reg = rs6000_gen_le_vsx_permute (tmp, mode);
8157 emit_insn (gen_rtx_SET (VOIDmode, tmp, permute_mem));
8158 emit_insn (gen_rtx_SET (VOIDmode, dest, permute_reg));
8159 }
8160
8161 /* Emit a little-endian store to vector memory location DEST from VSX
8162 register SOURCE in mode MODE. The store is done with two permuting
8163 insns that represent an xxpermdi and an stxvd2x. */
8164 void
8165 rs6000_emit_le_vsx_store (rtx dest, rtx source, enum machine_mode mode)
8166 {
8167 rtx tmp, permute_src, permute_tmp;
8168
8169 /* Use V2DImode to do swaps of types with 128-bit scalar parts (TImode,
8170 V1TImode). */
8171 if (mode == TImode || mode == V1TImode)
8172 {
8173 mode = V2DImode;
8174 dest = adjust_address (dest, V2DImode, 0);
8175 source = gen_lowpart (V2DImode, source);
8176 }
8177
8178 tmp = can_create_pseudo_p () ? gen_reg_rtx_and_attrs (source) : source;
8179 permute_src = rs6000_gen_le_vsx_permute (source, mode);
8180 permute_tmp = rs6000_gen_le_vsx_permute (tmp, mode);
8181 emit_insn (gen_rtx_SET (VOIDmode, tmp, permute_src));
8182 emit_insn (gen_rtx_SET (VOIDmode, dest, permute_tmp));
8183 }
8184
8185 /* Emit a sequence representing a little-endian VSX load or store,
8186 moving data from SOURCE to DEST in mode MODE. This is done
8187 separately from rs6000_emit_move to ensure it is called only
8188 during expand. LE VSX loads and stores introduced later are
8189 handled with a split. The expand-time RTL generation allows
8190 us to optimize away redundant pairs of register-permutes. */
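/* For instance, a load whose result immediately feeds a store yields
two back-to-back register permutes that later passes can cancel --
the redundant pairs mentioned above (a sketch of the intent). */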
8191 void
8192 rs6000_emit_le_vsx_move (rtx dest, rtx source, enum machine_mode mode)
8193 {
8194 gcc_assert (!BYTES_BIG_ENDIAN
8195 && VECTOR_MEM_VSX_P (mode)
8196 && !gpr_or_gpr_p (dest, source)
8197 && (MEM_P (source) ^ MEM_P (dest)));
8198
8199 if (MEM_P (source))
8200 {
8201 gcc_assert (REG_P (dest) || GET_CODE (dest) == SUBREG);
8202 rs6000_emit_le_vsx_load (dest, source, mode);
8203 }
8204 else
8205 {
8206 if (!REG_P (source))
8207 source = force_reg (mode, source);
8208 rs6000_emit_le_vsx_store (dest, source, mode);
8209 }
8210 }
8211
8212 /* Emit a move from SOURCE to DEST in mode MODE. */
8213 void
8214 rs6000_emit_move (rtx dest, rtx source, enum machine_mode mode)
8215 {
8216 rtx operands[2];
8217 operands[0] = dest;
8218 operands[1] = source;
8219
8220 if (TARGET_DEBUG_ADDR)
8221 {
8222 fprintf (stderr,
8223 "\nrs6000_emit_move: mode = %s, reload_in_progress = %d, "
8224 "reload_completed = %d, can_create_pseudos = %d.\ndest:\n",
8225 GET_MODE_NAME (mode),
8226 reload_in_progress,
8227 reload_completed,
8228 can_create_pseudo_p ());
8229 debug_rtx (dest);
8230 fprintf (stderr, "source:\n");
8231 debug_rtx (source);
8232 }
8233
8234 /* Sanity checks. Check that we get CONST_DOUBLE only when we should. */
8235 if (CONST_WIDE_INT_P (operands[1])
8236 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
8237 {
8238 /* This should be fixed with the introduction of CONST_WIDE_INT. */
8239 gcc_unreachable ();
8240 }
8241
8242 /* Check if GCC is setting up a block move that will end up using FP
8243 registers as temporaries. We must make sure this is acceptable. */
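/* If so (a sketch of the fallback below), the DImode copy is done as
two SImode copies at offsets 0 and 4 instead of a single doubleword
access. */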
8244 if (GET_CODE (operands[0]) == MEM
8245 && GET_CODE (operands[1]) == MEM
8246 && mode == DImode
8247 && (SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[0]))
8248 || SLOW_UNALIGNED_ACCESS (DImode, MEM_ALIGN (operands[1])))
8249 && ! (SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[0]) > 32
8250 ? 32 : MEM_ALIGN (operands[0])))
8251 || SLOW_UNALIGNED_ACCESS (SImode, (MEM_ALIGN (operands[1]) > 32
8252 ? 32
8253 : MEM_ALIGN (operands[1]))))
8254 && ! MEM_VOLATILE_P (operands [0])
8255 && ! MEM_VOLATILE_P (operands [1]))
8256 {
8257 emit_move_insn (adjust_address (operands[0], SImode, 0),
8258 adjust_address (operands[1], SImode, 0));
8259 emit_move_insn (adjust_address (copy_rtx (operands[0]), SImode, 4),
8260 adjust_address (copy_rtx (operands[1]), SImode, 4));
8261 return;
8262 }
8263
8264 if (can_create_pseudo_p () && GET_CODE (operands[0]) == MEM
8265 && !gpc_reg_operand (operands[1], mode))
8266 operands[1] = force_reg (mode, operands[1]);
8267
8268 /* Recognize the case where operand[1] is a reference to thread-local
8269 data and load its address to a register. */
8270 if (tls_referenced_p (operands[1]))
8271 {
8272 enum tls_model model;
8273 rtx tmp = operands[1];
8274 rtx addend = NULL;
8275
8276 if (GET_CODE (tmp) == CONST && GET_CODE (XEXP (tmp, 0)) == PLUS)
8277 {
8278 addend = XEXP (XEXP (tmp, 0), 1);
8279 tmp = XEXP (XEXP (tmp, 0), 0);
8280 }
8281
8282 gcc_assert (GET_CODE (tmp) == SYMBOL_REF);
8283 model = SYMBOL_REF_TLS_MODEL (tmp);
8284 gcc_assert (model != 0);
8285
8286 tmp = rs6000_legitimize_tls_address (tmp, model);
8287 if (addend)
8288 {
8289 tmp = gen_rtx_PLUS (mode, tmp, addend);
8290 tmp = force_operand (tmp, operands[0]);
8291 }
8292 operands[1] = tmp;
8293 }
8294
8295 /* Handle the case where reload calls us with an invalid address. */
8296 if (reload_in_progress && mode == Pmode
8297 && (! general_operand (operands[1], mode)
8298 || ! nonimmediate_operand (operands[0], mode)))
8299 goto emit_set;
8300
8301 /* 128-bit constant floating-point values on Darwin should really be
8302 loaded as two parts. */
8303 if (!TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
8304 && mode == TFmode && GET_CODE (operands[1]) == CONST_DOUBLE)
8305 {
8306 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode, 0),
8307 simplify_gen_subreg (DFmode, operands[1], mode, 0),
8308 DFmode);
8309 rs6000_emit_move (simplify_gen_subreg (DFmode, operands[0], mode,
8310 GET_MODE_SIZE (DFmode)),
8311 simplify_gen_subreg (DFmode, operands[1], mode,
8312 GET_MODE_SIZE (DFmode)),
8313 DFmode);
8314 return;
8315 }
8316
8317 if (reload_in_progress && cfun->machine->sdmode_stack_slot != NULL_RTX)
8318 cfun->machine->sdmode_stack_slot =
8319 eliminate_regs (cfun->machine->sdmode_stack_slot, VOIDmode, NULL_RTX);
8320
8321
8322 /* Transform (p0:DD, (SUBREG:DD p1:SD)) to ((SUBREG:SD p0:DD),
8323 p1:SD) if p1 is not of floating-point class and p0 is spilled,
8324 since we have no analogous movsd_store for this. */
8325 if (lra_in_progress && mode == DDmode
8326 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
8327 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
8328 && GET_CODE (operands[1]) == SUBREG && REG_P (SUBREG_REG (operands[1]))
8329 && GET_MODE (SUBREG_REG (operands[1])) == SDmode)
8330 {
8331 enum reg_class cl;
8332 int regno = REGNO (SUBREG_REG (operands[1]));
8333
8334 if (regno >= FIRST_PSEUDO_REGISTER)
8335 {
8336 cl = reg_preferred_class (regno);
8337 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][1];
8338 }
8339 if (regno >= 0 && ! FP_REGNO_P (regno))
8340 {
8341 mode = SDmode;
8342 operands[0] = gen_lowpart_SUBREG (SDmode, operands[0]);
8343 operands[1] = SUBREG_REG (operands[1]);
8344 }
8345 }
8346 if (lra_in_progress
8347 && mode == SDmode
8348 && REG_P (operands[0]) && REGNO (operands[0]) >= FIRST_PSEUDO_REGISTER
8349 && reg_preferred_class (REGNO (operands[0])) == NO_REGS
8350 && (REG_P (operands[1])
8351 || (GET_CODE (operands[1]) == SUBREG
8352 && REG_P (SUBREG_REG (operands[1])))))
8353 {
8354 int regno = REGNO (GET_CODE (operands[1]) == SUBREG
8355 ? SUBREG_REG (operands[1]) : operands[1]);
8356 enum reg_class cl;
8357
8358 if (regno >= FIRST_PSEUDO_REGISTER)
8359 {
8360 cl = reg_preferred_class (regno);
8361 gcc_assert (cl != NO_REGS);
8362 regno = ira_class_hard_regs[cl][0];
8363 }
8364 if (FP_REGNO_P (regno))
8365 {
8366 if (GET_MODE (operands[0]) != DDmode)
8367 operands[0] = gen_rtx_SUBREG (DDmode, operands[0], 0);
8368 emit_insn (gen_movsd_store (operands[0], operands[1]));
8369 }
8370 else if (INT_REGNO_P (regno))
8371 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
8372 else
8373 gcc_unreachable ();
8374 return;
8375 }
8376 /* Transform ((SUBREG:DD p0:SD), p1:DD) to (p0:SD, (SUBREG:SD
8377 p1:DD)) if p0 is not of floating-point class and p1 is spilled,
8378 since we have no analogous movsd_load for this. */
8379 if (lra_in_progress && mode == DDmode
8380 && GET_CODE (operands[0]) == SUBREG && REG_P (SUBREG_REG (operands[0]))
8381 && GET_MODE (SUBREG_REG (operands[0])) == SDmode
8382 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
8383 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
8384 {
8385 enum reg_class cl;
8386 int regno = REGNO (SUBREG_REG (operands[0]));
8387
8388 if (regno >= FIRST_PSEUDO_REGISTER)
8389 {
8390 cl = reg_preferred_class (regno);
8391 regno = cl == NO_REGS ? -1 : ira_class_hard_regs[cl][0];
8392 }
8393 if (regno >= 0 && ! FP_REGNO_P (regno))
8394 {
8395 mode = SDmode;
8396 operands[0] = SUBREG_REG (operands[0]);
8397 operands[1] = gen_lowpart_SUBREG (SDmode, operands[1]);
8398 }
8399 }
8400 if (lra_in_progress
8401 && mode == SDmode
8402 && (REG_P (operands[0])
8403 || (GET_CODE (operands[0]) == SUBREG
8404 && REG_P (SUBREG_REG (operands[0]))))
8405 && REG_P (operands[1]) && REGNO (operands[1]) >= FIRST_PSEUDO_REGISTER
8406 && reg_preferred_class (REGNO (operands[1])) == NO_REGS)
8407 {
8408 int regno = REGNO (GET_CODE (operands[0]) == SUBREG
8409 ? SUBREG_REG (operands[0]) : operands[0]);
8410 enum reg_class cl;
8411
8412 if (regno >= FIRST_PSEUDO_REGISTER)
8413 {
8414 cl = reg_preferred_class (regno);
8415 gcc_assert (cl != NO_REGS);
8416 regno = ira_class_hard_regs[cl][0];
8417 }
8418 if (FP_REGNO_P (regno))
8419 {
8420 if (GET_MODE (operands[1]) != DDmode)
8421 operands[1] = gen_rtx_SUBREG (DDmode, operands[1], 0);
8422 emit_insn (gen_movsd_load (operands[0], operands[1]));
8423 }
8424 else if (INT_REGNO_P (regno))
8425 emit_insn (gen_movsd_hardfloat (operands[0], operands[1]));
8426 else
8427 gcc_unreachable ();
8428 return;
8429 }
8430
8431 if (reload_in_progress
8432 && mode == SDmode
8433 && cfun->machine->sdmode_stack_slot != NULL_RTX
8434 && MEM_P (operands[0])
8435 && rtx_equal_p (operands[0], cfun->machine->sdmode_stack_slot)
8436 && REG_P (operands[1]))
8437 {
8438 if (FP_REGNO_P (REGNO (operands[1])))
8439 {
8440 rtx mem = adjust_address_nv (operands[0], DDmode, 0);
8441 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8442 emit_insn (gen_movsd_store (mem, operands[1]));
8443 }
8444 else if (INT_REGNO_P (REGNO (operands[1])))
8445 {
8446 rtx mem = operands[0];
8447 if (BYTES_BIG_ENDIAN)
8448 mem = adjust_address_nv (mem, mode, 4);
8449 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8450 emit_insn (gen_movsd_hardfloat (mem, operands[1]));
8451 }
8452 else
8453 gcc_unreachable ();
8454 return;
8455 }
8456 if (reload_in_progress
8457 && mode == SDmode
8458 && REG_P (operands[0])
8459 && MEM_P (operands[1])
8460 && cfun->machine->sdmode_stack_slot != NULL_RTX
8461 && rtx_equal_p (operands[1], cfun->machine->sdmode_stack_slot))
8462 {
8463 if (FP_REGNO_P (REGNO (operands[0])))
8464 {
8465 rtx mem = adjust_address_nv (operands[1], DDmode, 0);
8466 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8467 emit_insn (gen_movsd_load (operands[0], mem));
8468 }
8469 else if (INT_REGNO_P (REGNO (operands[0])))
8470 {
8471 rtx mem = operands[1];
8472 if (BYTES_BIG_ENDIAN)
8473 mem = adjust_address_nv (mem, mode, 4);
8474 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
8475 emit_insn (gen_movsd_hardfloat (operands[0], mem));
8476 }
8477 else
8478 gcc_unreachable ();
8479 return;
8480 }
8481
8482 /* FIXME: In the long term, this switch statement should go away
8483 and be replaced by a sequence of tests based on things like
8484 mode == Pmode. */
8485 switch (mode)
8486 {
8487 case HImode:
8488 case QImode:
8489 if (CONSTANT_P (operands[1])
8490 && GET_CODE (operands[1]) != CONST_INT)
8491 operands[1] = force_const_mem (mode, operands[1]);
8492 break;
8493
8494 case TFmode:
8495 case TDmode:
8496 rs6000_eliminate_indexed_memrefs (operands);
8497 /* fall through */
8498
8499 case DFmode:
8500 case DDmode:
8501 case SFmode:
8502 case SDmode:
8503 if (CONSTANT_P (operands[1])
8504 && ! easy_fp_constant (operands[1], mode))
8505 operands[1] = force_const_mem (mode, operands[1]);
8506 break;
8507
8508 case V16QImode:
8509 case V8HImode:
8510 case V4SFmode:
8511 case V4SImode:
8512 case V4HImode:
8513 case V2SFmode:
8514 case V2SImode:
8515 case V1DImode:
8516 case V2DFmode:
8517 case V2DImode:
8518 case V1TImode:
8519 if (CONSTANT_P (operands[1])
8520 && !easy_vector_constant (operands[1], mode))
8521 operands[1] = force_const_mem (mode, operands[1]);
8522 break;
8523
8524 case SImode:
8525 case DImode:
8526 /* Use default pattern for the address of ELF small data. */
8527 if (TARGET_ELF
8528 && mode == Pmode
8529 && DEFAULT_ABI == ABI_V4
8530 && (GET_CODE (operands[1]) == SYMBOL_REF
8531 || GET_CODE (operands[1]) == CONST)
8532 && small_data_operand (operands[1], mode))
8533 {
8534 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
8535 return;
8536 }
8537
8538 if (DEFAULT_ABI == ABI_V4
8539 && mode == Pmode && mode == SImode
8540 && flag_pic == 1 && got_operand (operands[1], mode))
8541 {
8542 emit_insn (gen_movsi_got (operands[0], operands[1]));
8543 return;
8544 }
8545
8546 if ((TARGET_ELF || DEFAULT_ABI == ABI_DARWIN)
8547 && TARGET_NO_TOC
8548 && ! flag_pic
8549 && mode == Pmode
8550 && CONSTANT_P (operands[1])
8551 && GET_CODE (operands[1]) != HIGH
8552 && GET_CODE (operands[1]) != CONST_INT)
8553 {
8554 rtx target = (!can_create_pseudo_p ()
8555 ? operands[0]
8556 : gen_reg_rtx (mode));
8557
8558 /* If this is a function address on -mcall-aixdesc,
8559 convert it to the address of the descriptor. */
8560 if (DEFAULT_ABI == ABI_AIX
8561 && GET_CODE (operands[1]) == SYMBOL_REF
8562 && XSTR (operands[1], 0)[0] == '.')
8563 {
8564 const char *name = XSTR (operands[1], 0);
8565 rtx new_ref;
8566 while (*name == '.')
8567 name++;
8568 new_ref = gen_rtx_SYMBOL_REF (Pmode, name);
8569 CONSTANT_POOL_ADDRESS_P (new_ref)
8570 = CONSTANT_POOL_ADDRESS_P (operands[1]);
8571 SYMBOL_REF_FLAGS (new_ref) = SYMBOL_REF_FLAGS (operands[1]);
8572 SYMBOL_REF_USED (new_ref) = SYMBOL_REF_USED (operands[1]);
8573 SYMBOL_REF_DATA (new_ref) = SYMBOL_REF_DATA (operands[1]);
8574 operands[1] = new_ref;
8575 }
8576
8577 if (DEFAULT_ABI == ABI_DARWIN)
8578 {
8579 #if TARGET_MACHO
8580 if (MACHO_DYNAMIC_NO_PIC_P)
8581 {
8582 /* Take care of any required data indirection. */
8583 operands[1] = rs6000_machopic_legitimize_pic_address (
8584 operands[1], mode, operands[0]);
8585 if (operands[0] != operands[1])
8586 emit_insn (gen_rtx_SET (VOIDmode,
8587 operands[0], operands[1]));
8588 return;
8589 }
8590 #endif
8591 emit_insn (gen_macho_high (target, operands[1]));
8592 emit_insn (gen_macho_low (operands[0], target, operands[1]));
8593 return;
8594 }
8595
8596 emit_insn (gen_elf_high (target, operands[1]));
8597 emit_insn (gen_elf_low (operands[0], target, operands[1]));
8598 return;
8599 }
8600
8601 /* If this is a SYMBOL_REF that refers to a constant pool entry,
8602 and we have put it in the TOC, we just need to make a TOC-relative
8603 reference to it. */
8604 if (TARGET_TOC
8605 && GET_CODE (operands[1]) == SYMBOL_REF
8606 && use_toc_relative_ref (operands[1]))
8607 operands[1] = create_TOC_reference (operands[1], operands[0]);
8608 else if (mode == Pmode
8609 && CONSTANT_P (operands[1])
8610 && GET_CODE (operands[1]) != HIGH
8611 && ((GET_CODE (operands[1]) != CONST_INT
8612 && ! easy_fp_constant (operands[1], mode))
8613 || (GET_CODE (operands[1]) == CONST_INT
8614 && (num_insns_constant (operands[1], mode)
8615 > (TARGET_CMODEL != CMODEL_SMALL ? 3 : 2)))
8616 || (GET_CODE (operands[0]) == REG
8617 && FP_REGNO_P (REGNO (operands[0]))))
8618 && !toc_relative_expr_p (operands[1], false)
8619 && (TARGET_CMODEL == CMODEL_SMALL
8620 || can_create_pseudo_p ()
8621 || (REG_P (operands[0])
8622 && INT_REG_OK_FOR_BASE_P (operands[0], true))))
8623 {
8624
8625 #if TARGET_MACHO
8626 /* Darwin uses a special PIC legitimizer. */
8627 if (DEFAULT_ABI == ABI_DARWIN && MACHOPIC_INDIRECT)
8628 {
8629 operands[1] =
8630 rs6000_machopic_legitimize_pic_address (operands[1], mode,
8631 operands[0]);
8632 if (operands[0] != operands[1])
8633 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
8634 return;
8635 }
8636 #endif
8637
8638 /* If we are to limit the number of things we put in the TOC and
8639 this is a symbol plus a constant we can add in one insn,
8640 just put the symbol in the TOC and add the constant. Don't do
8641 this if reload is in progress. */
8642 if (GET_CODE (operands[1]) == CONST
8643 && TARGET_NO_SUM_IN_TOC && ! reload_in_progress
8644 && GET_CODE (XEXP (operands[1], 0)) == PLUS
8645 && add_operand (XEXP (XEXP (operands[1], 0), 1), mode)
8646 && (GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == LABEL_REF
8647 || GET_CODE (XEXP (XEXP (operands[1], 0), 0)) == SYMBOL_REF)
8648 && ! side_effects_p (operands[0]))
8649 {
8650 rtx sym =
8651 force_const_mem (mode, XEXP (XEXP (operands[1], 0), 0));
8652 rtx other = XEXP (XEXP (operands[1], 0), 1);
8653
8654 sym = force_reg (mode, sym);
8655 emit_insn (gen_add3_insn (operands[0], sym, other));
8656 return;
8657 }
8658
8659 operands[1] = force_const_mem (mode, operands[1]);
8660
8661 if (TARGET_TOC
8662 && GET_CODE (XEXP (operands[1], 0)) == SYMBOL_REF
8663 && constant_pool_expr_p (XEXP (operands[1], 0))
8664 && ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (
8665 get_pool_constant (XEXP (operands[1], 0)),
8666 get_pool_mode (XEXP (operands[1], 0))))
8667 {
8668 rtx tocref = create_TOC_reference (XEXP (operands[1], 0),
8669 operands[0]);
8670 operands[1] = gen_const_mem (mode, tocref);
8671 set_mem_alias_set (operands[1], get_TOC_alias_set ());
8672 }
8673 }
8674 break;
8675
8676 case TImode:
8677 if (!VECTOR_MEM_VSX_P (TImode))
8678 rs6000_eliminate_indexed_memrefs (operands);
8679 break;
8680
8681 case PTImode:
8682 rs6000_eliminate_indexed_memrefs (operands);
8683 break;
8684
8685 default:
8686 fatal_insn ("bad move", gen_rtx_SET (VOIDmode, dest, source));
8687 }
8688
8689 /* Above, we may have called force_const_mem which may have returned
8690 an invalid address. If we can, fix this up; otherwise, reload will
8691 have to deal with it. */
8692 if (GET_CODE (operands[1]) == MEM && ! reload_in_progress)
8693 operands[1] = validize_mem (operands[1]);
8694
8695 emit_set:
8696 emit_insn (gen_rtx_SET (VOIDmode, operands[0], operands[1]));
8697 }
8698
8699 /* Return true if a structure, union or array containing FIELD should be
8700 accessed using `BLKMODE'.
8701
8702 For the SPE, simd types are V2SI, and gcc can be tempted to put the
8703 entire thing in a DI and use subregs to access the internals.
8704 store_bit_field() will force (subreg:DI (reg:V2SI x))'s to the
8705 back-end. Because a single GPR can hold a V2SI, but not a DI, the
8706 best thing to do is set structs to BLKmode and avoid Severe Tire
8707 Damage.
8708
8709 On e500 v2, DF and DI modes suffer from the same anomaly: DF can
8710 fit in one GPR, whereas DI still needs two. */
8711
8712 static bool
8713 rs6000_member_type_forces_blk (const_tree field, enum machine_mode mode)
8714 {
8715 return ((TARGET_SPE && TREE_CODE (TREE_TYPE (field)) == VECTOR_TYPE)
8716 || (TARGET_E500_DOUBLE && mode == DFmode));
8717 }
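
/* For illustration (a sketch, not part of the original sources): with
   -mspe, a V2SI field such as

     typedef int v2si __attribute__ ((vector_size (8)));
     struct s { v2si v; };

   forces the struct to BLKmode, and with TARGET_E500_DOUBLE a DFmode
   (double) field does the same, so no (subreg:DI (reg:V2SI ...)) is
   ever formed.  */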
8718 \f
8719 /* Nonzero if we can use a floating-point register to pass this arg. */
8720 #define USE_FP_FOR_ARG_P(CUM,MODE) \
8721 (SCALAR_FLOAT_MODE_P (MODE) \
8722 && (CUM)->fregno <= FP_ARG_MAX_REG \
8723 && TARGET_HARD_FLOAT && TARGET_FPRS)
8724
8725 /* Nonzero if we can use an AltiVec register to pass this arg. */
8726 #define USE_ALTIVEC_FOR_ARG_P(CUM,MODE,NAMED) \
8727 (ALTIVEC_OR_VSX_VECTOR_MODE (MODE) \
8728 && (CUM)->vregno <= ALTIVEC_ARG_MAX_REG \
8729 && TARGET_ALTIVEC_ABI \
8730 && (NAMED))
8731
8732 /* Walk down the type tree of TYPE counting consecutive base elements.
8733 If *MODEP is VOIDmode, then set it to the first valid floating point
8734 or vector type. If a non-floating point or vector type is found, or
8735 if a floating point or vector type that doesn't match a non-VOIDmode
8736 *MODEP is found, then return -1, otherwise return the count in the
8737 sub-tree. */
8738
8739 static int
8740 rs6000_aggregate_candidate (const_tree type, enum machine_mode *modep)
8741 {
8742 enum machine_mode mode;
8743 HOST_WIDE_INT size;
8744
8745 switch (TREE_CODE (type))
8746 {
8747 case REAL_TYPE:
8748 mode = TYPE_MODE (type);
8749 if (!SCALAR_FLOAT_MODE_P (mode))
8750 return -1;
8751
8752 if (*modep == VOIDmode)
8753 *modep = mode;
8754
8755 if (*modep == mode)
8756 return 1;
8757
8758 break;
8759
8760 case COMPLEX_TYPE:
8761 mode = TYPE_MODE (TREE_TYPE (type));
8762 if (!SCALAR_FLOAT_MODE_P (mode))
8763 return -1;
8764
8765 if (*modep == VOIDmode)
8766 *modep = mode;
8767
8768 if (*modep == mode)
8769 return 2;
8770
8771 break;
8772
8773 case VECTOR_TYPE:
8774 if (!TARGET_ALTIVEC_ABI || !TARGET_ALTIVEC)
8775 return -1;
8776
8777 /* Use V4SImode as representative of all 128-bit vector types. */
8778 size = int_size_in_bytes (type);
8779 switch (size)
8780 {
8781 case 16:
8782 mode = V4SImode;
8783 break;
8784 default:
8785 return -1;
8786 }
8787
8788 if (*modep == VOIDmode)
8789 *modep = mode;
8790
8791 /* Vector modes are considered to be opaque: two vectors are
8792 equivalent for the purposes of being homogeneous aggregates
8793 if they are the same size. */
8794 if (*modep == mode)
8795 return 1;
8796
8797 break;
8798
8799 case ARRAY_TYPE:
8800 {
8801 int count;
8802 tree index = TYPE_DOMAIN (type);
8803
8804 /* Can't handle incomplete types or sizes that are not
8805 fixed. */
8806 if (!COMPLETE_TYPE_P (type)
8807 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
8808 return -1;
8809
8810 count = rs6000_aggregate_candidate (TREE_TYPE (type), modep);
8811 if (count == -1
8812 || !index
8813 || !TYPE_MAX_VALUE (index)
8814 || !tree_fits_uhwi_p (TYPE_MAX_VALUE (index))
8815 || !TYPE_MIN_VALUE (index)
8816 || !tree_fits_uhwi_p (TYPE_MIN_VALUE (index))
8817 || count < 0)
8818 return -1;
8819
8820 count *= (1 + tree_to_uhwi (TYPE_MAX_VALUE (index))
8821 - tree_to_uhwi (TYPE_MIN_VALUE (index)));
8822
8823 /* There must be no padding. */
8824 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
8825 return -1;
8826
8827 return count;
8828 }
8829
8830 case RECORD_TYPE:
8831 {
8832 int count = 0;
8833 int sub_count;
8834 tree field;
8835
8836 /* Can't handle incomplete types or sizes that are not
8837 fixed. */
8838 if (!COMPLETE_TYPE_P (type)
8839 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
8840 return -1;
8841
8842 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
8843 {
8844 if (TREE_CODE (field) != FIELD_DECL)
8845 continue;
8846
8847 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
8848 if (sub_count < 0)
8849 return -1;
8850 count += sub_count;
8851 }
8852
8853 /* There must be no padding. */
8854 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
8855 return -1;
8856
8857 return count;
8858 }
8859
8860 case UNION_TYPE:
8861 case QUAL_UNION_TYPE:
8862 {
8863 /* These aren't very interesting except in a degenerate case. */
8864 int count = 0;
8865 int sub_count;
8866 tree field;
8867
8868 /* Can't handle incomplete types or sizes that are not
8869 fixed. */
8870 if (!COMPLETE_TYPE_P (type)
8871 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
8872 return -1;
8873
8874 for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
8875 {
8876 if (TREE_CODE (field) != FIELD_DECL)
8877 continue;
8878
8879 sub_count = rs6000_aggregate_candidate (TREE_TYPE (field), modep);
8880 if (sub_count < 0)
8881 return -1;
8882 count = count > sub_count ? count : sub_count;
8883 }
8884
8885 /* There must be no padding. */
8886 if (wi::ne_p (TYPE_SIZE (type), count * GET_MODE_BITSIZE (*modep)))
8887 return -1;
8888
8889 return count;
8890 }
8891
8892 default:
8893 break;
8894 }
8895
8896 return -1;
8897 }
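
/* Worked example for the walk above (illustrative only):

     struct hfa { double a; double b[2]; };

   The REAL_TYPE field sets *modep to DFmode and counts 1; the
   ARRAY_TYPE field counts 2 more, so the RECORD_TYPE case returns 3.
   Replacing a member with a float would instead return -1, because
   SFmode does not match the previously recorded DFmode.  */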
8898
8899 /* If an argument, whose type is described by TYPE and MODE, is a homogeneous
8900 float or vector aggregate that shall be passed in FP/vector registers
8901 according to the ELFv2 ABI, return the homogeneous element mode in
8902 *ELT_MODE and the number of elements in *N_ELTS, and return TRUE.
8903
8904 Otherwise, set *ELT_MODE to MODE and *N_ELTS to 1, and return FALSE. */
8905
8906 static bool
8907 rs6000_discover_homogeneous_aggregate (enum machine_mode mode, const_tree type,
8908 enum machine_mode *elt_mode,
8909 int *n_elts)
8910 {
8911 /* Note that we do not accept complex types at the top level as
8912 homogeneous aggregates; these types are handled via the
8913 targetm.calls.split_complex_arg mechanism. Complex types
8914 can be elements of homogeneous aggregates, however. */
8915 if (DEFAULT_ABI == ABI_ELFv2 && type && AGGREGATE_TYPE_P (type))
8916 {
8917 enum machine_mode field_mode = VOIDmode;
8918 int field_count = rs6000_aggregate_candidate (type, &field_mode);
8919
8920 if (field_count > 0)
8921 {
8922 int n_regs = (SCALAR_FLOAT_MODE_P (field_mode) ?
8923 (GET_MODE_SIZE (field_mode) + 7) >> 3 : 1);
8924
8925 /* The ELFv2 ABI allows homogeneous aggregates to occupy
8926 up to AGGR_ARG_NUM_REG registers. */
8927 if (field_count * n_regs <= AGGR_ARG_NUM_REG)
8928 {
8929 if (elt_mode)
8930 *elt_mode = field_mode;
8931 if (n_elts)
8932 *n_elts = field_count;
8933 return true;
8934 }
8935 }
8936 }
8937
8938 if (elt_mode)
8939 *elt_mode = mode;
8940 if (n_elts)
8941 *n_elts = 1;
8942 return false;
8943 }
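
/* For example (assuming AGGR_ARG_NUM_REG is 8, as under ELFv2):

     struct v { double d[8]; };   // 8 fields x 1 FPR each: an HFA
     struct w { double d[9]; };   // needs 9 FPRs: not an HFA

   The first returns true with *elt_mode = DFmode and *n_elts = 8;
   the second falls through, returning false with *elt_mode set to
   the aggregate's own mode and *n_elts = 1.  */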
8944
8945 /* Return a nonzero value to indicate that the function value is returned in
8946 memory, just as large structures are always returned. TYPE will be
8947 the data type of the value, and FNTYPE will be the type of the
8948 function doing the returning, or @code{NULL} for libcalls.
8949
8950 The AIX ABI for the RS/6000 specifies that all structures are
8951 returned in memory. The Darwin ABI does the same.
8952
8953 For the Darwin 64 Bit ABI, a function result can be returned in
8954 registers or in memory, depending on the size of the return data
8955 type. If it is returned in registers, the value occupies the same
8956 registers as it would if it were the first and only function
8957 argument. Otherwise, the function places its result in memory at
8958 the location pointed to by GPR3.
8959
8960 The SVR4 ABI specifies that structures <= 8 bytes are returned in r3/r4,
8961 but a draft put them in memory, and GCC used to implement the draft
8962 instead of the final standard. Therefore, aix_struct_return
8963 controls this instead of DEFAULT_ABI; V.4 targets needing backward
8964 compatibility can change DRAFT_V4_STRUCT_RET to override the
8965 default, and -m switches get the final word. See
8966 rs6000_option_override_internal for more details.
8967
8968 The PPC32 SVR4 ABI uses IEEE double extended for long double, if 128-bit
8969 long double support is enabled. These values are returned in memory.
8970
8971 int_size_in_bytes returns -1 for variable size objects, which go in
8972 memory always. The cast to unsigned makes -1 > 8. */
8973
8974 static bool
8975 rs6000_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
8976 {
8977 /* For the Darwin64 ABI, test if we can fit the return value in regs. */
8978 if (TARGET_MACHO
8979 && rs6000_darwin64_abi
8980 && TREE_CODE (type) == RECORD_TYPE
8981 && int_size_in_bytes (type) > 0)
8982 {
8983 CUMULATIVE_ARGS valcum;
8984 rtx valret;
8985
8986 valcum.words = 0;
8987 valcum.fregno = FP_ARG_MIN_REG;
8988 valcum.vregno = ALTIVEC_ARG_MIN_REG;
8989 /* Do a trial code generation as if this were going to be passed
8990 as an argument; if any part goes in memory, we return NULL. */
8991 valret = rs6000_darwin64_record_arg (&valcum, type, true, true);
8992 if (valret)
8993 return false;
8994 /* Otherwise fall through to more conventional ABI rules. */
8995 }
8996
8997 /* The ELFv2 ABI returns homogeneous FP/vector aggregates in registers. */
8998 if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (type), type,
8999 NULL, NULL))
9000 return false;
9001
9002 /* The ELFv2 ABI returns aggregates of up to 16 bytes in registers. */
9003 if (DEFAULT_ABI == ABI_ELFv2 && AGGREGATE_TYPE_P (type)
9004 && (unsigned HOST_WIDE_INT) int_size_in_bytes (type) <= 16)
9005 return false;
9006
9007 if (AGGREGATE_TYPE_P (type)
9008 && (aix_struct_return
9009 || (unsigned HOST_WIDE_INT) int_size_in_bytes (type) > 8))
9010 return true;
9011
9012 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
9013 modes only exist for GCC vector types if -maltivec. */
9014 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI
9015 && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
9016 return false;
9017
9018 /* Return synthetic vectors in memory. */
9019 if (TREE_CODE (type) == VECTOR_TYPE
9020 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
9021 {
9022 static bool warned_for_return_big_vectors = false;
9023 if (!warned_for_return_big_vectors)
9024 {
9025 warning (0, "GCC vector returned by reference: "
9026 "non-standard ABI extension with no compatibility guarantee");
9027 warned_for_return_big_vectors = true;
9028 }
9029 return true;
9030 }
9031
9032 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && TYPE_MODE (type) == TFmode)
9033 return true;
9034
9035 return false;
9036 }
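
/* For example, under the ELFv2 ABI:

     struct p { double x, y; };   // homogeneous FP: returned in FPRs
     struct q { char c[16]; };    // 16 bytes: returned in GPRs
     struct r { char c[17]; };    // 17 bytes: returned in memory

   whereas with aix_struct_return set (AIX, ELFv1) all three would be
   returned in memory.  The register classes above are illustrative;
   the precise registers are chosen by the function-value code.  */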
9037
9038 /* Specify whether values returned in registers should be at the most
9039 significant end of a register. We want aggregates returned by
9040 value to match the way aggregates are passed to functions. */
9041
9042 static bool
9043 rs6000_return_in_msb (const_tree valtype)
9044 {
9045 return (DEFAULT_ABI == ABI_ELFv2
9046 && BYTES_BIG_ENDIAN
9047 && AGGREGATE_TYPE_P (valtype)
9048 && FUNCTION_ARG_PADDING (TYPE_MODE (valtype), valtype) == upward);
9049 }
9050
9051 #ifdef HAVE_AS_GNU_ATTRIBUTE
9052 /* Return TRUE if a call to function FNDECL may be one that
9053 potentially affects the function calling ABI of the object file. */
9054
9055 static bool
9056 call_ABI_of_interest (tree fndecl)
9057 {
9058 if (symtab->state == EXPANSION)
9059 {
9060 struct cgraph_node *c_node;
9061
9062 /* Libcalls are always interesting. */
9063 if (fndecl == NULL_TREE)
9064 return true;
9065
9066 /* Any call to an external function is interesting. */
9067 if (DECL_EXTERNAL (fndecl))
9068 return true;
9069
9070 /* Interesting functions that we are emitting in this object file. */
9071 c_node = cgraph_node::get (fndecl);
9072 c_node = c_node->ultimate_alias_target ();
9073 return !c_node->only_called_directly_p ();
9074 }
9075 return false;
9076 }
9077 #endif
9078
9079 /* Initialize a variable CUM of type CUMULATIVE_ARGS
9080 for a call to a function whose data type is FNTYPE.
9081 For a library call, FNTYPE is 0 and RETURN_MODE is the return value mode.
9082
9083 For incoming args we set the number of arguments in the prototype
9084 large enough that we never return a PARALLEL. */
9085
9086 void
9087 init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
9088 rtx libname ATTRIBUTE_UNUSED, int incoming,
9089 int libcall, int n_named_args,
9090 tree fndecl ATTRIBUTE_UNUSED,
9091 enum machine_mode return_mode ATTRIBUTE_UNUSED)
9092 {
9093 static CUMULATIVE_ARGS zero_cumulative;
9094
9095 *cum = zero_cumulative;
9096 cum->words = 0;
9097 cum->fregno = FP_ARG_MIN_REG;
9098 cum->vregno = ALTIVEC_ARG_MIN_REG;
9099 cum->prototype = (fntype && prototype_p (fntype));
9100 cum->call_cookie = ((DEFAULT_ABI == ABI_V4 && libcall)
9101 ? CALL_LIBCALL : CALL_NORMAL);
9102 cum->sysv_gregno = GP_ARG_MIN_REG;
9103 cum->stdarg = stdarg_p (fntype);
9104
9105 cum->nargs_prototype = 0;
9106 if (incoming || cum->prototype)
9107 cum->nargs_prototype = n_named_args;
9108
9109 /* Check for a longcall attribute. */
9110 if ((!fntype && rs6000_default_long_calls)
9111 || (fntype
9112 && lookup_attribute ("longcall", TYPE_ATTRIBUTES (fntype))
9113 && !lookup_attribute ("shortcall", TYPE_ATTRIBUTES (fntype))))
9114 cum->call_cookie |= CALL_LONG;
9115
9116 if (TARGET_DEBUG_ARG)
9117 {
9118 fprintf (stderr, "\ninit_cumulative_args:");
9119 if (fntype)
9120 {
9121 tree ret_type = TREE_TYPE (fntype);
9122 fprintf (stderr, " ret code = %s,",
9123 get_tree_code_name (TREE_CODE (ret_type)));
9124 }
9125
9126 if (cum->call_cookie & CALL_LONG)
9127 fprintf (stderr, " longcall,");
9128
9129 fprintf (stderr, " proto = %d, nargs = %d\n",
9130 cum->prototype, cum->nargs_prototype);
9131 }
9132
9133 #ifdef HAVE_AS_GNU_ATTRIBUTE
9134 if (DEFAULT_ABI == ABI_V4)
9135 {
9136 cum->escapes = call_ABI_of_interest (fndecl);
9137 if (cum->escapes)
9138 {
9139 tree return_type;
9140
9141 if (fntype)
9142 {
9143 return_type = TREE_TYPE (fntype);
9144 return_mode = TYPE_MODE (return_type);
9145 }
9146 else
9147 return_type = lang_hooks.types.type_for_mode (return_mode, 0);
9148
9149 if (return_type != NULL)
9150 {
9151 if (TREE_CODE (return_type) == RECORD_TYPE
9152 && TYPE_TRANSPARENT_AGGR (return_type))
9153 {
9154 return_type = TREE_TYPE (first_field (return_type));
9155 return_mode = TYPE_MODE (return_type);
9156 }
9157 if (AGGREGATE_TYPE_P (return_type)
9158 && ((unsigned HOST_WIDE_INT) int_size_in_bytes (return_type)
9159 <= 8))
9160 rs6000_returns_struct = true;
9161 }
9162 if (SCALAR_FLOAT_MODE_P (return_mode))
9163 rs6000_passes_float = true;
9164 else if (ALTIVEC_OR_VSX_VECTOR_MODE (return_mode)
9165 || SPE_VECTOR_MODE (return_mode))
9166 rs6000_passes_vector = true;
9167 }
9168 }
9169 #endif
9170
9171 if (fntype
9172 && !TARGET_ALTIVEC
9173 && TARGET_ALTIVEC_ABI
9174 && ALTIVEC_VECTOR_MODE (TYPE_MODE (TREE_TYPE (fntype))))
9175 {
9176 error ("cannot return value in vector register because"
9177 " altivec instructions are disabled, use -maltivec"
9178 " to enable them");
9179 }
9180 }
9181 \f
9182 /* Return true if TYPE must be passed on the stack and not in registers. */
9183
9184 static bool
9185 rs6000_must_pass_in_stack (enum machine_mode mode, const_tree type)
9186 {
9187 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2 || TARGET_64BIT)
9188 return must_pass_in_stack_var_size (mode, type);
9189 else
9190 return must_pass_in_stack_var_size_or_pad (mode, type);
9191 }
9192
9193 /* If defined, a C expression which determines whether, and in which
9194 direction, to pad out an argument with extra space. The value
9195 should be of type `enum direction': either `upward' to pad above
9196 the argument, `downward' to pad below, or `none' to inhibit
9197 padding.
9198
9199 For the AIX ABI, structs are always stored left-shifted in their
9200 argument slot. */
9201
9202 enum direction
9203 function_arg_padding (enum machine_mode mode, const_tree type)
9204 {
9205 #ifndef AGGREGATE_PADDING_FIXED
9206 #define AGGREGATE_PADDING_FIXED 0
9207 #endif
9208 #ifndef AGGREGATES_PAD_UPWARD_ALWAYS
9209 #define AGGREGATES_PAD_UPWARD_ALWAYS 0
9210 #endif
9211
9212 if (!AGGREGATE_PADDING_FIXED)
9213 {
9214 /* GCC used to pass structures of the same size as integer types as
9215 if they were in fact integers, ignoring FUNCTION_ARG_PADDING.
9216 i.e. Structures of size 1 or 2 (or 4 when TARGET_64BIT) were
9217 passed padded downward, except that -mstrict-align further
9218 muddied the water in that multi-component structures of 2 and 4
9219 bytes in size were passed padded upward.
9220
9221 The following arranges for best compatibility with previous
9222 versions of gcc, but removes the -mstrict-align dependency. */
9223 if (BYTES_BIG_ENDIAN)
9224 {
9225 HOST_WIDE_INT size = 0;
9226
9227 if (mode == BLKmode)
9228 {
9229 if (type && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
9230 size = int_size_in_bytes (type);
9231 }
9232 else
9233 size = GET_MODE_SIZE (mode);
9234
9235 if (size == 1 || size == 2 || size == 4)
9236 return downward;
9237 }
9238 return upward;
9239 }
9240
9241 if (AGGREGATES_PAD_UPWARD_ALWAYS)
9242 {
9243 if (type != 0 && AGGREGATE_TYPE_P (type))
9244 return upward;
9245 }
9246
9247 /* Fall back to the default. */
9248 return DEFAULT_FUNCTION_ARG_PADDING (mode, type);
9249 }
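
/* For example, on a big-endian target with !AGGREGATE_PADDING_FIXED:

     struct a { char c[2]; };   // size 2: padded downward
     struct b { char c[3]; };   // size 3: padded upward

   matching the historical pass-small-structs-as-integers behavior
   described above.  */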
9250
9251 /* If defined, a C expression that gives the alignment boundary, in bits,
9252 of an argument with the specified mode and type. If it is not defined,
9253 PARM_BOUNDARY is used for all arguments.
9254
9255 V.4 wants long longs and doubles to be double word aligned. Just
9256 testing the mode size is a boneheaded way to do this as it means
9257 that other types such as complex int are also double word aligned.
9258 However, we're stuck with this because changing the ABI might break
9259 existing library interfaces.
9260
9261 Doubleword align SPE vectors.
9262 Quadword align Altivec/VSX vectors.
9263 Quadword align large synthetic vector types. */
9264
9265 static unsigned int
9266 rs6000_function_arg_boundary (enum machine_mode mode, const_tree type)
9267 {
9268 enum machine_mode elt_mode;
9269 int n_elts;
9270
9271 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
9272
9273 if (DEFAULT_ABI == ABI_V4
9274 && (GET_MODE_SIZE (mode) == 8
9275 || (TARGET_HARD_FLOAT
9276 && TARGET_FPRS
9277 && (mode == TFmode || mode == TDmode))))
9278 return 64;
9279 else if (SPE_VECTOR_MODE (mode)
9280 || (type && TREE_CODE (type) == VECTOR_TYPE
9281 && int_size_in_bytes (type) >= 8
9282 && int_size_in_bytes (type) < 16))
9283 return 64;
9284 else if (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
9285 || (type && TREE_CODE (type) == VECTOR_TYPE
9286 && int_size_in_bytes (type) >= 16))
9287 return 128;
9288
9289 /* Aggregate types that need > 8 byte alignment are quadword-aligned
9290 in the parameter area in the ELFv2 ABI, and in the AIX ABI unless
9291 -mcompat-align-parm is used. */
9292 if (((DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm)
9293 || DEFAULT_ABI == ABI_ELFv2)
9294 && type && TYPE_ALIGN (type) > 64)
9295 {
9296 /* "Aggregate" means any AGGREGATE_TYPE except for single-element
9297 or homogeneous float/vector aggregates here. We already handled
9298 vector aggregates above, but still need to check for float here. */
9299 bool aggregate_p = (AGGREGATE_TYPE_P (type)
9300 && !SCALAR_FLOAT_MODE_P (elt_mode));
9301
9302 /* We used to check for BLKmode instead of the above aggregate type
9303 check. Warn when this results in any difference to the ABI. */
9304 if (aggregate_p != (mode == BLKmode))
9305 {
9306 static bool warned;
9307 if (!warned && warn_psabi)
9308 {
9309 warned = true;
9310 inform (input_location,
9311 "the ABI of passing aggregates with %d-byte alignment"
9312 " has changed in GCC 4.10",
9313 (int) TYPE_ALIGN (type) / BITS_PER_UNIT);
9314 }
9315 }
9316
9317 if (aggregate_p)
9318 return 128;
9319 }
9320
9321 /* Similar for the Darwin64 ABI. Note that for historical reasons we
9322 implement the "aggregate type" check as a BLKmode check here; this
9323 means certain aggregate types are in fact not aligned. */
9324 if (TARGET_MACHO && rs6000_darwin64_abi
9325 && mode == BLKmode
9326 && type && TYPE_ALIGN (type) > 64)
9327 return 128;
9328
9329 return PARM_BOUNDARY;
9330 }
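
/* For example (illustrative; PARM_BOUNDARY is 32 in 32-bit mode):
   under the 32-bit V.4 ABI a long long (DImode, 8 bytes) is aligned
   to 64 bits, a vector int (V4SImode) to 128 bits, and a plain int
   falls through to PARM_BOUNDARY.  Under ELFv2, a 16-byte-aligned
   struct that is not a homogeneous float/vector aggregate is also
   aligned to 128 bits.  */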
9331
9332 /* The offset in words to the start of the parameter save area. */
9333
9334 static unsigned int
9335 rs6000_parm_offset (void)
9336 {
9337 return (DEFAULT_ABI == ABI_V4 ? 2
9338 : DEFAULT_ABI == ABI_ELFv2 ? 4
9339 : 6);
9340 }
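
/* These counts correspond to the fixed stack-frame header that
   precedes the parameter save area: 2 words (back chain and saved LR)
   for V.4, 4 words for ELFv2, and 6 words for AIX/ELFv1, where a word
   is 4 or 8 bytes depending on TARGET_32BIT.  */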
9341
9342 /* For a function parm of MODE and TYPE, return the starting word in
9343 the parameter area. NWORDS of the parameter area are already used. */
9344
9345 static unsigned int
9346 rs6000_parm_start (enum machine_mode mode, const_tree type,
9347 unsigned int nwords)
9348 {
9349 unsigned int align;
9350
9351 align = rs6000_function_arg_boundary (mode, type) / PARM_BOUNDARY - 1;
9352 return nwords + (-(rs6000_parm_offset () + nwords) & align);
9353 }
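
/* Worked example (assuming PARM_BOUNDARY is 32 in 32-bit mode): a
   16-byte-aligned vector argument under 32-bit V.4 with nwords == 3
   gives

     align = 128 / 32 - 1 = 3
     start = 3 + (-(2 + 3) & 3) = 3 + 3 = 6

   so the argument starts at word 6, making its absolute offset
   (2 + 6 words = 32 bytes) a multiple of 16 bytes.  */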
9354
9355 /* Compute the size (in words) of a function argument. */
9356
9357 static unsigned long
9358 rs6000_arg_size (enum machine_mode mode, const_tree type)
9359 {
9360 unsigned long size;
9361
9362 if (mode != BLKmode)
9363 size = GET_MODE_SIZE (mode);
9364 else
9365 size = int_size_in_bytes (type);
9366
9367 if (TARGET_32BIT)
9368 return (size + 3) >> 2;
9369 else
9370 return (size + 7) >> 3;
9371 }
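
/* E.g. a 13-byte BLKmode struct occupies (13 + 3) >> 2 = 4 words in
   32-bit mode and (13 + 7) >> 3 = 2 doublewords in 64-bit mode.  */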
9372 \f
9373 /* Use this to flush pending int fields. */
9374
9375 static void
9376 rs6000_darwin64_record_arg_advance_flush (CUMULATIVE_ARGS *cum,
9377 HOST_WIDE_INT bitpos, int final)
9378 {
9379 unsigned int startbit, endbit;
9380 int intregs, intoffset;
9381 enum machine_mode mode;
9382
9383 /* Handle the situations where a float is taking up the first half
9384 of the GPR, and the other half is empty (typically due to
9385 alignment restrictions). We can detect this by an 8-byte-aligned
9386 int field, or by seeing that this is the final flush for this
9387 argument. Count the word and continue on. */
9388 if (cum->floats_in_gpr == 1
9389 && (cum->intoffset % 64 == 0
9390 || (cum->intoffset == -1 && final)))
9391 {
9392 cum->words++;
9393 cum->floats_in_gpr = 0;
9394 }
9395
9396 if (cum->intoffset == -1)
9397 return;
9398
9399 intoffset = cum->intoffset;
9400 cum->intoffset = -1;
9401 cum->floats_in_gpr = 0;
9402
9403 if (intoffset % BITS_PER_WORD != 0)
9404 {
9405 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
9406 MODE_INT, 0);
9407 if (mode == BLKmode)
9408 {
9409 /* We couldn't find an appropriate mode, which happens,
9410 e.g., in packed structs when there are 3 bytes to load.
9411 Move intoffset back to the beginning of the word in this
9412 case. */
9413 intoffset = intoffset & -BITS_PER_WORD;
9414 }
9415 }
9416
9417 startbit = intoffset & -BITS_PER_WORD;
9418 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
9419 intregs = (endbit - startbit) / BITS_PER_WORD;
9420 cum->words += intregs;
9421 /* words should be unsigned. */
9422 if ((unsigned)cum->words < (endbit/BITS_PER_WORD))
9423 {
9424 int pad = (endbit/BITS_PER_WORD) - cum->words;
9425 cum->words += pad;
9426 }
9427 }
9428
9429 /* The darwin64 ABI calls for us to recurse down through structs,
9430 looking for elements passed in registers. Unfortunately, we have
9431 to track int register count here also because of misalignments
9432 in powerpc alignment mode. */
9433
9434 static void
9435 rs6000_darwin64_record_arg_advance_recurse (CUMULATIVE_ARGS *cum,
9436 const_tree type,
9437 HOST_WIDE_INT startbitpos)
9438 {
9439 tree f;
9440
9441 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
9442 if (TREE_CODE (f) == FIELD_DECL)
9443 {
9444 HOST_WIDE_INT bitpos = startbitpos;
9445 tree ftype = TREE_TYPE (f);
9446 enum machine_mode mode;
9447 if (ftype == error_mark_node)
9448 continue;
9449 mode = TYPE_MODE (ftype);
9450
9451 if (DECL_SIZE (f) != 0
9452 && tree_fits_uhwi_p (bit_position (f)))
9453 bitpos += int_bit_position (f);
9454
9455 /* ??? FIXME: else assume zero offset. */
9456
9457 if (TREE_CODE (ftype) == RECORD_TYPE)
9458 rs6000_darwin64_record_arg_advance_recurse (cum, ftype, bitpos);
9459 else if (USE_FP_FOR_ARG_P (cum, mode))
9460 {
9461 unsigned n_fpregs = (GET_MODE_SIZE (mode) + 7) >> 3;
9462 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
9463 cum->fregno += n_fpregs;
9464 /* Single-precision floats present a special problem for
9465 us, because they are smaller than an 8-byte GPR, and so
9466 the structure-packing rules combined with the standard
9467 varargs behavior mean that we want to pack float/float
9468 and float/int combinations into a single register's
9469 space. This is complicated by the arg advance flushing,
9470 which works on arbitrarily large groups of int-type
9471 fields. */
9472 if (mode == SFmode)
9473 {
9474 if (cum->floats_in_gpr == 1)
9475 {
9476 /* Two floats in a word; count the word and reset
9477 the float count. */
9478 cum->words++;
9479 cum->floats_in_gpr = 0;
9480 }
9481 else if (bitpos % 64 == 0)
9482 {
9483 /* A float at the beginning of an 8-byte word;
9484 count it and put off adjusting cum->words until
9485 we see if an arg advance flush is going to do it
9486 for us. */
9487 cum->floats_in_gpr++;
9488 }
9489 else
9490 {
9491 /* The float is at the end of a word, preceded
9492 by integer fields, so the arg advance flush
9493 just above has already set cum->words and
9494 everything is taken care of. */
9495 }
9496 }
9497 else
9498 cum->words += n_fpregs;
9499 }
9500 else if (USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
9501 {
9502 rs6000_darwin64_record_arg_advance_flush (cum, bitpos, 0);
9503 cum->vregno++;
9504 cum->words += 2;
9505 }
9506 else if (cum->intoffset == -1)
9507 cum->intoffset = bitpos;
9508 }
9509 }
9510
9511 /* Check for an item that needs to be considered specially under the darwin 64
9512 bit ABI. These are record types where the mode is BLKmode or the structure is
9513 8 bytes in size. */
9514 static int
9515 rs6000_darwin64_struct_check_p (enum machine_mode mode, const_tree type)
9516 {
9517 return rs6000_darwin64_abi
9518 && ((mode == BLKmode
9519 && TREE_CODE (type) == RECORD_TYPE
9520 && int_size_in_bytes (type) > 0)
9521 || (type && TREE_CODE (type) == RECORD_TYPE
9522 && int_size_in_bytes (type) == 8)) ? 1 : 0;
9523 }
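
/* E.g. on Darwin64, struct { long x; } is 8 bytes and so is checked
   specially even though it has scalar DImode, while struct { char c; }
   (QImode, size 1) takes the ordinary path.  */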
9524
9525 /* Update the data in CUM to advance over an argument
9526 of mode MODE and data type TYPE.
9527 (TYPE is null for libcalls where that information may not be available.)
9528
9529 Note that for args passed by reference, function_arg will be called
9530 with MODE and TYPE set to that of the pointer to the arg, not the arg
9531 itself. */
9532
9533 static void
9534 rs6000_function_arg_advance_1 (CUMULATIVE_ARGS *cum, enum machine_mode mode,
9535 const_tree type, bool named, int depth)
9536 {
9537 enum machine_mode elt_mode;
9538 int n_elts;
9539
9540 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
9541
9542 /* Only tick off an argument if we're not recursing. */
9543 if (depth == 0)
9544 cum->nargs_prototype--;
9545
9546 #ifdef HAVE_AS_GNU_ATTRIBUTE
9547 if (DEFAULT_ABI == ABI_V4
9548 && cum->escapes)
9549 {
9550 if (SCALAR_FLOAT_MODE_P (mode))
9551 rs6000_passes_float = true;
9552 else if (named && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
9553 rs6000_passes_vector = true;
9554 else if (SPE_VECTOR_MODE (mode)
9555 && !cum->stdarg
9556 && cum->sysv_gregno <= GP_ARG_MAX_REG)
9557 rs6000_passes_vector = true;
9558 }
9559 #endif
9560
9561 if (TARGET_ALTIVEC_ABI
9562 && (ALTIVEC_OR_VSX_VECTOR_MODE (elt_mode)
9563 || (type && TREE_CODE (type) == VECTOR_TYPE
9564 && int_size_in_bytes (type) == 16)))
9565 {
9566 bool stack = false;
9567
9568 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
9569 {
9570 cum->vregno += n_elts;
9571
9572 if (!TARGET_ALTIVEC)
9573 error ("cannot pass argument in vector register because"
9574 " altivec instructions are disabled, use -maltivec"
9575 " to enable them");
9576
9577 /* PowerPC64 Linux and AIX allocate GPRs for a vector argument
9578 even if it is going to be passed in a vector register.
9579 Darwin does the same for variable-argument functions. */
9580 if (((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
9581 && TARGET_64BIT)
9582 || (cum->stdarg && DEFAULT_ABI != ABI_V4))
9583 stack = true;
9584 }
9585 else
9586 stack = true;
9587
9588 if (stack)
9589 {
9590 int align;
9591
9592 /* Vector parameters must be 16-byte aligned. In 32-bit
9593 mode this means we need to take into account the offset
9594 to the parameter save area. In 64-bit mode, they just
9595 have to start on an even word, since the parameter save
9596 area is 16-byte aligned. */
9597 if (TARGET_32BIT)
9598 align = -(rs6000_parm_offset () + cum->words) & 3;
9599 else
9600 align = cum->words & 1;
9601 cum->words += align + rs6000_arg_size (mode, type);
9602
9603 if (TARGET_DEBUG_ARG)
9604 {
9605 fprintf (stderr, "function_adv: words = %2d, align=%d, ",
9606 cum->words, align);
9607 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s\n",
9608 cum->nargs_prototype, cum->prototype,
9609 GET_MODE_NAME (mode));
9610 }
9611 }
9612 }
9613 else if (TARGET_SPE_ABI && TARGET_SPE && SPE_VECTOR_MODE (mode)
9614 && !cum->stdarg
9615 && cum->sysv_gregno <= GP_ARG_MAX_REG)
9616 cum->sysv_gregno++;
9617
9618 else if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
9619 {
9620 int size = int_size_in_bytes (type);
9621 /* Variable sized types have size == -1 and are
9622 treated as if consisting entirely of ints.
9623 Pad to 16 byte boundary if needed. */
9624 if (TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
9625 && (cum->words % 2) != 0)
9626 cum->words++;
9627 /* For varargs, we can just go up by the size of the struct. */
9628 if (!named)
9629 cum->words += (size + 7) / 8;
9630 else
9631 {
9632 /* It is tempting to say int register count just goes up by
9633 sizeof(type)/8, but this is wrong in a case such as
9634 { int; double; int; } [powerpc alignment]. We have to
9635 grovel through the fields for these too. */
9636 cum->intoffset = 0;
9637 cum->floats_in_gpr = 0;
9638 rs6000_darwin64_record_arg_advance_recurse (cum, type, 0);
9639 rs6000_darwin64_record_arg_advance_flush (cum,
9640 size * BITS_PER_UNIT, 1);
9641 }
9642 if (TARGET_DEBUG_ARG)
9643 {
9644 fprintf (stderr, "function_adv: words = %2d, align=%d, size=%d",
9645 cum->words, TYPE_ALIGN (type), size);
9646 fprintf (stderr,
9647 "nargs = %4d, proto = %d, mode = %4s (darwin64 abi)\n",
9648 cum->nargs_prototype, cum->prototype,
9649 GET_MODE_NAME (mode));
9650 }
9651 }
9652 else if (DEFAULT_ABI == ABI_V4)
9653 {
9654 if (TARGET_HARD_FLOAT && TARGET_FPRS
9655 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
9656 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
9657 || (mode == TFmode && !TARGET_IEEEQUAD)
9658 || mode == SDmode || mode == DDmode || mode == TDmode))
9659 {
9660 /* _Decimal128 must use an even/odd register pair. This assumes
9661 that the register number is odd when fregno is odd. */
9662 if (mode == TDmode && (cum->fregno % 2) == 1)
9663 cum->fregno++;
9664
9665 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
9666 <= FP_ARG_V4_MAX_REG)
9667 cum->fregno += (GET_MODE_SIZE (mode) + 7) >> 3;
9668 else
9669 {
9670 cum->fregno = FP_ARG_V4_MAX_REG + 1;
9671 if (mode == DFmode || mode == TFmode
9672 || mode == DDmode || mode == TDmode)
9673 cum->words += cum->words & 1;
9674 cum->words += rs6000_arg_size (mode, type);
9675 }
9676 }
9677 else
9678 {
9679 int n_words = rs6000_arg_size (mode, type);
9680 int gregno = cum->sysv_gregno;
9681
9682 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
9683 (r7,r8) or (r9,r10), as is any other 2-word item such
9684 as complex int due to a historical mistake. */
9685 if (n_words == 2)
9686 gregno += (1 - gregno) & 1;
9687
9688 /* Multi-reg args are not split between registers and stack. */
9689 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
9690 {
9691 /* Long long and SPE vectors are aligned on the stack.
9692 So are other 2 word items such as complex int due to
9693 a historical mistake. */
9694 if (n_words == 2)
9695 cum->words += cum->words & 1;
9696 cum->words += n_words;
9697 }
9698
9699 /* Note: we continue to accumulate gregno even after we've started
9700 spilling to the stack; this is how expand_builtin_saveregs can tell
9701 that spilling has begun. */
9702 cum->sysv_gregno = gregno + n_words;
9703 }
9704
9705 if (TARGET_DEBUG_ARG)
9706 {
9707 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
9708 cum->words, cum->fregno);
9709 fprintf (stderr, "gregno = %2d, nargs = %4d, proto = %d, ",
9710 cum->sysv_gregno, cum->nargs_prototype, cum->prototype);
9711 fprintf (stderr, "mode = %4s, named = %d\n",
9712 GET_MODE_NAME (mode), named);
9713 }
9714 }
9715 else
9716 {
9717 int n_words = rs6000_arg_size (mode, type);
9718 int start_words = cum->words;
9719 int align_words = rs6000_parm_start (mode, type, start_words);
9720
9721 cum->words = align_words + n_words;
9722
9723 if (SCALAR_FLOAT_MODE_P (elt_mode)
9724 && TARGET_HARD_FLOAT && TARGET_FPRS)
9725 {
9726 /* _Decimal128 must be passed in an even/odd float register pair.
9727 This assumes that the register number is odd when fregno is
9728 odd. */
9729 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
9730 cum->fregno++;
9731 cum->fregno += n_elts * ((GET_MODE_SIZE (elt_mode) + 7) >> 3);
9732 }
9733
9734 if (TARGET_DEBUG_ARG)
9735 {
9736 fprintf (stderr, "function_adv: words = %2d, fregno = %2d, ",
9737 cum->words, cum->fregno);
9738 fprintf (stderr, "nargs = %4d, proto = %d, mode = %4s, ",
9739 cum->nargs_prototype, cum->prototype, GET_MODE_NAME (mode));
9740 fprintf (stderr, "named = %d, align = %d, depth = %d\n",
9741 named, align_words - start_words, depth);
9742 }
9743 }
9744 }
9745
9746 static void
9747 rs6000_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
9748 const_tree type, bool named)
9749 {
9750 rs6000_function_arg_advance_1 (get_cumulative_args (cum), mode, type, named,
9751 0);
9752 }
9753
9754 static rtx
9755 spe_build_register_parallel (enum machine_mode mode, int gregno)
9756 {
9757 rtx r1, r3, r5, r7;
9758
9759 switch (mode)
9760 {
9761 case DFmode:
9762 r1 = gen_rtx_REG (DImode, gregno);
9763 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
9764 return gen_rtx_PARALLEL (mode, gen_rtvec (1, r1));
9765
9766 case DCmode:
9767 case TFmode:
9768 r1 = gen_rtx_REG (DImode, gregno);
9769 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
9770 r3 = gen_rtx_REG (DImode, gregno + 2);
9771 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
9772 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r3));
9773
9774 case TCmode:
9775 r1 = gen_rtx_REG (DImode, gregno);
9776 r1 = gen_rtx_EXPR_LIST (VOIDmode, r1, const0_rtx);
9777 r3 = gen_rtx_REG (DImode, gregno + 2);
9778 r3 = gen_rtx_EXPR_LIST (VOIDmode, r3, GEN_INT (8));
9779 r5 = gen_rtx_REG (DImode, gregno + 4);
9780 r5 = gen_rtx_EXPR_LIST (VOIDmode, r5, GEN_INT (16));
9781 r7 = gen_rtx_REG (DImode, gregno + 6);
9782 r7 = gen_rtx_EXPR_LIST (VOIDmode, r7, GEN_INT (24));
9783 return gen_rtx_PARALLEL (mode, gen_rtvec (4, r1, r3, r5, r7));
9784
9785 default:
9786 gcc_unreachable ();
9787 }
9788 }
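
/* E.g. for DCmode starting at r5 this builds, in illustrative RTL:

     (parallel:DC [(expr_list (reg:DI 5) (const_int 0))
                   (expr_list (reg:DI 7) (const_int 8))])

   i.e. the real part in the r5/r6 pair and the imaginary part in the
   r7/r8 pair.  */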
9789
9790 /* Determine where to put a SIMD argument on the SPE. */
9791 static rtx
9792 rs6000_spe_function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
9793 const_tree type)
9794 {
9795 int gregno = cum->sysv_gregno;
9796
9797 /* On E500 v2, double arithmetic is done on the full 64-bit GPR, but
9798 doubles are passed and returned in a pair of GPRs for ABI compatibility. */
9799 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode
9800 || mode == DCmode || mode == TCmode))
9801 {
9802 int n_words = rs6000_arg_size (mode, type);
9803
9804 /* Doubles go in an odd/even register pair (r5/r6, etc). */
9805 if (mode == DFmode)
9806 gregno += (1 - gregno) & 1;
9807
9808 /* Multi-reg args are not split between registers and stack. */
9809 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
9810 return NULL_RTX;
9811
9812 return spe_build_register_parallel (mode, gregno);
9813 }
9814 if (cum->stdarg)
9815 {
9816 int n_words = rs6000_arg_size (mode, type);
9817
9818 /* SPE vectors are put in odd registers. */
9819 if (n_words == 2 && (gregno & 1) == 0)
9820 gregno += 1;
9821
9822 if (gregno + n_words - 1 <= GP_ARG_MAX_REG)
9823 {
9824 rtx r1, r2;
9825 enum machine_mode m = SImode;
9826
9827 r1 = gen_rtx_REG (m, gregno);
9828 r1 = gen_rtx_EXPR_LIST (m, r1, const0_rtx);
9829 r2 = gen_rtx_REG (m, gregno + 1);
9830 r2 = gen_rtx_EXPR_LIST (m, r2, GEN_INT (4));
9831 return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
9832 }
9833 else
9834 return NULL_RTX;
9835 }
9836 else
9837 {
9838 if (gregno <= GP_ARG_MAX_REG)
9839 return gen_rtx_REG (mode, gregno);
9840 else
9841 return NULL_RTX;
9842 }
9843 }
9844
9845 /* A subroutine of rs6000_darwin64_record_arg. Assign the bits of the
9846 structure between cum->intoffset and bitpos to integer registers. */
9847
9848 static void
9849 rs6000_darwin64_record_arg_flush (CUMULATIVE_ARGS *cum,
9850 HOST_WIDE_INT bitpos, rtx rvec[], int *k)
9851 {
9852 enum machine_mode mode;
9853 unsigned int regno;
9854 unsigned int startbit, endbit;
9855 int this_regno, intregs, intoffset;
9856 rtx reg;
9857
9858 if (cum->intoffset == -1)
9859 return;
9860
9861 intoffset = cum->intoffset;
9862 cum->intoffset = -1;
9863
9864 /* If this is the trailing part of a word, try to only load that
9865 much into the register. Otherwise load the whole register. Note
9866 that in the latter case we may pick up unwanted bits. It's not a
9867 problem at the moment, but we may wish to revisit this. */
9868
9869 if (intoffset % BITS_PER_WORD != 0)
9870 {
9871 mode = mode_for_size (BITS_PER_WORD - intoffset % BITS_PER_WORD,
9872 MODE_INT, 0);
9873 if (mode == BLKmode)
9874 {
9875 /* We couldn't find an appropriate mode, which happens,
9876 e.g., in packed structs when there are 3 bytes to load.
9877 Move intoffset back to the beginning of the word in this
9878 case. */
9879 intoffset = intoffset & -BITS_PER_WORD;
9880 mode = word_mode;
9881 }
9882 }
9883 else
9884 mode = word_mode;
9885
9886 startbit = intoffset & -BITS_PER_WORD;
9887 endbit = (bitpos + BITS_PER_WORD - 1) & -BITS_PER_WORD;
9888 intregs = (endbit - startbit) / BITS_PER_WORD;
9889 this_regno = cum->words + intoffset / BITS_PER_WORD;
9890
9891 if (intregs > 0 && intregs > GP_ARG_NUM_REG - this_regno)
9892 cum->use_stack = 1;
9893
9894 intregs = MIN (intregs, GP_ARG_NUM_REG - this_regno);
9895 if (intregs <= 0)
9896 return;
9897
9898 intoffset /= BITS_PER_UNIT;
9899 do
9900 {
9901 regno = GP_ARG_MIN_REG + this_regno;
9902 reg = gen_rtx_REG (mode, regno);
9903 rvec[(*k)++] =
9904 gen_rtx_EXPR_LIST (VOIDmode, reg, GEN_INT (intoffset));
9905
9906 this_regno += 1;
9907 intoffset = (intoffset | (UNITS_PER_WORD-1)) + 1;
9908 mode = word_mode;
9909 intregs -= 1;
9910 }
9911 while (intregs > 0);
9912 }
9913
9914 /* Recursive workhorse for the following. */
9915
9916 static void
9917 rs6000_darwin64_record_arg_recurse (CUMULATIVE_ARGS *cum, const_tree type,
9918 HOST_WIDE_INT startbitpos, rtx rvec[],
9919 int *k)
9920 {
9921 tree f;
9922
9923 for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
9924 if (TREE_CODE (f) == FIELD_DECL)
9925 {
9926 HOST_WIDE_INT bitpos = startbitpos;
9927 tree ftype = TREE_TYPE (f);
9928 enum machine_mode mode;
9929 if (ftype == error_mark_node)
9930 continue;
9931 mode = TYPE_MODE (ftype);
9932
9933 if (DECL_SIZE (f) != 0
9934 && tree_fits_uhwi_p (bit_position (f)))
9935 bitpos += int_bit_position (f);
9936
9937 /* ??? FIXME: else assume zero offset. */
9938
9939 if (TREE_CODE (ftype) == RECORD_TYPE)
9940 rs6000_darwin64_record_arg_recurse (cum, ftype, bitpos, rvec, k);
9941 else if (cum->named && USE_FP_FOR_ARG_P (cum, mode))
9942 {
9943 unsigned n_fpreg = (GET_MODE_SIZE (mode) + 7) >> 3;
9944 #if 0
9945 switch (mode)
9946 {
9947 case SCmode: mode = SFmode; break;
9948 case DCmode: mode = DFmode; break;
9949 case TCmode: mode = TFmode; break;
9950 default: break;
9951 }
9952 #endif
9953 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
9954 if (cum->fregno + n_fpreg > FP_ARG_MAX_REG + 1)
9955 {
9956 gcc_assert (cum->fregno == FP_ARG_MAX_REG
9957 && (mode == TFmode || mode == TDmode));
9958 /* Long double or _Decimal128 split over regs and memory. */
9959 mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode : DFmode;
9960 cum->use_stack = 1;
9961 }
9962 rvec[(*k)++]
9963 = gen_rtx_EXPR_LIST (VOIDmode,
9964 gen_rtx_REG (mode, cum->fregno++),
9965 GEN_INT (bitpos / BITS_PER_UNIT));
9966 if (mode == TFmode || mode == TDmode)
9967 cum->fregno++;
9968 }
9969 else if (cum->named && USE_ALTIVEC_FOR_ARG_P (cum, mode, 1))
9970 {
9971 rs6000_darwin64_record_arg_flush (cum, bitpos, rvec, k);
9972 rvec[(*k)++]
9973 = gen_rtx_EXPR_LIST (VOIDmode,
9974 gen_rtx_REG (mode, cum->vregno++),
9975 GEN_INT (bitpos / BITS_PER_UNIT));
9976 }
9977 else if (cum->intoffset == -1)
9978 cum->intoffset = bitpos;
9979 }
9980 }
9981
9982 /* For the darwin64 ABI, we want to construct a PARALLEL consisting of
9983 the register(s) to be used for each field and subfield of a struct
9984 being passed by value, along with the offset of where the
9985 register's value may be found in the block. FP fields go in FP
9986 registers, vector fields go in vector registers, and everything
9987 else goes in int registers, packed as in memory.
9988
9989 This code is also used for function return values. RETVAL indicates
9990 whether this is the case.
9991
9992 Much of this is taken from the SPARC V9 port, which has a similar
9993 calling convention. */
9994
9995 static rtx
9996 rs6000_darwin64_record_arg (CUMULATIVE_ARGS *orig_cum, const_tree type,
9997 bool named, bool retval)
9998 {
9999 rtx rvec[FIRST_PSEUDO_REGISTER];
10000 int k = 1, kbase = 1;
10001 HOST_WIDE_INT typesize = int_size_in_bytes (type);
10002 /* This is a copy; modifications are not visible to our caller. */
10003 CUMULATIVE_ARGS copy_cum = *orig_cum;
10004 CUMULATIVE_ARGS *cum = &copy_cum;
10005
10006 /* Pad to 16 byte boundary if needed. */
10007 if (!retval && TYPE_ALIGN (type) >= 2 * BITS_PER_WORD
10008 && (cum->words % 2) != 0)
10009 cum->words++;
10010
10011 cum->intoffset = 0;
10012 cum->use_stack = 0;
10013 cum->named = named;
10014
10015 /* Put entries into rvec[] for individual FP and vector fields, and
10016 for the chunks of memory that go in int regs. Note we start at
10017 element 1; 0 is reserved for an indication of using memory, and
10018 may or may not be filled in below. */
10019 rs6000_darwin64_record_arg_recurse (cum, type, /* startbit pos= */ 0, rvec, &k);
10020 rs6000_darwin64_record_arg_flush (cum, typesize * BITS_PER_UNIT, rvec, &k);
10021
10022 /* If any part of the struct went on the stack put all of it there.
10023 This hack is because the generic code for
10024 FUNCTION_ARG_PARTIAL_NREGS cannot handle cases where the register
10025 parts of the struct are not at the beginning. */
10026 if (cum->use_stack)
10027 {
10028 if (retval)
10029 return NULL_RTX; /* doesn't go in registers at all */
10030 kbase = 0;
10031 rvec[0] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
10032 }
10033 if (k > 1 || cum->use_stack)
10034 return gen_rtx_PARALLEL (BLKmode, gen_rtvec_v (k - kbase, &rvec[kbase]));
10035 else
10036 return NULL_RTX;
10037 }
10038
10039 /* Determine where to place an argument in 64-bit mode with 32-bit ABI. */
10040
10041 static rtx
10042 rs6000_mixed_function_arg (enum machine_mode mode, const_tree type,
10043 int align_words)
10044 {
10045 int n_units;
10046 int i, k;
10047 rtx rvec[GP_ARG_NUM_REG + 1];
10048
10049 if (align_words >= GP_ARG_NUM_REG)
10050 return NULL_RTX;
10051
10052 n_units = rs6000_arg_size (mode, type);
10053
10054 /* Optimize the simple case where the arg fits in one gpr, except in
10055 the case of BLKmode due to assign_parms assuming that registers are
10056 BITS_PER_WORD wide. */
10057 if (n_units == 0
10058 || (n_units == 1 && mode != BLKmode))
10059 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
10060
10061 k = 0;
10062 if (align_words + n_units > GP_ARG_NUM_REG)
10063 /* Not all of the arg fits in gprs. Say that it goes in memory too,
10064 using a magic NULL_RTX component.
10065 This is not strictly correct. Only some of the arg belongs in
10066 memory, not all of it. However, the normal scheme using
10067 function_arg_partial_nregs can result in unusual subregs, eg.
10068 (subreg:SI (reg:DF) 4), which are not handled well. The code to
10069 store the whole arg to memory is often more efficient than code
10070 to store pieces, and we know that space is available in the right
10071 place for the whole arg. */
10072 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
10073
10074 i = 0;
10075 do
10076 {
10077 rtx r = gen_rtx_REG (SImode, GP_ARG_MIN_REG + align_words);
10078 rtx off = GEN_INT (i++ * 4);
10079 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10080 }
10081 while (++align_words < GP_ARG_NUM_REG && --n_units != 0);
10082
10083 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
10084 }
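
/* E.g. a DFmode argument at align_words == 7 (GP_ARG_NUM_REG is 8,
   GP_ARG_MIN_REG is 3) needs two SImode units but only r10 remains,
   so the result is, in illustrative RTL:

     (parallel:DF [(expr_list (nil) (const_int 0))
                   (expr_list (reg:SI 10) (const_int 0))])

   telling the middle end that the rest of the value lives in memory.  */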
10085
10086 /* We have an argument of MODE and TYPE that goes into FPRs or VRs,
10087 but must also be copied into the parameter save area starting at
10088 offset ALIGN_WORDS. Fill in RVEC with the elements corresponding
10089 to the GPRs and/or memory. Return the number of elements used. */
10090
10091 static int
10092 rs6000_psave_function_arg (enum machine_mode mode, const_tree type,
10093 int align_words, rtx *rvec)
10094 {
10095 int k = 0;
10096
10097 if (align_words < GP_ARG_NUM_REG)
10098 {
10099 int n_words = rs6000_arg_size (mode, type);
10100
10101 if (align_words + n_words > GP_ARG_NUM_REG
10102 || mode == BLKmode
10103 || (TARGET_32BIT && TARGET_POWERPC64))
10104 {
10105 /* If this is partially on the stack, then we only
10106 include the portion actually in registers here. */
10107 enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
10108 int i = 0;
10109
10110 if (align_words + n_words > GP_ARG_NUM_REG)
10111 {
10112 /* Not all of the arg fits in gprs. Say that it goes in memory
10113 too, using a magic NULL_RTX component. Also see comment in
10114 rs6000_mixed_function_arg for why the normal
10115 function_arg_partial_nregs scheme doesn't work in this case. */
10116 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
10117 }
10118
10119 do
10120 {
10121 rtx r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
10122 rtx off = GEN_INT (i++ * GET_MODE_SIZE (rmode));
10123 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10124 }
10125 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
10126 }
10127 else
10128 {
10129 /* The whole arg fits in gprs. */
10130 rtx r = gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
10131 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, const0_rtx);
10132 }
10133 }
10134 else
10135 {
10136 /* It's entirely in memory. */
10137 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, NULL_RTX, const0_rtx);
10138 }
10139
10140 return k;
10141 }
10142
10143 /* RVEC is a vector of K components of an argument of mode MODE.
10144 Construct the final function_arg return value from it. */
10145
10146 static rtx
10147 rs6000_finish_function_arg (enum machine_mode mode, rtx *rvec, int k)
10148 {
10149 gcc_assert (k >= 1);
10150
10151 /* Avoid returning a PARALLEL in the trivial cases. */
10152 if (k == 1)
10153 {
10154 if (XEXP (rvec[0], 0) == NULL_RTX)
10155 return NULL_RTX;
10156
10157 if (GET_MODE (XEXP (rvec[0], 0)) == mode)
10158 return XEXP (rvec[0], 0);
10159 }
10160
10161 return gen_rtx_PARALLEL (mode, gen_rtvec_v (k, rvec));
10162 }
10163
10164 /* Determine where to put an argument to a function.
10165 Value is zero to push the argument on the stack,
10166 or a hard register in which to store the argument.
10167
10168 MODE is the argument's machine mode.
10169 TYPE is the data type of the argument (as a tree).
10170 This is null for libcalls where that information may
10171 not be available.
10172 CUM is a variable of type CUMULATIVE_ARGS which gives info about
10173 the preceding args and about the function being called. It is
10174 not modified in this routine.
10175 NAMED is nonzero if this argument is a named parameter
10176 (otherwise it is an extra parameter matching an ellipsis).
10177
10178 On RS/6000 the first eight words of non-FP are normally in registers
10179 and the rest are pushed. Under AIX, the first 13 FP args are in registers.
10180 Under V.4, the first 8 FP args are in registers.
10181
10182 If this is floating-point and no prototype is specified, we use
10183 both an FP and integer register (or possibly FP reg and stack). Library
10184 functions (when CALL_LIBCALL is set) always have the proper types for args,
10185 so we can pass the FP value just in one register. emit_library_call
10186 doesn't support PARALLEL anyway.
10187
10188 Note that for args passed by reference, function_arg will be called
10189 with MODE and TYPE set to that of the pointer to the arg, not the arg
10190 itself. */
10191
10192 static rtx
10193 rs6000_function_arg (cumulative_args_t cum_v, enum machine_mode mode,
10194 const_tree type, bool named)
10195 {
10196 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
10197 enum rs6000_abi abi = DEFAULT_ABI;
10198 enum machine_mode elt_mode;
10199 int n_elts;
10200
10201 /* Return a marker to indicate whether we need to set or clear the CR1
10202 bit that V.4 uses to say fp args were passed in registers.
10203 Assume that we don't need the marker for software floating point,
10204 or compiler generated library calls. */
10205 if (mode == VOIDmode)
10206 {
10207 if (abi == ABI_V4
10208 && (cum->call_cookie & CALL_LIBCALL) == 0
10209 && (cum->stdarg
10210 || (cum->nargs_prototype < 0
10211 && (cum->prototype || TARGET_NO_PROTOTYPE))))
10212 {
10213 /* For the SPE, we need to crxor CR6 always. */
10214 if (TARGET_SPE_ABI)
10215 return GEN_INT (cum->call_cookie | CALL_V4_SET_FP_ARGS);
10216 else if (TARGET_HARD_FLOAT && TARGET_FPRS)
10217 return GEN_INT (cum->call_cookie
10218 | ((cum->fregno == FP_ARG_MIN_REG)
10219 ? CALL_V4_SET_FP_ARGS
10220 : CALL_V4_CLEAR_FP_ARGS));
10221 }
10222
10223 return GEN_INT (cum->call_cookie & ~CALL_LIBCALL);
10224 }
10225
10226 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10227
10228 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
10229 {
10230 rtx rslt = rs6000_darwin64_record_arg (cum, type, named, /*retval= */false);
10231 if (rslt != NULL_RTX)
10232 return rslt;
10233 /* Else fall through to usual handling. */
10234 }
10235
10236 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
10237 {
10238 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
10239 rtx r, off;
10240 int i, k = 0;
10241
10242 /* Do we also need to pass this argument in the parameter
10243 save area? */
10244 if (TARGET_64BIT && ! cum->prototype)
10245 {
10246 int align_words = (cum->words + 1) & ~1;
10247 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
10248 }
10249
10250 /* Describe where this argument goes in the vector registers. */
10251 for (i = 0; i < n_elts && cum->vregno + i <= ALTIVEC_ARG_MAX_REG; i++)
10252 {
10253 r = gen_rtx_REG (elt_mode, cum->vregno + i);
10254 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
10255 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10256 }
10257
10258 return rs6000_finish_function_arg (mode, rvec, k);
10259 }
10260 else if (TARGET_ALTIVEC_ABI
10261 && (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
10262 || (type && TREE_CODE (type) == VECTOR_TYPE
10263 && int_size_in_bytes (type) == 16)))
10264 {
10265 if (named || abi == ABI_V4)
10266 return NULL_RTX;
10267 else
10268 {
10269 /* Vector parameters to varargs functions under AIX or Darwin
10270 get passed in memory and possibly also in GPRs. */
10271 int align, align_words, n_words;
10272 enum machine_mode part_mode;
10273
10274 /* Vector parameters must be 16-byte aligned. In 32-bit
10275 mode this means we need to take into account the offset
10276 to the parameter save area. In 64-bit mode, they just
10277 have to start on an even word, since the parameter save
10278 area is 16-byte aligned. */
10279 if (TARGET_32BIT)
10280 align = -(rs6000_parm_offset () + cum->words) & 3;
10281 else
10282 align = cum->words & 1;
10283 align_words = cum->words + align;
10284
10285 /* Out of registers? Memory, then. */
10286 if (align_words >= GP_ARG_NUM_REG)
10287 return NULL_RTX;
10288
10289 if (TARGET_32BIT && TARGET_POWERPC64)
10290 return rs6000_mixed_function_arg (mode, type, align_words);
10291
10292 /* The vector value goes in GPRs. Only the part of the
10293 value in GPRs is reported here. */
10294 part_mode = mode;
10295 n_words = rs6000_arg_size (mode, type);
10296 if (align_words + n_words > GP_ARG_NUM_REG)
10297 /* Fortunately, there are only two possibilities, the value
10298 is either wholly in GPRs or half in GPRs and half not. */
10299 part_mode = DImode;
10300
10301 return gen_rtx_REG (part_mode, GP_ARG_MIN_REG + align_words);
10302 }
10303 }
10304 else if (TARGET_SPE_ABI && TARGET_SPE
10305 && (SPE_VECTOR_MODE (mode)
10306 || (TARGET_E500_DOUBLE && (mode == DFmode
10307 || mode == DCmode
10308 || mode == TFmode
10309 || mode == TCmode))))
10310 return rs6000_spe_function_arg (cum, mode, type);
10311
10312 else if (abi == ABI_V4)
10313 {
10314 if (TARGET_HARD_FLOAT && TARGET_FPRS
10315 && ((TARGET_SINGLE_FLOAT && mode == SFmode)
10316 || (TARGET_DOUBLE_FLOAT && mode == DFmode)
10317 || (mode == TFmode && !TARGET_IEEEQUAD)
10318 || mode == SDmode || mode == DDmode || mode == TDmode))
10319 {
10320 /* _Decimal128 must use an even/odd register pair. This assumes
10321 that the register number is odd when fregno is odd. */
10322 if (mode == TDmode && (cum->fregno % 2) == 1)
10323 cum->fregno++;
10324
10325 if (cum->fregno + (mode == TFmode || mode == TDmode ? 1 : 0)
10326 <= FP_ARG_V4_MAX_REG)
10327 return gen_rtx_REG (mode, cum->fregno);
10328 else
10329 return NULL_RTX;
10330 }
10331 else
10332 {
10333 int n_words = rs6000_arg_size (mode, type);
10334 int gregno = cum->sysv_gregno;
10335
10336 /* Long long and SPE vectors are put in (r3,r4), (r5,r6),
10337 (r7,r8) or (r9,r10), as is any other 2-word item such
10338 as complex int due to a historical mistake. */
10339 if (n_words == 2)
10340 gregno += (1 - gregno) & 1;
10341
10342 /* Multi-reg args are not split between registers and stack. */
10343 if (gregno + n_words - 1 > GP_ARG_MAX_REG)
10344 return NULL_RTX;
10345
10346 if (TARGET_32BIT && TARGET_POWERPC64)
10347 return rs6000_mixed_function_arg (mode, type,
10348 gregno - GP_ARG_MIN_REG);
10349 return gen_rtx_REG (mode, gregno);
10350 }
10351 }
10352 else
10353 {
10354 int align_words = rs6000_parm_start (mode, type, cum->words);
10355
10356 /* _Decimal128 must be passed in an even/odd float register pair.
10357 This assumes that the register number is odd when fregno is odd. */
10358 if (elt_mode == TDmode && (cum->fregno % 2) == 1)
10359 cum->fregno++;
10360
10361 if (USE_FP_FOR_ARG_P (cum, elt_mode))
10362 {
10363 rtx rvec[GP_ARG_NUM_REG + AGGR_ARG_NUM_REG + 1];
10364 rtx r, off;
10365 int i, k = 0;
10366 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
10367 int fpr_words;
10368
10369 /* Do we also need to pass this argument in the parameter
10370 save area? */
10371 if (type && (cum->nargs_prototype <= 0
10372 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10373 && TARGET_XL_COMPAT
10374 && align_words >= GP_ARG_NUM_REG)))
10375 k = rs6000_psave_function_arg (mode, type, align_words, rvec);
10376
10377 /* Describe where this argument goes in the fprs. */
10378 for (i = 0; i < n_elts
10379 && cum->fregno + i * n_fpreg <= FP_ARG_MAX_REG; i++)
10380 {
10381 /* Check if the argument is split over registers and memory.
10382 This can only ever happen for long double or _Decimal128;
10383 complex types are handled via split_complex_arg. */
10384 enum machine_mode fmode = elt_mode;
10385 if (cum->fregno + (i + 1) * n_fpreg > FP_ARG_MAX_REG + 1)
10386 {
10387 gcc_assert (fmode == TFmode || fmode == TDmode);
10388 fmode = DECIMAL_FLOAT_MODE_P (fmode) ? DDmode : DFmode;
10389 }
10390
10391 r = gen_rtx_REG (fmode, cum->fregno + i * n_fpreg);
10392 off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
10393 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10394 }
10395
10396 /* If there were not enough FPRs to hold the argument, the rest
10397 usually goes into memory. However, if the current position
10398 is still within the register parameter area, a portion may
10399 actually have to go into GPRs.
10400
10401 Note that it may happen that the portion of the argument
10402 passed in the first "half" of the first GPR was already
10403 passed in the last FPR as well.
10404
10405 For unnamed arguments, we already set up GPRs to cover the
10406 whole argument in rs6000_psave_function_arg, so there is
10407 nothing further to do at this point. */
10408 fpr_words = (i * GET_MODE_SIZE (elt_mode)) / (TARGET_32BIT ? 4 : 8);
10409 if (i < n_elts && align_words + fpr_words < GP_ARG_NUM_REG
10410 && cum->nargs_prototype > 0)
10411 {
10412 static bool warned;
10413
10414 enum machine_mode rmode = TARGET_32BIT ? SImode : DImode;
10415 int n_words = rs6000_arg_size (mode, type);
10416
10417 align_words += fpr_words;
10418 n_words -= fpr_words;
10419
10420 do
10421 {
10422 r = gen_rtx_REG (rmode, GP_ARG_MIN_REG + align_words);
10423 off = GEN_INT (fpr_words++ * GET_MODE_SIZE (rmode));
10424 rvec[k++] = gen_rtx_EXPR_LIST (VOIDmode, r, off);
10425 }
10426 while (++align_words < GP_ARG_NUM_REG && --n_words != 0);
10427
10428 if (!warned && warn_psabi)
10429 {
10430 warned = true;
10431 inform (input_location,
10432 "the ABI of passing homogeneous float aggregates"
10433 " has changed in GCC 4.10");
10434 }
10435 }
10436
10437 return rs6000_finish_function_arg (mode, rvec, k);
10438 }
10439 else if (align_words < GP_ARG_NUM_REG)
10440 {
10441 if (TARGET_32BIT && TARGET_POWERPC64)
10442 return rs6000_mixed_function_arg (mode, type, align_words);
10443
10444 return gen_rtx_REG (mode, GP_ARG_MIN_REG + align_words);
10445 }
10446 else
10447 return NULL_RTX;
10448 }
10449 }
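/* Illustration (not from the original file): a self-contained sketch of the
   convention the code above implements.  Under the 64-bit ELFv2 ABI a
   homogeneous aggregate of up to eight floating-point members is passed in
   consecutive FPRs (f1..f3 for the struct below); once the FPR argument
   registers are exhausted, the remainder spills to GPRs or memory, exactly
   the split the PARALLEL construction above describes.  */
struct hfa_sketch { double x, y, z; };	/* f1, f2, f3 under ELFv2 */

static double
hfa_sum_sketch (struct hfa_sketch h)
{
  return h.x + h.y + h.z;
}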
10450 \f
10451 /* For an arg passed partly in registers and partly in memory, this is
10452 the number of bytes passed in registers. For args passed entirely in
10453 registers or entirely in memory, zero. When an arg is described by a
10454 PARALLEL, perhaps using more than one register type, this function
10455 returns the number of bytes used by the first element of the PARALLEL. */
10456
10457 static int
10458 rs6000_arg_partial_bytes (cumulative_args_t cum_v, enum machine_mode mode,
10459 tree type, bool named)
10460 {
10461 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
10462 bool passed_in_gprs = true;
10463 int ret = 0;
10464 int align_words;
10465 enum machine_mode elt_mode;
10466 int n_elts;
10467
10468 rs6000_discover_homogeneous_aggregate (mode, type, &elt_mode, &n_elts);
10469
10470 if (DEFAULT_ABI == ABI_V4)
10471 return 0;
10472
10473 if (USE_ALTIVEC_FOR_ARG_P (cum, elt_mode, named))
10474 {
10475 /* If we are passing this arg in the fixed parameter save area
10476 (gprs or memory) as well as VRs, we do not use the partial
10477 bytes mechanism; instead, rs6000_function_arg will return a
10478 PARALLEL including a memory element as necessary. */
10479 if (TARGET_64BIT && ! cum->prototype)
10480 return 0;
10481
10482 /* Otherwise, we pass in VRs only. Check for partial copies. */
10483 passed_in_gprs = false;
10484 if (cum->vregno + n_elts > ALTIVEC_ARG_MAX_REG + 1)
10485 ret = (ALTIVEC_ARG_MAX_REG + 1 - cum->vregno) * 16;
10486 }
10487
10488 /* In this complicated case we just disable the partial_nregs code. */
10489 if (TARGET_MACHO && rs6000_darwin64_struct_check_p (mode, type))
10490 return 0;
10491
10492 align_words = rs6000_parm_start (mode, type, cum->words);
10493
10494 if (USE_FP_FOR_ARG_P (cum, elt_mode))
10495 {
10496 unsigned long n_fpreg = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
10497
10498 /* If we are passing this arg in the fixed parameter save area
10499 (gprs or memory) as well as FPRs, we do not use the partial
10500 bytes mechanism; instead, rs6000_function_arg will return a
10501 PARALLEL including a memory element as necessary. */
10502 if (type
10503 && (cum->nargs_prototype <= 0
10504 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
10505 && TARGET_XL_COMPAT
10506 && align_words >= GP_ARG_NUM_REG)))
10507 return 0;
10508
10509 /* Otherwise, we pass in FPRs only. Check for partial copies. */
10510 passed_in_gprs = false;
10511 if (cum->fregno + n_elts * n_fpreg > FP_ARG_MAX_REG + 1)
10512 {
10513 /* Compute number of bytes / words passed in FPRs. If there
10514 is still space available in the register parameter area
10515 *after* that amount, a part of the argument will be passed
10516 in GPRs. In that case, the total amount passed in any
10517 registers is equal to the amount that would have been passed
10518 in GPRs if everything were passed there, so we fall back to
10519 the GPR code below to compute the appropriate value. */
10520 int fpr = ((FP_ARG_MAX_REG + 1 - cum->fregno)
10521 * MIN (8, GET_MODE_SIZE (elt_mode)));
10522 int fpr_words = fpr / (TARGET_32BIT ? 4 : 8);
10523
10524 if (align_words + fpr_words < GP_ARG_NUM_REG)
10525 passed_in_gprs = true;
10526 else
10527 ret = fpr;
10528 }
10529 }
10530
10531 if (passed_in_gprs
10532 && align_words < GP_ARG_NUM_REG
10533 && GP_ARG_NUM_REG < align_words + rs6000_arg_size (mode, type))
10534 ret = (GP_ARG_NUM_REG - align_words) * (TARGET_32BIT ? 4 : 8);
10535
10536 if (ret != 0 && TARGET_DEBUG_ARG)
10537 fprintf (stderr, "rs6000_arg_partial_bytes: %d\n", ret);
10538
10539 return ret;
10540 }
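/* Illustration (standalone sketch, hypothetical names): the FPR-bytes
   computation above, mirroring
   fpr = (FP_ARG_MAX_REG + 1 - cum->fregno) * MIN (8, GET_MODE_SIZE (elt_mode));
   e.g. with one FPR left and a 16-byte IBM long double, 8 bytes go in the
   last FPR and the remaining 8 fall to the GPR/stack logic below it.  */
static int
fpr_bytes_sketch (int next_free_fpr, int last_arg_fpr, int elt_size)
{
  int nregs = last_arg_fpr + 1 - next_free_fpr;	/* FPRs still available */
  int per_reg = elt_size < 8 ? elt_size : 8;	/* MIN (8, size) */
  return nregs * per_reg;
}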
10541 \f
10542 /* A C expression that indicates when an argument must be passed by
10543 reference. If nonzero for an argument, a copy of that argument is
10544 made in memory and a pointer to the argument is passed instead of
10545 the argument itself. The pointer is passed in whatever way is
10546 appropriate for passing a pointer to that type.
10547
10548 Under V.4, aggregates and long double are passed by reference.
10549
10550 As an extension to all 32-bit ABIs, AltiVec vectors are passed by
10551 reference unless the AltiVec vector extension ABI is in force.
10552
10553 As an extension to all ABIs, variable sized types are passed by
10554 reference. */
10555
10556 static bool
10557 rs6000_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
10558 enum machine_mode mode, const_tree type,
10559 bool named ATTRIBUTE_UNUSED)
10560 {
10561 if (DEFAULT_ABI == ABI_V4 && TARGET_IEEEQUAD && mode == TFmode)
10562 {
10563 if (TARGET_DEBUG_ARG)
10564 fprintf (stderr, "function_arg_pass_by_reference: V4 long double\n");
10565 return 1;
10566 }
10567
10568 if (!type)
10569 return 0;
10570
10571 if (DEFAULT_ABI == ABI_V4 && AGGREGATE_TYPE_P (type))
10572 {
10573 if (TARGET_DEBUG_ARG)
10574 fprintf (stderr, "function_arg_pass_by_reference: V4 aggregate\n");
10575 return 1;
10576 }
10577
10578 if (int_size_in_bytes (type) < 0)
10579 {
10580 if (TARGET_DEBUG_ARG)
10581 fprintf (stderr, "function_arg_pass_by_reference: variable size\n");
10582 return 1;
10583 }
10584
10585 /* Allow -maltivec -mabi=no-altivec without warning. Altivec vector
10586 modes only exist for GCC vector types if -maltivec. */
10587 if (TARGET_32BIT && !TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
10588 {
10589 if (TARGET_DEBUG_ARG)
10590 fprintf (stderr, "function_arg_pass_by_reference: AltiVec\n");
10591 return 1;
10592 }
10593
10594 /* Pass synthetic vectors in memory. */
10595 if (TREE_CODE (type) == VECTOR_TYPE
10596 && int_size_in_bytes (type) > (TARGET_ALTIVEC_ABI ? 16 : 8))
10597 {
10598 static bool warned_for_pass_big_vectors = false;
10599 if (TARGET_DEBUG_ARG)
10600 fprintf (stderr, "function_arg_pass_by_reference: synthetic vector\n");
10601 if (!warned_for_pass_big_vectors)
10602 {
10603 warning (0, "GCC vector passed by reference: "
10604 "non-standard ABI extension with no compatibility guarantee");
10605 warned_for_pass_big_vectors = true;
10606 }
10607 return 1;
10608 }
10609
10610 return 0;
10611 }
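/* Illustration (standalone sketch, hypothetical names): the V.4 part of the
   decision ladder above, reduced to a pure predicate.  The real function
   additionally consults TARGET_IEEEQUAD, the AltiVec ABI switches, and the
   size check for synthetic vectors.  */
#include <stdbool.h>

struct v4_arg_sketch
{
  bool is_aggregate;		/* any struct/union/array type */
  bool is_ieeequad_tf;		/* IEEE 128-bit long double */
  long size_in_bytes;		/* < 0 means variable-sized */
};

static bool
v4_pass_by_reference_sketch (const struct v4_arg_sketch *a)
{
  return a->is_ieeequad_tf	/* V.4 IEEE long double */
	 || a->is_aggregate	/* all aggregates under V.4 */
	 || a->size_in_bytes < 0;	/* variable-sized, any ABI */
}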
10612
10613 /* Process parameter of type TYPE after ARGS_SO_FAR parameters were
10614 already processed. Return true if the parameter must be passed
10615 (fully or partially) on the stack. */
10616
10617 static bool
10618 rs6000_parm_needs_stack (cumulative_args_t args_so_far, tree type)
10619 {
10620 enum machine_mode mode;
10621 int unsignedp;
10622 rtx entry_parm;
10623
10624 /* Catch errors. */
10625 if (type == NULL || type == error_mark_node)
10626 return true;
10627
10628 /* Handle types with no storage requirement. */
10629 if (TYPE_MODE (type) == VOIDmode)
10630 return false;
10631
10632 /* Handle complex types. The element type is deliberately checked twice, once for the real and once for the imaginary part; each call that returns false advances ARGS_SO_FAR past one component. */
10633 if (TREE_CODE (type) == COMPLEX_TYPE)
10634 return (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type))
10635 || rs6000_parm_needs_stack (args_so_far, TREE_TYPE (type)));
10636
10637 /* Handle transparent aggregates. */
10638 if ((TREE_CODE (type) == UNION_TYPE || TREE_CODE (type) == RECORD_TYPE)
10639 && TYPE_TRANSPARENT_AGGR (type))
10640 type = TREE_TYPE (first_field (type));
10641
10642 /* See if this arg was passed by invisible reference. */
10643 if (pass_by_reference (get_cumulative_args (args_so_far),
10644 TYPE_MODE (type), type, true))
10645 type = build_pointer_type (type);
10646
10647 /* Find mode as it is passed by the ABI. */
10648 unsignedp = TYPE_UNSIGNED (type);
10649 mode = promote_mode (type, TYPE_MODE (type), &unsignedp);
10650
10651 /* If we must pass in stack, we need a stack. */
10652 if (rs6000_must_pass_in_stack (mode, type))
10653 return true;
10654
10655 /* If there is no incoming register, we need a stack. */
10656 entry_parm = rs6000_function_arg (args_so_far, mode, type, true);
10657 if (entry_parm == NULL)
10658 return true;
10659
10660 /* Likewise if we need to pass both in registers and on the stack. */
10661 if (GET_CODE (entry_parm) == PARALLEL
10662 && XEXP (XVECEXP (entry_parm, 0, 0), 0) == NULL_RTX)
10663 return true;
10664
10665 /* Also true if we're partially in registers and partially not. */
10666 if (rs6000_arg_partial_bytes (args_so_far, mode, type, true) != 0)
10667 return true;
10668
10669 /* Update info on where next arg arrives in registers. */
10670 rs6000_function_arg_advance (args_so_far, mode, type, true);
10671 return false;
10672 }
10673
10674 /* Return true if FUN has no prototype, has a variable argument
10675 list, or passes any parameter in memory. */
10676
10677 static bool
10678 rs6000_function_parms_need_stack (tree fun, bool incoming)
10679 {
10680 tree fntype, result;
10681 CUMULATIVE_ARGS args_so_far_v;
10682 cumulative_args_t args_so_far;
10683
10684 if (!fun)
10685 /* Must be a libcall, all of which only use reg parms. */
10686 return false;
10687
10688 fntype = fun;
10689 if (!TYPE_P (fun))
10690 fntype = TREE_TYPE (fun);
10691
10692 /* Varargs functions need the parameter save area. */
10693 if ((!incoming && !prototype_p (fntype)) || stdarg_p (fntype))
10694 return true;
10695
10696 INIT_CUMULATIVE_INCOMING_ARGS (args_so_far_v, fntype, NULL_RTX);
10697 args_so_far = pack_cumulative_args (&args_so_far_v);
10698
10699 /* When incoming, we will have been passed the function decl.
10700 It is necessary to use the decl to handle K&R style functions,
10701 where TYPE_ARG_TYPES may not be available. */
10702 if (incoming)
10703 {
10704 gcc_assert (DECL_P (fun));
10705 result = DECL_RESULT (fun);
10706 }
10707 else
10708 result = TREE_TYPE (fntype);
10709
10710 if (result && aggregate_value_p (result, fntype))
10711 {
10712 if (!TYPE_P (result))
10713 result = TREE_TYPE (result);
10714 result = build_pointer_type (result);
10715 rs6000_parm_needs_stack (args_so_far, result);
10716 }
10717
10718 if (incoming)
10719 {
10720 tree parm;
10721
10722 for (parm = DECL_ARGUMENTS (fun);
10723 parm && parm != void_list_node;
10724 parm = TREE_CHAIN (parm))
10725 if (rs6000_parm_needs_stack (args_so_far, TREE_TYPE (parm)))
10726 return true;
10727 }
10728 else
10729 {
10730 function_args_iterator args_iter;
10731 tree arg_type;
10732
10733 FOREACH_FUNCTION_ARGS (fntype, arg_type, args_iter)
10734 if (rs6000_parm_needs_stack (args_so_far, arg_type))
10735 return true;
10736 }
10737
10738 return false;
10739 }
10740
10741 /* Return the size of the REG_PARM_STACK_SPACE area for FUN. This is
10742 usually a constant depending on the ABI. However, in the ELFv2 ABI
10743 the register parameter area is optional when calling a function that
10744 has a prototype in scope, has no variable argument list, and passes
10745 all parameters in registers. */
10746
10747 int
10748 rs6000_reg_parm_stack_space (tree fun, bool incoming)
10749 {
10750 int reg_parm_stack_space;
10751
10752 switch (DEFAULT_ABI)
10753 {
10754 default:
10755 reg_parm_stack_space = 0;
10756 break;
10757
10758 case ABI_AIX:
10759 case ABI_DARWIN:
10760 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
10761 break;
10762
10763 case ABI_ELFv2:
10764 /* ??? Recomputing this every time is a bit expensive. Is there
10765 a place to cache this information? */
10766 if (rs6000_function_parms_need_stack (fun, incoming))
10767 reg_parm_stack_space = TARGET_64BIT ? 64 : 32;
10768 else
10769 reg_parm_stack_space = 0;
10770 break;
10771 }
10772
10773 return reg_parm_stack_space;
10774 }
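/* Illustration: where the 64/32 constants above come from.  The parameter
   save area covers the eight GPR argument registers r3..r10, so it is
   8 * word-size bytes.  Standalone sketch: */
static int
parm_save_area_bytes_sketch (int target_64bit)
{
  const int n_arg_gprs = 8;			/* r3 .. r10 */
  return n_arg_gprs * (target_64bit ? 8 : 4);	/* 64 or 32 bytes */
}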
10775
10776 static void
10777 rs6000_move_block_from_reg (int regno, rtx x, int nregs)
10778 {
10779 int i;
10780 enum machine_mode reg_mode = TARGET_32BIT ? SImode : DImode;
10781
10782 if (nregs == 0)
10783 return;
10784
10785 for (i = 0; i < nregs; i++)
10786 {
10787 rtx tem = adjust_address_nv (x, reg_mode, i * GET_MODE_SIZE (reg_mode));
10788 if (reload_completed)
10789 {
10790 if (! strict_memory_address_p (reg_mode, XEXP (tem, 0)))
10791 tem = NULL_RTX;
10792 else
10793 tem = simplify_gen_subreg (reg_mode, x, BLKmode,
10794 i * GET_MODE_SIZE (reg_mode));
10795 }
10796 else
10797 tem = replace_equiv_address (tem, XEXP (tem, 0));
10798
10799 gcc_assert (tem);
10800
10801 emit_move_insn (tem, gen_rtx_REG (reg_mode, regno + i));
10802 }
10803 }
10804 \f
10805 /* Perform any actions needed for a function that is receiving a
10806 variable number of arguments.
10807
10808 CUM is as above.
10809
10810 MODE and TYPE are the mode and type of the current parameter.
10811
10812 PRETEND_SIZE is a variable that should be set to the amount of stack
10813 that must be pushed by the prolog to pretend that our caller pushed
10814 it.
10815
10816 Normally, this macro will push all remaining incoming registers on the
10817 stack and set PRETEND_SIZE to the length of the registers pushed. */
10818
10819 static void
10820 setup_incoming_varargs (cumulative_args_t cum, enum machine_mode mode,
10821 tree type, int *pretend_size ATTRIBUTE_UNUSED,
10822 int no_rtl)
10823 {
10824 CUMULATIVE_ARGS next_cum;
10825 int reg_size = TARGET_32BIT ? 4 : 8;
10826 rtx save_area = NULL_RTX, mem;
10827 int first_reg_offset;
10828 alias_set_type set;
10829
10830 /* Skip the last named argument. */
10831 next_cum = *get_cumulative_args (cum);
10832 rs6000_function_arg_advance_1 (&next_cum, mode, type, true, 0);
10833
10834 if (DEFAULT_ABI == ABI_V4)
10835 {
10836 first_reg_offset = next_cum.sysv_gregno - GP_ARG_MIN_REG;
10837
10838 if (! no_rtl)
10839 {
10840 int gpr_reg_num = 0, gpr_size = 0, fpr_size = 0;
10841 HOST_WIDE_INT offset = 0;
10842
10843 /* Try to optimize the size of the varargs save area.
10844 The ABI requires that ap.reg_save_area is doubleword
10845 aligned, but we don't need to allocate space for all
10846 the bytes, only those to which we actually will save
10847 anything. */
10848 if (cfun->va_list_gpr_size && first_reg_offset < GP_ARG_NUM_REG)
10849 gpr_reg_num = GP_ARG_NUM_REG - first_reg_offset;
10850 if (TARGET_HARD_FLOAT && TARGET_FPRS
10851 && next_cum.fregno <= FP_ARG_V4_MAX_REG
10852 && cfun->va_list_fpr_size)
10853 {
10854 if (gpr_reg_num)
10855 fpr_size = (next_cum.fregno - FP_ARG_MIN_REG)
10856 * UNITS_PER_FP_WORD;
10857 if (cfun->va_list_fpr_size
10858 < FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
10859 fpr_size += cfun->va_list_fpr_size * UNITS_PER_FP_WORD;
10860 else
10861 fpr_size += (FP_ARG_V4_MAX_REG + 1 - next_cum.fregno)
10862 * UNITS_PER_FP_WORD;
10863 }
10864 if (gpr_reg_num)
10865 {
10866 offset = -((first_reg_offset * reg_size) & ~7);
10867 if (!fpr_size && gpr_reg_num > cfun->va_list_gpr_size)
10868 {
10869 gpr_reg_num = cfun->va_list_gpr_size;
10870 if (reg_size == 4 && (first_reg_offset & 1))
10871 gpr_reg_num++;
10872 }
10873 gpr_size = (gpr_reg_num * reg_size + 7) & ~7;
10874 }
10875 else if (fpr_size)
10876 offset = - (int) (next_cum.fregno - FP_ARG_MIN_REG)
10877 * UNITS_PER_FP_WORD
10878 - (int) (GP_ARG_NUM_REG * reg_size);
10879
10880 if (gpr_size + fpr_size)
10881 {
10882 rtx reg_save_area
10883 = assign_stack_local (BLKmode, gpr_size + fpr_size, 64);
10884 gcc_assert (GET_CODE (reg_save_area) == MEM);
10885 reg_save_area = XEXP (reg_save_area, 0);
10886 if (GET_CODE (reg_save_area) == PLUS)
10887 {
10888 gcc_assert (XEXP (reg_save_area, 0)
10889 == virtual_stack_vars_rtx);
10890 gcc_assert (GET_CODE (XEXP (reg_save_area, 1)) == CONST_INT);
10891 offset += INTVAL (XEXP (reg_save_area, 1));
10892 }
10893 else
10894 gcc_assert (reg_save_area == virtual_stack_vars_rtx);
10895 }
10896
10897 cfun->machine->varargs_save_offset = offset;
10898 save_area = plus_constant (Pmode, virtual_stack_vars_rtx, offset);
10899 }
10900 }
10901 else
10902 {
10903 first_reg_offset = next_cum.words;
10904 save_area = virtual_incoming_args_rtx;
10905
10906 if (targetm.calls.must_pass_in_stack (mode, type))
10907 first_reg_offset += rs6000_arg_size (TYPE_MODE (type), type);
10908 }
10909
10910 set = get_varargs_alias_set ();
10911 if (! no_rtl && first_reg_offset < GP_ARG_NUM_REG
10912 && cfun->va_list_gpr_size)
10913 {
10914 int n_gpr, nregs = GP_ARG_NUM_REG - first_reg_offset;
10915
10916 if (va_list_gpr_counter_field)
10917 /* V4 va_list_gpr_size counts number of registers needed. */
10918 n_gpr = cfun->va_list_gpr_size;
10919 else
10920 /* char * va_list instead counts number of bytes needed. */
10921 n_gpr = (cfun->va_list_gpr_size + reg_size - 1) / reg_size;
10922
10923 if (nregs > n_gpr)
10924 nregs = n_gpr;
10925
10926 mem = gen_rtx_MEM (BLKmode,
10927 plus_constant (Pmode, save_area,
10928 first_reg_offset * reg_size));
10929 MEM_NOTRAP_P (mem) = 1;
10930 set_mem_alias_set (mem, set);
10931 set_mem_align (mem, BITS_PER_WORD);
10932
10933 rs6000_move_block_from_reg (GP_ARG_MIN_REG + first_reg_offset, mem,
10934 nregs);
10935 }
10936
10937 /* Save FP registers if needed. */
10938 if (DEFAULT_ABI == ABI_V4
10939 && TARGET_HARD_FLOAT && TARGET_FPRS
10940 && ! no_rtl
10941 && next_cum.fregno <= FP_ARG_V4_MAX_REG
10942 && cfun->va_list_fpr_size)
10943 {
10944 int fregno = next_cum.fregno, nregs;
10945 rtx cr1 = gen_rtx_REG (CCmode, CR1_REGNO);
10946 rtx lab = gen_label_rtx ();
10947 int off = (GP_ARG_NUM_REG * reg_size) + ((fregno - FP_ARG_MIN_REG)
10948 * UNITS_PER_FP_WORD);
10949
10950 emit_jump_insn
10951 (gen_rtx_SET (VOIDmode,
10952 pc_rtx,
10953 gen_rtx_IF_THEN_ELSE (VOIDmode,
10954 gen_rtx_NE (VOIDmode, cr1,
10955 const0_rtx),
10956 gen_rtx_LABEL_REF (VOIDmode, lab),
10957 pc_rtx)));
10958
10959 for (nregs = 0;
10960 fregno <= FP_ARG_V4_MAX_REG && nregs < cfun->va_list_fpr_size;
10961 fregno++, off += UNITS_PER_FP_WORD, nregs++)
10962 {
10963 mem = gen_rtx_MEM ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
10964 ? DFmode : SFmode,
10965 plus_constant (Pmode, save_area, off));
10966 MEM_NOTRAP_P (mem) = 1;
10967 set_mem_alias_set (mem, set);
10968 set_mem_align (mem, GET_MODE_ALIGNMENT (
10969 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
10970 ? DFmode : SFmode));
10971 emit_move_insn (mem, gen_rtx_REG (
10972 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT)
10973 ? DFmode : SFmode, fregno));
10974 }
10975
10976 emit_label (lab);
10977 }
10978 }
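/* Illustration (layout sketch, not a definition used by this file): the V.4
   varargs register save area that the stores above populate.  The GPR block
   for r3..r10 comes first, then the FPR argument registers f1..f8 -- note
   the FPR offset above starts at GP_ARG_NUM_REG * reg_size.  The FPR block
   is written only when the condition-register bit the SVR4 ABI reserves for
   "floating-point arguments present" allows it (the conditional jump around
   the FPR stores).  */
struct v4_reg_save_area_sketch
{
  unsigned int gpr[8];		/* r3..r10, one 32-bit word each */
  double fpr[8];		/* f1..f8, 8 bytes each */
};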
10979
10980 /* Create the va_list data type. */
10981
10982 static tree
10983 rs6000_build_builtin_va_list (void)
10984 {
10985 tree f_gpr, f_fpr, f_res, f_ovf, f_sav, record, type_decl;
10986
10987 /* For AIX, prefer 'char *' because that's what the system
10988 header files like. */
10989 if (DEFAULT_ABI != ABI_V4)
10990 return build_pointer_type (char_type_node);
10991
10992 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
10993 type_decl = build_decl (BUILTINS_LOCATION, TYPE_DECL,
10994 get_identifier ("__va_list_tag"), record);
10995
10996 f_gpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("gpr"),
10997 unsigned_char_type_node);
10998 f_fpr = build_decl (BUILTINS_LOCATION, FIELD_DECL, get_identifier ("fpr"),
10999 unsigned_char_type_node);
11000 /* Give the two bytes of padding a name, so that -Wpadded won't warn on
11001 every user file. */
11002 f_res = build_decl (BUILTINS_LOCATION, FIELD_DECL,
11003 get_identifier ("reserved"), short_unsigned_type_node);
11004 f_ovf = build_decl (BUILTINS_LOCATION, FIELD_DECL,
11005 get_identifier ("overflow_arg_area"),
11006 ptr_type_node);
11007 f_sav = build_decl (BUILTINS_LOCATION, FIELD_DECL,
11008 get_identifier ("reg_save_area"),
11009 ptr_type_node);
11010
11011 va_list_gpr_counter_field = f_gpr;
11012 va_list_fpr_counter_field = f_fpr;
11013
11014 DECL_FIELD_CONTEXT (f_gpr) = record;
11015 DECL_FIELD_CONTEXT (f_fpr) = record;
11016 DECL_FIELD_CONTEXT (f_res) = record;
11017 DECL_FIELD_CONTEXT (f_ovf) = record;
11018 DECL_FIELD_CONTEXT (f_sav) = record;
11019
11020 TYPE_STUB_DECL (record) = type_decl;
11021 TYPE_NAME (record) = type_decl;
11022 TYPE_FIELDS (record) = f_gpr;
11023 DECL_CHAIN (f_gpr) = f_fpr;
11024 DECL_CHAIN (f_fpr) = f_res;
11025 DECL_CHAIN (f_res) = f_ovf;
11026 DECL_CHAIN (f_ovf) = f_sav;
11027
11028 layout_type (record);
11029
11030 /* The correct type is an array type of one element. */
11031 return build_array_type (record, build_index_type (size_zero_node));
11032 }
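/* Illustration: the record built above is the familiar SVR4 PowerPC
   va_list.  As a user-level C declaration it would read roughly as follows
   (a sketch; names chosen to avoid clashing with system headers): */
typedef struct va_list_tag_sketch
{
  unsigned char gpr;		/* next GPR argument register, 0..8 */
  unsigned char fpr;		/* next FPR argument register, 0..8 */
  unsigned short reserved;	/* the named padding field above */
  void *overflow_arg_area;	/* next argument passed on the stack */
  void *reg_save_area;		/* base of the register save area */
} va_list_sketch[1];		/* array of one element, as noted above */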
11033
11034 /* Implement va_start. */
11035
11036 static void
11037 rs6000_va_start (tree valist, rtx nextarg)
11038 {
11039 HOST_WIDE_INT words, n_gpr, n_fpr;
11040 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
11041 tree gpr, fpr, ovf, sav, t;
11042
11043 /* Only SVR4 needs something special. */
11044 if (DEFAULT_ABI != ABI_V4)
11045 {
11046 std_expand_builtin_va_start (valist, nextarg);
11047 return;
11048 }
11049
11050 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11051 f_fpr = DECL_CHAIN (f_gpr);
11052 f_res = DECL_CHAIN (f_fpr);
11053 f_ovf = DECL_CHAIN (f_res);
11054 f_sav = DECL_CHAIN (f_ovf);
11055
11056 valist = build_simple_mem_ref (valist);
11057 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11058 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
11059 f_fpr, NULL_TREE);
11060 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
11061 f_ovf, NULL_TREE);
11062 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
11063 f_sav, NULL_TREE);
11064
11065 /* Count number of gp and fp argument registers used. */
11066 words = crtl->args.info.words;
11067 n_gpr = MIN (crtl->args.info.sysv_gregno - GP_ARG_MIN_REG,
11068 GP_ARG_NUM_REG);
11069 n_fpr = MIN (crtl->args.info.fregno - FP_ARG_MIN_REG,
11070 FP_ARG_NUM_REG);
11071
11072 if (TARGET_DEBUG_ARG)
11073 fprintf (stderr, "va_start: words = "HOST_WIDE_INT_PRINT_DEC", n_gpr = "
11074 HOST_WIDE_INT_PRINT_DEC", n_fpr = "HOST_WIDE_INT_PRINT_DEC"\n",
11075 words, n_gpr, n_fpr);
11076
11077 if (cfun->va_list_gpr_size)
11078 {
11079 t = build2 (MODIFY_EXPR, TREE_TYPE (gpr), gpr,
11080 build_int_cst (NULL_TREE, n_gpr));
11081 TREE_SIDE_EFFECTS (t) = 1;
11082 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11083 }
11084
11085 if (cfun->va_list_fpr_size)
11086 {
11087 t = build2 (MODIFY_EXPR, TREE_TYPE (fpr), fpr,
11088 build_int_cst (NULL_TREE, n_fpr));
11089 TREE_SIDE_EFFECTS (t) = 1;
11090 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11091
11092 #ifdef HAVE_AS_GNU_ATTRIBUTE
11093 if (call_ABI_of_interest (cfun->decl))
11094 rs6000_passes_float = true;
11095 #endif
11096 }
11097
11098 /* Find the overflow area. */
11099 t = make_tree (TREE_TYPE (ovf), virtual_incoming_args_rtx);
11100 if (words != 0)
11101 t = fold_build_pointer_plus_hwi (t, words * UNITS_PER_WORD);
11102 t = build2 (MODIFY_EXPR, TREE_TYPE (ovf), ovf, t);
11103 TREE_SIDE_EFFECTS (t) = 1;
11104 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11105
11106 /* If there were no va_arg invocations, don't set up the register
11107 save area. */
11108 if (!cfun->va_list_gpr_size
11109 && !cfun->va_list_fpr_size
11110 && n_gpr < GP_ARG_NUM_REG
11111 && n_fpr < FP_ARG_V4_MAX_REG)
11112 return;
11113
11114 /* Find the register save area. */
11115 t = make_tree (TREE_TYPE (sav), virtual_stack_vars_rtx);
11116 if (cfun->machine->varargs_save_offset)
11117 t = fold_build_pointer_plus_hwi (t, cfun->machine->varargs_save_offset);
11118 t = build2 (MODIFY_EXPR, TREE_TYPE (sav), sav, t);
11119 TREE_SIDE_EFFECTS (t) = 1;
11120 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
11121 }
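/* Illustration (sketch, reusing the hypothetical va_list_sketch layout
   shown after rs6000_build_builtin_va_list): the net effect of the trees
   expanded above is four stores.  */
static void
v4_va_start_sketch (va_list_sketch ap, int gprs_used, int fprs_used,
		    char *first_stack_arg, char *save_area)
{
  ap[0].gpr = gprs_used;	/* GP arg registers already consumed */
  ap[0].fpr = fprs_used;	/* FP arg registers already consumed */
  ap[0].overflow_arg_area = first_stack_arg;
  ap[0].reg_save_area = save_area;	/* may be skipped, as above */
}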
11122
11123 /* Implement va_arg. */
11124
11125 static tree
11126 rs6000_gimplify_va_arg (tree valist, tree type, gimple_seq *pre_p,
11127 gimple_seq *post_p)
11128 {
11129 tree f_gpr, f_fpr, f_res, f_ovf, f_sav;
11130 tree gpr, fpr, ovf, sav, reg, t, u;
11131 int size, rsize, n_reg, sav_ofs, sav_scale;
11132 tree lab_false, lab_over, addr;
11133 int align;
11134 tree ptrtype = build_pointer_type_for_mode (type, ptr_mode, true);
11135 int regalign = 0;
11136 gimple stmt;
11137
11138 if (pass_by_reference (NULL, TYPE_MODE (type), type, false))
11139 {
11140 t = rs6000_gimplify_va_arg (valist, ptrtype, pre_p, post_p);
11141 return build_va_arg_indirect_ref (t);
11142 }
11143
11144 /* We need to deal with the fact that the darwin ppc64 ABI is defined by an
11145 earlier version of gcc, with the property that it always applied alignment
11146 adjustments to the va-args (even for zero-sized types). The cheapest way
11147 to deal with this is to replicate the effect of the part of
11148 std_gimplify_va_arg_expr that carries out the align adjust, for the
11149 relevant case.
11150 We don't need to check for pass-by-reference because of the test above.
11151 We can return a simplified answer, since we know there's no offset to add. */
11152
11153 if (((TARGET_MACHO
11154 && rs6000_darwin64_abi)
11155 || DEFAULT_ABI == ABI_ELFv2
11156 || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
11157 && integer_zerop (TYPE_SIZE (type)))
11158 {
11159 unsigned HOST_WIDE_INT align, boundary;
11160 tree valist_tmp = get_initialized_tmp_var (valist, pre_p, NULL);
11161 align = PARM_BOUNDARY / BITS_PER_UNIT;
11162 boundary = rs6000_function_arg_boundary (TYPE_MODE (type), type);
11163 if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
11164 boundary = MAX_SUPPORTED_STACK_ALIGNMENT;
11165 boundary /= BITS_PER_UNIT;
11166 if (boundary > align)
11167 {
11168 tree t;
11169 /* This updates arg ptr by the amount that would be necessary
11170 to align the zero-sized (but not zero-alignment) item. */
11171 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
11172 fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
11173 gimplify_and_add (t, pre_p);
11174
11175 t = fold_convert (sizetype, valist_tmp);
11176 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
11177 fold_convert (TREE_TYPE (valist),
11178 fold_build2 (BIT_AND_EXPR, sizetype, t,
11179 size_int (-boundary))));
11180 t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
11181 gimplify_and_add (t, pre_p);
11182 }
11183 /* Since it is zero-sized there's no increment for the item itself. */
11184 valist_tmp = fold_convert (build_pointer_type (type), valist_tmp);
11185 return build_va_arg_indirect_ref (valist_tmp);
11186 }
11187
11188 if (DEFAULT_ABI != ABI_V4)
11189 {
11190 if (targetm.calls.split_complex_arg && TREE_CODE (type) == COMPLEX_TYPE)
11191 {
11192 tree elem_type = TREE_TYPE (type);
11193 enum machine_mode elem_mode = TYPE_MODE (elem_type);
11194 int elem_size = GET_MODE_SIZE (elem_mode);
11195
11196 if (elem_size < UNITS_PER_WORD)
11197 {
11198 tree real_part, imag_part;
11199 gimple_seq post = NULL;
11200
11201 real_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
11202 &post);
11203 /* Copy the value into a temporary, lest the formal temporary
11204 be reused out from under us. */
11205 real_part = get_initialized_tmp_var (real_part, pre_p, &post);
11206 gimple_seq_add_seq (pre_p, post);
11207
11208 imag_part = rs6000_gimplify_va_arg (valist, elem_type, pre_p,
11209 post_p);
11210
11211 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
11212 }
11213 }
11214
11215 return std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
11216 }
11217
11218 f_gpr = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
11219 f_fpr = DECL_CHAIN (f_gpr);
11220 f_res = DECL_CHAIN (f_fpr);
11221 f_ovf = DECL_CHAIN (f_res);
11222 f_sav = DECL_CHAIN (f_ovf);
11223
11224 valist = build_va_arg_indirect_ref (valist);
11225 gpr = build3 (COMPONENT_REF, TREE_TYPE (f_gpr), valist, f_gpr, NULL_TREE);
11226 fpr = build3 (COMPONENT_REF, TREE_TYPE (f_fpr), unshare_expr (valist),
11227 f_fpr, NULL_TREE);
11228 ovf = build3 (COMPONENT_REF, TREE_TYPE (f_ovf), unshare_expr (valist),
11229 f_ovf, NULL_TREE);
11230 sav = build3 (COMPONENT_REF, TREE_TYPE (f_sav), unshare_expr (valist),
11231 f_sav, NULL_TREE);
11232
11233 size = int_size_in_bytes (type);
11234 rsize = (size + 3) / 4;
11235 align = 1;
11236
11237 if (TARGET_HARD_FLOAT && TARGET_FPRS
11238 && ((TARGET_SINGLE_FLOAT && TYPE_MODE (type) == SFmode)
11239 || (TARGET_DOUBLE_FLOAT
11240 && (TYPE_MODE (type) == DFmode
11241 || TYPE_MODE (type) == TFmode
11242 || TYPE_MODE (type) == SDmode
11243 || TYPE_MODE (type) == DDmode
11244 || TYPE_MODE (type) == TDmode))))
11245 {
11246 /* FP args go in FP registers, if present. */
11247 reg = fpr;
11248 n_reg = (size + 7) / 8;
11249 sav_ofs = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4) * 4;
11250 sav_scale = ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? 8 : 4);
11251 if (TYPE_MODE (type) != SFmode && TYPE_MODE (type) != SDmode)
11252 align = 8;
11253 }
11254 else
11255 {
11256 /* Otherwise into GP registers. */
11257 reg = gpr;
11258 n_reg = rsize;
11259 sav_ofs = 0;
11260 sav_scale = 4;
11261 if (n_reg == 2)
11262 align = 8;
11263 }
11264
11265 /* Pull the value out of the saved registers.... */
11266
11267 lab_over = NULL;
11268 addr = create_tmp_var (ptr_type_node, "addr");
11269
11270 /* AltiVec vectors never go in registers when -mabi=altivec. */
11271 if (TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (TYPE_MODE (type)))
11272 align = 16;
11273 else
11274 {
11275 lab_false = create_artificial_label (input_location);
11276 lab_over = create_artificial_label (input_location);
11277
11278 /* Long long and SPE vectors are aligned in the registers,
11279 as is any other 2-gpr item such as complex int, due to a
11280 historical mistake. */
11281 u = reg;
11282 if (n_reg == 2 && reg == gpr)
11283 {
11284 regalign = 1;
11285 u = build2 (BIT_AND_EXPR, TREE_TYPE (reg), unshare_expr (reg),
11286 build_int_cst (TREE_TYPE (reg), n_reg - 1));
11287 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg),
11288 unshare_expr (reg), u);
11289 }
11290 /* _Decimal128 is passed in even/odd fpr pairs; the stored
11291 reg number is 0 for f1, so we want to make it odd. */
11292 else if (reg == fpr && TYPE_MODE (type) == TDmode)
11293 {
11294 t = build2 (BIT_IOR_EXPR, TREE_TYPE (reg), unshare_expr (reg),
11295 build_int_cst (TREE_TYPE (reg), 1));
11296 u = build2 (MODIFY_EXPR, void_type_node, unshare_expr (reg), t);
11297 }
11298
11299 t = fold_convert (TREE_TYPE (reg), size_int (8 - n_reg + 1));
11300 t = build2 (GE_EXPR, boolean_type_node, u, t);
11301 u = build1 (GOTO_EXPR, void_type_node, lab_false);
11302 t = build3 (COND_EXPR, void_type_node, t, u, NULL_TREE);
11303 gimplify_and_add (t, pre_p);
11304
11305 t = sav;
11306 if (sav_ofs)
11307 t = fold_build_pointer_plus_hwi (sav, sav_ofs);
11308
11309 u = build2 (POSTINCREMENT_EXPR, TREE_TYPE (reg), unshare_expr (reg),
11310 build_int_cst (TREE_TYPE (reg), n_reg));
11311 u = fold_convert (sizetype, u);
11312 u = build2 (MULT_EXPR, sizetype, u, size_int (sav_scale));
11313 t = fold_build_pointer_plus (t, u);
11314
11315 /* _Decimal32 varargs are located in the second word of the 64-bit
11316 FP register for 32-bit binaries. */
11317 if (!TARGET_POWERPC64
11318 && TARGET_HARD_FLOAT && TARGET_FPRS
11319 && TYPE_MODE (type) == SDmode)
11320 t = fold_build_pointer_plus_hwi (t, size);
11321
11322 gimplify_assign (addr, t, pre_p);
11323
11324 gimple_seq_add_stmt (pre_p, gimple_build_goto (lab_over));
11325
11326 stmt = gimple_build_label (lab_false);
11327 gimple_seq_add_stmt (pre_p, stmt);
11328
11329 if ((n_reg == 2 && !regalign) || n_reg > 2)
11330 {
11331 /* Ensure that we don't find any more args in regs.
11332 Alignment has been taken care of for the special cases. */
11333 gimplify_assign (reg, build_int_cst (TREE_TYPE (reg), 8), pre_p);
11334 }
11335 }
11336
11337 /* ... otherwise out of the overflow area. */
11338
11339 /* Care for on-stack alignment if needed. */
11340 t = ovf;
11341 if (align != 1)
11342 {
11343 t = fold_build_pointer_plus_hwi (t, align - 1);
11344 t = build2 (BIT_AND_EXPR, TREE_TYPE (t), t,
11345 build_int_cst (TREE_TYPE (t), -align));
11346 }
11347 gimplify_expr (&t, pre_p, NULL, is_gimple_val, fb_rvalue);
11348
11349 gimplify_assign (unshare_expr (addr), t, pre_p);
11350
11351 t = fold_build_pointer_plus_hwi (t, size);
11352 gimplify_assign (unshare_expr (ovf), t, pre_p);
11353
11354 if (lab_over)
11355 {
11356 stmt = gimple_build_label (lab_over);
11357 gimple_seq_add_stmt (pre_p, stmt);
11358 }
11359
11360 if (STRICT_ALIGNMENT
11361 && (TYPE_ALIGN (type)
11362 > (unsigned) BITS_PER_UNIT * (align < 4 ? 4 : align)))
11363 {
11364 /* The value (of type complex double, for example) may not be
11365 aligned in memory in the saved registers, so copy via a
11366 temporary. (This is the same code as used for SPARC.) */
11367 tree tmp = create_tmp_var (type, "va_arg_tmp");
11368 tree dest_addr = build_fold_addr_expr (tmp);
11369
11370 tree copy = build_call_expr (builtin_decl_implicit (BUILT_IN_MEMCPY),
11371 3, dest_addr, addr, size_int (rsize * 4));
11372
11373 gimplify_and_add (copy, pre_p);
11374 addr = dest_addr;
11375 }
11376
11377 addr = fold_convert (ptrtype, addr);
11378 return build_va_arg_indirect_ref (addr);
11379 }
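/* Illustration (standalone sketch, hypothetical names): the two address
   computations above as plain C.  Register case:
   addr = reg_save_area + sav_ofs + counter * sav_scale, post-incrementing
   the counter by n_reg.  Overflow case: round the overflow pointer up to
   the argument's alignment, then bump it past the argument.  The real code
   forces the counter to 8 only for multi-register items; this sketch does
   it unconditionally for brevity.  */
#include <stdint.h>

static char *
va_arg_addr_sketch (unsigned char *counter, char *save_area, int sav_ofs,
		    int sav_scale, int n_reg,
		    char **ovf, int size, int align)
{
  if (*counter + n_reg <= 8)	/* still fits in the 8 argument registers */
    {
      char *addr = save_area + sav_ofs + *counter * sav_scale;
      *counter += n_reg;
      return addr;
    }
  *counter = 8;			/* no further register arguments */
  uintptr_t p = ((uintptr_t) *ovf + align - 1) & -(uintptr_t) align;
  *ovf = (char *) (p + size);
  return (char *) p;
}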
11380
11381 /* Builtins. */
11382
11383 static void
11384 def_builtin (const char *name, tree type, enum rs6000_builtins code)
11385 {
11386 tree t;
11387 unsigned classify = rs6000_builtin_info[(int)code].attr;
11388 const char *attr_string = "";
11389
11390 gcc_assert (name != NULL);
11391 gcc_assert (IN_RANGE ((int)code, 0, (int)RS6000_BUILTIN_COUNT));
11392
11393 if (rs6000_builtin_decls[(int)code])
11394 fatal_error ("internal error: builtin function %s already processed", name);
11395
11396 rs6000_builtin_decls[(int)code] = t =
11397 add_builtin_function (name, type, (int)code, BUILT_IN_MD, NULL, NULL_TREE);
11398
11399 /* Set any special attributes. */
11400 if ((classify & RS6000_BTC_CONST) != 0)
11401 {
11402 /* const function, function only depends on the inputs. */
11403 TREE_READONLY (t) = 1;
11404 TREE_NOTHROW (t) = 1;
11405 attr_string = ", const";
11406 }
11407 else if ((classify & RS6000_BTC_PURE) != 0)
11408 {
11409 /* pure function, function can read global memory, but does not set any
11410 external state. */
11411 DECL_PURE_P (t) = 1;
11412 TREE_NOTHROW (t) = 1;
11413 attr_string = ", pure";
11414 }
11415 else if ((classify & RS6000_BTC_FP) != 0)
11416 {
11417 /* Function is a math function. If rounding mode is on, then treat the
11418 function as not reading global memory, but it can have arbitrary side
11419 effects. If it is off, then assume the function is a const function.
11420 This mimics the ATTR_MATHFN_FPROUNDING attribute in
11421 builtin-attribute.def that is used for the math functions. */
11422 TREE_NOTHROW (t) = 1;
11423 if (flag_rounding_math)
11424 {
11425 DECL_PURE_P (t) = 1;
11426 DECL_IS_NOVOPS (t) = 1;
11427 attr_string = ", fp, pure";
11428 }
11429 else
11430 {
11431 TREE_READONLY (t) = 1;
11432 attr_string = ", fp, const";
11433 }
11434 }
11435 else if ((classify & RS6000_BTC_ATTR_MASK) != 0)
11436 gcc_unreachable ();
11437
11438 if (TARGET_DEBUG_BUILTIN)
11439 fprintf (stderr, "rs6000_builtin, code = %4d, %s%s\n",
11440 (int)code, name, attr_string);
11441 }
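/* Illustration: the distinction encoded above, in user-level terms.
   TREE_READONLY corresponds to __attribute__((const)) -- the result depends
   only on the arguments -- while DECL_PURE_P corresponds to
   __attribute__((pure)) -- the function may read, but not write, global
   memory.  Standalone sketch: */
extern int table_sketch[16];

__attribute__((const)) static int
square_sketch (int x)
{
  return x * x;			/* no memory reads at all */
}

__attribute__((pure)) static int
lookup_sketch (int x)
{
  return table_sketch[x & 15];	/* reads memory, never writes it */
}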
11442
11443 /* Simple ternary operations: VECd = foo (VECa, VECb, VECc). */
11444
11445 #undef RS6000_BUILTIN_1
11446 #undef RS6000_BUILTIN_2
11447 #undef RS6000_BUILTIN_3
11448 #undef RS6000_BUILTIN_A
11449 #undef RS6000_BUILTIN_D
11450 #undef RS6000_BUILTIN_E
11451 #undef RS6000_BUILTIN_H
11452 #undef RS6000_BUILTIN_P
11453 #undef RS6000_BUILTIN_Q
11454 #undef RS6000_BUILTIN_S
11455 #undef RS6000_BUILTIN_X
11456
11457 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11458 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11459 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE) \
11460 { MASK, ICODE, NAME, ENUM },
11461
11462 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11463 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11464 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11465 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11466 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11467 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11468 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11469 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11470
11471 static const struct builtin_description bdesc_3arg[] =
11472 {
11473 #include "rs6000-builtin.def"
11474 };
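/* Illustration: the #undef/#define/#include pattern used for every table in
   this section is the classic "X macro" idiom -- rs6000-builtin.def names
   each builtin exactly once through the RS6000_BUILTIN_* macros, and each
   table re-defines just one of those macros to expand to an initializer
   while the rest expand to nothing.  A tiny standalone model (hypothetical
   names): */
#define ITEM_LIST_SKETCH \
  ITEM_A (alpha)	 \
  ITEM_B (beta)		 \
  ITEM_A (gamma)

#define ITEM_A(NAME) #NAME,	/* keep the A entries */
#define ITEM_B(NAME)		/* drop the B entries */
static const char *a_items_sketch[] = { ITEM_LIST_SKETCH };
#undef ITEM_A
#undef ITEM_B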
11475
11476 /* DST operations: void foo (void *, const int, const char). */
11477
11478 #undef RS6000_BUILTIN_1
11479 #undef RS6000_BUILTIN_2
11480 #undef RS6000_BUILTIN_3
11481 #undef RS6000_BUILTIN_A
11482 #undef RS6000_BUILTIN_D
11483 #undef RS6000_BUILTIN_E
11484 #undef RS6000_BUILTIN_H
11485 #undef RS6000_BUILTIN_P
11486 #undef RS6000_BUILTIN_Q
11487 #undef RS6000_BUILTIN_S
11488 #undef RS6000_BUILTIN_X
11489
11490 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11491 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11492 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11493 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11494 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE) \
11495 { MASK, ICODE, NAME, ENUM },
11496
11497 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11498 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11499 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11500 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11501 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11502 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11503
11504 static const struct builtin_description bdesc_dst[] =
11505 {
11506 #include "rs6000-builtin.def"
11507 };
11508
11509 /* Simple binary operations: VECc = foo (VECa, VECb). */
11510
11511 #undef RS6000_BUILTIN_1
11512 #undef RS6000_BUILTIN_2
11513 #undef RS6000_BUILTIN_3
11514 #undef RS6000_BUILTIN_A
11515 #undef RS6000_BUILTIN_D
11516 #undef RS6000_BUILTIN_E
11517 #undef RS6000_BUILTIN_H
11518 #undef RS6000_BUILTIN_P
11519 #undef RS6000_BUILTIN_Q
11520 #undef RS6000_BUILTIN_S
11521 #undef RS6000_BUILTIN_X
11522
11523 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11524 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE) \
11525 { MASK, ICODE, NAME, ENUM },
11526
11527 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11528 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11529 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11530 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11531 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11532 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11533 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11534 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11535 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11536
11537 static const struct builtin_description bdesc_2arg[] =
11538 {
11539 #include "rs6000-builtin.def"
11540 };
11541
11542 #undef RS6000_BUILTIN_1
11543 #undef RS6000_BUILTIN_2
11544 #undef RS6000_BUILTIN_3
11545 #undef RS6000_BUILTIN_A
11546 #undef RS6000_BUILTIN_D
11547 #undef RS6000_BUILTIN_E
11548 #undef RS6000_BUILTIN_H
11549 #undef RS6000_BUILTIN_P
11550 #undef RS6000_BUILTIN_Q
11551 #undef RS6000_BUILTIN_S
11552 #undef RS6000_BUILTIN_X
11553
11554 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11555 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11556 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11557 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11558 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11559 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11560 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11561 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE) \
11562 { MASK, ICODE, NAME, ENUM },
11563
11564 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11565 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11566 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11567
11568 /* AltiVec predicates. */
11569
11570 static const struct builtin_description bdesc_altivec_preds[] =
11571 {
11572 #include "rs6000-builtin.def"
11573 };
11574
11575 /* SPE predicates. */
11576 #undef RS6000_BUILTIN_1
11577 #undef RS6000_BUILTIN_2
11578 #undef RS6000_BUILTIN_3
11579 #undef RS6000_BUILTIN_A
11580 #undef RS6000_BUILTIN_D
11581 #undef RS6000_BUILTIN_E
11582 #undef RS6000_BUILTIN_H
11583 #undef RS6000_BUILTIN_P
11584 #undef RS6000_BUILTIN_Q
11585 #undef RS6000_BUILTIN_S
11586 #undef RS6000_BUILTIN_X
11587
11588 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11589 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11590 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11591 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11592 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11593 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11594 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11595 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11596 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11597 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE) \
11598 { MASK, ICODE, NAME, ENUM },
11599
11600 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11601
11602 static const struct builtin_description bdesc_spe_predicates[] =
11603 {
11604 #include "rs6000-builtin.def"
11605 };
11606
11607 /* SPE evsel predicates. */
11608 #undef RS6000_BUILTIN_1
11609 #undef RS6000_BUILTIN_2
11610 #undef RS6000_BUILTIN_3
11611 #undef RS6000_BUILTIN_A
11612 #undef RS6000_BUILTIN_D
11613 #undef RS6000_BUILTIN_E
11614 #undef RS6000_BUILTIN_H
11615 #undef RS6000_BUILTIN_P
11616 #undef RS6000_BUILTIN_Q
11617 #undef RS6000_BUILTIN_S
11618 #undef RS6000_BUILTIN_X
11619
11620 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11621 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11622 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11623 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11624 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11625 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE) \
11626 { MASK, ICODE, NAME, ENUM },
11627
11628 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11629 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11630 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11631 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11632 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11633
11634 static const struct builtin_description bdesc_spe_evsel[] =
11635 {
11636 #include "rs6000-builtin.def"
11637 };
11638
11639 /* PAIRED predicates. */
11640 #undef RS6000_BUILTIN_1
11641 #undef RS6000_BUILTIN_2
11642 #undef RS6000_BUILTIN_3
11643 #undef RS6000_BUILTIN_A
11644 #undef RS6000_BUILTIN_D
11645 #undef RS6000_BUILTIN_E
11646 #undef RS6000_BUILTIN_H
11647 #undef RS6000_BUILTIN_P
11648 #undef RS6000_BUILTIN_Q
11649 #undef RS6000_BUILTIN_S
11650 #undef RS6000_BUILTIN_X
11651
11652 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11653 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11654 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11655 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11656 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11657 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11658 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11659 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11660 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE) \
11661 { MASK, ICODE, NAME, ENUM },
11662
11663 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11664 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11665
11666 static const struct builtin_description bdesc_paired_preds[] =
11667 {
11668 #include "rs6000-builtin.def"
11669 };
11670
11671 /* ABS* operations. */
11672
11673 #undef RS6000_BUILTIN_1
11674 #undef RS6000_BUILTIN_2
11675 #undef RS6000_BUILTIN_3
11676 #undef RS6000_BUILTIN_A
11677 #undef RS6000_BUILTIN_D
11678 #undef RS6000_BUILTIN_E
11679 #undef RS6000_BUILTIN_H
11680 #undef RS6000_BUILTIN_P
11681 #undef RS6000_BUILTIN_Q
11682 #undef RS6000_BUILTIN_S
11683 #undef RS6000_BUILTIN_X
11684
11685 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11686 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11687 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11688 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE) \
11689 { MASK, ICODE, NAME, ENUM },
11690
11691 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11692 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11693 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11694 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11695 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11696 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11697 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11698
11699 static const struct builtin_description bdesc_abs[] =
11700 {
11701 #include "rs6000-builtin.def"
11702 };
11703
11704 /* Simple unary operations: VECb = foo (unsigned literal) or VECb =
11705 foo (VECa). */
11706
11707 #undef RS6000_BUILTIN_1
11708 #undef RS6000_BUILTIN_2
11709 #undef RS6000_BUILTIN_3
11710 #undef RS6000_BUILTIN_A
11711 #undef RS6000_BUILTIN_D
11712 #undef RS6000_BUILTIN_E
11713 #undef RS6000_BUILTIN_H
11714 #undef RS6000_BUILTIN_P
11715 #undef RS6000_BUILTIN_Q
11716 #undef RS6000_BUILTIN_S
11717 #undef RS6000_BUILTIN_X
11718
11719 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE) \
11720 { MASK, ICODE, NAME, ENUM },
11721
11722 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11723 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11724 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11725 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11726 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11727 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE)
11728 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11729 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11730 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11731 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11732
11733 static const struct builtin_description bdesc_1arg[] =
11734 {
11735 #include "rs6000-builtin.def"
11736 };
11737
11738 /* HTM builtins. */
11739 #undef RS6000_BUILTIN_1
11740 #undef RS6000_BUILTIN_2
11741 #undef RS6000_BUILTIN_3
11742 #undef RS6000_BUILTIN_A
11743 #undef RS6000_BUILTIN_D
11744 #undef RS6000_BUILTIN_E
11745 #undef RS6000_BUILTIN_H
11746 #undef RS6000_BUILTIN_P
11747 #undef RS6000_BUILTIN_Q
11748 #undef RS6000_BUILTIN_S
11749 #undef RS6000_BUILTIN_X
11750
11751 #define RS6000_BUILTIN_1(ENUM, NAME, MASK, ATTR, ICODE)
11752 #define RS6000_BUILTIN_2(ENUM, NAME, MASK, ATTR, ICODE)
11753 #define RS6000_BUILTIN_3(ENUM, NAME, MASK, ATTR, ICODE)
11754 #define RS6000_BUILTIN_A(ENUM, NAME, MASK, ATTR, ICODE)
11755 #define RS6000_BUILTIN_D(ENUM, NAME, MASK, ATTR, ICODE)
11756 #define RS6000_BUILTIN_E(ENUM, NAME, MASK, ATTR, ICODE)
11757 #define RS6000_BUILTIN_H(ENUM, NAME, MASK, ATTR, ICODE) \
11758 { MASK, ICODE, NAME, ENUM },
11759
11760 #define RS6000_BUILTIN_P(ENUM, NAME, MASK, ATTR, ICODE)
11761 #define RS6000_BUILTIN_Q(ENUM, NAME, MASK, ATTR, ICODE)
11762 #define RS6000_BUILTIN_S(ENUM, NAME, MASK, ATTR, ICODE)
11763 #define RS6000_BUILTIN_X(ENUM, NAME, MASK, ATTR, ICODE)
11764
11765 static const struct builtin_description bdesc_htm[] =
11766 {
11767 #include "rs6000-builtin.def"
11768 };
11769
11770 #undef RS6000_BUILTIN_1
11771 #undef RS6000_BUILTIN_2
11772 #undef RS6000_BUILTIN_3
11773 #undef RS6000_BUILTIN_A
11774 #undef RS6000_BUILTIN_D
11775 #undef RS6000_BUILTIN_E
11776 #undef RS6000_BUILTIN_H
11777 #undef RS6000_BUILTIN_P
11778 #undef RS6000_BUILTIN_Q
11779 #undef RS6000_BUILTIN_S
11780
11781 /* Return true if a builtin function is overloaded. */
11782 bool
11783 rs6000_overloaded_builtin_p (enum rs6000_builtins fncode)
11784 {
11785 return (rs6000_builtin_info[(int)fncode].attr & RS6000_BTC_OVERLOADED) != 0;
11786 }
11787
11788 /* Expand a call to a builtin function that takes no arguments. */
11789 static rtx
11790 rs6000_expand_zeroop_builtin (enum insn_code icode, rtx target)
11791 {
11792 rtx pat;
11793 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11794
11795 if (icode == CODE_FOR_nothing)
11796 /* Builtin not supported on this processor. */
11797 return 0;
11798
11799 if (target == 0
11800 || GET_MODE (target) != tmode
11801 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11802 target = gen_reg_rtx (tmode);
11803
11804 pat = GEN_FCN (icode) (target);
11805 if (! pat)
11806 return 0;
11807 emit_insn (pat);
11808
11809 return target;
11810 }
11811
11812
11813 static rtx
11814 rs6000_expand_mtfsf_builtin (enum insn_code icode, tree exp)
11815 {
11816 rtx pat;
11817 tree arg0 = CALL_EXPR_ARG (exp, 0);
11818 tree arg1 = CALL_EXPR_ARG (exp, 1);
11819 rtx op0 = expand_normal (arg0);
11820 rtx op1 = expand_normal (arg1);
11821 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
11822 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
11823
11824 if (icode == CODE_FOR_nothing)
11825 /* Builtin not supported on this processor. */
11826 return 0;
11827
11828 /* If we got invalid arguments bail out before generating bad rtl. */
11829 if (arg0 == error_mark_node || arg1 == error_mark_node)
11830 return const0_rtx;
11831
11832 if (GET_CODE (op0) != CONST_INT
11833 || INTVAL (op0) > 255
11834 || INTVAL (op0) < 0)
11835 {
11836 error ("argument 1 must be an 8-bit field value");
11837 return const0_rtx;
11838 }
11839
11840 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
11841 op0 = copy_to_mode_reg (mode0, op0);
11842
11843 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
11844 op1 = copy_to_mode_reg (mode1, op1);
11845
11846 pat = GEN_FCN (icode) (op0, op1);
11847 if (! pat)
11848 return const0_rtx;
11849 emit_insn (pat);
11850
11851 return NULL_RTX;
11852 }
11853
11854
11855 static rtx
11856 rs6000_expand_unop_builtin (enum insn_code icode, tree exp, rtx target)
11857 {
11858 rtx pat;
11859 tree arg0 = CALL_EXPR_ARG (exp, 0);
11860 rtx op0 = expand_normal (arg0);
11861 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11862 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11863
11864 if (icode == CODE_FOR_nothing)
11865 /* Builtin not supported on this processor. */
11866 return 0;
11867
11868 /* If we got invalid arguments bail out before generating bad rtl. */
11869 if (arg0 == error_mark_node)
11870 return const0_rtx;
11871
11872 if (icode == CODE_FOR_altivec_vspltisb
11873 || icode == CODE_FOR_altivec_vspltish
11874 || icode == CODE_FOR_altivec_vspltisw
11875 || icode == CODE_FOR_spe_evsplatfi
11876 || icode == CODE_FOR_spe_evsplati)
11877 {
11878 /* Only allow 5-bit *signed* literals. */
11879 if (GET_CODE (op0) != CONST_INT
11880 || INTVAL (op0) > 15
11881 || INTVAL (op0) < -16)
11882 {
11883 error ("argument 1 must be a 5-bit signed literal");
11884 return const0_rtx;
11885 }
11886 }
11887
11888 if (target == 0
11889 || GET_MODE (target) != tmode
11890 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11891 target = gen_reg_rtx (tmode);
11892
11893 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11894 op0 = copy_to_mode_reg (mode0, op0);
11895
11896 pat = GEN_FCN (icode) (target, op0);
11897 if (! pat)
11898 return 0;
11899 emit_insn (pat);
11900
11901 return target;
11902 }
11903
11904 static rtx
11905 altivec_expand_abs_builtin (enum insn_code icode, tree exp, rtx target)
11906 {
11907 rtx pat, scratch1, scratch2;
11908 tree arg0 = CALL_EXPR_ARG (exp, 0);
11909 rtx op0 = expand_normal (arg0);
11910 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11911 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11912
11913 /* If we have invalid arguments, bail out before generating bad rtl. */
11914 if (arg0 == error_mark_node)
11915 return const0_rtx;
11916
11917 if (target == 0
11918 || GET_MODE (target) != tmode
11919 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11920 target = gen_reg_rtx (tmode);
11921
11922 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11923 op0 = copy_to_mode_reg (mode0, op0);
11924
11925 scratch1 = gen_reg_rtx (mode0);
11926 scratch2 = gen_reg_rtx (mode0);
11927
11928 pat = GEN_FCN (icode) (target, op0, scratch1, scratch2);
11929 if (! pat)
11930 return 0;
11931 emit_insn (pat);
11932
11933 return target;
11934 }
11935
11936 static rtx
11937 rs6000_expand_binop_builtin (enum insn_code icode, tree exp, rtx target)
11938 {
11939 rtx pat;
11940 tree arg0 = CALL_EXPR_ARG (exp, 0);
11941 tree arg1 = CALL_EXPR_ARG (exp, 1);
11942 rtx op0 = expand_normal (arg0);
11943 rtx op1 = expand_normal (arg1);
11944 enum machine_mode tmode = insn_data[icode].operand[0].mode;
11945 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
11946 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
11947
11948 if (icode == CODE_FOR_nothing)
11949 /* Builtin not supported on this processor. */
11950 return 0;
11951
11952 /* If we got invalid arguments bail out before generating bad rtl. */
11953 if (arg0 == error_mark_node || arg1 == error_mark_node)
11954 return const0_rtx;
11955
11956 if (icode == CODE_FOR_altivec_vcfux
11957 || icode == CODE_FOR_altivec_vcfsx
11958 || icode == CODE_FOR_altivec_vctsxs
11959 || icode == CODE_FOR_altivec_vctuxs
11960 || icode == CODE_FOR_altivec_vspltb
11961 || icode == CODE_FOR_altivec_vsplth
11962 || icode == CODE_FOR_altivec_vspltw
11963 || icode == CODE_FOR_spe_evaddiw
11964 || icode == CODE_FOR_spe_evldd
11965 || icode == CODE_FOR_spe_evldh
11966 || icode == CODE_FOR_spe_evldw
11967 || icode == CODE_FOR_spe_evlhhesplat
11968 || icode == CODE_FOR_spe_evlhhossplat
11969 || icode == CODE_FOR_spe_evlhhousplat
11970 || icode == CODE_FOR_spe_evlwhe
11971 || icode == CODE_FOR_spe_evlwhos
11972 || icode == CODE_FOR_spe_evlwhou
11973 || icode == CODE_FOR_spe_evlwhsplat
11974 || icode == CODE_FOR_spe_evlwwsplat
11975 || icode == CODE_FOR_spe_evrlwi
11976 || icode == CODE_FOR_spe_evslwi
11977 || icode == CODE_FOR_spe_evsrwis
11978 || icode == CODE_FOR_spe_evsubifw
11979 || icode == CODE_FOR_spe_evsrwiu)
11980 {
11981 /* Only allow 5-bit unsigned literals. */
11982 STRIP_NOPS (arg1);
11983 if (TREE_CODE (arg1) != INTEGER_CST
11984 || TREE_INT_CST_LOW (arg1) & ~0x1f)
11985 {
11986 error ("argument 2 must be a 5-bit unsigned literal");
11987 return const0_rtx;
11988 }
11989 }
11990
11991 if (target == 0
11992 || GET_MODE (target) != tmode
11993 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
11994 target = gen_reg_rtx (tmode);
11995
11996 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
11997 op0 = copy_to_mode_reg (mode0, op0);
11998 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
11999 op1 = copy_to_mode_reg (mode1, op1);
12000
12001 pat = GEN_FCN (icode) (target, op0, op1);
12002 if (! pat)
12003 return 0;
12004 emit_insn (pat);
12005
12006 return target;
12007 }
12008
12009 static rtx
12010 altivec_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
12011 {
12012 rtx pat, scratch;
12013 tree cr6_form = CALL_EXPR_ARG (exp, 0);
12014 tree arg0 = CALL_EXPR_ARG (exp, 1);
12015 tree arg1 = CALL_EXPR_ARG (exp, 2);
12016 rtx op0 = expand_normal (arg0);
12017 rtx op1 = expand_normal (arg1);
12018 enum machine_mode tmode = SImode;
12019 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12020 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12021 int cr6_form_int;
12022
12023 if (TREE_CODE (cr6_form) != INTEGER_CST)
12024 {
12025 error ("argument 1 of __builtin_altivec_predicate must be a constant");
12026 return const0_rtx;
12027 }
12028 else
12029 cr6_form_int = TREE_INT_CST_LOW (cr6_form);
12030
12031 gcc_assert (mode0 == mode1);
12032
12033 /* If we have invalid arguments, bail out before generating bad rtl. */
12034 if (arg0 == error_mark_node || arg1 == error_mark_node)
12035 return const0_rtx;
12036
12037 if (target == 0
12038 || GET_MODE (target) != tmode
12039 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12040 target = gen_reg_rtx (tmode);
12041
12042 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12043 op0 = copy_to_mode_reg (mode0, op0);
12044 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12045 op1 = copy_to_mode_reg (mode1, op1);
12046
12047 scratch = gen_reg_rtx (mode0);
12048
12049 pat = GEN_FCN (icode) (scratch, op0, op1);
12050 if (! pat)
12051 return 0;
12052 emit_insn (pat);
12053
12054 /* The vec_any* and vec_all* predicates use the same opcodes for two
12055 different operations, but the bits in CR6 will be different
12056 depending on what information we want. So we have to play tricks
12057 with CR6 to get the right bits out.
12058
12059 If you think this is disgusting, look at the specs for the
12060 AltiVec predicates. */
12061
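/* An illustrative sketch (hedged, not normative): altivec.h passes
   this first argument through the __CR6_* macros, where __CR6_EQ is
   0, __CR6_EQ_REV is 1, __CR6_LT is 2 and __CR6_LT_REV is 3.  So,
   for example,

     vec_all_eq (a, b) -> __builtin_altivec_vcmpequw_p (__CR6_LT, a, b)
     vec_any_eq (a, b) -> __builtin_altivec_vcmpequw_p (__CR6_EQ_REV, a, b)

   both execute the same vcmpequw. compare; they differ only in which
   CR6 test is emitted by the switch below.  */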
12062 switch (cr6_form_int)
12063 {
12064 case 0:
12065 emit_insn (gen_cr6_test_for_zero (target));
12066 break;
12067 case 1:
12068 emit_insn (gen_cr6_test_for_zero_reverse (target));
12069 break;
12070 case 2:
12071 emit_insn (gen_cr6_test_for_lt (target));
12072 break;
12073 case 3:
12074 emit_insn (gen_cr6_test_for_lt_reverse (target));
12075 break;
12076 default:
12077 error ("argument 1 of __builtin_altivec_predicate is out of range");
12078 break;
12079 }
12080
12081 return target;
12082 }
12083
12084 static rtx
12085 paired_expand_lv_builtin (enum insn_code icode, tree exp, rtx target)
12086 {
12087 rtx pat, addr;
12088 tree arg0 = CALL_EXPR_ARG (exp, 0);
12089 tree arg1 = CALL_EXPR_ARG (exp, 1);
12090 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12091 enum machine_mode mode0 = Pmode;
12092 enum machine_mode mode1 = Pmode;
12093 rtx op0 = expand_normal (arg0);
12094 rtx op1 = expand_normal (arg1);
12095
12096 if (icode == CODE_FOR_nothing)
12097 /* Builtin not supported on this processor. */
12098 return 0;
12099
12100 /* If we got invalid arguments, bail out before generating bad rtl. */
12101 if (arg0 == error_mark_node || arg1 == error_mark_node)
12102 return const0_rtx;
12103
12104 if (target == 0
12105 || GET_MODE (target) != tmode
12106 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12107 target = gen_reg_rtx (tmode);
12108
12109 op1 = copy_to_mode_reg (mode1, op1);
12110
12111 if (op0 == const0_rtx)
12112 {
12113 addr = gen_rtx_MEM (tmode, op1);
12114 }
12115 else
12116 {
12117 op0 = copy_to_mode_reg (mode0, op0);
12118 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op0, op1));
12119 }
12120
12121 pat = GEN_FCN (icode) (target, addr);
12122
12123 if (! pat)
12124 return 0;
12125 emit_insn (pat);
12126
12127 return target;
12128 }
12129
12130 /* Return a constant vector for use as a little-endian permute control vector
12131 to reverse the order of elements of the given vector mode. */
12132 static rtx
12133 swap_selector_for_mode (enum machine_mode mode)
12134 {
12135 /* These are little-endian vectors, so their elements are reversed
12136 from what you would normally expect for a permute control vector. */
12137 unsigned int swap2[16] = {7,6,5,4,3,2,1,0,15,14,13,12,11,10,9,8};
12138 unsigned int swap4[16] = {3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12};
12139 unsigned int swap8[16] = {1,0,3,2,5,4,7,6,9,8,11,10,13,12,15,14};
12140 unsigned int swap16[16] = {0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15};
12141 unsigned int *swaparray, i;
12142 rtx perm[16];
12143
12144 switch (mode)
12145 {
12146 case V2DFmode:
12147 case V2DImode:
12148 swaparray = swap2;
12149 break;
12150 case V4SFmode:
12151 case V4SImode:
12152 swaparray = swap4;
12153 break;
12154 case V8HImode:
12155 swaparray = swap8;
12156 break;
12157 case V16QImode:
12158 swaparray = swap16;
12159 break;
12160 default:
12161 gcc_unreachable ();
12162 }
12163
12164 for (i = 0; i < 16; ++i)
12165 perm[i] = GEN_INT (swaparray[i]);
12166
12167 return force_reg (V16QImode, gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm)));
12168 }
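/* For example, for a V4SImode vector holding {0,1,2,3}, feeding the
   selector returned here to vperm (with the same vector as both
   inputs) is intended to produce {3,2,1,0}.  The byte tables above
   look inverted only because of the little-endian element numbering
   noted in the comment at the top of the function.  */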
12169
12170 /* Generate code for an "lvx", "lvxl", or "lve*x" built-in for a little-endian target
12171 with -maltivec=be specified. Issue the load followed by an element-reversing
12172 permute. */
12173 void
12174 altivec_expand_lvx_be (rtx op0, rtx op1, enum machine_mode mode, unsigned unspec)
12175 {
12176 rtx tmp = gen_reg_rtx (mode);
12177 rtx load = gen_rtx_SET (VOIDmode, tmp, op1);
12178 rtx lvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
12179 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, load, lvx));
12180 rtx sel = swap_selector_for_mode (mode);
12181 rtx vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, tmp, tmp, sel), UNSPEC_VPERM);
12182
12183 gcc_assert (REG_P (op0));
12184 emit_insn (par);
12185 emit_insn (gen_rtx_SET (VOIDmode, op0, vperm));
12186 }
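/* Roughly, the RTL emitted above corresponds to the sequence

     lvx   tmp, ...             (element-reversed load on LE)
     vperm op0, tmp, tmp, sel

   so OP0 receives the elements in the order the -maltivec=be
   programming model expects.  (A sketch, not the literal assembly.)  */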
12187
12188 /* Generate code for a "stvx" or "stvxl" built-in for a little-endian target
12189 with -maltivec=be specified. Issue the store preceded by an element-reversing
12190 permute. */
12191 void
12192 altivec_expand_stvx_be (rtx op0, rtx op1, enum machine_mode mode, unsigned unspec)
12193 {
12194 rtx tmp = gen_reg_rtx (mode);
12195 rtx store = gen_rtx_SET (VOIDmode, op0, tmp);
12196 rtx stvx = gen_rtx_UNSPEC (mode, gen_rtvec (1, const0_rtx), unspec);
12197 rtx par = gen_rtx_PARALLEL (mode, gen_rtvec (2, store, stvx));
12198 rtx sel = swap_selector_for_mode (mode);
12199 rtx vperm;
12200
12201 gcc_assert (REG_P (op1));
12202 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
12203 emit_insn (gen_rtx_SET (VOIDmode, tmp, vperm));
12204 emit_insn (par);
12205 }
12206
12207 /* Generate code for a "stve*x" built-in for a little-endian target with -maltivec=be
12208 specified. Issue the store preceded by an element-reversing permute. */
12209 void
12210 altivec_expand_stvex_be (rtx op0, rtx op1, enum machine_mode mode, unsigned unspec)
12211 {
12212 enum machine_mode inner_mode = GET_MODE_INNER (mode);
12213 rtx tmp = gen_reg_rtx (mode);
12214 rtx stvx = gen_rtx_UNSPEC (inner_mode, gen_rtvec (1, tmp), unspec);
12215 rtx sel = swap_selector_for_mode (mode);
12216 rtx vperm;
12217
12218 gcc_assert (REG_P (op1));
12219 vperm = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op1, sel), UNSPEC_VPERM);
12220 emit_insn (gen_rtx_SET (VOIDmode, tmp, vperm));
12221 emit_insn (gen_rtx_SET (VOIDmode, op0, stvx));
12222 }
12223
12224 static rtx
12225 altivec_expand_lv_builtin (enum insn_code icode, tree exp, rtx target, bool blk)
12226 {
12227 rtx pat, addr;
12228 tree arg0 = CALL_EXPR_ARG (exp, 0);
12229 tree arg1 = CALL_EXPR_ARG (exp, 1);
12230 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12231 enum machine_mode mode0 = Pmode;
12232 enum machine_mode mode1 = Pmode;
12233 rtx op0 = expand_normal (arg0);
12234 rtx op1 = expand_normal (arg1);
12235
12236 if (icode == CODE_FOR_nothing)
12237 /* Builtin not supported on this processor. */
12238 return 0;
12239
12240 /* If we got invalid arguments, bail out before generating bad rtl. */
12241 if (arg0 == error_mark_node || arg1 == error_mark_node)
12242 return const0_rtx;
12243
12244 if (target == 0
12245 || GET_MODE (target) != tmode
12246 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12247 target = gen_reg_rtx (tmode);
12248
12249 op1 = copy_to_mode_reg (mode1, op1);
12250
12251 if (op0 == const0_rtx)
12252 {
12253 addr = gen_rtx_MEM (blk ? BLKmode : tmode, op1);
12254 }
12255 else
12256 {
12257 op0 = copy_to_mode_reg (mode0, op0);
12258 addr = gen_rtx_MEM (blk ? BLKmode : tmode, gen_rtx_PLUS (Pmode, op0, op1));
12259 }
12260
12261 pat = GEN_FCN (icode) (target, addr);
12262
12263 if (! pat)
12264 return 0;
12265 emit_insn (pat);
12266
12267 return target;
12268 }
12269
12270 static rtx
12271 spe_expand_stv_builtin (enum insn_code icode, tree exp)
12272 {
12273 tree arg0 = CALL_EXPR_ARG (exp, 0);
12274 tree arg1 = CALL_EXPR_ARG (exp, 1);
12275 tree arg2 = CALL_EXPR_ARG (exp, 2);
12276 rtx op0 = expand_normal (arg0);
12277 rtx op1 = expand_normal (arg1);
12278 rtx op2 = expand_normal (arg2);
12279 rtx pat;
12280 enum machine_mode mode0 = insn_data[icode].operand[0].mode;
12281 enum machine_mode mode1 = insn_data[icode].operand[1].mode;
12282 enum machine_mode mode2 = insn_data[icode].operand[2].mode;
12283
12284 /* Invalid arguments. Bail before doing anything stoopid! */
12285 if (arg0 == error_mark_node
12286 || arg1 == error_mark_node
12287 || arg2 == error_mark_node)
12288 return const0_rtx;
12289
12290 if (! (*insn_data[icode].operand[2].predicate) (op0, mode2))
12291 op0 = copy_to_mode_reg (mode2, op0);
12292 if (! (*insn_data[icode].operand[0].predicate) (op1, mode0))
12293 op1 = copy_to_mode_reg (mode0, op1);
12294 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
12295 op2 = copy_to_mode_reg (mode1, op2);
12296
12297 pat = GEN_FCN (icode) (op1, op2, op0);
12298 if (pat)
12299 emit_insn (pat);
12300 return NULL_RTX;
12301 }
12302
12303 static rtx
12304 paired_expand_stv_builtin (enum insn_code icode, tree exp)
12305 {
12306 tree arg0 = CALL_EXPR_ARG (exp, 0);
12307 tree arg1 = CALL_EXPR_ARG (exp, 1);
12308 tree arg2 = CALL_EXPR_ARG (exp, 2);
12309 rtx op0 = expand_normal (arg0);
12310 rtx op1 = expand_normal (arg1);
12311 rtx op2 = expand_normal (arg2);
12312 rtx pat, addr;
12313 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12314 enum machine_mode mode1 = Pmode;
12315 enum machine_mode mode2 = Pmode;
12316
12317 /* Invalid arguments. Bail before doing anything stoopid! */
12318 if (arg0 == error_mark_node
12319 || arg1 == error_mark_node
12320 || arg2 == error_mark_node)
12321 return const0_rtx;
12322
12323 if (! (*insn_data[icode].operand[1].predicate) (op0, tmode))
12324 op0 = copy_to_mode_reg (tmode, op0);
12325
12326 op2 = copy_to_mode_reg (mode2, op2);
12327
12328 if (op1 == const0_rtx)
12329 {
12330 addr = gen_rtx_MEM (tmode, op2);
12331 }
12332 else
12333 {
12334 op1 = copy_to_mode_reg (mode1, op1);
12335 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
12336 }
12337
12338 pat = GEN_FCN (icode) (addr, op0);
12339 if (pat)
12340 emit_insn (pat);
12341 return NULL_RTX;
12342 }
12343
12344 static rtx
12345 altivec_expand_stv_builtin (enum insn_code icode, tree exp)
12346 {
12347 tree arg0 = CALL_EXPR_ARG (exp, 0);
12348 tree arg1 = CALL_EXPR_ARG (exp, 1);
12349 tree arg2 = CALL_EXPR_ARG (exp, 2);
12350 rtx op0 = expand_normal (arg0);
12351 rtx op1 = expand_normal (arg1);
12352 rtx op2 = expand_normal (arg2);
12353 rtx pat, addr;
12354 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12355 enum machine_mode smode = insn_data[icode].operand[1].mode;
12356 enum machine_mode mode1 = Pmode;
12357 enum machine_mode mode2 = Pmode;
12358
12359 /* Invalid arguments. Bail before doing anything stoopid! */
12360 if (arg0 == error_mark_node
12361 || arg1 == error_mark_node
12362 || arg2 == error_mark_node)
12363 return const0_rtx;
12364
12365 if (! (*insn_data[icode].operand[1].predicate) (op0, smode))
12366 op0 = copy_to_mode_reg (smode, op0);
12367
12368 op2 = copy_to_mode_reg (mode2, op2);
12369
12370 if (op1 == const0_rtx)
12371 {
12372 addr = gen_rtx_MEM (tmode, op2);
12373 }
12374 else
12375 {
12376 op1 = copy_to_mode_reg (mode1, op1);
12377 addr = gen_rtx_MEM (tmode, gen_rtx_PLUS (Pmode, op1, op2));
12378 }
12379
12380 pat = GEN_FCN (icode) (addr, op0);
12381 if (pat)
12382 emit_insn (pat);
12383 return NULL_RTX;
12384 }
12385
12386 /* Return the appropriate SPR number associated with the given builtin. */
12387 static inline HOST_WIDE_INT
12388 htm_spr_num (enum rs6000_builtins code)
12389 {
12390 if (code == HTM_BUILTIN_GET_TFHAR
12391 || code == HTM_BUILTIN_SET_TFHAR)
12392 return TFHAR_SPR;
12393 else if (code == HTM_BUILTIN_GET_TFIAR
12394 || code == HTM_BUILTIN_SET_TFIAR)
12395 return TFIAR_SPR;
12396 else if (code == HTM_BUILTIN_GET_TEXASR
12397 || code == HTM_BUILTIN_SET_TEXASR)
12398 return TEXASR_SPR;
12399 gcc_assert (code == HTM_BUILTIN_GET_TEXASRU
12400 || code == HTM_BUILTIN_SET_TEXASRU);
12401 return TEXASRU_SPR;
12402 }
12403
12404 /* Return the appropriate SPR regno associated with the given builtin. */
12405 static inline HOST_WIDE_INT
12406 htm_spr_regno (enum rs6000_builtins code)
12407 {
12408 if (code == HTM_BUILTIN_GET_TFHAR
12409 || code == HTM_BUILTIN_SET_TFHAR)
12410 return TFHAR_REGNO;
12411 else if (code == HTM_BUILTIN_GET_TFIAR
12412 || code == HTM_BUILTIN_SET_TFIAR)
12413 return TFIAR_REGNO;
12414 gcc_assert (code == HTM_BUILTIN_GET_TEXASR
12415 || code == HTM_BUILTIN_SET_TEXASR
12416 || code == HTM_BUILTIN_GET_TEXASRU
12417 || code == HTM_BUILTIN_SET_TEXASRU);
12418 return TEXASR_REGNO;
12419 }
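/* Note that the TEXASR and TEXASRU builtins share TEXASR_REGNO:
   architecturally, TEXASRU is simply the upper 32 bits of TEXASR, so
   a single register suffices for dataflow purposes.  */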
12420
12421 /* Return the correct ICODE value depending on whether we are
12422 setting or reading the HTM SPRs. */
12423 static inline enum insn_code
12424 rs6000_htm_spr_icode (bool nonvoid)
12425 {
12426 if (nonvoid)
12427 return (TARGET_64BIT) ? CODE_FOR_htm_mfspr_di : CODE_FOR_htm_mfspr_si;
12428 else
12429 return (TARGET_64BIT) ? CODE_FOR_htm_mtspr_di : CODE_FOR_htm_mtspr_si;
12430 }
12431
12432 /* Expand the HTM builtin in EXP and store the result in TARGET.
12433 Store true in *EXPANDEDP if we found a builtin to expand. */
12434 static rtx
12435 htm_expand_builtin (tree exp, rtx target, bool * expandedp)
12436 {
12437 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12438 bool nonvoid = TREE_TYPE (TREE_TYPE (fndecl)) != void_type_node;
12439 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
12440 const struct builtin_description *d;
12441 size_t i;
12442
12443 *expandedp = false;
12444
12445 /* Expand the HTM builtins. */
12446 d = bdesc_htm;
12447 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
12448 if (d->code == fcode)
12449 {
12450 rtx op[MAX_HTM_OPERANDS], pat;
12451 int nopnds = 0;
12452 tree arg;
12453 call_expr_arg_iterator iter;
12454 unsigned attr = rs6000_builtin_info[fcode].attr;
12455 enum insn_code icode = d->icode;
12456
12457 if (attr & RS6000_BTC_SPR)
12458 icode = rs6000_htm_spr_icode (nonvoid);
12459
12460 if (nonvoid)
12461 {
12462 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12463 if (!target
12464 || GET_MODE (target) != tmode
12465 || !(*insn_data[icode].operand[0].predicate) (target, tmode))
12466 target = gen_reg_rtx (tmode);
12467 op[nopnds++] = target;
12468 }
12469
12470 FOR_EACH_CALL_EXPR_ARG (arg, iter, exp)
12471 {
12472 const struct insn_operand_data *insn_op;
12473
12474 if (arg == error_mark_node || nopnds >= MAX_HTM_OPERANDS)
12475 return NULL_RTX;
12476
12477 insn_op = &insn_data[icode].operand[nopnds];
12478
12479 op[nopnds] = expand_normal (arg);
12480
12481 if (!(*insn_op->predicate) (op[nopnds], insn_op->mode))
12482 {
12483 if (!strcmp (insn_op->constraint, "n"))
12484 {
12485 int arg_num = (nonvoid) ? nopnds : nopnds + 1;
12486 if (!CONST_INT_P (op[nopnds]))
12487 error ("argument %d must be an unsigned literal", arg_num);
12488 else
12489 error ("argument %d is an unsigned literal that is "
12490 "out of range", arg_num);
12491 return const0_rtx;
12492 }
12493 op[nopnds] = copy_to_mode_reg (insn_op->mode, op[nopnds]);
12494 }
12495
12496 nopnds++;
12497 }
12498
12499 /* Handle the builtins for extended mnemonics. These accept
12500 no arguments, but map to builtins that take arguments. */
12501 switch (fcode)
12502 {
12503 case HTM_BUILTIN_TENDALL: /* Alias for: tend. 1 */
12504 case HTM_BUILTIN_TRESUME: /* Alias for: tsr. 1 */
12505 op[nopnds++] = GEN_INT (1);
12506 #ifdef ENABLE_CHECKING
12507 attr |= RS6000_BTC_UNARY;
12508 #endif
12509 break;
12510 case HTM_BUILTIN_TSUSPEND: /* Alias for: tsr. 0 */
12511 op[nopnds++] = GEN_INT (0);
12512 #ifdef ENABLE_CHECKING
12513 attr |= RS6000_BTC_UNARY;
12514 #endif
12515 break;
12516 default:
12517 break;
12518 }
12519
12520 /* If this builtin accesses SPRs, then pass in the appropriate
12521 SPR number and SPR regno as the last two operands. */
12522 if (attr & RS6000_BTC_SPR)
12523 {
12524 op[nopnds++] = gen_rtx_CONST_INT (Pmode, htm_spr_num (fcode));
12525 op[nopnds++] = gen_rtx_REG (Pmode, htm_spr_regno (fcode));
12526 }
12527
12528 #ifdef ENABLE_CHECKING
12529 int expected_nopnds = 0;
12530 if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_UNARY)
12531 expected_nopnds = 1;
12532 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_BINARY)
12533 expected_nopnds = 2;
12534 else if ((attr & RS6000_BTC_TYPE_MASK) == RS6000_BTC_TERNARY)
12535 expected_nopnds = 3;
12536 if (!(attr & RS6000_BTC_VOID))
12537 expected_nopnds += 1;
12538 if (attr & RS6000_BTC_SPR)
12539 expected_nopnds += 2;
12540
12541 gcc_assert (nopnds == expected_nopnds && nopnds <= MAX_HTM_OPERANDS);
12542 #endif
12543
12544 switch (nopnds)
12545 {
12546 case 1:
12547 pat = GEN_FCN (icode) (op[0]);
12548 break;
12549 case 2:
12550 pat = GEN_FCN (icode) (op[0], op[1]);
12551 break;
12552 case 3:
12553 pat = GEN_FCN (icode) (op[0], op[1], op[2]);
12554 break;
12555 case 4:
12556 pat = GEN_FCN (icode) (op[0], op[1], op[2], op[3]);
12557 break;
12558 default:
12559 gcc_unreachable ();
12560 }
12561 if (!pat)
12562 return NULL_RTX;
12563 emit_insn (pat);
12564
12565 *expandedp = true;
12566 if (nonvoid)
12567 return target;
12568 return const0_rtx;
12569 }
12570
12571 return NULL_RTX;
12572 }
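/* Usage sketch (hedged; these builtin names are registered elsewhere
   in this file, and the precise return-value conventions are those of
   the GCC manual rather than anything shown here):

     unsigned int ok = __builtin_tbegin (0);
     ... transactional work ...
     __builtin_tend (0);

   The extended mnemonics travel the same path: __builtin_tresume ()
   is expanded exactly as "tsr. 1" above, via the implicit GEN_INT
   operand.  */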
12573
12574 static rtx
12575 rs6000_expand_ternop_builtin (enum insn_code icode, tree exp, rtx target)
12576 {
12577 rtx pat;
12578 tree arg0 = CALL_EXPR_ARG (exp, 0);
12579 tree arg1 = CALL_EXPR_ARG (exp, 1);
12580 tree arg2 = CALL_EXPR_ARG (exp, 2);
12581 rtx op0 = expand_normal (arg0);
12582 rtx op1 = expand_normal (arg1);
12583 rtx op2 = expand_normal (arg2);
12584 enum machine_mode tmode = insn_data[icode].operand[0].mode;
12585 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
12586 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
12587 enum machine_mode mode2 = insn_data[icode].operand[3].mode;
12588
12589 if (icode == CODE_FOR_nothing)
12590 /* Builtin not supported on this processor. */
12591 return 0;
12592
12593 /* If we got invalid arguments, bail out before generating bad rtl. */
12594 if (arg0 == error_mark_node
12595 || arg1 == error_mark_node
12596 || arg2 == error_mark_node)
12597 return const0_rtx;
12598
12599 /* Check and prepare the arguments, depending on the instruction code.
12600
12601 Note that a switch statement instead of the sequence of tests
12602 would be incorrect, as many of the CODE_FOR values could be
12603 CODE_FOR_nothing, which would yield multiple case labels with
12604 identical values.  (If an icode really were CODE_FOR_nothing, we
12605 would never reach this point at runtime anyway.) */
12606 if (icode == CODE_FOR_altivec_vsldoi_v4sf
12607 || icode == CODE_FOR_altivec_vsldoi_v4si
12608 || icode == CODE_FOR_altivec_vsldoi_v8hi
12609 || icode == CODE_FOR_altivec_vsldoi_v16qi)
12610 {
12611 /* Only allow 4-bit unsigned literals. */
12612 STRIP_NOPS (arg2);
12613 if (TREE_CODE (arg2) != INTEGER_CST
12614 || TREE_INT_CST_LOW (arg2) & ~0xf)
12615 {
12616 error ("argument 3 must be a 4-bit unsigned literal");
12617 return const0_rtx;
12618 }
12619 }
12620 else if (icode == CODE_FOR_vsx_xxpermdi_v2df
12621 || icode == CODE_FOR_vsx_xxpermdi_v2di
12622 || icode == CODE_FOR_vsx_xxsldwi_v16qi
12623 || icode == CODE_FOR_vsx_xxsldwi_v8hi
12624 || icode == CODE_FOR_vsx_xxsldwi_v4si
12625 || icode == CODE_FOR_vsx_xxsldwi_v4sf
12626 || icode == CODE_FOR_vsx_xxsldwi_v2di
12627 || icode == CODE_FOR_vsx_xxsldwi_v2df)
12628 {
12629 /* Only allow 2-bit unsigned literals. */
12630 STRIP_NOPS (arg2);
12631 if (TREE_CODE (arg2) != INTEGER_CST
12632 || TREE_INT_CST_LOW (arg2) & ~0x3)
12633 {
12634 error ("argument 3 must be a 2-bit unsigned literal");
12635 return const0_rtx;
12636 }
12637 }
12638 else if (icode == CODE_FOR_vsx_set_v2df
12639 || icode == CODE_FOR_vsx_set_v2di
12640 || icode == CODE_FOR_bcdadd
12641 || icode == CODE_FOR_bcdadd_lt
12642 || icode == CODE_FOR_bcdadd_eq
12643 || icode == CODE_FOR_bcdadd_gt
12644 || icode == CODE_FOR_bcdsub
12645 || icode == CODE_FOR_bcdsub_lt
12646 || icode == CODE_FOR_bcdsub_eq
12647 || icode == CODE_FOR_bcdsub_gt)
12648 {
12649 /* Only allow 1-bit unsigned literals. */
12650 STRIP_NOPS (arg2);
12651 if (TREE_CODE (arg2) != INTEGER_CST
12652 || TREE_INT_CST_LOW (arg2) & ~0x1)
12653 {
12654 error ("argument 3 must be a 1-bit unsigned literal");
12655 return const0_rtx;
12656 }
12657 }
12658 else if (icode == CODE_FOR_dfp_ddedpd_dd
12659 || icode == CODE_FOR_dfp_ddedpd_td)
12660 {
12661 /* Only allow 2-bit unsigned literals where the value is 0 or 2. */
12662 STRIP_NOPS (arg0);
12663 if (TREE_CODE (arg0) != INTEGER_CST
12664 || TREE_INT_CST_LOW (arg0) & ~0x3)
12665 {
12666 error ("argument 1 must be 0 or 2");
12667 return const0_rtx;
12668 }
12669 }
12670 else if (icode == CODE_FOR_dfp_denbcd_dd
12671 || icode == CODE_FOR_dfp_denbcd_td)
12672 {
12673 /* Only allow 1-bit unsigned literals. */
12674 STRIP_NOPS (arg0);
12675 if (TREE_CODE (arg0) != INTEGER_CST
12676 || TREE_INT_CST_LOW (arg0) & ~0x1)
12677 {
12678 error ("argument 1 must be a 1-bit unsigned literal");
12679 return const0_rtx;
12680 }
12681 }
12682 else if (icode == CODE_FOR_dfp_dscli_dd
12683 || icode == CODE_FOR_dfp_dscli_td
12684 || icode == CODE_FOR_dfp_dscri_dd
12685 || icode == CODE_FOR_dfp_dscri_td)
12686 {
12687 /* Only allow 6-bit unsigned literals. */
12688 STRIP_NOPS (arg1);
12689 if (TREE_CODE (arg1) != INTEGER_CST
12690 || TREE_INT_CST_LOW (arg1) & ~0x3f)
12691 {
12692 error ("argument 2 must be a 6-bit unsigned literal");
12693 return const0_rtx;
12694 }
12695 }
12696 else if (icode == CODE_FOR_crypto_vshasigmaw
12697 || icode == CODE_FOR_crypto_vshasigmad)
12698 {
12699 /* Check whether the 2nd and 3rd arguments are integer constants and in
12700 range and prepare arguments. */
12701 STRIP_NOPS (arg1);
12702 if (TREE_CODE (arg1) != INTEGER_CST || wi::geu_p (arg1, 2))
12703 {
12704 error ("argument 2 must be 0 or 1");
12705 return const0_rtx;
12706 }
12707
12708 STRIP_NOPS (arg2);
12709 if (TREE_CODE (arg2) != INTEGER_CST || wi::geu_p (arg2, 16))
12710 {
12711 error ("argument 3 must be in the range 0..15");
12712 return const0_rtx;
12713 }
12714 }
12715
12716 if (target == 0
12717 || GET_MODE (target) != tmode
12718 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12719 target = gen_reg_rtx (tmode);
12720
12721 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12722 op0 = copy_to_mode_reg (mode0, op0);
12723 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
12724 op1 = copy_to_mode_reg (mode1, op1);
12725 if (! (*insn_data[icode].operand[3].predicate) (op2, mode2))
12726 op2 = copy_to_mode_reg (mode2, op2);
12727
12728 if (TARGET_PAIRED_FLOAT && icode == CODE_FOR_selv2sf4)
12729 pat = GEN_FCN (icode) (target, op0, op1, op2, CONST0_RTX (SFmode));
12730 else
12731 pat = GEN_FCN (icode) (target, op0, op1, op2);
12732 if (! pat)
12733 return 0;
12734 emit_insn (pat);
12735
12736 return target;
12737 }
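/* For example, vec_sld, which expands through the altivec_vsldoi_*
   patterns checked above, requires its third operand to be a 4-bit
   literal: "vec_sld (a, b, 3)" is accepted, while "vec_sld (a, b, n)"
   with a variable n triggers the "argument 3 must be a 4-bit unsigned
   literal" diagnostic.  (An illustrative sketch of the checks above.)  */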
12738
12739 /* Expand the lvx builtins. */
12740 static rtx
12741 altivec_expand_ld_builtin (tree exp, rtx target, bool *expandedp)
12742 {
12743 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12744 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
12745 tree arg0;
12746 enum machine_mode tmode, mode0;
12747 rtx pat, op0;
12748 enum insn_code icode;
12749
12750 switch (fcode)
12751 {
12752 case ALTIVEC_BUILTIN_LD_INTERNAL_16qi:
12753 icode = CODE_FOR_vector_altivec_load_v16qi;
12754 break;
12755 case ALTIVEC_BUILTIN_LD_INTERNAL_8hi:
12756 icode = CODE_FOR_vector_altivec_load_v8hi;
12757 break;
12758 case ALTIVEC_BUILTIN_LD_INTERNAL_4si:
12759 icode = CODE_FOR_vector_altivec_load_v4si;
12760 break;
12761 case ALTIVEC_BUILTIN_LD_INTERNAL_4sf:
12762 icode = CODE_FOR_vector_altivec_load_v4sf;
12763 break;
12764 case ALTIVEC_BUILTIN_LD_INTERNAL_2df:
12765 icode = CODE_FOR_vector_altivec_load_v2df;
12766 break;
12767 case ALTIVEC_BUILTIN_LD_INTERNAL_2di:
12768 icode = CODE_FOR_vector_altivec_load_v2di;
break;
12769 case ALTIVEC_BUILTIN_LD_INTERNAL_1ti:
12770 icode = CODE_FOR_vector_altivec_load_v1ti;
12771 break;
12772 default:
12773 *expandedp = false;
12774 return NULL_RTX;
12775 }
12776
12777 *expandedp = true;
12778
12779 arg0 = CALL_EXPR_ARG (exp, 0);
12780 op0 = expand_normal (arg0);
12781 tmode = insn_data[icode].operand[0].mode;
12782 mode0 = insn_data[icode].operand[1].mode;
12783
12784 if (target == 0
12785 || GET_MODE (target) != tmode
12786 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
12787 target = gen_reg_rtx (tmode);
12788
12789 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
12790 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12791
12792 pat = GEN_FCN (icode) (target, op0);
12793 if (! pat)
12794 return 0;
12795 emit_insn (pat);
12796 return target;
12797 }
12798
12799 /* Expand the stvx builtins. */
12800 static rtx
12801 altivec_expand_st_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
12802 bool *expandedp)
12803 {
12804 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12805 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
12806 tree arg0, arg1;
12807 enum machine_mode mode0, mode1;
12808 rtx pat, op0, op1;
12809 enum insn_code icode;
12810
12811 switch (fcode)
12812 {
12813 case ALTIVEC_BUILTIN_ST_INTERNAL_16qi:
12814 icode = CODE_FOR_vector_altivec_store_v16qi;
12815 break;
12816 case ALTIVEC_BUILTIN_ST_INTERNAL_8hi:
12817 icode = CODE_FOR_vector_altivec_store_v8hi;
12818 break;
12819 case ALTIVEC_BUILTIN_ST_INTERNAL_4si:
12820 icode = CODE_FOR_vector_altivec_store_v4si;
12821 break;
12822 case ALTIVEC_BUILTIN_ST_INTERNAL_4sf:
12823 icode = CODE_FOR_vector_altivec_store_v4sf;
12824 break;
12825 case ALTIVEC_BUILTIN_ST_INTERNAL_2df:
12826 icode = CODE_FOR_vector_altivec_store_v2df;
12827 break;
12828 case ALTIVEC_BUILTIN_ST_INTERNAL_2di:
12829 icode = CODE_FOR_vector_altivec_store_v2di;
break;
12830 case ALTIVEC_BUILTIN_ST_INTERNAL_1ti:
12831 icode = CODE_FOR_vector_altivec_store_v1ti;
12832 break;
12833 default:
12834 *expandedp = false;
12835 return NULL_RTX;
12836 }
12837
12838 arg0 = CALL_EXPR_ARG (exp, 0);
12839 arg1 = CALL_EXPR_ARG (exp, 1);
12840 op0 = expand_normal (arg0);
12841 op1 = expand_normal (arg1);
12842 mode0 = insn_data[icode].operand[0].mode;
12843 mode1 = insn_data[icode].operand[1].mode;
12844
12845 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
12846 op0 = gen_rtx_MEM (mode0, copy_to_mode_reg (Pmode, op0));
12847 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
12848 op1 = copy_to_mode_reg (mode1, op1);
12849
12850 pat = GEN_FCN (icode) (op0, op1);
12851 if (pat)
12852 emit_insn (pat);
12853
12854 *expandedp = true;
12855 return NULL_RTX;
12856 }
12857
12858 /* Expand the dst builtins. */
12859 static rtx
12860 altivec_expand_dst_builtin (tree exp, rtx target ATTRIBUTE_UNUSED,
12861 bool *expandedp)
12862 {
12863 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
12864 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
12865 tree arg0, arg1, arg2;
12866 enum machine_mode mode0, mode1;
12867 rtx pat, op0, op1, op2;
12868 const struct builtin_description *d;
12869 size_t i;
12870
12871 *expandedp = false;
12872
12873 /* Handle DST variants. */
12874 d = bdesc_dst;
12875 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
12876 if (d->code == fcode)
12877 {
12878 arg0 = CALL_EXPR_ARG (exp, 0);
12879 arg1 = CALL_EXPR_ARG (exp, 1);
12880 arg2 = CALL_EXPR_ARG (exp, 2);
12881 op0 = expand_normal (arg0);
12882 op1 = expand_normal (arg1);
12883 op2 = expand_normal (arg2);
12884 mode0 = insn_data[d->icode].operand[0].mode;
12885 mode1 = insn_data[d->icode].operand[1].mode;
12886
12887 /* Invalid arguments, bail out before generating bad rtl. */
12888 if (arg0 == error_mark_node
12889 || arg1 == error_mark_node
12890 || arg2 == error_mark_node)
12891 return const0_rtx;
12892
12893 *expandedp = true;
12894 STRIP_NOPS (arg2);
12895 if (TREE_CODE (arg2) != INTEGER_CST
12896 || TREE_INT_CST_LOW (arg2) & ~0x3)
12897 {
12898 error ("argument to %qs must be a 2-bit unsigned literal", d->name);
12899 return const0_rtx;
12900 }
12901
12902 if (! (*insn_data[d->icode].operand[0].predicate) (op0, mode0))
12903 op0 = copy_to_mode_reg (Pmode, op0);
12904 if (! (*insn_data[d->icode].operand[1].predicate) (op1, mode1))
12905 op1 = copy_to_mode_reg (mode1, op1);
12906
12907 pat = GEN_FCN (d->icode) (op0, op1, op2);
12908 if (pat != 0)
12909 emit_insn (pat);
12910
12911 return NULL_RTX;
12912 }
12913
12914 return NULL_RTX;
12915 }
12916
12917 /* Expand vec_init builtin. */
12918 static rtx
12919 altivec_expand_vec_init_builtin (tree type, tree exp, rtx target)
12920 {
12921 enum machine_mode tmode = TYPE_MODE (type);
12922 enum machine_mode inner_mode = GET_MODE_INNER (tmode);
12923 int i, n_elt = GET_MODE_NUNITS (tmode);
12924
12925 gcc_assert (VECTOR_MODE_P (tmode));
12926 gcc_assert (n_elt == call_expr_nargs (exp));
12927
12928 if (!target || !register_operand (target, tmode))
12929 target = gen_reg_rtx (tmode);
12930
12931 /* If we have a vector composed of a single element, such as V1TImode, do
12932 the initialization directly. */
12933 if (n_elt == 1 && GET_MODE_SIZE (tmode) == GET_MODE_SIZE (inner_mode))
12934 {
12935 rtx x = expand_normal (CALL_EXPR_ARG (exp, 0));
12936 emit_move_insn (target, gen_lowpart (tmode, x));
12937 }
12938 else
12939 {
12940 rtvec v = rtvec_alloc (n_elt);
12941
12942 for (i = 0; i < n_elt; ++i)
12943 {
12944 rtx x = expand_normal (CALL_EXPR_ARG (exp, i));
12945 RTVEC_ELT (v, i) = gen_lowpart (inner_mode, x);
12946 }
12947
12948 rs6000_expand_vector_init (target, gen_rtx_PARALLEL (tmode, v));
12949 }
12950
12951 return target;
12952 }
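/* For instance, the V4SImode flavor of these builtins takes exactly
   four scalar arguments (the gcc_assert above enforces n_elt ==
   call_expr_nargs); each argument is narrowed to the inner mode and
   becomes one element of the PARALLEL handed to
   rs6000_expand_vector_init.  */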
12953
12954 /* Return the integer constant in ARG. Constrain it to be in the range
12955 of the subparts of VEC_TYPE; issue an error if not. */
12956
12957 static int
12958 get_element_number (tree vec_type, tree arg)
12959 {
12960 unsigned HOST_WIDE_INT elt, max = TYPE_VECTOR_SUBPARTS (vec_type) - 1;
12961
12962 if (!tree_fits_uhwi_p (arg)
12963 || (elt = tree_to_uhwi (arg), elt > max))
12964 {
12965 error ("selector must be an integer constant in the range 0..%wi", max);
12966 return 0;
12967 }
12968
12969 return elt;
12970 }
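/* E.g. for a four-element vector type (V4SImode) the valid selectors
   are 0..3; a call with selector 4 reports the error above and falls
   back to element 0.  */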
12971
12972 /* Expand vec_set builtin. */
12973 static rtx
12974 altivec_expand_vec_set_builtin (tree exp)
12975 {
12976 enum machine_mode tmode, mode1;
12977 tree arg0, arg1, arg2;
12978 int elt;
12979 rtx op0, op1;
12980
12981 arg0 = CALL_EXPR_ARG (exp, 0);
12982 arg1 = CALL_EXPR_ARG (exp, 1);
12983 arg2 = CALL_EXPR_ARG (exp, 2);
12984
12985 tmode = TYPE_MODE (TREE_TYPE (arg0));
12986 mode1 = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
12987 gcc_assert (VECTOR_MODE_P (tmode));
12988
12989 op0 = expand_expr (arg0, NULL_RTX, tmode, EXPAND_NORMAL);
12990 op1 = expand_expr (arg1, NULL_RTX, mode1, EXPAND_NORMAL);
12991 elt = get_element_number (TREE_TYPE (arg0), arg2);
12992
12993 if (GET_MODE (op1) != mode1 && GET_MODE (op1) != VOIDmode)
12994 op1 = convert_modes (mode1, GET_MODE (op1), op1, true);
12995
12996 op0 = force_reg (tmode, op0);
12997 op1 = force_reg (mode1, op1);
12998
12999 rs6000_expand_vector_set (op0, op1, elt);
13000
13001 return op0;
13002 }
13003
13004 /* Expand vec_ext builtin. */
13005 static rtx
13006 altivec_expand_vec_ext_builtin (tree exp, rtx target)
13007 {
13008 enum machine_mode tmode, mode0;
13009 tree arg0, arg1;
13010 int elt;
13011 rtx op0;
13012
13013 arg0 = CALL_EXPR_ARG (exp, 0);
13014 arg1 = CALL_EXPR_ARG (exp, 1);
13015
13016 op0 = expand_normal (arg0);
13017 elt = get_element_number (TREE_TYPE (arg0), arg1);
13018
13019 tmode = TYPE_MODE (TREE_TYPE (TREE_TYPE (arg0)));
13020 mode0 = TYPE_MODE (TREE_TYPE (arg0));
13021 gcc_assert (VECTOR_MODE_P (mode0));
13022
13023 op0 = force_reg (mode0, op0);
13024
13025 if (optimize || !target || !register_operand (target, tmode))
13026 target = gen_reg_rtx (tmode);
13027
13028 rs6000_expand_vector_extract (target, op0, elt);
13029
13030 return target;
13031 }
13032
13033 /* Expand the builtin in EXP and store the result in TARGET. Store
13034 true in *EXPANDEDP if we found a builtin to expand. */
13035 static rtx
13036 altivec_expand_builtin (tree exp, rtx target, bool *expandedp)
13037 {
13038 const struct builtin_description *d;
13039 size_t i;
13040 enum insn_code icode;
13041 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13042 tree arg0;
13043 rtx op0, pat;
13044 enum machine_mode tmode, mode0;
13045 enum rs6000_builtins fcode
13046 = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13047
13048 if (rs6000_overloaded_builtin_p (fcode))
13049 {
13050 *expandedp = true;
13051 error ("unresolved overload for Altivec builtin %qF", fndecl);
13052
13053 /* Given it is invalid, just generate a normal call. */
13054 return expand_call (exp, target, false);
13055 }
13056
13057 target = altivec_expand_ld_builtin (exp, target, expandedp);
13058 if (*expandedp)
13059 return target;
13060
13061 target = altivec_expand_st_builtin (exp, target, expandedp);
13062 if (*expandedp)
13063 return target;
13064
13065 target = altivec_expand_dst_builtin (exp, target, expandedp);
13066 if (*expandedp)
13067 return target;
13068
13069 *expandedp = true;
13070
13071 switch (fcode)
13072 {
13073 case ALTIVEC_BUILTIN_STVX_V2DF:
13074 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2df, exp);
13075 case ALTIVEC_BUILTIN_STVX_V2DI:
13076 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v2di, exp);
13077 case ALTIVEC_BUILTIN_STVX_V4SF:
13078 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4sf, exp);
13079 case ALTIVEC_BUILTIN_STVX:
13080 case ALTIVEC_BUILTIN_STVX_V4SI:
13081 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v4si, exp);
13082 case ALTIVEC_BUILTIN_STVX_V8HI:
13083 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v8hi, exp);
13084 case ALTIVEC_BUILTIN_STVX_V16QI:
13085 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvx_v16qi, exp);
13086 case ALTIVEC_BUILTIN_STVEBX:
13087 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvebx, exp);
13088 case ALTIVEC_BUILTIN_STVEHX:
13089 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvehx, exp);
13090 case ALTIVEC_BUILTIN_STVEWX:
13091 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvewx, exp);
13092 case ALTIVEC_BUILTIN_STVXL_V2DF:
13093 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2df, exp);
13094 case ALTIVEC_BUILTIN_STVXL_V2DI:
13095 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v2di, exp);
13096 case ALTIVEC_BUILTIN_STVXL_V4SF:
13097 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4sf, exp);
13098 case ALTIVEC_BUILTIN_STVXL:
13099 case ALTIVEC_BUILTIN_STVXL_V4SI:
13100 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v4si, exp);
13101 case ALTIVEC_BUILTIN_STVXL_V8HI:
13102 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v8hi, exp);
13103 case ALTIVEC_BUILTIN_STVXL_V16QI:
13104 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvxl_v16qi, exp);
13105
13106 case ALTIVEC_BUILTIN_STVLX:
13107 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlx, exp);
13108 case ALTIVEC_BUILTIN_STVLXL:
13109 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvlxl, exp);
13110 case ALTIVEC_BUILTIN_STVRX:
13111 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrx, exp);
13112 case ALTIVEC_BUILTIN_STVRXL:
13113 return altivec_expand_stv_builtin (CODE_FOR_altivec_stvrxl, exp);
13114
13115 case VSX_BUILTIN_STXVD2X_V1TI:
13116 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v1ti, exp);
13117 case VSX_BUILTIN_STXVD2X_V2DF:
13118 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2df, exp);
13119 case VSX_BUILTIN_STXVD2X_V2DI:
13120 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v2di, exp);
13121 case VSX_BUILTIN_STXVW4X_V4SF:
13122 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4sf, exp);
13123 case VSX_BUILTIN_STXVW4X_V4SI:
13124 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v4si, exp);
13125 case VSX_BUILTIN_STXVW4X_V8HI:
13126 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v8hi, exp);
13127 case VSX_BUILTIN_STXVW4X_V16QI:
13128 return altivec_expand_stv_builtin (CODE_FOR_vsx_store_v16qi, exp);
13129
13130 case ALTIVEC_BUILTIN_MFVSCR:
13131 icode = CODE_FOR_altivec_mfvscr;
13132 tmode = insn_data[icode].operand[0].mode;
13133
13134 if (target == 0
13135 || GET_MODE (target) != tmode
13136 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13137 target = gen_reg_rtx (tmode);
13138
13139 pat = GEN_FCN (icode) (target);
13140 if (! pat)
13141 return 0;
13142 emit_insn (pat);
13143 return target;
13144
13145 case ALTIVEC_BUILTIN_MTVSCR:
13146 icode = CODE_FOR_altivec_mtvscr;
13147 arg0 = CALL_EXPR_ARG (exp, 0);
13148 op0 = expand_normal (arg0);
13149 mode0 = insn_data[icode].operand[0].mode;
13150
13151 /* If we got invalid arguments, bail out before generating bad rtl. */
13152 if (arg0 == error_mark_node)
13153 return const0_rtx;
13154
13155 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13156 op0 = copy_to_mode_reg (mode0, op0);
13157
13158 pat = GEN_FCN (icode) (op0);
13159 if (pat)
13160 emit_insn (pat);
13161 return NULL_RTX;
13162
13163 case ALTIVEC_BUILTIN_DSSALL:
13164 emit_insn (gen_altivec_dssall ());
13165 return NULL_RTX;
13166
13167 case ALTIVEC_BUILTIN_DSS:
13168 icode = CODE_FOR_altivec_dss;
13169 arg0 = CALL_EXPR_ARG (exp, 0);
13170 STRIP_NOPS (arg0);
13171 op0 = expand_normal (arg0);
13172 mode0 = insn_data[icode].operand[0].mode;
13173
13174 /* If we got invalid arguments, bail out before generating bad rtl. */
13175 if (arg0 == error_mark_node)
13176 return const0_rtx;
13177
13178 if (TREE_CODE (arg0) != INTEGER_CST
13179 || TREE_INT_CST_LOW (arg0) & ~0x3)
13180 {
13181 error ("argument to dss must be a 2-bit unsigned literal");
13182 return const0_rtx;
13183 }
13184
13185 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13186 op0 = copy_to_mode_reg (mode0, op0);
13187
13188 emit_insn (gen_altivec_dss (op0));
13189 return NULL_RTX;
13190
13191 case ALTIVEC_BUILTIN_VEC_INIT_V4SI:
13192 case ALTIVEC_BUILTIN_VEC_INIT_V8HI:
13193 case ALTIVEC_BUILTIN_VEC_INIT_V16QI:
13194 case ALTIVEC_BUILTIN_VEC_INIT_V4SF:
13195 case VSX_BUILTIN_VEC_INIT_V2DF:
13196 case VSX_BUILTIN_VEC_INIT_V2DI:
13197 case VSX_BUILTIN_VEC_INIT_V1TI:
13198 return altivec_expand_vec_init_builtin (TREE_TYPE (exp), exp, target);
13199
13200 case ALTIVEC_BUILTIN_VEC_SET_V4SI:
13201 case ALTIVEC_BUILTIN_VEC_SET_V8HI:
13202 case ALTIVEC_BUILTIN_VEC_SET_V16QI:
13203 case ALTIVEC_BUILTIN_VEC_SET_V4SF:
13204 case VSX_BUILTIN_VEC_SET_V2DF:
13205 case VSX_BUILTIN_VEC_SET_V2DI:
13206 case VSX_BUILTIN_VEC_SET_V1TI:
13207 return altivec_expand_vec_set_builtin (exp);
13208
13209 case ALTIVEC_BUILTIN_VEC_EXT_V4SI:
13210 case ALTIVEC_BUILTIN_VEC_EXT_V8HI:
13211 case ALTIVEC_BUILTIN_VEC_EXT_V16QI:
13212 case ALTIVEC_BUILTIN_VEC_EXT_V4SF:
13213 case VSX_BUILTIN_VEC_EXT_V2DF:
13214 case VSX_BUILTIN_VEC_EXT_V2DI:
13215 case VSX_BUILTIN_VEC_EXT_V1TI:
13216 return altivec_expand_vec_ext_builtin (exp, target);
13217
13218 default:
13219 break;
13221 }
13222
13223 /* Expand abs* operations. */
13224 d = bdesc_abs;
13225 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
13226 if (d->code == fcode)
13227 return altivec_expand_abs_builtin (d->icode, exp, target);
13228
13229 /* Expand the AltiVec predicates. */
13230 d = bdesc_altivec_preds;
13231 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
13232 if (d->code == fcode)
13233 return altivec_expand_predicate_builtin (d->icode, exp, target);
13234
13235 /* LV* are funky; they were initialized differently from the other builtins, so expand them by hand here. */
13236 switch (fcode)
13237 {
13238 case ALTIVEC_BUILTIN_LVSL:
13239 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsl,
13240 exp, target, false);
13241 case ALTIVEC_BUILTIN_LVSR:
13242 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvsr,
13243 exp, target, false);
13244 case ALTIVEC_BUILTIN_LVEBX:
13245 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvebx,
13246 exp, target, false);
13247 case ALTIVEC_BUILTIN_LVEHX:
13248 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvehx,
13249 exp, target, false);
13250 case ALTIVEC_BUILTIN_LVEWX:
13251 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvewx,
13252 exp, target, false);
13253 case ALTIVEC_BUILTIN_LVXL_V2DF:
13254 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2df,
13255 exp, target, false);
13256 case ALTIVEC_BUILTIN_LVXL_V2DI:
13257 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v2di,
13258 exp, target, false);
13259 case ALTIVEC_BUILTIN_LVXL_V4SF:
13260 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4sf,
13261 exp, target, false);
13262 case ALTIVEC_BUILTIN_LVXL:
13263 case ALTIVEC_BUILTIN_LVXL_V4SI:
13264 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v4si,
13265 exp, target, false);
13266 case ALTIVEC_BUILTIN_LVXL_V8HI:
13267 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v8hi,
13268 exp, target, false);
13269 case ALTIVEC_BUILTIN_LVXL_V16QI:
13270 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvxl_v16qi,
13271 exp, target, false);
13272 case ALTIVEC_BUILTIN_LVX_V2DF:
13273 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2df,
13274 exp, target, false);
13275 case ALTIVEC_BUILTIN_LVX_V2DI:
13276 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v2di,
13277 exp, target, false);
13278 case ALTIVEC_BUILTIN_LVX_V4SF:
13279 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4sf,
13280 exp, target, false);
13281 case ALTIVEC_BUILTIN_LVX:
13282 case ALTIVEC_BUILTIN_LVX_V4SI:
13283 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v4si,
13284 exp, target, false);
13285 case ALTIVEC_BUILTIN_LVX_V8HI:
13286 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v8hi,
13287 exp, target, false);
13288 case ALTIVEC_BUILTIN_LVX_V16QI:
13289 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvx_v16qi,
13290 exp, target, false);
13291 case ALTIVEC_BUILTIN_LVLX:
13292 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlx,
13293 exp, target, true);
13294 case ALTIVEC_BUILTIN_LVLXL:
13295 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvlxl,
13296 exp, target, true);
13297 case ALTIVEC_BUILTIN_LVRX:
13298 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrx,
13299 exp, target, true);
13300 case ALTIVEC_BUILTIN_LVRXL:
13301 return altivec_expand_lv_builtin (CODE_FOR_altivec_lvrxl,
13302 exp, target, true);
13303 case VSX_BUILTIN_LXVD2X_V1TI:
13304 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v1ti,
13305 exp, target, false);
13306 case VSX_BUILTIN_LXVD2X_V2DF:
13307 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2df,
13308 exp, target, false);
13309 case VSX_BUILTIN_LXVD2X_V2DI:
13310 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v2di,
13311 exp, target, false);
13312 case VSX_BUILTIN_LXVW4X_V4SF:
13313 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4sf,
13314 exp, target, false);
13315 case VSX_BUILTIN_LXVW4X_V4SI:
13316 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v4si,
13317 exp, target, false);
13318 case VSX_BUILTIN_LXVW4X_V8HI:
13319 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v8hi,
13320 exp, target, false);
13321 case VSX_BUILTIN_LXVW4X_V16QI:
13322 return altivec_expand_lv_builtin (CODE_FOR_vsx_load_v16qi,
13323 exp, target, false);
13325 default:
13326 break;
13328 }
13329
13330 *expandedp = false;
13331 return NULL_RTX;
13332 }
13333
13334 /* Expand the builtin in EXP and store the result in TARGET. Store
13335 true in *EXPANDEDP if we found a builtin to expand. */
13336 static rtx
13337 paired_expand_builtin (tree exp, rtx target, bool * expandedp)
13338 {
13339 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13340 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13341 const struct builtin_description *d;
13342 size_t i;
13343
13344 *expandedp = true;
13345
13346 switch (fcode)
13347 {
13348 case PAIRED_BUILTIN_STX:
13349 return paired_expand_stv_builtin (CODE_FOR_paired_stx, exp);
13350 case PAIRED_BUILTIN_LX:
13351 return paired_expand_lv_builtin (CODE_FOR_paired_lx, exp, target);
13352 default:
13353 break;
13355 }
13356
13357 /* Expand the paired predicates. */
13358 d = bdesc_paired_preds;
13359 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); i++, d++)
13360 if (d->code == fcode)
13361 return paired_expand_predicate_builtin (d->icode, exp, target);
13362
13363 *expandedp = false;
13364 return NULL_RTX;
13365 }
13366
13367 /* Binops that need to be initialized manually, but can be expanded
13368 automagically by rs6000_expand_binop_builtin. */
13369 static const struct builtin_description bdesc_2arg_spe[] =
13370 {
13371 { RS6000_BTM_SPE, CODE_FOR_spe_evlddx, "__builtin_spe_evlddx", SPE_BUILTIN_EVLDDX },
13372 { RS6000_BTM_SPE, CODE_FOR_spe_evldwx, "__builtin_spe_evldwx", SPE_BUILTIN_EVLDWX },
13373 { RS6000_BTM_SPE, CODE_FOR_spe_evldhx, "__builtin_spe_evldhx", SPE_BUILTIN_EVLDHX },
13374 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhex, "__builtin_spe_evlwhex", SPE_BUILTIN_EVLWHEX },
13375 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhoux, "__builtin_spe_evlwhoux", SPE_BUILTIN_EVLWHOUX },
13376 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhosx, "__builtin_spe_evlwhosx", SPE_BUILTIN_EVLWHOSX },
13377 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplatx, "__builtin_spe_evlwwsplatx", SPE_BUILTIN_EVLWWSPLATX },
13378 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplatx, "__builtin_spe_evlwhsplatx", SPE_BUILTIN_EVLWHSPLATX },
13379 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplatx, "__builtin_spe_evlhhesplatx", SPE_BUILTIN_EVLHHESPLATX },
13380 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplatx, "__builtin_spe_evlhhousplatx", SPE_BUILTIN_EVLHHOUSPLATX },
13381 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplatx, "__builtin_spe_evlhhossplatx", SPE_BUILTIN_EVLHHOSSPLATX },
13382 { RS6000_BTM_SPE, CODE_FOR_spe_evldd, "__builtin_spe_evldd", SPE_BUILTIN_EVLDD },
13383 { RS6000_BTM_SPE, CODE_FOR_spe_evldw, "__builtin_spe_evldw", SPE_BUILTIN_EVLDW },
13384 { RS6000_BTM_SPE, CODE_FOR_spe_evldh, "__builtin_spe_evldh", SPE_BUILTIN_EVLDH },
13385 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhe, "__builtin_spe_evlwhe", SPE_BUILTIN_EVLWHE },
13386 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhou, "__builtin_spe_evlwhou", SPE_BUILTIN_EVLWHOU },
13387 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhos, "__builtin_spe_evlwhos", SPE_BUILTIN_EVLWHOS },
13388 { RS6000_BTM_SPE, CODE_FOR_spe_evlwwsplat, "__builtin_spe_evlwwsplat", SPE_BUILTIN_EVLWWSPLAT },
13389 { RS6000_BTM_SPE, CODE_FOR_spe_evlwhsplat, "__builtin_spe_evlwhsplat", SPE_BUILTIN_EVLWHSPLAT },
13390 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhesplat, "__builtin_spe_evlhhesplat", SPE_BUILTIN_EVLHHESPLAT },
13391 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhousplat, "__builtin_spe_evlhhousplat", SPE_BUILTIN_EVLHHOUSPLAT },
13392 { RS6000_BTM_SPE, CODE_FOR_spe_evlhhossplat, "__builtin_spe_evlhhossplat", SPE_BUILTIN_EVLHHOSSPLAT }
13393 };
13394
13395 /* Expand the builtin in EXP and store the result in TARGET. Store
13396 true in *EXPANDEDP if we found a builtin to expand.
13397
13398 This expands the SPE builtins that are not simple unary and binary
13399 operations. */
13400 static rtx
13401 spe_expand_builtin (tree exp, rtx target, bool *expandedp)
13402 {
13403 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13404 tree arg1, arg0;
13405 enum rs6000_builtins fcode = (enum rs6000_builtins) DECL_FUNCTION_CODE (fndecl);
13406 enum insn_code icode;
13407 enum machine_mode tmode, mode0;
13408 rtx pat, op0;
13409 const struct builtin_description *d;
13410 size_t i;
13411
13412 *expandedp = true;
13413
13414 /* Syntax check for a 5-bit unsigned immediate. */
13415 switch (fcode)
13416 {
13417 case SPE_BUILTIN_EVSTDD:
13418 case SPE_BUILTIN_EVSTDH:
13419 case SPE_BUILTIN_EVSTDW:
13420 case SPE_BUILTIN_EVSTWHE:
13421 case SPE_BUILTIN_EVSTWHO:
13422 case SPE_BUILTIN_EVSTWWE:
13423 case SPE_BUILTIN_EVSTWWO:
13424 arg1 = CALL_EXPR_ARG (exp, 2);
13425 if (TREE_CODE (arg1) != INTEGER_CST
13426 || TREE_INT_CST_LOW (arg1) & ~0x1f)
13427 {
13428 error ("argument 2 must be a 5-bit unsigned literal");
13429 return const0_rtx;
13430 }
13431 break;
13432 default:
13433 break;
13434 }
13435
13436 /* The evsplat*i instructions are not quite generic. */
13437 switch (fcode)
13438 {
13439 case SPE_BUILTIN_EVSPLATFI:
13440 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplatfi,
13441 exp, target);
13442 case SPE_BUILTIN_EVSPLATI:
13443 return rs6000_expand_unop_builtin (CODE_FOR_spe_evsplati,
13444 exp, target);
13445 default:
13446 break;
13447 }
13448
13449 d = bdesc_2arg_spe;
13450 for (i = 0; i < ARRAY_SIZE (bdesc_2arg_spe); ++i, ++d)
13451 if (d->code == fcode)
13452 return rs6000_expand_binop_builtin (d->icode, exp, target);
13453
13454 d = bdesc_spe_predicates;
13455 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, ++d)
13456 if (d->code == fcode)
13457 return spe_expand_predicate_builtin (d->icode, exp, target);
13458
13459 d = bdesc_spe_evsel;
13460 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, ++d)
13461 if (d->code == fcode)
13462 return spe_expand_evsel_builtin (d->icode, exp, target);
13463
13464 switch (fcode)
13465 {
13466 case SPE_BUILTIN_EVSTDDX:
13467 return spe_expand_stv_builtin (CODE_FOR_spe_evstddx, exp);
13468 case SPE_BUILTIN_EVSTDHX:
13469 return spe_expand_stv_builtin (CODE_FOR_spe_evstdhx, exp);
13470 case SPE_BUILTIN_EVSTDWX:
13471 return spe_expand_stv_builtin (CODE_FOR_spe_evstdwx, exp);
13472 case SPE_BUILTIN_EVSTWHEX:
13473 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhex, exp);
13474 case SPE_BUILTIN_EVSTWHOX:
13475 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhox, exp);
13476 case SPE_BUILTIN_EVSTWWEX:
13477 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwex, exp);
13478 case SPE_BUILTIN_EVSTWWOX:
13479 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwox, exp);
13480 case SPE_BUILTIN_EVSTDD:
13481 return spe_expand_stv_builtin (CODE_FOR_spe_evstdd, exp);
13482 case SPE_BUILTIN_EVSTDH:
13483 return spe_expand_stv_builtin (CODE_FOR_spe_evstdh, exp);
13484 case SPE_BUILTIN_EVSTDW:
13485 return spe_expand_stv_builtin (CODE_FOR_spe_evstdw, exp);
13486 case SPE_BUILTIN_EVSTWHE:
13487 return spe_expand_stv_builtin (CODE_FOR_spe_evstwhe, exp);
13488 case SPE_BUILTIN_EVSTWHO:
13489 return spe_expand_stv_builtin (CODE_FOR_spe_evstwho, exp);
13490 case SPE_BUILTIN_EVSTWWE:
13491 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwe, exp);
13492 case SPE_BUILTIN_EVSTWWO:
13493 return spe_expand_stv_builtin (CODE_FOR_spe_evstwwo, exp);
13494 case SPE_BUILTIN_MFSPEFSCR:
13495 icode = CODE_FOR_spe_mfspefscr;
13496 tmode = insn_data[icode].operand[0].mode;
13497
13498 if (target == 0
13499 || GET_MODE (target) != tmode
13500 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13501 target = gen_reg_rtx (tmode);
13502
13503 pat = GEN_FCN (icode) (target);
13504 if (! pat)
13505 return 0;
13506 emit_insn (pat);
13507 return target;
13508 case SPE_BUILTIN_MTSPEFSCR:
13509 icode = CODE_FOR_spe_mtspefscr;
13510 arg0 = CALL_EXPR_ARG (exp, 0);
13511 op0 = expand_normal (arg0);
13512 mode0 = insn_data[icode].operand[0].mode;
13513
13514 if (arg0 == error_mark_node)
13515 return const0_rtx;
13516
13517 if (! (*insn_data[icode].operand[0].predicate) (op0, mode0))
13518 op0 = copy_to_mode_reg (mode0, op0);
13519
13520 pat = GEN_FCN (icode) (op0);
13521 if (pat)
13522 emit_insn (pat);
13523 return NULL_RTX;
13524 default:
13525 break;
13526 }
13527
13528 *expandedp = false;
13529 return NULL_RTX;
13530 }
13531
13532 static rtx
13533 paired_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13534 {
13535 rtx pat, scratch, tmp;
13536 tree form = CALL_EXPR_ARG (exp, 0);
13537 tree arg0 = CALL_EXPR_ARG (exp, 1);
13538 tree arg1 = CALL_EXPR_ARG (exp, 2);
13539 rtx op0 = expand_normal (arg0);
13540 rtx op1 = expand_normal (arg1);
13541 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13542 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13543 int form_int;
13544 enum rtx_code code;
13545
13546 if (TREE_CODE (form) != INTEGER_CST)
13547 {
13548 error ("argument 1 of __builtin_paired_predicate must be a constant");
13549 return const0_rtx;
13550 }
13551 else
13552 form_int = TREE_INT_CST_LOW (form);
13553
13554 gcc_assert (mode0 == mode1);
13555
13556 if (arg0 == error_mark_node || arg1 == error_mark_node)
13557 return const0_rtx;
13558
13559 if (target == 0
13560 || GET_MODE (target) != SImode
13561 || !(*insn_data[icode].operand[0].predicate) (target, SImode))
13562 target = gen_reg_rtx (SImode);
13563 if (!(*insn_data[icode].operand[1].predicate) (op0, mode0))
13564 op0 = copy_to_mode_reg (mode0, op0);
13565 if (!(*insn_data[icode].operand[2].predicate) (op1, mode1))
13566 op1 = copy_to_mode_reg (mode1, op1);
13567
13568 scratch = gen_reg_rtx (CCFPmode);
13569
13570 pat = GEN_FCN (icode) (scratch, op0, op1);
13571 if (!pat)
13572 return const0_rtx;
13573
13574 emit_insn (pat);
13575
13576 switch (form_int)
13577 {
13578 /* LT bit. */
13579 case 0:
13580 code = LT;
13581 break;
13582 /* GT bit. */
13583 case 1:
13584 code = GT;
13585 break;
13586 /* EQ bit. */
13587 case 2:
13588 code = EQ;
13589 break;
13590 /* UN bit. */
13591 case 3:
13592 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
13593 return target;
13594 default:
13595 error ("argument 1 of __builtin_paired_predicate is out of range");
13596 return const0_rtx;
13597 }
13598
13599 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
13600 emit_move_insn (target, tmp);
13601 return target;
13602 }
13603
13604 static rtx
13605 spe_expand_predicate_builtin (enum insn_code icode, tree exp, rtx target)
13606 {
13607 rtx pat, scratch, tmp;
13608 tree form = CALL_EXPR_ARG (exp, 0);
13609 tree arg0 = CALL_EXPR_ARG (exp, 1);
13610 tree arg1 = CALL_EXPR_ARG (exp, 2);
13611 rtx op0 = expand_normal (arg0);
13612 rtx op1 = expand_normal (arg1);
13613 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13614 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13615 int form_int;
13616 enum rtx_code code;
13617
13618 if (TREE_CODE (form) != INTEGER_CST)
13619 {
13620 error ("argument 1 of __builtin_spe_predicate must be a constant");
13621 return const0_rtx;
13622 }
13623 else
13624 form_int = TREE_INT_CST_LOW (form);
13625
13626 gcc_assert (mode0 == mode1);
13627
13628 if (arg0 == error_mark_node || arg1 == error_mark_node)
13629 return const0_rtx;
13630
13631 if (target == 0
13632 || GET_MODE (target) != SImode
13633 || ! (*insn_data[icode].operand[0].predicate) (target, SImode))
13634 target = gen_reg_rtx (SImode);
13635
13636 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13637 op0 = copy_to_mode_reg (mode0, op0);
13638 if (! (*insn_data[icode].operand[2].predicate) (op1, mode1))
13639 op1 = copy_to_mode_reg (mode1, op1);
13640
13641 scratch = gen_reg_rtx (CCmode);
13642
13643 pat = GEN_FCN (icode) (scratch, op0, op1);
13644 if (! pat)
13645 return const0_rtx;
13646 emit_insn (pat);
13647
13648 /* There are 4 variants for each predicate: _any_, _all_, _upper_,
13649 _lower_. We use one compare, but look in different bits of the
13650 CR for each variant.
13651
13652 There are 2 elements in each SPE simd type (upper/lower). The CR
13653 bits are set as follows:
13654
13655 BIT 0 | BIT 1 | BIT 2 | BIT 3
13656 U | L | (U | L) | (U & L)
13657
13658 So, for an "all" relationship, BIT 3 would be set.
13659 For an "any" relationship, BIT 2 would be set. Etc.
13660
13661 Following traditional nomenclature, these bits map to:
13662
13663 BIT 0 | BIT 1 | BIT 2 | BIT 3
13664 LT | GT | EQ | OV
13665
13666 Later, we generate rtl that tests the OV, EQ, LT, or GT bit for the all, any, upper, or lower variant, respectively.
13667 */
13668
13669 switch (form_int)
13670 {
13671 /* All variant. OV bit. */
13672 case 0:
13673 /* We need to get to the OV bit, which is the ORDERED bit. We
13674 could generate (ordered:SI (reg:CC xx) (const_int 0)), but
13675 that's ugly and will make validate_condition_mode die.
13676 So let's just use another pattern. */
13677 emit_insn (gen_move_from_CR_ov_bit (target, scratch));
13678 return target;
13679 /* Any variant. EQ bit. */
13680 case 1:
13681 code = EQ;
13682 break;
13683 /* Upper variant. LT bit. */
13684 case 2:
13685 code = LT;
13686 break;
13687 /* Lower variant. GT bit. */
13688 case 3:
13689 code = GT;
13690 break;
13691 default:
13692 error ("argument 1 of __builtin_spe_predicate is out of range");
13693 return const0_rtx;
13694 }
13695
13696 tmp = gen_rtx_fmt_ee (code, SImode, scratch, const0_rtx);
13697 emit_move_insn (target, tmp);
13698
13699 return target;
13700 }
13701
13702 /* The evsel builtins look like this:
13703
13704 e = __builtin_spe_evsel_OP (a, b, c, d);
13705
13706 and work like this:
13707
13708 e[upper] = a[upper] *OP* b[upper] ? c[upper] : d[upper];
13709 e[lower] = a[lower] *OP* b[lower] ? c[lower] : d[lower];
13710 */
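/* A hypothetical use (sketch; the concrete names come from
   bdesc_spe_evsel, e.g. a signed greater-than variant):

     __ev64_opaque__ e = __builtin_spe_evsel_gts (a, b, c, d);

   which selects c[i] where a[i] > b[i] holds and d[i] elsewhere.  */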
13711
13712 static rtx
13713 spe_expand_evsel_builtin (enum insn_code icode, tree exp, rtx target)
13714 {
13715 rtx pat, scratch;
13716 tree arg0 = CALL_EXPR_ARG (exp, 0);
13717 tree arg1 = CALL_EXPR_ARG (exp, 1);
13718 tree arg2 = CALL_EXPR_ARG (exp, 2);
13719 tree arg3 = CALL_EXPR_ARG (exp, 3);
13720 rtx op0 = expand_normal (arg0);
13721 rtx op1 = expand_normal (arg1);
13722 rtx op2 = expand_normal (arg2);
13723 rtx op3 = expand_normal (arg3);
13724 enum machine_mode mode0 = insn_data[icode].operand[1].mode;
13725 enum machine_mode mode1 = insn_data[icode].operand[2].mode;
13726
13727 gcc_assert (mode0 == mode1);
13728
13729 if (arg0 == error_mark_node || arg1 == error_mark_node
13730 || arg2 == error_mark_node || arg3 == error_mark_node)
13731 return const0_rtx;
13732
13733 if (target == 0
13734 || GET_MODE (target) != mode0
13735 || ! (*insn_data[icode].operand[0].predicate) (target, mode0))
13736 target = gen_reg_rtx (mode0);
13737
13738 if (! (*insn_data[icode].operand[1].predicate) (op0, mode0))
13739 op0 = copy_to_mode_reg (mode0, op0);
13740 if (! (*insn_data[icode].operand[1].predicate) (op1, mode1))
13741 op1 = copy_to_mode_reg (mode0, op1);
13742 if (! (*insn_data[icode].operand[1].predicate) (op2, mode1))
13743 op2 = copy_to_mode_reg (mode0, op2);
13744 if (! (*insn_data[icode].operand[1].predicate) (op3, mode1))
13745 op3 = copy_to_mode_reg (mode0, op3);
13746
13747 /* Generate the compare. */
13748 scratch = gen_reg_rtx (CCmode);
13749 pat = GEN_FCN (icode) (scratch, op0, op1);
13750 if (! pat)
13751 return const0_rtx;
13752 emit_insn (pat);
13753
13754 if (mode0 == V2SImode)
13755 emit_insn (gen_spe_evsel (target, op2, op3, scratch));
13756 else
13757 emit_insn (gen_spe_evsel_fs (target, op2, op3, scratch));
13758
13759 return target;
13760 }
13761
13762 /* Raise an error message for a builtin function that is called without the
13763 appropriate target options being set. */
13764
13765 static void
13766 rs6000_invalid_builtin (enum rs6000_builtins fncode)
13767 {
13768 size_t uns_fncode = (size_t)fncode;
13769 const char *name = rs6000_builtin_info[uns_fncode].name;
13770 HOST_WIDE_INT fnmask = rs6000_builtin_info[uns_fncode].mask;
13771
13772 gcc_assert (name != NULL);
13773 if ((fnmask & RS6000_BTM_CELL) != 0)
13774 error ("Builtin function %s is only valid for the cell processor", name);
13775 else if ((fnmask & RS6000_BTM_VSX) != 0)
13776 error ("Builtin function %s requires the -mvsx option", name);
13777 else if ((fnmask & RS6000_BTM_HTM) != 0)
13778 error ("Builtin function %s requires the -mhtm option", name);
13779 else if ((fnmask & RS6000_BTM_ALTIVEC) != 0)
13780 error ("Builtin function %s requires the -maltivec option", name);
13781 else if ((fnmask & RS6000_BTM_PAIRED) != 0)
13782 error ("Builtin function %s requires the -mpaired option", name);
13783 else if ((fnmask & RS6000_BTM_SPE) != 0)
13784 error ("Builtin function %s requires the -mspe option", name);
13785 else if ((fnmask & (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
13786 == (RS6000_BTM_DFP | RS6000_BTM_P8_VECTOR))
13787 error ("Builtin function %s requires the -mhard-dfp and"
13788 " -mpower8-vector options", name);
13789 else if ((fnmask & RS6000_BTM_DFP) != 0)
13790 error ("Builtin function %s requires the -mhard-dfp option", name);
13791 else if ((fnmask & RS6000_BTM_P8_VECTOR) != 0)
13792 error ("Builtin function %s requires the -mpower8-vector option", name);
13793 else if ((fnmask & (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
13794 == (RS6000_BTM_HARD_FLOAT | RS6000_BTM_LDBL128))
13795 error ("Builtin function %s requires the -mhard-float and"
13796 " -mlong-double-128 options", name);
13797 else if ((fnmask & RS6000_BTM_HARD_FLOAT) != 0)
13798 error ("Builtin function %s requires the -mhard-float option", name);
13799 else
13800 error ("Builtin function %s is not supported with the current options",
13801 name);
13802 }
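/* For example, calling a VSX builtin such as __builtin_vsx_xvadddp while
   compiling without -mvsx reaches the second branch above and emits
   (builtin name purely illustrative):

     error: builtin function __builtin_vsx_xvadddp requires the -mvsx option  */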
13803
13804 /* Expand an expression EXP that calls a built-in function,
13805 with result going to TARGET if that's convenient
13806 (and in mode MODE if that's convenient).
13807 SUBTARGET may be used as the target for computing one of EXP's operands.
13808 IGNORE is nonzero if the value is to be ignored. */
13809
13810 static rtx
13811 rs6000_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
13812 enum machine_mode mode ATTRIBUTE_UNUSED,
13813 int ignore ATTRIBUTE_UNUSED)
13814 {
13815 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
13816 enum rs6000_builtins fcode
13817 = (enum rs6000_builtins)DECL_FUNCTION_CODE (fndecl);
13818 size_t uns_fcode = (size_t)fcode;
13819 const struct builtin_description *d;
13820 size_t i;
13821 rtx ret;
13822 bool success;
13823 HOST_WIDE_INT mask = rs6000_builtin_info[uns_fcode].mask;
13824 bool func_valid_p = ((rs6000_builtin_mask & mask) == mask);
13825
13826 if (TARGET_DEBUG_BUILTIN)
13827 {
13828 enum insn_code icode = rs6000_builtin_info[uns_fcode].icode;
13829 const char *name1 = rs6000_builtin_info[uns_fcode].name;
13830 const char *name2 = ((icode != CODE_FOR_nothing)
13831 ? get_insn_name ((int)icode)
13832 : "nothing");
13833 const char *name3;
13834
13835 switch (rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK)
13836 {
13837 default: name3 = "unknown"; break;
13838 case RS6000_BTC_SPECIAL: name3 = "special"; break;
13839 case RS6000_BTC_UNARY: name3 = "unary"; break;
13840 case RS6000_BTC_BINARY: name3 = "binary"; break;
13841 case RS6000_BTC_TERNARY: name3 = "ternary"; break;
13842 case RS6000_BTC_PREDICATE: name3 = "predicate"; break;
13843 case RS6000_BTC_ABS: name3 = "abs"; break;
13844 case RS6000_BTC_EVSEL: name3 = "evsel"; break;
13845 case RS6000_BTC_DST: name3 = "dst"; break;
13846 }
13847
13848
13849 fprintf (stderr,
13850 "rs6000_expand_builtin, %s (%d), insn = %s (%d), type=%s%s\n",
13851 (name1) ? name1 : "---", fcode,
13852 (name2) ? name2 : "---", (int)icode,
13853 name3,
13854 func_valid_p ? "" : ", not valid");
13855 }
13856
13857 if (!func_valid_p)
13858 {
13859 rs6000_invalid_builtin (fcode);
13860
13861 /* Given it is invalid, just generate a normal call. */
13862 return expand_call (exp, target, ignore);
13863 }
13864
13865 switch (fcode)
13866 {
13867 case RS6000_BUILTIN_RECIP:
13868 return rs6000_expand_binop_builtin (CODE_FOR_recipdf3, exp, target);
13869
13870 case RS6000_BUILTIN_RECIPF:
13871 return rs6000_expand_binop_builtin (CODE_FOR_recipsf3, exp, target);
13872
13873 case RS6000_BUILTIN_RSQRTF:
13874 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtsf2, exp, target);
13875
13876 case RS6000_BUILTIN_RSQRT:
13877 return rs6000_expand_unop_builtin (CODE_FOR_rsqrtdf2, exp, target);
13878
13879 case POWER7_BUILTIN_BPERMD:
13880 return rs6000_expand_binop_builtin (((TARGET_64BIT)
13881 ? CODE_FOR_bpermd_di
13882 : CODE_FOR_bpermd_si), exp, target);
13883
13884 case RS6000_BUILTIN_GET_TB:
13885 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_get_timebase,
13886 target);
13887
13888 case RS6000_BUILTIN_MFTB:
13889 return rs6000_expand_zeroop_builtin (((TARGET_64BIT)
13890 ? CODE_FOR_rs6000_mftb_di
13891 : CODE_FOR_rs6000_mftb_si),
13892 target);
13893
13894 case RS6000_BUILTIN_MFFS:
13895 return rs6000_expand_zeroop_builtin (CODE_FOR_rs6000_mffs, target);
13896
13897 case RS6000_BUILTIN_MTFSF:
13898 return rs6000_expand_mtfsf_builtin (CODE_FOR_rs6000_mtfsf, exp);
13899
13900 case ALTIVEC_BUILTIN_MASK_FOR_LOAD:
13901 case ALTIVEC_BUILTIN_MASK_FOR_STORE:
13902 {
13903 int icode = (BYTES_BIG_ENDIAN ? (int) CODE_FOR_altivec_lvsr
13904 : (int) CODE_FOR_altivec_lvsl);
13905 enum machine_mode tmode = insn_data[icode].operand[0].mode;
13906 enum machine_mode mode = insn_data[icode].operand[1].mode;
13907 tree arg;
13908 rtx op, addr, pat;
13909
13910 gcc_assert (TARGET_ALTIVEC);
13911
13912 arg = CALL_EXPR_ARG (exp, 0);
13913 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
13914 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
13915 addr = memory_address (mode, op);
13916 if (fcode == ALTIVEC_BUILTIN_MASK_FOR_STORE)
13917 op = addr;
13918 else
13919 {
13920 	      /* For the load case we need to negate the address.  */
13921 op = gen_reg_rtx (GET_MODE (addr));
13922 emit_insn (gen_rtx_SET (VOIDmode, op,
13923 gen_rtx_NEG (GET_MODE (addr), addr)));
13924 }
13925 op = gen_rtx_MEM (mode, op);
13926
13927 if (target == 0
13928 || GET_MODE (target) != tmode
13929 || ! (*insn_data[icode].operand[0].predicate) (target, tmode))
13930 target = gen_reg_rtx (tmode);
13931
13933 pat = GEN_FCN (icode) (target, op);
13934 if (!pat)
13935 return 0;
13936 emit_insn (pat);
13937
13938 return target;
13939 }
13940
13941 case ALTIVEC_BUILTIN_VCFUX:
13942 case ALTIVEC_BUILTIN_VCFSX:
13943 case ALTIVEC_BUILTIN_VCTUXS:
13944 case ALTIVEC_BUILTIN_VCTSXS:
13945 /* FIXME: There's got to be a nicer way to handle this case than
13946 constructing a new CALL_EXPR. */
13947 if (call_expr_nargs (exp) == 1)
13948 {
13949 exp = build_call_nary (TREE_TYPE (exp), CALL_EXPR_FN (exp),
13950 2, CALL_EXPR_ARG (exp, 0), integer_zero_node);
13951 }
13952 break;
13953
13954 default:
13955 break;
13956 }
13957
13958 if (TARGET_ALTIVEC)
13959 {
13960 ret = altivec_expand_builtin (exp, target, &success);
13961
13962 if (success)
13963 return ret;
13964 }
13965 if (TARGET_SPE)
13966 {
13967 ret = spe_expand_builtin (exp, target, &success);
13968
13969 if (success)
13970 return ret;
13971 }
13972 if (TARGET_PAIRED_FLOAT)
13973 {
13974 ret = paired_expand_builtin (exp, target, &success);
13975
13976 if (success)
13977 return ret;
13978 }
13979 if (TARGET_HTM)
13980 {
13981 ret = htm_expand_builtin (exp, target, &success);
13982
13983 if (success)
13984 return ret;
13985 }
13986
13987 unsigned attr = rs6000_builtin_info[uns_fcode].attr & RS6000_BTC_TYPE_MASK;
13988 gcc_assert (attr == RS6000_BTC_UNARY
13989 || attr == RS6000_BTC_BINARY
13990 || attr == RS6000_BTC_TERNARY);
13991
13992 /* Handle simple unary operations. */
13993 d = bdesc_1arg;
13994 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
13995 if (d->code == fcode)
13996 return rs6000_expand_unop_builtin (d->icode, exp, target);
13997
13998 /* Handle simple binary operations. */
13999 d = bdesc_2arg;
14000 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
14001 if (d->code == fcode)
14002 return rs6000_expand_binop_builtin (d->icode, exp, target);
14003
14004 /* Handle simple ternary operations. */
14005 d = bdesc_3arg;
14006 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
14007 if (d->code == fcode)
14008 return rs6000_expand_ternop_builtin (d->icode, exp, target);
14009
14010 gcc_unreachable ();
14011 }
14012
14013 static void
14014 rs6000_init_builtins (void)
14015 {
14016 tree tdecl;
14017 tree ftype;
14018 enum machine_mode mode;
14019
14020 if (TARGET_DEBUG_BUILTIN)
14021 fprintf (stderr, "rs6000_init_builtins%s%s%s%s\n",
14022 (TARGET_PAIRED_FLOAT) ? ", paired" : "",
14023 (TARGET_SPE) ? ", spe" : "",
14024 (TARGET_ALTIVEC) ? ", altivec" : "",
14025 (TARGET_VSX) ? ", vsx" : "");
14026
14027 V2SI_type_node = build_vector_type (intSI_type_node, 2);
14028 V2SF_type_node = build_vector_type (float_type_node, 2);
14029 V2DI_type_node = build_vector_type (intDI_type_node, 2);
14030 V2DF_type_node = build_vector_type (double_type_node, 2);
14031 V4HI_type_node = build_vector_type (intHI_type_node, 4);
14032 V4SI_type_node = build_vector_type (intSI_type_node, 4);
14033 V4SF_type_node = build_vector_type (float_type_node, 4);
14034 V8HI_type_node = build_vector_type (intHI_type_node, 8);
14035 V16QI_type_node = build_vector_type (intQI_type_node, 16);
14036
14037 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
14038 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
14039 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
14040 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
14041
14042 opaque_V2SF_type_node = build_opaque_vector_type (float_type_node, 2);
14043 opaque_V2SI_type_node = build_opaque_vector_type (intSI_type_node, 2);
14044 opaque_p_V2SI_type_node = build_pointer_type (opaque_V2SI_type_node);
14045 opaque_V4SI_type_node = build_opaque_vector_type (intSI_type_node, 4);
14046
14047 /* We use V1TI mode as a special container to hold __int128_t items that
14048 must live in VSX registers. */
14049 if (intTI_type_node)
14050 {
14051 V1TI_type_node = build_vector_type (intTI_type_node, 1);
14052 unsigned_V1TI_type_node = build_vector_type (unsigned_intTI_type_node, 1);
14053 }
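  /* Illustrative only: with VSX enabled this lets a declaration such as

       __vector __int128 v = { ((__int128) 1) << 100 };

     keep its single 128-bit element in a VSX register.  */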
14054
14055 /* The 'vector bool ...' types must be kept distinct from 'vector unsigned ...'
14056 types, especially in C++ land. Similarly, 'vector pixel' is distinct from
14057 'vector unsigned short'. */
14058
14059 bool_char_type_node = build_distinct_type_copy (unsigned_intQI_type_node);
14060 bool_short_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
14061 bool_int_type_node = build_distinct_type_copy (unsigned_intSI_type_node);
14062 bool_long_type_node = build_distinct_type_copy (unsigned_intDI_type_node);
14063 pixel_type_node = build_distinct_type_copy (unsigned_intHI_type_node);
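  /* E.g. in C++ the distinct copies make these two declarations separate
     overloads rather than a redeclaration of the same function
     (illustrative):

       void f (__vector __bool char);
       void f (__vector unsigned char);  */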
14064
14065 long_integer_type_internal_node = long_integer_type_node;
14066 long_unsigned_type_internal_node = long_unsigned_type_node;
14067 long_long_integer_type_internal_node = long_long_integer_type_node;
14068 long_long_unsigned_type_internal_node = long_long_unsigned_type_node;
14069 intQI_type_internal_node = intQI_type_node;
14070 uintQI_type_internal_node = unsigned_intQI_type_node;
14071 intHI_type_internal_node = intHI_type_node;
14072 uintHI_type_internal_node = unsigned_intHI_type_node;
14073 intSI_type_internal_node = intSI_type_node;
14074 uintSI_type_internal_node = unsigned_intSI_type_node;
14075 intDI_type_internal_node = intDI_type_node;
14076 uintDI_type_internal_node = unsigned_intDI_type_node;
14077 intTI_type_internal_node = intTI_type_node;
14078 uintTI_type_internal_node = unsigned_intTI_type_node;
14079 float_type_internal_node = float_type_node;
14080 double_type_internal_node = double_type_node;
14081 long_double_type_internal_node = long_double_type_node;
14082 dfloat64_type_internal_node = dfloat64_type_node;
14083 dfloat128_type_internal_node = dfloat128_type_node;
14084 void_type_internal_node = void_type_node;
14085
14086 /* Initialize the modes for builtin_function_type, mapping a machine mode to
14087 tree type node. */
14088 builtin_mode_to_type[QImode][0] = integer_type_node;
14089 builtin_mode_to_type[HImode][0] = integer_type_node;
14090 builtin_mode_to_type[SImode][0] = intSI_type_node;
14091 builtin_mode_to_type[SImode][1] = unsigned_intSI_type_node;
14092 builtin_mode_to_type[DImode][0] = intDI_type_node;
14093 builtin_mode_to_type[DImode][1] = unsigned_intDI_type_node;
14094 builtin_mode_to_type[TImode][0] = intTI_type_node;
14095 builtin_mode_to_type[TImode][1] = unsigned_intTI_type_node;
14096 builtin_mode_to_type[SFmode][0] = float_type_node;
14097 builtin_mode_to_type[DFmode][0] = double_type_node;
14098 builtin_mode_to_type[TFmode][0] = long_double_type_node;
14099 builtin_mode_to_type[DDmode][0] = dfloat64_type_node;
14100 builtin_mode_to_type[TDmode][0] = dfloat128_type_node;
14101 builtin_mode_to_type[V1TImode][0] = V1TI_type_node;
14102 builtin_mode_to_type[V1TImode][1] = unsigned_V1TI_type_node;
14103 builtin_mode_to_type[V2SImode][0] = V2SI_type_node;
14104 builtin_mode_to_type[V2SFmode][0] = V2SF_type_node;
14105 builtin_mode_to_type[V2DImode][0] = V2DI_type_node;
14106 builtin_mode_to_type[V2DImode][1] = unsigned_V2DI_type_node;
14107 builtin_mode_to_type[V2DFmode][0] = V2DF_type_node;
14108 builtin_mode_to_type[V4HImode][0] = V4HI_type_node;
14109 builtin_mode_to_type[V4SImode][0] = V4SI_type_node;
14110 builtin_mode_to_type[V4SImode][1] = unsigned_V4SI_type_node;
14111 builtin_mode_to_type[V4SFmode][0] = V4SF_type_node;
14112 builtin_mode_to_type[V8HImode][0] = V8HI_type_node;
14113 builtin_mode_to_type[V8HImode][1] = unsigned_V8HI_type_node;
14114 builtin_mode_to_type[V16QImode][0] = V16QI_type_node;
14115 builtin_mode_to_type[V16QImode][1] = unsigned_V16QI_type_node;
14116
14117 tdecl = add_builtin_type ("__bool char", bool_char_type_node);
14118 TYPE_NAME (bool_char_type_node) = tdecl;
14119
14120 tdecl = add_builtin_type ("__bool short", bool_short_type_node);
14121 TYPE_NAME (bool_short_type_node) = tdecl;
14122
14123 tdecl = add_builtin_type ("__bool int", bool_int_type_node);
14124 TYPE_NAME (bool_int_type_node) = tdecl;
14125
14126 tdecl = add_builtin_type ("__pixel", pixel_type_node);
14127 TYPE_NAME (pixel_type_node) = tdecl;
14128
14129 bool_V16QI_type_node = build_vector_type (bool_char_type_node, 16);
14130 bool_V8HI_type_node = build_vector_type (bool_short_type_node, 8);
14131 bool_V4SI_type_node = build_vector_type (bool_int_type_node, 4);
14132 bool_V2DI_type_node = build_vector_type (bool_long_type_node, 2);
14133 pixel_V8HI_type_node = build_vector_type (pixel_type_node, 8);
14134
14135 tdecl = add_builtin_type ("__vector unsigned char", unsigned_V16QI_type_node);
14136 TYPE_NAME (unsigned_V16QI_type_node) = tdecl;
14137
14138 tdecl = add_builtin_type ("__vector signed char", V16QI_type_node);
14139 TYPE_NAME (V16QI_type_node) = tdecl;
14140
14141 tdecl = add_builtin_type ("__vector __bool char", bool_V16QI_type_node);
14142 	  TYPE_NAME (bool_V16QI_type_node) = tdecl;
14143
14144 tdecl = add_builtin_type ("__vector unsigned short", unsigned_V8HI_type_node);
14145 TYPE_NAME (unsigned_V8HI_type_node) = tdecl;
14146
14147 tdecl = add_builtin_type ("__vector signed short", V8HI_type_node);
14148 TYPE_NAME (V8HI_type_node) = tdecl;
14149
14150 tdecl = add_builtin_type ("__vector __bool short", bool_V8HI_type_node);
14151 TYPE_NAME (bool_V8HI_type_node) = tdecl;
14152
14153 tdecl = add_builtin_type ("__vector unsigned int", unsigned_V4SI_type_node);
14154 TYPE_NAME (unsigned_V4SI_type_node) = tdecl;
14155
14156 tdecl = add_builtin_type ("__vector signed int", V4SI_type_node);
14157 TYPE_NAME (V4SI_type_node) = tdecl;
14158
14159 tdecl = add_builtin_type ("__vector __bool int", bool_V4SI_type_node);
14160 TYPE_NAME (bool_V4SI_type_node) = tdecl;
14161
14162 tdecl = add_builtin_type ("__vector float", V4SF_type_node);
14163 TYPE_NAME (V4SF_type_node) = tdecl;
14164
14165 tdecl = add_builtin_type ("__vector __pixel", pixel_V8HI_type_node);
14166 TYPE_NAME (pixel_V8HI_type_node) = tdecl;
14167
14168 tdecl = add_builtin_type ("__vector double", V2DF_type_node);
14169 TYPE_NAME (V2DF_type_node) = tdecl;
14170
14171 if (TARGET_POWERPC64)
14172 {
14173 tdecl = add_builtin_type ("__vector long", V2DI_type_node);
14174 TYPE_NAME (V2DI_type_node) = tdecl;
14175
14176 tdecl = add_builtin_type ("__vector unsigned long",
14177 unsigned_V2DI_type_node);
14178 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
14179
14180 tdecl = add_builtin_type ("__vector __bool long", bool_V2DI_type_node);
14181 TYPE_NAME (bool_V2DI_type_node) = tdecl;
14182 }
14183 else
14184 {
14185 tdecl = add_builtin_type ("__vector long long", V2DI_type_node);
14186 TYPE_NAME (V2DI_type_node) = tdecl;
14187
14188 tdecl = add_builtin_type ("__vector unsigned long long",
14189 unsigned_V2DI_type_node);
14190 TYPE_NAME (unsigned_V2DI_type_node) = tdecl;
14191
14192 tdecl = add_builtin_type ("__vector __bool long long",
14193 bool_V2DI_type_node);
14194 TYPE_NAME (bool_V2DI_type_node) = tdecl;
14195 }
14196
14197 if (V1TI_type_node)
14198 {
14199 tdecl = add_builtin_type ("__vector __int128", V1TI_type_node);
14200 TYPE_NAME (V1TI_type_node) = tdecl;
14201
14202 tdecl = add_builtin_type ("__vector unsigned __int128",
14203 unsigned_V1TI_type_node);
14204 TYPE_NAME (unsigned_V1TI_type_node) = tdecl;
14205 }
14206
14207 	  /* Paired and SPE builtins are only available if the compiler was built
14208 	     with the corresponding options, so only create those builtins when the
14209 	     matching compiler option is enabled.  Create AltiVec and VSX builtins on
14210 	     machines with at least the general purpose extensions (970 and newer) to
14211 	     allow the use of the target attribute.  */
14212 if (TARGET_PAIRED_FLOAT)
14213 paired_init_builtins ();
14214 if (TARGET_SPE)
14215 spe_init_builtins ();
14216 if (TARGET_EXTRA_BUILTINS)
14217 altivec_init_builtins ();
14218 if (TARGET_HTM)
14219 htm_init_builtins ();
14220
14221 if (TARGET_EXTRA_BUILTINS || TARGET_SPE || TARGET_PAIRED_FLOAT)
14222 rs6000_common_init_builtins ();
14223
14224 ftype = builtin_function_type (DFmode, DFmode, DFmode, VOIDmode,
14225 RS6000_BUILTIN_RECIP, "__builtin_recipdiv");
14226 def_builtin ("__builtin_recipdiv", ftype, RS6000_BUILTIN_RECIP);
14227
14228 ftype = builtin_function_type (SFmode, SFmode, SFmode, VOIDmode,
14229 RS6000_BUILTIN_RECIPF, "__builtin_recipdivf");
14230 def_builtin ("__builtin_recipdivf", ftype, RS6000_BUILTIN_RECIPF);
14231
14232 ftype = builtin_function_type (DFmode, DFmode, VOIDmode, VOIDmode,
14233 RS6000_BUILTIN_RSQRT, "__builtin_rsqrt");
14234 def_builtin ("__builtin_rsqrt", ftype, RS6000_BUILTIN_RSQRT);
14235
14236 ftype = builtin_function_type (SFmode, SFmode, VOIDmode, VOIDmode,
14237 RS6000_BUILTIN_RSQRTF, "__builtin_rsqrtf");
14238 def_builtin ("__builtin_rsqrtf", ftype, RS6000_BUILTIN_RSQRTF);
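  /* Illustrative uses of the estimate builtins defined above (a sketch;
     x and y are ordinary doubles):

       double q = __builtin_recipdiv (x, y);   approximates x / y
       double r = __builtin_rsqrt (x);         approximates 1 / sqrt (x)  */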
14239
14240 mode = (TARGET_64BIT) ? DImode : SImode;
14241 ftype = builtin_function_type (mode, mode, mode, VOIDmode,
14242 POWER7_BUILTIN_BPERMD, "__builtin_bpermd");
14243 def_builtin ("__builtin_bpermd", ftype, POWER7_BUILTIN_BPERMD);
14244
14245 ftype = build_function_type_list (unsigned_intDI_type_node,
14246 NULL_TREE);
14247 def_builtin ("__builtin_ppc_get_timebase", ftype, RS6000_BUILTIN_GET_TB);
14248
14249 if (TARGET_64BIT)
14250 ftype = build_function_type_list (unsigned_intDI_type_node,
14251 NULL_TREE);
14252 else
14253 ftype = build_function_type_list (unsigned_intSI_type_node,
14254 NULL_TREE);
14255 def_builtin ("__builtin_ppc_mftb", ftype, RS6000_BUILTIN_MFTB);
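  /* Illustrative use (sketch):

       unsigned long long tb = __builtin_ppc_get_timebase ();

     On 32-bit targets __builtin_ppc_mftb instead returns only the low
     word of the time base.  */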
14256
14257 ftype = build_function_type_list (double_type_node, NULL_TREE);
14258 def_builtin ("__builtin_mffs", ftype, RS6000_BUILTIN_MFFS);
14259
14260 ftype = build_function_type_list (void_type_node,
14261 intSI_type_node, double_type_node,
14262 NULL_TREE);
14263 def_builtin ("__builtin_mtfsf", ftype, RS6000_BUILTIN_MTFSF);
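  /* Illustrative FPSCR round trip using the two builtins above (sketch):

       double fpscr = __builtin_mffs ();       read the FPSCR
       __builtin_mtfsf (0xff, fpscr);          write all eight fields back  */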
14264
14265 #if TARGET_XCOFF
14266 /* AIX libm provides clog as __clog. */
14267 if ((tdecl = builtin_decl_explicit (BUILT_IN_CLOG)) != NULL_TREE)
14268 set_user_assembler_name (tdecl, "__clog");
14269 #endif
14270
14271 #ifdef SUBTARGET_INIT_BUILTINS
14272 SUBTARGET_INIT_BUILTINS;
14273 #endif
14274 }
14275
14276 /* Returns the rs6000 builtin decl for CODE. */
14277
14278 static tree
14279 rs6000_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
14280 {
14281 HOST_WIDE_INT fnmask;
14282
14283 if (code >= RS6000_BUILTIN_COUNT)
14284 return error_mark_node;
14285
14286 fnmask = rs6000_builtin_info[code].mask;
14287 if ((fnmask & rs6000_builtin_mask) != fnmask)
14288 {
14289 rs6000_invalid_builtin ((enum rs6000_builtins)code);
14290 return error_mark_node;
14291 }
14292
14293 return rs6000_builtin_decls[code];
14294 }
14295
14296 static void
14297 spe_init_builtins (void)
14298 {
14299 tree puint_type_node = build_pointer_type (unsigned_type_node);
14300 tree pushort_type_node = build_pointer_type (short_unsigned_type_node);
14301 const struct builtin_description *d;
14302 size_t i;
14303
14304 tree v2si_ftype_4_v2si
14305 = build_function_type_list (opaque_V2SI_type_node,
14306 opaque_V2SI_type_node,
14307 opaque_V2SI_type_node,
14308 opaque_V2SI_type_node,
14309 opaque_V2SI_type_node,
14310 NULL_TREE);
14311
14312 tree v2sf_ftype_4_v2sf
14313 = build_function_type_list (opaque_V2SF_type_node,
14314 opaque_V2SF_type_node,
14315 opaque_V2SF_type_node,
14316 opaque_V2SF_type_node,
14317 opaque_V2SF_type_node,
14318 NULL_TREE);
14319
14320 tree int_ftype_int_v2si_v2si
14321 = build_function_type_list (integer_type_node,
14322 integer_type_node,
14323 opaque_V2SI_type_node,
14324 opaque_V2SI_type_node,
14325 NULL_TREE);
14326
14327 tree int_ftype_int_v2sf_v2sf
14328 = build_function_type_list (integer_type_node,
14329 integer_type_node,
14330 opaque_V2SF_type_node,
14331 opaque_V2SF_type_node,
14332 NULL_TREE);
14333
14334 tree void_ftype_v2si_puint_int
14335 = build_function_type_list (void_type_node,
14336 opaque_V2SI_type_node,
14337 puint_type_node,
14338 integer_type_node,
14339 NULL_TREE);
14340
14341 tree void_ftype_v2si_puint_char
14342 = build_function_type_list (void_type_node,
14343 opaque_V2SI_type_node,
14344 puint_type_node,
14345 char_type_node,
14346 NULL_TREE);
14347
14348 tree void_ftype_v2si_pv2si_int
14349 = build_function_type_list (void_type_node,
14350 opaque_V2SI_type_node,
14351 opaque_p_V2SI_type_node,
14352 integer_type_node,
14353 NULL_TREE);
14354
14355 tree void_ftype_v2si_pv2si_char
14356 = build_function_type_list (void_type_node,
14357 opaque_V2SI_type_node,
14358 opaque_p_V2SI_type_node,
14359 char_type_node,
14360 NULL_TREE);
14361
14362 tree void_ftype_int
14363 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
14364
14365 tree int_ftype_void
14366 = build_function_type_list (integer_type_node, NULL_TREE);
14367
14368 tree v2si_ftype_pv2si_int
14369 = build_function_type_list (opaque_V2SI_type_node,
14370 opaque_p_V2SI_type_node,
14371 integer_type_node,
14372 NULL_TREE);
14373
14374 tree v2si_ftype_puint_int
14375 = build_function_type_list (opaque_V2SI_type_node,
14376 puint_type_node,
14377 integer_type_node,
14378 NULL_TREE);
14379
14380 tree v2si_ftype_pushort_int
14381 = build_function_type_list (opaque_V2SI_type_node,
14382 pushort_type_node,
14383 integer_type_node,
14384 NULL_TREE);
14385
14386 tree v2si_ftype_signed_char
14387 = build_function_type_list (opaque_V2SI_type_node,
14388 signed_char_type_node,
14389 NULL_TREE);
14390
14391 add_builtin_type ("__ev64_opaque__", opaque_V2SI_type_node);
14392
14393 /* Initialize irregular SPE builtins. */
14394
14395 def_builtin ("__builtin_spe_mtspefscr", void_ftype_int, SPE_BUILTIN_MTSPEFSCR);
14396 def_builtin ("__builtin_spe_mfspefscr", int_ftype_void, SPE_BUILTIN_MFSPEFSCR);
14397 def_builtin ("__builtin_spe_evstddx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDDX);
14398 def_builtin ("__builtin_spe_evstdhx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDHX);
14399 def_builtin ("__builtin_spe_evstdwx", void_ftype_v2si_pv2si_int, SPE_BUILTIN_EVSTDWX);
14400 def_builtin ("__builtin_spe_evstwhex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHEX);
14401 def_builtin ("__builtin_spe_evstwhox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWHOX);
14402 def_builtin ("__builtin_spe_evstwwex", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWEX);
14403 def_builtin ("__builtin_spe_evstwwox", void_ftype_v2si_puint_int, SPE_BUILTIN_EVSTWWOX);
14404 def_builtin ("__builtin_spe_evstdd", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDD);
14405 def_builtin ("__builtin_spe_evstdh", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDH);
14406 def_builtin ("__builtin_spe_evstdw", void_ftype_v2si_pv2si_char, SPE_BUILTIN_EVSTDW);
14407 def_builtin ("__builtin_spe_evstwhe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHE);
14408 def_builtin ("__builtin_spe_evstwho", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWHO);
14409 def_builtin ("__builtin_spe_evstwwe", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWE);
14410 def_builtin ("__builtin_spe_evstwwo", void_ftype_v2si_puint_char, SPE_BUILTIN_EVSTWWO);
14411 def_builtin ("__builtin_spe_evsplatfi", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATFI);
14412 def_builtin ("__builtin_spe_evsplati", v2si_ftype_signed_char, SPE_BUILTIN_EVSPLATI);
14413
14414 /* Loads. */
14415 def_builtin ("__builtin_spe_evlddx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDDX);
14416 def_builtin ("__builtin_spe_evldwx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDWX);
14417 def_builtin ("__builtin_spe_evldhx", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDHX);
14418 def_builtin ("__builtin_spe_evlwhex", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHEX);
14419 def_builtin ("__builtin_spe_evlwhoux", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOUX);
14420 def_builtin ("__builtin_spe_evlwhosx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOSX);
14421 def_builtin ("__builtin_spe_evlwwsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLATX);
14422 def_builtin ("__builtin_spe_evlwhsplatx", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLATX);
14423 def_builtin ("__builtin_spe_evlhhesplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLATX);
14424 def_builtin ("__builtin_spe_evlhhousplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLATX);
14425 def_builtin ("__builtin_spe_evlhhossplatx", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLATX);
14426 def_builtin ("__builtin_spe_evldd", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDD);
14427 def_builtin ("__builtin_spe_evldw", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDW);
14428 def_builtin ("__builtin_spe_evldh", v2si_ftype_pv2si_int, SPE_BUILTIN_EVLDH);
14429 def_builtin ("__builtin_spe_evlhhesplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHESPLAT);
14430 def_builtin ("__builtin_spe_evlhhossplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOSSPLAT);
14431 def_builtin ("__builtin_spe_evlhhousplat", v2si_ftype_pushort_int, SPE_BUILTIN_EVLHHOUSPLAT);
14432 def_builtin ("__builtin_spe_evlwhe", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHE);
14433 def_builtin ("__builtin_spe_evlwhos", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOS);
14434 def_builtin ("__builtin_spe_evlwhou", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHOU);
14435 def_builtin ("__builtin_spe_evlwhsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWHSPLAT);
14436 def_builtin ("__builtin_spe_evlwwsplat", v2si_ftype_puint_int, SPE_BUILTIN_EVLWWSPLAT);
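  /* Illustrative use of one of the loads above (sketch; p is assumed to
     point to a suitably aligned doubleword):

       __ev64_opaque__ v = __builtin_spe_evldd (p, 0);  */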
14437
14438 /* Predicates. */
14439 d = bdesc_spe_predicates;
14440 for (i = 0; i < ARRAY_SIZE (bdesc_spe_predicates); ++i, d++)
14441 {
14442 tree type;
14443
14444 switch (insn_data[d->icode].operand[1].mode)
14445 {
14446 case V2SImode:
14447 type = int_ftype_int_v2si_v2si;
14448 break;
14449 case V2SFmode:
14450 type = int_ftype_int_v2sf_v2sf;
14451 break;
14452 default:
14453 gcc_unreachable ();
14454 }
14455
14456 def_builtin (d->name, type, d->code);
14457 }
14458
14459 /* Evsel predicates. */
14460 d = bdesc_spe_evsel;
14461 for (i = 0; i < ARRAY_SIZE (bdesc_spe_evsel); ++i, d++)
14462 {
14463 tree type;
14464
14465 switch (insn_data[d->icode].operand[1].mode)
14466 {
14467 case V2SImode:
14468 type = v2si_ftype_4_v2si;
14469 break;
14470 case V2SFmode:
14471 type = v2sf_ftype_4_v2sf;
14472 break;
14473 default:
14474 gcc_unreachable ();
14475 }
14476
14477 def_builtin (d->name, type, d->code);
14478 }
14479 }
14480
14481 static void
14482 paired_init_builtins (void)
14483 {
14484 const struct builtin_description *d;
14485 size_t i;
14486
14487 tree int_ftype_int_v2sf_v2sf
14488 = build_function_type_list (integer_type_node,
14489 integer_type_node,
14490 V2SF_type_node,
14491 V2SF_type_node,
14492 NULL_TREE);
14493 	  tree pcfloat_type_node
14494 	    = build_pointer_type (build_qualified_type
14495 	                          (float_type_node, TYPE_QUAL_CONST));
14496
14497 tree v2sf_ftype_long_pcfloat = build_function_type_list (V2SF_type_node,
14498 long_integer_type_node,
14499 pcfloat_type_node,
14500 NULL_TREE);
14501 	  tree void_ftype_v2sf_long_pcfloat
14502 	    = build_function_type_list (void_type_node,
14503 V2SF_type_node,
14504 long_integer_type_node,
14505 pcfloat_type_node,
14506 NULL_TREE);
14507
14508
14509 def_builtin ("__builtin_paired_lx", v2sf_ftype_long_pcfloat,
14510 PAIRED_BUILTIN_LX);
14511
14512
14513 def_builtin ("__builtin_paired_stx", void_ftype_v2sf_long_pcfloat,
14514 PAIRED_BUILTIN_STX);
14515
14516 /* Predicates. */
14517 d = bdesc_paired_preds;
14518 for (i = 0; i < ARRAY_SIZE (bdesc_paired_preds); ++i, d++)
14519 {
14520 tree type;
14521
14522 if (TARGET_DEBUG_BUILTIN)
14523 fprintf (stderr, "paired pred #%d, insn = %s [%d], mode = %s\n",
14524 (int)i, get_insn_name (d->icode), (int)d->icode,
14525 GET_MODE_NAME (insn_data[d->icode].operand[1].mode));
14526
14527 switch (insn_data[d->icode].operand[1].mode)
14528 {
14529 case V2SFmode:
14530 type = int_ftype_int_v2sf_v2sf;
14531 break;
14532 default:
14533 gcc_unreachable ();
14534 }
14535
14536 def_builtin (d->name, type, d->code);
14537 }
14538 }
14539
14540 static void
14541 altivec_init_builtins (void)
14542 {
14543 const struct builtin_description *d;
14544 size_t i;
14545 tree ftype;
14546 tree decl;
14547
14548 tree pvoid_type_node = build_pointer_type (void_type_node);
14549
14550 tree pcvoid_type_node
14551 = build_pointer_type (build_qualified_type (void_type_node,
14552 TYPE_QUAL_CONST));
14553
14554 tree int_ftype_opaque
14555 = build_function_type_list (integer_type_node,
14556 opaque_V4SI_type_node, NULL_TREE);
14557 tree opaque_ftype_opaque
14558 = build_function_type_list (integer_type_node, NULL_TREE);
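  /* Note: the signature above is only a placeholder; vec_splats and
     vec_promote, which are given this type below, are resolved entirely
     by the C/C++ frontends before expansion ever sees them.  */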
14559 tree opaque_ftype_opaque_int
14560 = build_function_type_list (opaque_V4SI_type_node,
14561 opaque_V4SI_type_node, integer_type_node, NULL_TREE);
14562 tree opaque_ftype_opaque_opaque_int
14563 = build_function_type_list (opaque_V4SI_type_node,
14564 opaque_V4SI_type_node, opaque_V4SI_type_node,
14565 integer_type_node, NULL_TREE);
14566 tree int_ftype_int_opaque_opaque
14567 = build_function_type_list (integer_type_node,
14568 integer_type_node, opaque_V4SI_type_node,
14569 opaque_V4SI_type_node, NULL_TREE);
14570 tree int_ftype_int_v4si_v4si
14571 = build_function_type_list (integer_type_node,
14572 integer_type_node, V4SI_type_node,
14573 V4SI_type_node, NULL_TREE);
14574 tree int_ftype_int_v2di_v2di
14575 = build_function_type_list (integer_type_node,
14576 integer_type_node, V2DI_type_node,
14577 V2DI_type_node, NULL_TREE);
14578 tree void_ftype_v4si
14579 = build_function_type_list (void_type_node, V4SI_type_node, NULL_TREE);
14580 tree v8hi_ftype_void
14581 = build_function_type_list (V8HI_type_node, NULL_TREE);
14582 tree void_ftype_void
14583 = build_function_type_list (void_type_node, NULL_TREE);
14584 tree void_ftype_int
14585 = build_function_type_list (void_type_node, integer_type_node, NULL_TREE);
14586
14587 tree opaque_ftype_long_pcvoid
14588 = build_function_type_list (opaque_V4SI_type_node,
14589 long_integer_type_node, pcvoid_type_node,
14590 NULL_TREE);
14591 tree v16qi_ftype_long_pcvoid
14592 = build_function_type_list (V16QI_type_node,
14593 long_integer_type_node, pcvoid_type_node,
14594 NULL_TREE);
14595 tree v8hi_ftype_long_pcvoid
14596 = build_function_type_list (V8HI_type_node,
14597 long_integer_type_node, pcvoid_type_node,
14598 NULL_TREE);
14599 tree v4si_ftype_long_pcvoid
14600 = build_function_type_list (V4SI_type_node,
14601 long_integer_type_node, pcvoid_type_node,
14602 NULL_TREE);
14603 tree v4sf_ftype_long_pcvoid
14604 = build_function_type_list (V4SF_type_node,
14605 long_integer_type_node, pcvoid_type_node,
14606 NULL_TREE);
14607 tree v2df_ftype_long_pcvoid
14608 = build_function_type_list (V2DF_type_node,
14609 long_integer_type_node, pcvoid_type_node,
14610 NULL_TREE);
14611 tree v2di_ftype_long_pcvoid
14612 = build_function_type_list (V2DI_type_node,
14613 long_integer_type_node, pcvoid_type_node,
14614 NULL_TREE);
14615
14616 tree void_ftype_opaque_long_pvoid
14617 = build_function_type_list (void_type_node,
14618 opaque_V4SI_type_node, long_integer_type_node,
14619 pvoid_type_node, NULL_TREE);
14620 tree void_ftype_v4si_long_pvoid
14621 = build_function_type_list (void_type_node,
14622 V4SI_type_node, long_integer_type_node,
14623 pvoid_type_node, NULL_TREE);
14624 tree void_ftype_v16qi_long_pvoid
14625 = build_function_type_list (void_type_node,
14626 V16QI_type_node, long_integer_type_node,
14627 pvoid_type_node, NULL_TREE);
14628 tree void_ftype_v8hi_long_pvoid
14629 = build_function_type_list (void_type_node,
14630 V8HI_type_node, long_integer_type_node,
14631 pvoid_type_node, NULL_TREE);
14632 tree void_ftype_v4sf_long_pvoid
14633 = build_function_type_list (void_type_node,
14634 V4SF_type_node, long_integer_type_node,
14635 pvoid_type_node, NULL_TREE);
14636 tree void_ftype_v2df_long_pvoid
14637 = build_function_type_list (void_type_node,
14638 V2DF_type_node, long_integer_type_node,
14639 pvoid_type_node, NULL_TREE);
14640 tree void_ftype_v2di_long_pvoid
14641 = build_function_type_list (void_type_node,
14642 V2DI_type_node, long_integer_type_node,
14643 pvoid_type_node, NULL_TREE);
14644 tree int_ftype_int_v8hi_v8hi
14645 = build_function_type_list (integer_type_node,
14646 integer_type_node, V8HI_type_node,
14647 V8HI_type_node, NULL_TREE);
14648 tree int_ftype_int_v16qi_v16qi
14649 = build_function_type_list (integer_type_node,
14650 integer_type_node, V16QI_type_node,
14651 V16QI_type_node, NULL_TREE);
14652 tree int_ftype_int_v4sf_v4sf
14653 = build_function_type_list (integer_type_node,
14654 integer_type_node, V4SF_type_node,
14655 V4SF_type_node, NULL_TREE);
14656 tree int_ftype_int_v2df_v2df
14657 = build_function_type_list (integer_type_node,
14658 integer_type_node, V2DF_type_node,
14659 V2DF_type_node, NULL_TREE);
14660 tree v2di_ftype_v2di
14661 = build_function_type_list (V2DI_type_node, V2DI_type_node, NULL_TREE);
14662 tree v4si_ftype_v4si
14663 = build_function_type_list (V4SI_type_node, V4SI_type_node, NULL_TREE);
14664 tree v8hi_ftype_v8hi
14665 = build_function_type_list (V8HI_type_node, V8HI_type_node, NULL_TREE);
14666 tree v16qi_ftype_v16qi
14667 = build_function_type_list (V16QI_type_node, V16QI_type_node, NULL_TREE);
14668 tree v4sf_ftype_v4sf
14669 = build_function_type_list (V4SF_type_node, V4SF_type_node, NULL_TREE);
14670 tree v2df_ftype_v2df
14671 = build_function_type_list (V2DF_type_node, V2DF_type_node, NULL_TREE);
14672 tree void_ftype_pcvoid_int_int
14673 = build_function_type_list (void_type_node,
14674 pcvoid_type_node, integer_type_node,
14675 integer_type_node, NULL_TREE);
14676
14677 def_builtin ("__builtin_altivec_mtvscr", void_ftype_v4si, ALTIVEC_BUILTIN_MTVSCR);
14678 def_builtin ("__builtin_altivec_mfvscr", v8hi_ftype_void, ALTIVEC_BUILTIN_MFVSCR);
14679 def_builtin ("__builtin_altivec_dssall", void_ftype_void, ALTIVEC_BUILTIN_DSSALL);
14680 def_builtin ("__builtin_altivec_dss", void_ftype_int, ALTIVEC_BUILTIN_DSS);
14681 def_builtin ("__builtin_altivec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSL);
14682 def_builtin ("__builtin_altivec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVSR);
14683 def_builtin ("__builtin_altivec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEBX);
14684 def_builtin ("__builtin_altivec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEHX);
14685 def_builtin ("__builtin_altivec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVEWX);
14686 def_builtin ("__builtin_altivec_lvxl", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVXL);
14687 def_builtin ("__builtin_altivec_lvxl_v2df", v2df_ftype_long_pcvoid,
14688 ALTIVEC_BUILTIN_LVXL_V2DF);
14689 def_builtin ("__builtin_altivec_lvxl_v2di", v2di_ftype_long_pcvoid,
14690 ALTIVEC_BUILTIN_LVXL_V2DI);
14691 def_builtin ("__builtin_altivec_lvxl_v4sf", v4sf_ftype_long_pcvoid,
14692 ALTIVEC_BUILTIN_LVXL_V4SF);
14693 def_builtin ("__builtin_altivec_lvxl_v4si", v4si_ftype_long_pcvoid,
14694 ALTIVEC_BUILTIN_LVXL_V4SI);
14695 def_builtin ("__builtin_altivec_lvxl_v8hi", v8hi_ftype_long_pcvoid,
14696 ALTIVEC_BUILTIN_LVXL_V8HI);
14697 def_builtin ("__builtin_altivec_lvxl_v16qi", v16qi_ftype_long_pcvoid,
14698 ALTIVEC_BUILTIN_LVXL_V16QI);
14699 def_builtin ("__builtin_altivec_lvx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVX);
14700 def_builtin ("__builtin_altivec_lvx_v2df", v2df_ftype_long_pcvoid,
14701 ALTIVEC_BUILTIN_LVX_V2DF);
14702 def_builtin ("__builtin_altivec_lvx_v2di", v2di_ftype_long_pcvoid,
14703 ALTIVEC_BUILTIN_LVX_V2DI);
14704 def_builtin ("__builtin_altivec_lvx_v4sf", v4sf_ftype_long_pcvoid,
14705 ALTIVEC_BUILTIN_LVX_V4SF);
14706 def_builtin ("__builtin_altivec_lvx_v4si", v4si_ftype_long_pcvoid,
14707 ALTIVEC_BUILTIN_LVX_V4SI);
14708 def_builtin ("__builtin_altivec_lvx_v8hi", v8hi_ftype_long_pcvoid,
14709 ALTIVEC_BUILTIN_LVX_V8HI);
14710 def_builtin ("__builtin_altivec_lvx_v16qi", v16qi_ftype_long_pcvoid,
14711 ALTIVEC_BUILTIN_LVX_V16QI);
14712 def_builtin ("__builtin_altivec_stvx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVX);
14713 def_builtin ("__builtin_altivec_stvx_v2df", void_ftype_v2df_long_pvoid,
14714 ALTIVEC_BUILTIN_STVX_V2DF);
14715 def_builtin ("__builtin_altivec_stvx_v2di", void_ftype_v2di_long_pvoid,
14716 ALTIVEC_BUILTIN_STVX_V2DI);
14717 def_builtin ("__builtin_altivec_stvx_v4sf", void_ftype_v4sf_long_pvoid,
14718 ALTIVEC_BUILTIN_STVX_V4SF);
14719 def_builtin ("__builtin_altivec_stvx_v4si", void_ftype_v4si_long_pvoid,
14720 ALTIVEC_BUILTIN_STVX_V4SI);
14721 def_builtin ("__builtin_altivec_stvx_v8hi", void_ftype_v8hi_long_pvoid,
14722 ALTIVEC_BUILTIN_STVX_V8HI);
14723 def_builtin ("__builtin_altivec_stvx_v16qi", void_ftype_v16qi_long_pvoid,
14724 ALTIVEC_BUILTIN_STVX_V16QI);
14725 def_builtin ("__builtin_altivec_stvewx", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVEWX);
14726 def_builtin ("__builtin_altivec_stvxl", void_ftype_v4si_long_pvoid, ALTIVEC_BUILTIN_STVXL);
14727 def_builtin ("__builtin_altivec_stvxl_v2df", void_ftype_v2df_long_pvoid,
14728 ALTIVEC_BUILTIN_STVXL_V2DF);
14729 def_builtin ("__builtin_altivec_stvxl_v2di", void_ftype_v2di_long_pvoid,
14730 ALTIVEC_BUILTIN_STVXL_V2DI);
14731 def_builtin ("__builtin_altivec_stvxl_v4sf", void_ftype_v4sf_long_pvoid,
14732 ALTIVEC_BUILTIN_STVXL_V4SF);
14733 def_builtin ("__builtin_altivec_stvxl_v4si", void_ftype_v4si_long_pvoid,
14734 ALTIVEC_BUILTIN_STVXL_V4SI);
14735 def_builtin ("__builtin_altivec_stvxl_v8hi", void_ftype_v8hi_long_pvoid,
14736 ALTIVEC_BUILTIN_STVXL_V8HI);
14737 def_builtin ("__builtin_altivec_stvxl_v16qi", void_ftype_v16qi_long_pvoid,
14738 ALTIVEC_BUILTIN_STVXL_V16QI);
14739 def_builtin ("__builtin_altivec_stvebx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVEBX);
14740 def_builtin ("__builtin_altivec_stvehx", void_ftype_v8hi_long_pvoid, ALTIVEC_BUILTIN_STVEHX);
14741 def_builtin ("__builtin_vec_ld", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LD);
14742 def_builtin ("__builtin_vec_lde", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDE);
14743 def_builtin ("__builtin_vec_ldl", opaque_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LDL);
14744 def_builtin ("__builtin_vec_lvsl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSL);
14745 def_builtin ("__builtin_vec_lvsr", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVSR);
14746 def_builtin ("__builtin_vec_lvebx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEBX);
14747 def_builtin ("__builtin_vec_lvehx", v8hi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEHX);
14748 def_builtin ("__builtin_vec_lvewx", v4si_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVEWX);
14749 def_builtin ("__builtin_vec_st", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_ST);
14750 def_builtin ("__builtin_vec_ste", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STE);
14751 def_builtin ("__builtin_vec_stl", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STL);
14752 def_builtin ("__builtin_vec_stvewx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEWX);
14753 def_builtin ("__builtin_vec_stvebx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEBX);
14754 def_builtin ("__builtin_vec_stvehx", void_ftype_opaque_long_pvoid, ALTIVEC_BUILTIN_VEC_STVEHX);
14755
14756 def_builtin ("__builtin_vsx_lxvd2x_v2df", v2df_ftype_long_pcvoid,
14757 VSX_BUILTIN_LXVD2X_V2DF);
14758 def_builtin ("__builtin_vsx_lxvd2x_v2di", v2di_ftype_long_pcvoid,
14759 VSX_BUILTIN_LXVD2X_V2DI);
14760 def_builtin ("__builtin_vsx_lxvw4x_v4sf", v4sf_ftype_long_pcvoid,
14761 VSX_BUILTIN_LXVW4X_V4SF);
14762 def_builtin ("__builtin_vsx_lxvw4x_v4si", v4si_ftype_long_pcvoid,
14763 VSX_BUILTIN_LXVW4X_V4SI);
14764 def_builtin ("__builtin_vsx_lxvw4x_v8hi", v8hi_ftype_long_pcvoid,
14765 VSX_BUILTIN_LXVW4X_V8HI);
14766 def_builtin ("__builtin_vsx_lxvw4x_v16qi", v16qi_ftype_long_pcvoid,
14767 VSX_BUILTIN_LXVW4X_V16QI);
14768 def_builtin ("__builtin_vsx_stxvd2x_v2df", void_ftype_v2df_long_pvoid,
14769 VSX_BUILTIN_STXVD2X_V2DF);
14770 def_builtin ("__builtin_vsx_stxvd2x_v2di", void_ftype_v2di_long_pvoid,
14771 VSX_BUILTIN_STXVD2X_V2DI);
14772 def_builtin ("__builtin_vsx_stxvw4x_v4sf", void_ftype_v4sf_long_pvoid,
14773 VSX_BUILTIN_STXVW4X_V4SF);
14774 def_builtin ("__builtin_vsx_stxvw4x_v4si", void_ftype_v4si_long_pvoid,
14775 VSX_BUILTIN_STXVW4X_V4SI);
14776 def_builtin ("__builtin_vsx_stxvw4x_v8hi", void_ftype_v8hi_long_pvoid,
14777 VSX_BUILTIN_STXVW4X_V8HI);
14778 def_builtin ("__builtin_vsx_stxvw4x_v16qi", void_ftype_v16qi_long_pvoid,
14779 VSX_BUILTIN_STXVW4X_V16QI);
14780 def_builtin ("__builtin_vec_vsx_ld", opaque_ftype_long_pcvoid,
14781 VSX_BUILTIN_VEC_LD);
14782 def_builtin ("__builtin_vec_vsx_st", void_ftype_opaque_long_pvoid,
14783 VSX_BUILTIN_VEC_ST);
14784
14785 def_builtin ("__builtin_vec_step", int_ftype_opaque, ALTIVEC_BUILTIN_VEC_STEP);
14786 def_builtin ("__builtin_vec_splats", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_SPLATS);
14787 def_builtin ("__builtin_vec_promote", opaque_ftype_opaque, ALTIVEC_BUILTIN_VEC_PROMOTE);
14788
14789 def_builtin ("__builtin_vec_sld", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_SLD);
14790 def_builtin ("__builtin_vec_splat", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_SPLAT);
14791 def_builtin ("__builtin_vec_extract", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_EXTRACT);
14792 def_builtin ("__builtin_vec_insert", opaque_ftype_opaque_opaque_int, ALTIVEC_BUILTIN_VEC_INSERT);
14793 def_builtin ("__builtin_vec_vspltw", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTW);
14794 def_builtin ("__builtin_vec_vsplth", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTH);
14795 def_builtin ("__builtin_vec_vspltb", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VSPLTB);
14796 def_builtin ("__builtin_vec_ctf", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTF);
14797 def_builtin ("__builtin_vec_vcfsx", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFSX);
14798 def_builtin ("__builtin_vec_vcfux", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_VCFUX);
14799 def_builtin ("__builtin_vec_cts", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTS);
14800 def_builtin ("__builtin_vec_ctu", opaque_ftype_opaque_int, ALTIVEC_BUILTIN_VEC_CTU);
14801
14802 /* Cell builtins. */
14803 def_builtin ("__builtin_altivec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLX);
14804 def_builtin ("__builtin_altivec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVLXL);
14805 def_builtin ("__builtin_altivec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRX);
14806 def_builtin ("__builtin_altivec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_LVRXL);
14807
14808 def_builtin ("__builtin_vec_lvlx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLX);
14809 def_builtin ("__builtin_vec_lvlxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVLXL);
14810 def_builtin ("__builtin_vec_lvrx", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRX);
14811 def_builtin ("__builtin_vec_lvrxl", v16qi_ftype_long_pcvoid, ALTIVEC_BUILTIN_VEC_LVRXL);
14812
14813 def_builtin ("__builtin_altivec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLX);
14814 def_builtin ("__builtin_altivec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVLXL);
14815 def_builtin ("__builtin_altivec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRX);
14816 def_builtin ("__builtin_altivec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_STVRXL);
14817
14818 def_builtin ("__builtin_vec_stvlx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLX);
14819 def_builtin ("__builtin_vec_stvlxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVLXL);
14820 def_builtin ("__builtin_vec_stvrx", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRX);
14821 def_builtin ("__builtin_vec_stvrxl", void_ftype_v16qi_long_pvoid, ALTIVEC_BUILTIN_VEC_STVRXL);
14822
14823 /* Add the DST variants. */
14824 d = bdesc_dst;
14825 for (i = 0; i < ARRAY_SIZE (bdesc_dst); i++, d++)
14826 def_builtin (d->name, void_ftype_pcvoid_int_int, d->code);
14827
14828 /* Initialize the predicates. */
14829 d = bdesc_altivec_preds;
14830 for (i = 0; i < ARRAY_SIZE (bdesc_altivec_preds); i++, d++)
14831 {
14832 enum machine_mode mode1;
14833 tree type;
14834
14835 if (rs6000_overloaded_builtin_p (d->code))
14836 mode1 = VOIDmode;
14837 else
14838 mode1 = insn_data[d->icode].operand[1].mode;
14839
14840 switch (mode1)
14841 {
14842 case VOIDmode:
14843 type = int_ftype_int_opaque_opaque;
14844 break;
14845 case V2DImode:
14846 type = int_ftype_int_v2di_v2di;
14847 break;
14848 case V4SImode:
14849 type = int_ftype_int_v4si_v4si;
14850 break;
14851 case V8HImode:
14852 type = int_ftype_int_v8hi_v8hi;
14853 break;
14854 case V16QImode:
14855 type = int_ftype_int_v16qi_v16qi;
14856 break;
14857 case V4SFmode:
14858 type = int_ftype_int_v4sf_v4sf;
14859 break;
14860 case V2DFmode:
14861 type = int_ftype_int_v2df_v2df;
14862 break;
14863 default:
14864 gcc_unreachable ();
14865 }
14866
14867 def_builtin (d->name, type, d->code);
14868 }
14869
14870 /* Initialize the abs* operators. */
14871 d = bdesc_abs;
14872 for (i = 0; i < ARRAY_SIZE (bdesc_abs); i++, d++)
14873 {
14874 enum machine_mode mode0;
14875 tree type;
14876
14877 mode0 = insn_data[d->icode].operand[0].mode;
14878
14879 switch (mode0)
14880 {
14881 case V2DImode:
14882 type = v2di_ftype_v2di;
14883 break;
14884 case V4SImode:
14885 type = v4si_ftype_v4si;
14886 break;
14887 case V8HImode:
14888 type = v8hi_ftype_v8hi;
14889 break;
14890 case V16QImode:
14891 type = v16qi_ftype_v16qi;
14892 break;
14893 case V4SFmode:
14894 type = v4sf_ftype_v4sf;
14895 break;
14896 case V2DFmode:
14897 type = v2df_ftype_v2df;
14898 break;
14899 default:
14900 gcc_unreachable ();
14901 }
14902
14903 def_builtin (d->name, type, d->code);
14904 }
14905
14906 /* Initialize target builtin that implements
14907 targetm.vectorize.builtin_mask_for_load. */
14908
14909 decl = add_builtin_function ("__builtin_altivec_mask_for_load",
14910 v16qi_ftype_long_pcvoid,
14911 ALTIVEC_BUILTIN_MASK_FOR_LOAD,
14912 BUILT_IN_MD, NULL, NULL_TREE);
14913 TREE_READONLY (decl) = 1;
14914 /* Record the decl. Will be used by rs6000_builtin_mask_for_load. */
14915 altivec_builtin_mask_for_load = decl;
14916
14917 /* Access to the vec_init patterns. */
14918 ftype = build_function_type_list (V4SI_type_node, integer_type_node,
14919 integer_type_node, integer_type_node,
14920 integer_type_node, NULL_TREE);
14921 def_builtin ("__builtin_vec_init_v4si", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SI);
14922
14923 ftype = build_function_type_list (V8HI_type_node, short_integer_type_node,
14924 short_integer_type_node,
14925 short_integer_type_node,
14926 short_integer_type_node,
14927 short_integer_type_node,
14928 short_integer_type_node,
14929 short_integer_type_node,
14930 short_integer_type_node, NULL_TREE);
14931 def_builtin ("__builtin_vec_init_v8hi", ftype, ALTIVEC_BUILTIN_VEC_INIT_V8HI);
14932
14933 ftype = build_function_type_list (V16QI_type_node, char_type_node,
14934 char_type_node, char_type_node,
14935 char_type_node, char_type_node,
14936 char_type_node, char_type_node,
14937 char_type_node, char_type_node,
14938 char_type_node, char_type_node,
14939 char_type_node, char_type_node,
14940 char_type_node, char_type_node,
14941 char_type_node, NULL_TREE);
14942 def_builtin ("__builtin_vec_init_v16qi", ftype,
14943 ALTIVEC_BUILTIN_VEC_INIT_V16QI);
14944
14945 ftype = build_function_type_list (V4SF_type_node, float_type_node,
14946 float_type_node, float_type_node,
14947 float_type_node, NULL_TREE);
14948 def_builtin ("__builtin_vec_init_v4sf", ftype, ALTIVEC_BUILTIN_VEC_INIT_V4SF);
14949
14950 /* VSX builtins. */
14951 ftype = build_function_type_list (V2DF_type_node, double_type_node,
14952 double_type_node, NULL_TREE);
14953 def_builtin ("__builtin_vec_init_v2df", ftype, VSX_BUILTIN_VEC_INIT_V2DF);
14954
14955 ftype = build_function_type_list (V2DI_type_node, intDI_type_node,
14956 intDI_type_node, NULL_TREE);
14957 def_builtin ("__builtin_vec_init_v2di", ftype, VSX_BUILTIN_VEC_INIT_V2DI);
14958
14959 /* Access to the vec_set patterns. */
14960 ftype = build_function_type_list (V4SI_type_node, V4SI_type_node,
14961 intSI_type_node,
14962 integer_type_node, NULL_TREE);
14963 def_builtin ("__builtin_vec_set_v4si", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SI);
14964
14965 ftype = build_function_type_list (V8HI_type_node, V8HI_type_node,
14966 intHI_type_node,
14967 integer_type_node, NULL_TREE);
14968 def_builtin ("__builtin_vec_set_v8hi", ftype, ALTIVEC_BUILTIN_VEC_SET_V8HI);
14969
14970 ftype = build_function_type_list (V16QI_type_node, V16QI_type_node,
14971 intQI_type_node,
14972 integer_type_node, NULL_TREE);
14973 def_builtin ("__builtin_vec_set_v16qi", ftype, ALTIVEC_BUILTIN_VEC_SET_V16QI);
14974
14975 ftype = build_function_type_list (V4SF_type_node, V4SF_type_node,
14976 float_type_node,
14977 integer_type_node, NULL_TREE);
14978 def_builtin ("__builtin_vec_set_v4sf", ftype, ALTIVEC_BUILTIN_VEC_SET_V4SF);
14979
14980 ftype = build_function_type_list (V2DF_type_node, V2DF_type_node,
14981 double_type_node,
14982 integer_type_node, NULL_TREE);
14983 def_builtin ("__builtin_vec_set_v2df", ftype, VSX_BUILTIN_VEC_SET_V2DF);
14984
14985 ftype = build_function_type_list (V2DI_type_node, V2DI_type_node,
14986 intDI_type_node,
14987 integer_type_node, NULL_TREE);
14988 def_builtin ("__builtin_vec_set_v2di", ftype, VSX_BUILTIN_VEC_SET_V2DI);
14989
14990 /* Access to the vec_extract patterns. */
14991 ftype = build_function_type_list (intSI_type_node, V4SI_type_node,
14992 integer_type_node, NULL_TREE);
14993 def_builtin ("__builtin_vec_ext_v4si", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SI);
14994
14995 ftype = build_function_type_list (intHI_type_node, V8HI_type_node,
14996 integer_type_node, NULL_TREE);
14997 def_builtin ("__builtin_vec_ext_v8hi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V8HI);
14998
14999 ftype = build_function_type_list (intQI_type_node, V16QI_type_node,
15000 integer_type_node, NULL_TREE);
15001 def_builtin ("__builtin_vec_ext_v16qi", ftype, ALTIVEC_BUILTIN_VEC_EXT_V16QI);
15002
15003 ftype = build_function_type_list (float_type_node, V4SF_type_node,
15004 integer_type_node, NULL_TREE);
15005 def_builtin ("__builtin_vec_ext_v4sf", ftype, ALTIVEC_BUILTIN_VEC_EXT_V4SF);
15006
15007 ftype = build_function_type_list (double_type_node, V2DF_type_node,
15008 integer_type_node, NULL_TREE);
15009 def_builtin ("__builtin_vec_ext_v2df", ftype, VSX_BUILTIN_VEC_EXT_V2DF);
15010
15011 ftype = build_function_type_list (intDI_type_node, V2DI_type_node,
15012 integer_type_node, NULL_TREE);
15013 def_builtin ("__builtin_vec_ext_v2di", ftype, VSX_BUILTIN_VEC_EXT_V2DI);
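  /* Illustrative uses of the init/set/extract entry points above (sketch):

       __vector signed int v = __builtin_vec_init_v4si (1, 2, 3, 4);
       v = __builtin_vec_set_v4si (v, 42, 0);      element 0 becomes 42
       int e = __builtin_vec_ext_v4si (v, 3);      read element 3  */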
15014
15015
15016 if (V1TI_type_node)
15017 {
15018 tree v1ti_ftype_long_pcvoid
15019 = build_function_type_list (V1TI_type_node,
15020 long_integer_type_node, pcvoid_type_node,
15021 NULL_TREE);
15022 tree void_ftype_v1ti_long_pvoid
15023 = build_function_type_list (void_type_node,
15024 V1TI_type_node, long_integer_type_node,
15025 pvoid_type_node, NULL_TREE);
15026 def_builtin ("__builtin_vsx_lxvd2x_v1ti", v1ti_ftype_long_pcvoid,
15027 VSX_BUILTIN_LXVD2X_V1TI);
15028 def_builtin ("__builtin_vsx_stxvd2x_v1ti", void_ftype_v1ti_long_pvoid,
15029 VSX_BUILTIN_STXVD2X_V1TI);
15030 ftype = build_function_type_list (V1TI_type_node, intTI_type_node,
15031 NULL_TREE, NULL_TREE);
15032 def_builtin ("__builtin_vec_init_v1ti", ftype, VSX_BUILTIN_VEC_INIT_V1TI);
15033 ftype = build_function_type_list (V1TI_type_node, V1TI_type_node,
15034 intTI_type_node,
15035 integer_type_node, NULL_TREE);
15036 def_builtin ("__builtin_vec_set_v1ti", ftype, VSX_BUILTIN_VEC_SET_V1TI);
15037 ftype = build_function_type_list (intTI_type_node, V1TI_type_node,
15038 integer_type_node, NULL_TREE);
15039 def_builtin ("__builtin_vec_ext_v1ti", ftype, VSX_BUILTIN_VEC_EXT_V1TI);
15040 }
15041
15042 }
15043
15044 static void
15045 htm_init_builtins (void)
15046 {
15047 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
15048 const struct builtin_description *d;
15049 size_t i;
15050
15051 d = bdesc_htm;
15052 for (i = 0; i < ARRAY_SIZE (bdesc_htm); i++, d++)
15053 {
15054 tree op[MAX_HTM_OPERANDS], type;
15055 HOST_WIDE_INT mask = d->mask;
15056 unsigned attr = rs6000_builtin_info[d->code].attr;
15057 bool void_func = (attr & RS6000_BTC_VOID);
15058 int attr_args = (attr & RS6000_BTC_TYPE_MASK);
15059 int nopnds = 0;
15060 tree argtype = (attr & RS6000_BTC_SPR) ? long_unsigned_type_node
15061 : unsigned_type_node;
15062
15063 if ((mask & builtin_mask) != mask)
15064 {
15065 if (TARGET_DEBUG_BUILTIN)
15066 fprintf (stderr, "htm_builtin, skip binary %s\n", d->name);
15067 continue;
15068 }
15069
15070 if (d->name == 0)
15071 {
15072 if (TARGET_DEBUG_BUILTIN)
15073 fprintf (stderr, "htm_builtin, bdesc_htm[%ld] no name\n",
15074 (long unsigned) i);
15075 continue;
15076 }
15077
15078 op[nopnds++] = (void_func) ? void_type_node : argtype;
15079
15080 if (attr_args == RS6000_BTC_UNARY)
15081 op[nopnds++] = argtype;
15082 else if (attr_args == RS6000_BTC_BINARY)
15083 {
15084 op[nopnds++] = argtype;
15085 op[nopnds++] = argtype;
15086 }
15087 else if (attr_args == RS6000_BTC_TERNARY)
15088 {
15089 op[nopnds++] = argtype;
15090 op[nopnds++] = argtype;
15091 op[nopnds++] = argtype;
15092 }
15093
15094 switch (nopnds)
15095 {
15096 case 1:
15097 type = build_function_type_list (op[0], NULL_TREE);
15098 break;
15099 case 2:
15100 type = build_function_type_list (op[0], op[1], NULL_TREE);
15101 break;
15102 case 3:
15103 type = build_function_type_list (op[0], op[1], op[2], NULL_TREE);
15104 break;
15105 case 4:
15106 type = build_function_type_list (op[0], op[1], op[2], op[3],
15107 NULL_TREE);
15108 break;
15109 default:
15110 gcc_unreachable ();
15111 }
15112
15113 def_builtin (d->name, type, d->code);
15114 }
15115 }
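/* Worked example (a sketch, assuming -mhtm): for an RS6000_BTC_UNARY
   builtin such as __builtin_tbegin, the loop above builds the type
   "unsigned int (unsigned int)" -- return type first, then one ARGTYPE
   argument -- so a transaction can be written as:

       int
       bump_in_transaction (int *counter)
       {
         if (__builtin_tbegin (0))
           {
             ++*counter;
             __builtin_tend (0);
             return 1;
           }
         return 0;
       }
*/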
15116
15117 /* Hash function for builtin functions with up to 3 arguments and a return
15118 type. */
15119 static unsigned
15120 builtin_hash_function (const void *hash_entry)
15121 {
15122 unsigned ret = 0;
15123 int i;
15124 const struct builtin_hash_struct *bh =
15125 (const struct builtin_hash_struct *) hash_entry;
15126
15127 for (i = 0; i < 4; i++)
15128 {
15129 ret = (ret * (unsigned)MAX_MACHINE_MODE) + ((unsigned)bh->mode[i]);
15130 ret = (ret * 2) + bh->uns_p[i];
15131 }
15132
15133 return ret;
15134 }
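/* The loop above folds the four (mode, signedness) pairs into a single
   value with the recurrence, writing M for MAX_MACHINE_MODE:

       ret = (ret * M + mode[i]) * 2 + uns_p[i]

   i.e. the hash is the mixed-radix number mode[0], uns_p[0], ...,
   mode[3], uns_p[3] in alternating radices M and 2, so distinct
   signatures produce distinct hashes up to unsigned overflow and the
   reduction modulo the table size done by the hash table itself.  */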
15135
15136 /* Compare builtin hash entries H1 and H2 for equivalence. */
15137 static int
15138 builtin_hash_eq (const void *h1, const void *h2)
15139 {
15140 const struct builtin_hash_struct *p1 = (const struct builtin_hash_struct *) h1;
15141 const struct builtin_hash_struct *p2 = (const struct builtin_hash_struct *) h2;
15142
15143 return ((p1->mode[0] == p2->mode[0])
15144 && (p1->mode[1] == p2->mode[1])
15145 && (p1->mode[2] == p2->mode[2])
15146 && (p1->mode[3] == p2->mode[3])
15147 && (p1->uns_p[0] == p2->uns_p[0])
15148 && (p1->uns_p[1] == p2->uns_p[1])
15149 && (p1->uns_p[2] == p2->uns_p[2])
15150 && (p1->uns_p[3] == p2->uns_p[3]));
15151 }
15152
15153 /* Map types for builtin functions with an explicit return type and up to 3
15154 arguments. Functions with fewer than 3 arguments use VOIDmode as the mode
15155 of each unused argument. */
15156 static tree
15157 builtin_function_type (enum machine_mode mode_ret, enum machine_mode mode_arg0,
15158 enum machine_mode mode_arg1, enum machine_mode mode_arg2,
15159 enum rs6000_builtins builtin, const char *name)
15160 {
15161 struct builtin_hash_struct h;
15162 struct builtin_hash_struct *h2;
15163 void **found;
15164 int num_args = 3;
15165 int i;
15166 tree ret_type = NULL_TREE;
15167 tree arg_type[3] = { NULL_TREE, NULL_TREE, NULL_TREE };
15168
15169 /* Create builtin_hash_table. */
15170 if (builtin_hash_table == NULL)
15171 builtin_hash_table = htab_create_ggc (1500, builtin_hash_function,
15172 builtin_hash_eq, NULL);
15173
15174 h.type = NULL_TREE;
15175 h.mode[0] = mode_ret;
15176 h.mode[1] = mode_arg0;
15177 h.mode[2] = mode_arg1;
15178 h.mode[3] = mode_arg2;
15179 h.uns_p[0] = 0;
15180 h.uns_p[1] = 0;
15181 h.uns_p[2] = 0;
15182 h.uns_p[3] = 0;
15183
15184 /* If the builtin produces unsigned results or takes unsigned arguments,
15185 and it is returned as a decl for the vectorizer (such as widening
15186 multiplies or permute), make sure the arguments and return value
15187 are type correct. */
15188 switch (builtin)
15189 {
15190 /* unsigned 1 argument functions. */
15191 case CRYPTO_BUILTIN_VSBOX:
15192 case P8V_BUILTIN_VGBBD:
15193 case MISC_BUILTIN_CDTBCD:
15194 case MISC_BUILTIN_CBCDTD:
15195 h.uns_p[0] = 1;
15196 h.uns_p[1] = 1;
15197 break;
15198
15199 /* unsigned 2 argument functions. */
15200 case ALTIVEC_BUILTIN_VMULEUB_UNS:
15201 case ALTIVEC_BUILTIN_VMULEUH_UNS:
15202 case ALTIVEC_BUILTIN_VMULOUB_UNS:
15203 case ALTIVEC_BUILTIN_VMULOUH_UNS:
15204 case CRYPTO_BUILTIN_VCIPHER:
15205 case CRYPTO_BUILTIN_VCIPHERLAST:
15206 case CRYPTO_BUILTIN_VNCIPHER:
15207 case CRYPTO_BUILTIN_VNCIPHERLAST:
15208 case CRYPTO_BUILTIN_VPMSUMB:
15209 case CRYPTO_BUILTIN_VPMSUMH:
15210 case CRYPTO_BUILTIN_VPMSUMW:
15211 case CRYPTO_BUILTIN_VPMSUMD:
15212 case CRYPTO_BUILTIN_VPMSUM:
15213 case MISC_BUILTIN_ADDG6S:
15214 case MISC_BUILTIN_DIVWEU:
15215 case MISC_BUILTIN_DIVWEUO:
15216 case MISC_BUILTIN_DIVDEU:
15217 case MISC_BUILTIN_DIVDEUO:
15218 h.uns_p[0] = 1;
15219 h.uns_p[1] = 1;
15220 h.uns_p[2] = 1;
15221 break;
15222
15223 /* unsigned 3 argument functions. */
15224 case ALTIVEC_BUILTIN_VPERM_16QI_UNS:
15225 case ALTIVEC_BUILTIN_VPERM_8HI_UNS:
15226 case ALTIVEC_BUILTIN_VPERM_4SI_UNS:
15227 case ALTIVEC_BUILTIN_VPERM_2DI_UNS:
15228 case ALTIVEC_BUILTIN_VSEL_16QI_UNS:
15229 case ALTIVEC_BUILTIN_VSEL_8HI_UNS:
15230 case ALTIVEC_BUILTIN_VSEL_4SI_UNS:
15231 case ALTIVEC_BUILTIN_VSEL_2DI_UNS:
15232 case VSX_BUILTIN_VPERM_16QI_UNS:
15233 case VSX_BUILTIN_VPERM_8HI_UNS:
15234 case VSX_BUILTIN_VPERM_4SI_UNS:
15235 case VSX_BUILTIN_VPERM_2DI_UNS:
15236 case VSX_BUILTIN_XXSEL_16QI_UNS:
15237 case VSX_BUILTIN_XXSEL_8HI_UNS:
15238 case VSX_BUILTIN_XXSEL_4SI_UNS:
15239 case VSX_BUILTIN_XXSEL_2DI_UNS:
15240 case CRYPTO_BUILTIN_VPERMXOR:
15241 case CRYPTO_BUILTIN_VPERMXOR_V2DI:
15242 case CRYPTO_BUILTIN_VPERMXOR_V4SI:
15243 case CRYPTO_BUILTIN_VPERMXOR_V8HI:
15244 case CRYPTO_BUILTIN_VPERMXOR_V16QI:
15245 case CRYPTO_BUILTIN_VSHASIGMAW:
15246 case CRYPTO_BUILTIN_VSHASIGMAD:
15247 case CRYPTO_BUILTIN_VSHASIGMA:
15248 h.uns_p[0] = 1;
15249 h.uns_p[1] = 1;
15250 h.uns_p[2] = 1;
15251 h.uns_p[3] = 1;
15252 break;
15253
15254 /* signed permute functions with unsigned char mask. */
15255 case ALTIVEC_BUILTIN_VPERM_16QI:
15256 case ALTIVEC_BUILTIN_VPERM_8HI:
15257 case ALTIVEC_BUILTIN_VPERM_4SI:
15258 case ALTIVEC_BUILTIN_VPERM_4SF:
15259 case ALTIVEC_BUILTIN_VPERM_2DI:
15260 case ALTIVEC_BUILTIN_VPERM_2DF:
15261 case VSX_BUILTIN_VPERM_16QI:
15262 case VSX_BUILTIN_VPERM_8HI:
15263 case VSX_BUILTIN_VPERM_4SI:
15264 case VSX_BUILTIN_VPERM_4SF:
15265 case VSX_BUILTIN_VPERM_2DI:
15266 case VSX_BUILTIN_VPERM_2DF:
15267 h.uns_p[3] = 1;
15268 break;
15269
15270 /* unsigned args, signed return. */
15271 case VSX_BUILTIN_XVCVUXDDP_UNS:
15272 case ALTIVEC_BUILTIN_UNSFLOAT_V4SI_V4SF:
15273 h.uns_p[1] = 1;
15274 break;
15275
15276 /* signed args, unsigned return. */
15277 case VSX_BUILTIN_XVCVDPUXDS_UNS:
15278 case ALTIVEC_BUILTIN_FIXUNS_V4SF_V4SI:
15279 case MISC_BUILTIN_UNPACK_TD:
15280 case MISC_BUILTIN_UNPACK_V1TI:
15281 h.uns_p[0] = 1;
15282 break;
15283
15284 /* unsigned arguments for 128-bit pack instructions. */
15285 case MISC_BUILTIN_PACK_TD:
15286 case MISC_BUILTIN_PACK_V1TI:
15287 h.uns_p[1] = 1;
15288 h.uns_p[2] = 1;
15289 break;
15290
15291 default:
15292 break;
15293 }
15294
15295 /* Figure out how many args are present. */
15296 while (num_args > 0 && h.mode[num_args] == VOIDmode)
15297 num_args--;
15298
15299 if (num_args == 0)
15300 fatal_error ("internal error: builtin function %s had no type", name);
15301
15302 ret_type = builtin_mode_to_type[h.mode[0]][h.uns_p[0]];
15303 if (!ret_type && h.uns_p[0])
15304 ret_type = builtin_mode_to_type[h.mode[0]][0];
15305
15306 if (!ret_type)
15307 fatal_error ("internal error: builtin function %s had an unexpected "
15308 "return type %s", name, GET_MODE_NAME (h.mode[0]));
15309
15310 for (i = 0; i < (int) ARRAY_SIZE (arg_type); i++)
15311 arg_type[i] = NULL_TREE;
15312
15313 for (i = 0; i < num_args; i++)
15314 {
15315 int m = (int) h.mode[i+1];
15316 int uns_p = h.uns_p[i+1];
15317
15318 arg_type[i] = builtin_mode_to_type[m][uns_p];
15319 if (!arg_type[i] && uns_p)
15320 arg_type[i] = builtin_mode_to_type[m][0];
15321
15322 if (!arg_type[i])
15323 fatal_error ("internal error: builtin function %s, argument %d "
15324 "had unexpected argument type %s", name, i,
15325 GET_MODE_NAME (m));
15326 }
15327
15328 found = htab_find_slot (builtin_hash_table, &h, INSERT);
15329 if (*found == NULL)
15330 {
15331 h2 = ggc_alloc<builtin_hash_struct> ();
15332 *h2 = h;
15333 *found = (void *)h2;
15334
15335 h2->type = build_function_type_list (ret_type, arg_type[0], arg_type[1],
15336 arg_type[2], NULL_TREE);
15337 }
15338
15339 return ((struct builtin_hash_struct *)(*found))->type;
15340 }
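/* A minimal sketch of the find-or-insert idiom used above (only the
   libiberty htab_find_slot API is assumed; "probe" is the stack copy
   of the key and "the_type" stands for the type built last):

       void **slot = htab_find_slot (table, &probe, INSERT);
       if (*slot == NULL)
         {
           entry = ggc_alloc<builtin_hash_struct> ();
           *entry = probe;
           *slot = (void *) entry;
           entry->type = the_type;
         }
       return ((struct builtin_hash_struct *) *slot)->type;

   Publishing the entry before building the function type plausibly
   matters because build_function_type_list can allocate GC memory and
   the GC-rooted table keeps the half-built entry reachable.  */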
15341
15342 static void
15343 rs6000_common_init_builtins (void)
15344 {
15345 const struct builtin_description *d;
15346 size_t i;
15347
15348 tree opaque_ftype_opaque = NULL_TREE;
15349 tree opaque_ftype_opaque_opaque = NULL_TREE;
15350 tree opaque_ftype_opaque_opaque_opaque = NULL_TREE;
15351 tree v2si_ftype_qi = NULL_TREE;
15352 tree v2si_ftype_v2si_qi = NULL_TREE;
15353 tree v2si_ftype_int_qi = NULL_TREE;
15354 HOST_WIDE_INT builtin_mask = rs6000_builtin_mask;
15355
15356 if (!TARGET_PAIRED_FLOAT)
15357 {
15358 builtin_mode_to_type[V2SImode][0] = opaque_V2SI_type_node;
15359 builtin_mode_to_type[V2SFmode][0] = opaque_V2SF_type_node;
15360 }
15361
15362 /* Paired and SPE builtins are only available if the compiler was built with
15363 the appropriate options, so create those builtins only when the matching
15364 compiler option is in effect. Create Altivec and VSX builtins on machines
15365 with at least the general purpose extensions (970 and newer) to allow the
15366 use of the target attribute. */
15367
15368 if (TARGET_EXTRA_BUILTINS)
15369 builtin_mask |= RS6000_BTM_COMMON;
15370
15371 /* Add the ternary operators. */
15372 d = bdesc_3arg;
15373 for (i = 0; i < ARRAY_SIZE (bdesc_3arg); i++, d++)
15374 {
15375 tree type;
15376 HOST_WIDE_INT mask = d->mask;
15377
15378 if ((mask & builtin_mask) != mask)
15379 {
15380 if (TARGET_DEBUG_BUILTIN)
15381 fprintf (stderr, "rs6000_builtin, skip ternary %s\n", d->name);
15382 continue;
15383 }
15384
15385 if (rs6000_overloaded_builtin_p (d->code))
15386 {
15387 if (! (type = opaque_ftype_opaque_opaque_opaque))
15388 type = opaque_ftype_opaque_opaque_opaque
15389 = build_function_type_list (opaque_V4SI_type_node,
15390 opaque_V4SI_type_node,
15391 opaque_V4SI_type_node,
15392 opaque_V4SI_type_node,
15393 NULL_TREE);
15394 }
15395 else
15396 {
15397 enum insn_code icode = d->icode;
15398 if (d->name == 0)
15399 {
15400 if (TARGET_DEBUG_BUILTIN)
15401 fprintf (stderr, "rs6000_builtin, bdesc_3arg[%ld] no name\n",
15402 (long unsigned)i);
15403
15404 continue;
15405 }
15406
15407 if (icode == CODE_FOR_nothing)
15408 {
15409 if (TARGET_DEBUG_BUILTIN)
15410 fprintf (stderr, "rs6000_builtin, skip ternary %s (no code)\n",
15411 d->name);
15412
15413 continue;
15414 }
15415
15416 type = builtin_function_type (insn_data[icode].operand[0].mode,
15417 insn_data[icode].operand[1].mode,
15418 insn_data[icode].operand[2].mode,
15419 insn_data[icode].operand[3].mode,
15420 d->code, d->name);
15421 }
15422
15423 def_builtin (d->name, type, d->code);
15424 }
15425
15426 /* Add the binary operators. */
15427 d = bdesc_2arg;
15428 for (i = 0; i < ARRAY_SIZE (bdesc_2arg); i++, d++)
15429 {
15430 enum machine_mode mode0, mode1, mode2;
15431 tree type;
15432 HOST_WIDE_INT mask = d->mask;
15433
15434 if ((mask & builtin_mask) != mask)
15435 {
15436 if (TARGET_DEBUG_BUILTIN)
15437 fprintf (stderr, "rs6000_builtin, skip binary %s\n", d->name);
15438 continue;
15439 }
15440
15441 if (rs6000_overloaded_builtin_p (d->code))
15442 {
15443 if (! (type = opaque_ftype_opaque_opaque))
15444 type = opaque_ftype_opaque_opaque
15445 = build_function_type_list (opaque_V4SI_type_node,
15446 opaque_V4SI_type_node,
15447 opaque_V4SI_type_node,
15448 NULL_TREE);
15449 }
15450 else
15451 {
15452 enum insn_code icode = d->icode;
15453 if (d->name == 0)
15454 {
15455 if (TARGET_DEBUG_BUILTIN)
15456 fprintf (stderr, "rs6000_builtin, bdesc_2arg[%ld] no name\n",
15457 (long unsigned)i);
15458
15459 continue;
15460 }
15461
15462 if (icode == CODE_FOR_nothing)
15463 {
15464 if (TARGET_DEBUG_BUILTIN)
15465 fprintf (stderr, "rs6000_builtin, skip binary %s (no code)\n",
15466 d->name);
15467
15468 continue;
15469 }
15470
15471 mode0 = insn_data[icode].operand[0].mode;
15472 mode1 = insn_data[icode].operand[1].mode;
15473 mode2 = insn_data[icode].operand[2].mode;
15474
15475 if (mode0 == V2SImode && mode1 == V2SImode && mode2 == QImode)
15476 {
15477 if (! (type = v2si_ftype_v2si_qi))
15478 type = v2si_ftype_v2si_qi
15479 = build_function_type_list (opaque_V2SI_type_node,
15480 opaque_V2SI_type_node,
15481 char_type_node,
15482 NULL_TREE);
15483 }
15484
15485 else if (mode0 == V2SImode && GET_MODE_CLASS (mode1) == MODE_INT
15486 && mode2 == QImode)
15487 {
15488 if (! (type = v2si_ftype_int_qi))
15489 type = v2si_ftype_int_qi
15490 = build_function_type_list (opaque_V2SI_type_node,
15491 integer_type_node,
15492 char_type_node,
15493 NULL_TREE);
15494 }
15495
15496 else
15497 type = builtin_function_type (mode0, mode1, mode2, VOIDmode,
15498 d->code, d->name);
15499 }
15500
15501 def_builtin (d->name, type, d->code);
15502 }
15503
15504 /* Add the simple unary operators. */
15505 d = bdesc_1arg;
15506 for (i = 0; i < ARRAY_SIZE (bdesc_1arg); i++, d++)
15507 {
15508 enum machine_mode mode0, mode1;
15509 tree type;
15510 HOST_WIDE_INT mask = d->mask;
15511
15512 if ((mask & builtin_mask) != mask)
15513 {
15514 if (TARGET_DEBUG_BUILTIN)
15515 fprintf (stderr, "rs6000_builtin, skip unary %s\n", d->name);
15516 continue;
15517 }
15518
15519 if (rs6000_overloaded_builtin_p (d->code))
15520 {
15521 if (! (type = opaque_ftype_opaque))
15522 type = opaque_ftype_opaque
15523 = build_function_type_list (opaque_V4SI_type_node,
15524 opaque_V4SI_type_node,
15525 NULL_TREE);
15526 }
15527 else
15528 {
15529 enum insn_code icode = d->icode;
15530 if (d->name == 0)
15531 {
15532 if (TARGET_DEBUG_BUILTIN)
15533 fprintf (stderr, "rs6000_builtin, bdesc_1arg[%ld] no name\n",
15534 (long unsigned)i);
15535
15536 continue;
15537 }
15538
15539 if (icode == CODE_FOR_nothing)
15540 {
15541 if (TARGET_DEBUG_BUILTIN)
15542 fprintf (stderr, "rs6000_builtin, skip unary %s (no code)\n",
15543 d->name);
15544
15545 continue;
15546 }
15547
15548 mode0 = insn_data[icode].operand[0].mode;
15549 mode1 = insn_data[icode].operand[1].mode;
15550
15551 if (mode0 == V2SImode && mode1 == QImode)
15552 {
15553 if (! (type = v2si_ftype_qi))
15554 type = v2si_ftype_qi
15555 = build_function_type_list (opaque_V2SI_type_node,
15556 char_type_node,
15557 NULL_TREE);
15558 }
15559
15560 else
15561 type = builtin_function_type (mode0, mode1, VOIDmode, VOIDmode,
15562 d->code, d->name);
15563 }
15564
15565 def_builtin (d->name, type, d->code);
15566 }
15567 }
15568
15569 static void
15570 rs6000_init_libfuncs (void)
15571 {
15572 if (!TARGET_IEEEQUAD)
15573 /* AIX/Darwin/64-bit Linux quad floating point routines. */
15574 if (!TARGET_XL_COMPAT)
15575 {
15576 set_optab_libfunc (add_optab, TFmode, "__gcc_qadd");
15577 set_optab_libfunc (sub_optab, TFmode, "__gcc_qsub");
15578 set_optab_libfunc (smul_optab, TFmode, "__gcc_qmul");
15579 set_optab_libfunc (sdiv_optab, TFmode, "__gcc_qdiv");
15580
15581 if (!(TARGET_HARD_FLOAT && (TARGET_FPRS || TARGET_E500_DOUBLE)))
15582 {
15583 set_optab_libfunc (neg_optab, TFmode, "__gcc_qneg");
15584 set_optab_libfunc (eq_optab, TFmode, "__gcc_qeq");
15585 set_optab_libfunc (ne_optab, TFmode, "__gcc_qne");
15586 set_optab_libfunc (gt_optab, TFmode, "__gcc_qgt");
15587 set_optab_libfunc (ge_optab, TFmode, "__gcc_qge");
15588 set_optab_libfunc (lt_optab, TFmode, "__gcc_qlt");
15589 set_optab_libfunc (le_optab, TFmode, "__gcc_qle");
15590
15591 set_conv_libfunc (sext_optab, TFmode, SFmode, "__gcc_stoq");
15592 set_conv_libfunc (sext_optab, TFmode, DFmode, "__gcc_dtoq");
15593 set_conv_libfunc (trunc_optab, SFmode, TFmode, "__gcc_qtos");
15594 set_conv_libfunc (trunc_optab, DFmode, TFmode, "__gcc_qtod");
15595 set_conv_libfunc (sfix_optab, SImode, TFmode, "__gcc_qtoi");
15596 set_conv_libfunc (ufix_optab, SImode, TFmode, "__gcc_qtou");
15597 set_conv_libfunc (sfloat_optab, TFmode, SImode, "__gcc_itoq");
15598 set_conv_libfunc (ufloat_optab, TFmode, SImode, "__gcc_utoq");
15599 }
15600
15601 if (!(TARGET_HARD_FLOAT && TARGET_FPRS))
15602 set_optab_libfunc (unord_optab, TFmode, "__gcc_qunord");
15603 }
15604 else
15605 {
15606 set_optab_libfunc (add_optab, TFmode, "_xlqadd");
15607 set_optab_libfunc (sub_optab, TFmode, "_xlqsub");
15608 set_optab_libfunc (smul_optab, TFmode, "_xlqmul");
15609 set_optab_libfunc (sdiv_optab, TFmode, "_xlqdiv");
15610 }
15611 else
15612 {
15613 /* 32-bit SVR4 quad floating point routines. */
15614
15615 set_optab_libfunc (add_optab, TFmode, "_q_add");
15616 set_optab_libfunc (sub_optab, TFmode, "_q_sub");
15617 set_optab_libfunc (neg_optab, TFmode, "_q_neg");
15618 set_optab_libfunc (smul_optab, TFmode, "_q_mul");
15619 set_optab_libfunc (sdiv_optab, TFmode, "_q_div");
15620 if (TARGET_PPC_GPOPT)
15621 set_optab_libfunc (sqrt_optab, TFmode, "_q_sqrt");
15622
15623 set_optab_libfunc (eq_optab, TFmode, "_q_feq");
15624 set_optab_libfunc (ne_optab, TFmode, "_q_fne");
15625 set_optab_libfunc (gt_optab, TFmode, "_q_fgt");
15626 set_optab_libfunc (ge_optab, TFmode, "_q_fge");
15627 set_optab_libfunc (lt_optab, TFmode, "_q_flt");
15628 set_optab_libfunc (le_optab, TFmode, "_q_fle");
15629
15630 set_conv_libfunc (sext_optab, TFmode, SFmode, "_q_stoq");
15631 set_conv_libfunc (sext_optab, TFmode, DFmode, "_q_dtoq");
15632 set_conv_libfunc (trunc_optab, SFmode, TFmode, "_q_qtos");
15633 set_conv_libfunc (trunc_optab, DFmode, TFmode, "_q_qtod");
15634 set_conv_libfunc (sfix_optab, SImode, TFmode, "_q_qtoi");
15635 set_conv_libfunc (ufix_optab, SImode, TFmode, "_q_qtou");
15636 set_conv_libfunc (sfloat_optab, TFmode, SImode, "_q_itoq");
15637 set_conv_libfunc (ufloat_optab, TFmode, SImode, "_q_utoq");
15638 }
15639 }
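/* Illustrative effect (a sketch; exact names depend on configuration):
   with the IBM 128-bit long double format (!TARGET_IEEEQUAD) and
   !TARGET_XL_COMPAT, a plain long double addition such as

       long double
       qadd (long double a, long double b)
       {
         return a + b;
       }

   has no TFmode add insn and is emitted as a call to the __gcc_qadd
   libfunc registered above; under -mxl-compat the same operation calls
   _xlqadd, and the 32-bit SVR4 configuration calls _q_add.  */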
15640
15641 \f
15642 /* Expand a block clear operation, and return 1 if successful. Return 0
15643 if we should let the compiler generate normal code.
15644
15645 operands[0] is the destination
15646 operands[1] is the length
15647 operands[3] is the alignment */
15648
15649 int
15650 expand_block_clear (rtx operands[])
15651 {
15652 rtx orig_dest = operands[0];
15653 rtx bytes_rtx = operands[1];
15654 rtx align_rtx = operands[3];
15655 bool constp = (GET_CODE (bytes_rtx) == CONST_INT);
15656 HOST_WIDE_INT align;
15657 HOST_WIDE_INT bytes;
15658 int offset;
15659 int clear_bytes;
15660 int clear_step;
15661
15662 /* If this is not a fixed size clear, just call memset. */
15663 if (! constp)
15664 return 0;
15665
15666 /* This must be a fixed size alignment */
15667 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
15668 align = INTVAL (align_rtx) * BITS_PER_UNIT;
15669
15670 /* Anything to clear? */
15671 bytes = INTVAL (bytes_rtx);
15672 if (bytes <= 0)
15673 return 1;
15674
15675 /* Use the builtin memset after a point, to avoid huge code bloat.
15676 When optimize_size, avoid any significant code bloat; calling
15677 memset is about 4 instructions, so allow for one instruction to
15678 load zero and three to do clearing. */
15679 if (TARGET_ALTIVEC && align >= 128)
15680 clear_step = 16;
15681 else if (TARGET_POWERPC64 && (align >= 64 || !STRICT_ALIGNMENT))
15682 clear_step = 8;
15683 else if (TARGET_SPE && align >= 64)
15684 clear_step = 8;
15685 else
15686 clear_step = 4;
15687
15688 if (optimize_size && bytes > 3 * clear_step)
15689 return 0;
15690 if (! optimize_size && bytes > 8 * clear_step)
15691 return 0;
15692
15693 for (offset = 0; bytes > 0; offset += clear_bytes, bytes -= clear_bytes)
15694 {
15695 enum machine_mode mode = BLKmode;
15696 rtx dest;
15697
15698 if (bytes >= 16 && TARGET_ALTIVEC && align >= 128)
15699 {
15700 clear_bytes = 16;
15701 mode = V4SImode;
15702 }
15703 else if (bytes >= 8 && TARGET_SPE && align >= 64)
15704 {
15705 clear_bytes = 8;
15706 mode = V2SImode;
15707 }
15708 else if (bytes >= 8 && TARGET_POWERPC64
15709 && (align >= 64 || !STRICT_ALIGNMENT))
15710 {
15711 clear_bytes = 8;
15712 mode = DImode;
15713 if (offset == 0 && align < 64)
15714 {
15715 rtx addr;
15716
15717 /* If the address form is reg+offset with offset not a
15718 multiple of four, reload into reg indirect form here
15719 rather than waiting for reload. This way we get one
15720 reload, not one per store. */
15721 addr = XEXP (orig_dest, 0);
15722 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
15723 && GET_CODE (XEXP (addr, 1)) == CONST_INT
15724 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
15725 {
15726 addr = copy_addr_to_reg (addr);
15727 orig_dest = replace_equiv_address (orig_dest, addr);
15728 }
15729 }
15730 }
15731 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
15732 { /* clear 4 bytes */
15733 clear_bytes = 4;
15734 mode = SImode;
15735 }
15736 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
15737 { /* clear 2 bytes */
15738 clear_bytes = 2;
15739 mode = HImode;
15740 }
15741 else /* clear 1 byte at a time */
15742 {
15743 clear_bytes = 1;
15744 mode = QImode;
15745 }
15746
15747 dest = adjust_address (orig_dest, mode, offset);
15748
15749 emit_move_insn (dest, CONST0_RTX (mode));
15750 }
15751
15752 return 1;
15753 }
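/* Worked example (a sketch): clearing 32 bytes at a 16-byte-aligned
   destination with TARGET_ALTIVEC gives clear_step = 16, the size
   limits above pass (32 <= 8 * 16 when not optimizing for size), and
   the loop emits two V4SImode stores of zero, roughly

       stvx v0,0,r3
       stvx v0,r4,r3

   with register choice illustrative (r4 holding the offset 16).  */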
15754
15755 \f
15756 /* Expand a block move operation, and return 1 if successful. Return 0
15757 if we should let the compiler generate normal code.
15758
15759 operands[0] is the destination
15760 operands[1] is the source
15761 operands[2] is the length
15762 operands[3] is the alignment */
15763
15764 #define MAX_MOVE_REG 4
15765
15766 int
15767 expand_block_move (rtx operands[])
15768 {
15769 rtx orig_dest = operands[0];
15770 rtx orig_src = operands[1];
15771 rtx bytes_rtx = operands[2];
15772 rtx align_rtx = operands[3];
15773 int constp = (GET_CODE (bytes_rtx) == CONST_INT);
15774 int align;
15775 int bytes;
15776 int offset;
15777 int move_bytes;
15778 rtx stores[MAX_MOVE_REG];
15779 int num_reg = 0;
15780
15781 /* If this is not a fixed size move, just call memcpy */
15782 if (! constp)
15783 return 0;
15784
15785 /* This must be a fixed size alignment */
15786 gcc_assert (GET_CODE (align_rtx) == CONST_INT);
15787 align = INTVAL (align_rtx) * BITS_PER_UNIT;
15788
15789 /* Anything to move? */
15790 bytes = INTVAL (bytes_rtx);
15791 if (bytes <= 0)
15792 return 1;
15793
15794 if (bytes > rs6000_block_move_inline_limit)
15795 return 0;
15796
15797 for (offset = 0; bytes > 0; offset += move_bytes, bytes -= move_bytes)
15798 {
15799 union {
15800 rtx (*movmemsi) (rtx, rtx, rtx, rtx);
15801 rtx (*mov) (rtx, rtx);
15802 } gen_func;
15803 enum machine_mode mode = BLKmode;
15804 rtx src, dest;
15805
15806 /* Altivec first, since it will be faster than a string move
15807 when it applies, and usually not significantly larger. */
15808 if (TARGET_ALTIVEC && bytes >= 16 && align >= 128)
15809 {
15810 move_bytes = 16;
15811 mode = V4SImode;
15812 gen_func.mov = gen_movv4si;
15813 }
15814 else if (TARGET_SPE && bytes >= 8 && align >= 64)
15815 {
15816 move_bytes = 8;
15817 mode = V2SImode;
15818 gen_func.mov = gen_movv2si;
15819 }
15820 else if (TARGET_STRING
15821 && bytes > 24 /* move up to 32 bytes at a time */
15822 && ! fixed_regs[5]
15823 && ! fixed_regs[6]
15824 && ! fixed_regs[7]
15825 && ! fixed_regs[8]
15826 && ! fixed_regs[9]
15827 && ! fixed_regs[10]
15828 && ! fixed_regs[11]
15829 && ! fixed_regs[12])
15830 {
15831 move_bytes = (bytes > 32) ? 32 : bytes;
15832 gen_func.movmemsi = gen_movmemsi_8reg;
15833 }
15834 else if (TARGET_STRING
15835 && bytes > 16 /* move up to 24 bytes at a time */
15836 && ! fixed_regs[5]
15837 && ! fixed_regs[6]
15838 && ! fixed_regs[7]
15839 && ! fixed_regs[8]
15840 && ! fixed_regs[9]
15841 && ! fixed_regs[10])
15842 {
15843 move_bytes = (bytes > 24) ? 24 : bytes;
15844 gen_func.movmemsi = gen_movmemsi_6reg;
15845 }
15846 else if (TARGET_STRING
15847 && bytes > 8 /* move up to 16 bytes at a time */
15848 && ! fixed_regs[5]
15849 && ! fixed_regs[6]
15850 && ! fixed_regs[7]
15851 && ! fixed_regs[8])
15852 {
15853 move_bytes = (bytes > 16) ? 16 : bytes;
15854 gen_func.movmemsi = gen_movmemsi_4reg;
15855 }
15856 else if (bytes >= 8 && TARGET_POWERPC64
15857 && (align >= 64 || !STRICT_ALIGNMENT))
15858 {
15859 move_bytes = 8;
15860 mode = DImode;
15861 gen_func.mov = gen_movdi;
15862 if (offset == 0 && align < 64)
15863 {
15864 rtx addr;
15865
15866 /* If the address form is reg+offset with offset not a
15867 multiple of four, reload into reg indirect form here
15868 rather than waiting for reload. This way we get one
15869 reload, not one per load and/or store. */
15870 addr = XEXP (orig_dest, 0);
15871 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
15872 && GET_CODE (XEXP (addr, 1)) == CONST_INT
15873 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
15874 {
15875 addr = copy_addr_to_reg (addr);
15876 orig_dest = replace_equiv_address (orig_dest, addr);
15877 }
15878 addr = XEXP (orig_src, 0);
15879 if ((GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM)
15880 && GET_CODE (XEXP (addr, 1)) == CONST_INT
15881 && (INTVAL (XEXP (addr, 1)) & 3) != 0)
15882 {
15883 addr = copy_addr_to_reg (addr);
15884 orig_src = replace_equiv_address (orig_src, addr);
15885 }
15886 }
15887 }
15888 else if (TARGET_STRING && bytes > 4 && !TARGET_POWERPC64)
15889 { /* move up to 8 bytes at a time */
15890 move_bytes = (bytes > 8) ? 8 : bytes;
15891 gen_func.movmemsi = gen_movmemsi_2reg;
15892 }
15893 else if (bytes >= 4 && (align >= 32 || !STRICT_ALIGNMENT))
15894 { /* move 4 bytes */
15895 move_bytes = 4;
15896 mode = SImode;
15897 gen_func.mov = gen_movsi;
15898 }
15899 else if (bytes >= 2 && (align >= 16 || !STRICT_ALIGNMENT))
15900 { /* move 2 bytes */
15901 move_bytes = 2;
15902 mode = HImode;
15903 gen_func.mov = gen_movhi;
15904 }
15905 else if (TARGET_STRING && bytes > 1)
15906 { /* move up to 4 bytes at a time */
15907 move_bytes = (bytes > 4) ? 4 : bytes;
15908 gen_func.movmemsi = gen_movmemsi_1reg;
15909 }
15910 else /* move 1 byte at a time */
15911 {
15912 move_bytes = 1;
15913 mode = QImode;
15914 gen_func.mov = gen_movqi;
15915 }
15916
15917 src = adjust_address (orig_src, mode, offset);
15918 dest = adjust_address (orig_dest, mode, offset);
15919
15920 if (mode != BLKmode)
15921 {
15922 rtx tmp_reg = gen_reg_rtx (mode);
15923
15924 emit_insn ((*gen_func.mov) (tmp_reg, src));
15925 stores[num_reg++] = (*gen_func.mov) (dest, tmp_reg);
15926 }
15927
15928 if (mode == BLKmode || num_reg >= MAX_MOVE_REG || bytes == move_bytes)
15929 {
15930 int i;
15931 for (i = 0; i < num_reg; i++)
15932 emit_insn (stores[i]);
15933 num_reg = 0;
15934 }
15935
15936 if (mode == BLKmode)
15937 {
15938 /* Move the address into scratch registers. The movmemsi
15939 patterns require zero offset. */
15940 if (!REG_P (XEXP (src, 0)))
15941 {
15942 rtx src_reg = copy_addr_to_reg (XEXP (src, 0));
15943 src = replace_equiv_address (src, src_reg);
15944 }
15945 set_mem_size (src, move_bytes);
15946
15947 if (!REG_P (XEXP (dest, 0)))
15948 {
15949 rtx dest_reg = copy_addr_to_reg (XEXP (dest, 0));
15950 dest = replace_equiv_address (dest, dest_reg);
15951 }
15952 set_mem_size (dest, move_bytes);
15953
15954 emit_insn ((*gen_func.movmemsi) (dest, src,
15955 GEN_INT (move_bytes & 31),
15956 align_rtx));
15957 }
15958 }
15959
15960 return 1;
15961 }
15962
15963 \f
15964 /* Return a string to perform a load_multiple operation.
15965 operands[0] is the vector.
15966 operands[1] is the source address.
15967 operands[2] is the first destination register. */
15968
15969 const char *
15970 rs6000_output_load_multiple (rtx operands[3])
15971 {
15972 /* We have to handle the case where the pseudo used to contain the address
15973 is assigned to one of the output registers. */
15974 int i, j;
15975 int words = XVECLEN (operands[0], 0);
15976 rtx xop[10];
15977
15978 if (XVECLEN (operands[0], 0) == 1)
15979 return "lwz %2,0(%1)";
15980
15981 for (i = 0; i < words; i++)
15982 if (refers_to_regno_p (REGNO (operands[2]) + i,
15983 REGNO (operands[2]) + i + 1, operands[1], 0))
15984 {
15985 if (i == words-1)
15986 {
15987 xop[0] = GEN_INT (4 * (words-1));
15988 xop[1] = operands[1];
15989 xop[2] = operands[2];
15990 output_asm_insn ("lswi %2,%1,%0\n\tlwz %1,%0(%1)", xop);
15991 return "";
15992 }
15993 else if (i == 0)
15994 {
15995 xop[0] = GEN_INT (4 * (words-1));
15996 xop[1] = operands[1];
15997 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + 1);
15998 output_asm_insn ("addi %1,%1,4\n\tlswi %2,%1,%0\n\tlwz %1,-4(%1)", xop);
15999 return "";
16000 }
16001 else
16002 {
16003 for (j = 0; j < words; j++)
16004 if (j != i)
16005 {
16006 xop[0] = GEN_INT (j * 4);
16007 xop[1] = operands[1];
16008 xop[2] = gen_rtx_REG (SImode, REGNO (operands[2]) + j);
16009 output_asm_insn ("lwz %2,%0(%1)", xop);
16010 }
16011 xop[0] = GEN_INT (i * 4);
16012 xop[1] = operands[1];
16013 output_asm_insn ("lwz %1,%0(%1)", xop);
16014 return "";
16015 }
16016 }
16017
16018 return "lswi %2,%1,%N0";
16019 }
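/* Worked example (a sketch): for a 4-word load_multiple whose address
   register is not among the outputs, this returns the single string
   insn, which might assemble as

       lswi 5,9,16

   loading 16 bytes at (r9) into r5..r8 (%N0 prints the byte count).
   When the address register is also the last destination, the
   lswi/lwz pair loads the leading words first and only then
   overwrites the address register with the final lwz.  */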
16020
16021 \f
16022 /* A validation routine: say whether CODE, a condition code, and MODE
16023 match. The other alternatives either don't make sense or should
16024 never be generated. */
16025
16026 void
16027 validate_condition_mode (enum rtx_code code, enum machine_mode mode)
16028 {
16029 gcc_assert ((GET_RTX_CLASS (code) == RTX_COMPARE
16030 || GET_RTX_CLASS (code) == RTX_COMM_COMPARE)
16031 && GET_MODE_CLASS (mode) == MODE_CC);
16032
16033 /* These don't make sense. */
16034 gcc_assert ((code != GT && code != LT && code != GE && code != LE)
16035 || mode != CCUNSmode);
16036
16037 gcc_assert ((code != GTU && code != LTU && code != GEU && code != LEU)
16038 || mode == CCUNSmode);
16039
16040 gcc_assert (mode == CCFPmode
16041 || (code != ORDERED && code != UNORDERED
16042 && code != UNEQ && code != LTGT
16043 && code != UNGT && code != UNLT
16044 && code != UNGE && code != UNLE));
16045
16046 /* These should never be generated except for
16047 flag_finite_math_only. */
16048 gcc_assert (mode != CCFPmode
16049 || flag_finite_math_only
16050 || (code != LE && code != GE
16051 && code != UNEQ && code != LTGT
16052 && code != UNGT && code != UNLT));
16053
16054 /* These are invalid; the information is not there. */
16055 gcc_assert (mode != CCEQmode || code == EQ || code == NE);
16056 }
16057
16058 \f
16059 /* Return 1 if ANDOP is a mask that has no bits set that are not in the
16060 mask required to convert the result of a rotate insn into a shift
16061 left insn of SHIFTOP bits. Both are known to be SImode CONST_INT. */
16062
16063 int
16064 includes_lshift_p (rtx shiftop, rtx andop)
16065 {
16066 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
16067
16068 shift_mask <<= INTVAL (shiftop);
16069
16070 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
16071 }
16072
16073 /* Similar, but for right shift. */
16074
16075 int
16076 includes_rshift_p (rtx shiftop, rtx andop)
16077 {
16078 unsigned HOST_WIDE_INT shift_mask = ~(unsigned HOST_WIDE_INT) 0;
16079
16080 shift_mask >>= INTVAL (shiftop);
16081
16082 return (INTVAL (andop) & 0xffffffff & ~shift_mask) == 0;
16083 }
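/* Worked example (a sketch): with SHIFTOP = 4, the bits a left shift
   can produce are ~0 << 4 = 0xfffffff0 in SImode, so

       includes_lshift_p (GEN_INT (4), GEN_INT (0xffffff00))   returns 1
       includes_lshift_p (GEN_INT (4), GEN_INT (0x000000ff))   returns 0

   because 0xff keeps bits 0-3, which the shift always clears; the
   rshift variant is the mirror image with the mask shifted right.  */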
16084
16085 /* Return 1 if ANDOP is a mask suitable for use with an rldic insn
16086 to perform a left shift. It must have exactly SHIFTOP least
16087 significant 0's, then one or more 1's, then zero or more 0's. */
16088
16089 int
16090 includes_rldic_lshift_p (rtx shiftop, rtx andop)
16091 {
16092 if (GET_CODE (andop) == CONST_INT)
16093 {
16094 HOST_WIDE_INT c, lsb, shift_mask;
16095
16096 c = INTVAL (andop);
16097 if (c == 0 || c == ~0)
16098 return 0;
16099
16100 shift_mask = ~0;
16101 shift_mask <<= INTVAL (shiftop);
16102
16103 /* Find the least significant one bit. */
16104 lsb = c & -c;
16105
16106 /* It must coincide with the LSB of the shift mask. */
16107 if (-lsb != shift_mask)
16108 return 0;
16109
16110 /* Invert to look for the next transition (if any). */
16111 c = ~c;
16112
16113 /* Remove the low group of ones (originally low group of zeros). */
16114 c &= -lsb;
16115
16116 /* Again find the lsb, and check we have all 1's above. */
16117 lsb = c & -c;
16118 return c == -lsb;
16119 }
16120 else
16121 return 0;
16122 }
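/* Worked example (a sketch) of the rldic test, with SHIFTOP = 4 and
   ANDOP = 0xff0:

       lsb = c & -c  = 0x10,   and -lsb == ~0 << 4 == shift_mask;
       c = ~c & -lsb = ...fffff000 after dropping the low run of 1's;
       lsb = c & -c  = 0x1000, and c == -lsb;

   so the mask is exactly four 0's, then 1's, then 0's, and the
   function returns 1.  A mask such as 0xff1 fails the first check
   because its lsb does not coincide with the lsb of the shift mask.  */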
16123
16124 /* Return 1 if ANDOP is a mask suitable for use with an rldicr insn
16125 to perform a left shift. It must have SHIFTOP or more least
16126 significant 0's, with the remainder of the word 1's. */
16127
16128 int
16129 includes_rldicr_lshift_p (rtx shiftop, rtx andop)
16130 {
16131 if (GET_CODE (andop) == CONST_INT)
16132 {
16133 HOST_WIDE_INT c, lsb, shift_mask;
16134
16135 shift_mask = ~0;
16136 shift_mask <<= INTVAL (shiftop);
16137 c = INTVAL (andop);
16138
16139 /* Find the least significant one bit. */
16140 lsb = c & -c;
16141
16142 /* It must be covered by the shift mask.
16143 This test also rejects c == 0. */
16144 if ((lsb & shift_mask) == 0)
16145 return 0;
16146
16147 /* Check we have all 1's above the transition, and reject all 1's. */
16148 return c == -lsb && lsb != 1;
16149 }
16150 else
16151 return 0;
16152 }
16153
16154 /* Return 1 if the operands will generate valid arguments to the rlwimi
16155 instruction for an insert with right shift in 64-bit mode. The mask may
16156 not start on the first bit or stop on the last bit because the
16157 wrap-around effects of the instruction do not match the RTL insn semantics. */
16158
16159 int
16160 insvdi_rshift_rlwimi_p (rtx sizeop, rtx startop, rtx shiftop)
16161 {
16162 if (INTVAL (startop) > 32
16163 && INTVAL (startop) < 64
16164 && INTVAL (sizeop) > 1
16165 && INTVAL (sizeop) + INTVAL (startop) < 64
16166 && INTVAL (shiftop) > 0
16167 && INTVAL (sizeop) + INTVAL (shiftop) < 32
16168 && (64 - (INTVAL (shiftop) & 63)) >= INTVAL (sizeop))
16169 return 1;
16170
16171 return 0;
16172 }
16173
16174 /* Return 1 if REGNO (reg1) == REGNO (reg2) - 1 making them candidates
16175 for lfq and stfq insns iff the registers are hard registers. */
16176
16177 int
16178 registers_ok_for_quad_peep (rtx reg1, rtx reg2)
16179 {
16180 /* We might have been passed a SUBREG. */
16181 if (GET_CODE (reg1) != REG || GET_CODE (reg2) != REG)
16182 return 0;
16183
16184 /* We might have been passed non-floating-point registers. */
16185 if (!FP_REGNO_P (REGNO (reg1))
16186 || !FP_REGNO_P (REGNO (reg2)))
16187 return 0;
16188
16189 return (REGNO (reg1) == REGNO (reg2) - 1);
16190 }
16191
16192 /* Return 1 if addr1 and addr2 are suitable for lfq or stfq insn.
16193 addr1 and addr2 must be in consecutive memory locations
16194 (addr2 == addr1 + 8). */
16195
16196 int
16197 mems_ok_for_quad_peep (rtx mem1, rtx mem2)
16198 {
16199 rtx addr1, addr2;
16200 unsigned int reg1, reg2;
16201 int offset1, offset2;
16202
16203 /* The mems cannot be volatile. */
16204 if (MEM_VOLATILE_P (mem1) || MEM_VOLATILE_P (mem2))
16205 return 0;
16206
16207 addr1 = XEXP (mem1, 0);
16208 addr2 = XEXP (mem2, 0);
16209
16210 /* Extract an offset (if used) from the first addr. */
16211 if (GET_CODE (addr1) == PLUS)
16212 {
16213 /* If not a REG, return zero. */
16214 if (GET_CODE (XEXP (addr1, 0)) != REG)
16215 return 0;
16216 else
16217 {
16218 reg1 = REGNO (XEXP (addr1, 0));
16219 /* The offset must be constant! */
16220 if (GET_CODE (XEXP (addr1, 1)) != CONST_INT)
16221 return 0;
16222 offset1 = INTVAL (XEXP (addr1, 1));
16223 }
16224 }
16225 else if (GET_CODE (addr1) != REG)
16226 return 0;
16227 else
16228 {
16229 reg1 = REGNO (addr1);
16230 /* This was a simple (mem (reg)) expression. Offset is 0. */
16231 offset1 = 0;
16232 }
16233
16234 /* And now for the second addr. */
16235 if (GET_CODE (addr2) == PLUS)
16236 {
16237 /* If not a REG, return zero. */
16238 if (GET_CODE (XEXP (addr2, 0)) != REG)
16239 return 0;
16240 else
16241 {
16242 reg2 = REGNO (XEXP (addr2, 0));
16243 /* The offset must be constant. */
16244 if (GET_CODE (XEXP (addr2, 1)) != CONST_INT)
16245 return 0;
16246 offset2 = INTVAL (XEXP (addr2, 1));
16247 }
16248 }
16249 else if (GET_CODE (addr2) != REG)
16250 return 0;
16251 else
16252 {
16253 reg2 = REGNO (addr2);
16254 /* This was a simple (mem (reg)) expression. Offset is 0. */
16255 offset2 = 0;
16256 }
16257
16258 /* Both of these must have the same base register. */
16259 if (reg1 != reg2)
16260 return 0;
16261
16262 /* The offset for the second addr must be 8 more than the first addr. */
16263 if (offset2 != offset1 + 8)
16264 return 0;
16265
16266 /* All the tests passed. addr1 and addr2 are valid for lfq or stfq
16267 instructions. */
16268 return 1;
16269 }
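/* Illustrative pair (a sketch): the quad peepholes combine

       lfd 6,16(3)
       lfd 7,24(3)

   into a single lfq, since f6/f7 are consecutive FP registers, both
   addresses use base r3, and offset2 == offset1 + 8.  A pair such as
   16(r3)/28(r3), or one using different base registers, is rejected.  */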
16270 \f
16271
16272 rtx
16273 rs6000_secondary_memory_needed_rtx (enum machine_mode mode)
16274 {
16275 static bool eliminated = false;
16276 rtx ret;
16277
16278 if (mode != SDmode || TARGET_NO_SDMODE_STACK)
16279 ret = assign_stack_local (mode, GET_MODE_SIZE (mode), 0);
16280 else
16281 {
16282 rtx mem = cfun->machine->sdmode_stack_slot;
16283 gcc_assert (mem != NULL_RTX);
16284
16285 if (!eliminated)
16286 {
16287 mem = eliminate_regs (mem, VOIDmode, NULL_RTX);
16288 cfun->machine->sdmode_stack_slot = mem;
16289 eliminated = true;
16290 }
16291 ret = mem;
16292 }
16293
16294 if (TARGET_DEBUG_ADDR)
16295 {
16296 fprintf (stderr, "\nrs6000_secondary_memory_needed_rtx, mode %s, rtx:\n",
16297 GET_MODE_NAME (mode));
16298 if (!ret)
16299 fprintf (stderr, "\tNULL_RTX\n");
16300 else
16301 debug_rtx (ret);
16302 }
16303
16304 return ret;
16305 }
16306
16307 /* Return the mode to be used for memory when a secondary memory
16308 location is needed. For SDmode values we need to use DDmode, in
16309 all other cases we can use the same mode. */
16310 enum machine_mode
16311 rs6000_secondary_memory_needed_mode (enum machine_mode mode)
16312 {
16313 if (lra_in_progress && mode == SDmode)
16314 return DDmode;
16315 return mode;
16316 }
16317
16318 static tree
16319 rs6000_check_sdmode (tree *tp, int *walk_subtrees, void *data ATTRIBUTE_UNUSED)
16320 {
16321 /* Don't walk into types. */
16322 if (*tp == NULL_TREE || *tp == error_mark_node || TYPE_P (*tp))
16323 {
16324 *walk_subtrees = 0;
16325 return NULL_TREE;
16326 }
16327
16328 switch (TREE_CODE (*tp))
16329 {
16330 case VAR_DECL:
16331 case PARM_DECL:
16332 case FIELD_DECL:
16333 case RESULT_DECL:
16334 case SSA_NAME:
16335 case REAL_CST:
16336 case MEM_REF:
16337 case VIEW_CONVERT_EXPR:
16338 if (TYPE_MODE (TREE_TYPE (*tp)) == SDmode)
16339 return *tp;
16340 break;
16341 default:
16342 break;
16343 }
16344
16345 return NULL_TREE;
16346 }
16347
16348 /* Classify a register type. Because the FMRGOW/FMRGEW instructions only work
16349 on traditional floating point registers, and the VMRGOW/VMRGEW instructions
16350 only work on the traditional altivec registers, note if an altivec register
16351 was chosen. */
16352
16353 static enum rs6000_reg_type
16354 register_to_reg_type (rtx reg, bool *is_altivec)
16355 {
16356 HOST_WIDE_INT regno;
16357 enum reg_class rclass;
16358
16359 if (GET_CODE (reg) == SUBREG)
16360 reg = SUBREG_REG (reg);
16361
16362 if (!REG_P (reg))
16363 return NO_REG_TYPE;
16364
16365 regno = REGNO (reg);
16366 if (regno >= FIRST_PSEUDO_REGISTER)
16367 {
16368 if (!lra_in_progress && !reload_in_progress && !reload_completed)
16369 return PSEUDO_REG_TYPE;
16370
16371 regno = true_regnum (reg);
16372 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
16373 return PSEUDO_REG_TYPE;
16374 }
16375
16376 gcc_assert (regno >= 0);
16377
16378 if (is_altivec && ALTIVEC_REGNO_P (regno))
16379 *is_altivec = true;
16380
16381 rclass = rs6000_regno_regclass[regno];
16382 return reg_class_to_reg_type[(int)rclass];
16383 }
16384
16385 /* Helper function for rs6000_secondary_reload to return true if a move to a
16386 different register class is really a simple move. */
16387
16388 static bool
16389 rs6000_secondary_reload_simple_move (enum rs6000_reg_type to_type,
16390 enum rs6000_reg_type from_type,
16391 enum machine_mode mode)
16392 {
16393 int size;
16394
16395 /* Add support for the various direct moves available. In this function, we
16396 only look at cases where we don't need any extra registers, and one or more
16397 simple move insns are issued. At present, 32-bit integers are not allowed
16398 in FPR/VSX registers. A single precision binary floating point value is not
16399 a simple move because we need to convert to the single precision memory
16400 layout. The 4-byte SDmode can be moved. */
16401 size = GET_MODE_SIZE (mode);
16402 if (TARGET_DIRECT_MOVE
16403 && ((mode == SDmode) || (TARGET_POWERPC64 && size == 8))
16404 && ((to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
16405 || (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)))
16406 return true;
16407
16408 else if (TARGET_MFPGPR && TARGET_POWERPC64 && size == 8
16409 && ((to_type == GPR_REG_TYPE && from_type == FPR_REG_TYPE)
16410 || (to_type == FPR_REG_TYPE && from_type == GPR_REG_TYPE)))
16411 return true;
16412
16413 else if ((size == 4 || (TARGET_POWERPC64 && size == 8))
16414 && ((to_type == GPR_REG_TYPE && from_type == SPR_REG_TYPE)
16415 || (to_type == SPR_REG_TYPE && from_type == GPR_REG_TYPE)))
16416 return true;
16417
16418 return false;
16419 }
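/* Illustrative case (a sketch, assuming a 64-bit power8 target with
   TARGET_DIRECT_MOVE): moving a DImode value from a GPR to a VSX
   register satisfies the first test above (size == 8), so no scratch
   register is needed and the move is a single instruction such as

       mtvsrd vs0,r9

   whereas a 16-byte move is not "simple" and falls to
   rs6000_secondary_reload_direct_move below, which allocates an extra
   register.  */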
16420
16421 /* Power8 helper function for rs6000_secondary_reload; handle all of the
16422 special direct moves that involve allocating an extra register. Return
16423 true if such a move was found (filling in SRI with the insn code and
16424 extra cost of the helper), and false otherwise. */
16425
16426 static bool
16427 rs6000_secondary_reload_direct_move (enum rs6000_reg_type to_type,
16428 enum rs6000_reg_type from_type,
16429 enum machine_mode mode,
16430 secondary_reload_info *sri,
16431 bool altivec_p)
16432 {
16433 bool ret = false;
16434 enum insn_code icode = CODE_FOR_nothing;
16435 int cost = 0;
16436 int size = GET_MODE_SIZE (mode);
16437
16438 if (TARGET_POWERPC64)
16439 {
16440 if (size == 16)
16441 {
16442 /* Handle moving 128-bit values from GPRs to VSX registers on
16443 power8 when running in 64-bit mode using XXPERMDI to glue the two
16444 64-bit values back together. */
16445 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
16446 {
16447 cost = 3; /* 2 mtvsrd's, 1 xxpermdi. */
16448 icode = reg_addr[mode].reload_vsx_gpr;
16449 }
16450
16451 /* Handle moving 128-bit values from VSX registers to GPRs on
16452 power8 when running in 64-bit mode using XXPERMDI to get access to the
16453 bottom 64-bit value. */
16454 else if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
16455 {
16456 cost = 3; /* 2 mfvsrd's, 1 xxpermdi. */
16457 icode = reg_addr[mode].reload_gpr_vsx;
16458 }
16459 }
16460
16461 else if (mode == SFmode)
16462 {
16463 if (to_type == GPR_REG_TYPE && from_type == VSX_REG_TYPE)
16464 {
16465 cost = 3; /* xscvdpspn, mfvsrd, and. */
16466 icode = reg_addr[mode].reload_gpr_vsx;
16467 }
16468
16469 else if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE)
16470 {
16471 cost = 2; /* mtvsrz, xscvspdpn. */
16472 icode = reg_addr[mode].reload_vsx_gpr;
16473 }
16474 }
16475 }
16476
16498 else if (!TARGET_POWERPC64 && size == 8)
16499 {
16500 /* Handle moving 64-bit values from GPRs to floating point registers on
16501 power8 when running in 32-bit mode using FMRGOW to glue the two 32-bit
16502 values back together. Altivec register classes must be handled
16503 specially since a different instruction is used, and the secondary
16504 reload support requires a single instruction class in the scratch
16505 register constraint. However, right now TFmode is not allowed in
16506 Altivec registers, so the pattern will never match. */
16507 if (to_type == VSX_REG_TYPE && from_type == GPR_REG_TYPE && !altivec_p)
16508 {
16509 cost = 3; /* 2 mtvsrwz's, 1 fmrgow. */
16510 icode = reg_addr[mode].reload_fpr_gpr;
16511 }
16512 }
16513
16514 if (icode != CODE_FOR_nothing)
16515 {
16516 ret = true;
16517 if (sri)
16518 {
16519 sri->icode = icode;
16520 sri->extra_cost = cost;
16521 }
16522 }
16523
16524 return ret;
16525 }
16526
16527 /* Return whether a move between two register classes can be done either
16528 directly (simple move) or via a pattern that uses a single extra temporary
16529 (using power8's direct move in this case). */
16530
16531 static bool
16532 rs6000_secondary_reload_move (enum rs6000_reg_type to_type,
16533 enum rs6000_reg_type from_type,
16534 enum machine_mode mode,
16535 secondary_reload_info *sri,
16536 bool altivec_p)
16537 {
16538 /* Fall back to load/store reloads if either type is not a register. */
16539 if (to_type == NO_REG_TYPE || from_type == NO_REG_TYPE)
16540 return false;
16541
16542 /* If we haven't allocated registers yet, assume the move can be done for the
16543 standard register types. */
16544 if ((to_type == PSEUDO_REG_TYPE && from_type == PSEUDO_REG_TYPE)
16545 || (to_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (from_type))
16546 || (from_type == PSEUDO_REG_TYPE && IS_STD_REG_TYPE (to_type)))
16547 return true;
16548
16549 /* A move within the same set of registers is a simple move for
16550 non-specialized registers. */
16551 if (to_type == from_type && IS_STD_REG_TYPE (to_type))
16552 return true;
16553
16554 /* Check whether a simple move can be done directly. */
16555 if (rs6000_secondary_reload_simple_move (to_type, from_type, mode))
16556 {
16557 if (sri)
16558 {
16559 sri->icode = CODE_FOR_nothing;
16560 sri->extra_cost = 0;
16561 }
16562 return true;
16563 }
16564
16565 /* Now check if we can do it in a few steps. */
16566 return rs6000_secondary_reload_direct_move (to_type, from_type, mode, sri,
16567 altivec_p);
16568 }
16569
16570 /* Inform reload about cases where moving X with a mode MODE to a register in
16571 RCLASS requires an extra scratch or immediate register. Return the class
16572 needed for the immediate register.
16573
16574 For VSX and Altivec, we may need a register to convert sp+offset into
16575 reg+sp.
16576
16577 For misaligned 64-bit gpr loads and stores we need a register to
16578 convert an offset address to indirect. */
16579
16580 static reg_class_t
16581 rs6000_secondary_reload (bool in_p,
16582 rtx x,
16583 reg_class_t rclass_i,
16584 enum machine_mode mode,
16585 secondary_reload_info *sri)
16586 {
16587 enum reg_class rclass = (enum reg_class) rclass_i;
16588 reg_class_t ret = ALL_REGS;
16589 enum insn_code icode;
16590 bool default_p = false;
16591
16592 sri->icode = CODE_FOR_nothing;
16593 icode = ((in_p)
16594 ? reg_addr[mode].reload_load
16595 : reg_addr[mode].reload_store);
16596
16597 if (REG_P (x) || register_operand (x, mode))
16598 {
16599 enum rs6000_reg_type to_type = reg_class_to_reg_type[(int)rclass];
16600 bool altivec_p = (rclass == ALTIVEC_REGS);
16601 enum rs6000_reg_type from_type = register_to_reg_type (x, &altivec_p);
16602
16603 if (!in_p)
16604 {
16605 enum rs6000_reg_type exchange = to_type;
16606 to_type = from_type;
16607 from_type = exchange;
16608 }
16609
16610 /* Can we do a direct move of some sort? */
16611 if (rs6000_secondary_reload_move (to_type, from_type, mode, sri,
16612 altivec_p))
16613 {
16614 icode = (enum insn_code)sri->icode;
16615 default_p = false;
16616 ret = NO_REGS;
16617 }
16618 }
16619
16620 /* Handle vector moves with reload helper functions. */
16621 if (ret == ALL_REGS && icode != CODE_FOR_nothing)
16622 {
16623 ret = NO_REGS;
16624 sri->icode = CODE_FOR_nothing;
16625 sri->extra_cost = 0;
16626
16627 if (GET_CODE (x) == MEM)
16628 {
16629 rtx addr = XEXP (x, 0);
16630
16631 /* Loads to and stores from gprs can do reg+offset, and wouldn't need
16632 an extra register in that case, but it would need an extra
16633 register if the addressing is reg+reg or (reg+reg)&(-16). Special
16634 case load/store quad. */
16635 if (rclass == GENERAL_REGS || rclass == BASE_REGS)
16636 {
16637 if (TARGET_POWERPC64 && TARGET_QUAD_MEMORY
16638 && GET_MODE_SIZE (mode) == 16
16639 && quad_memory_operand (x, mode))
16640 {
16641 sri->icode = icode;
16642 sri->extra_cost = 2;
16643 }
16644
16645 else if (!legitimate_indirect_address_p (addr, false)
16646 && !rs6000_legitimate_offset_address_p (PTImode, addr,
16647 false, true))
16648 {
16649 sri->icode = icode;
16650 /* account for splitting the loads, and converting the
16651 address from reg+reg to reg. */
16652 sri->extra_cost = (((TARGET_64BIT) ? 3 : 5)
16653 + ((GET_CODE (addr) == AND) ? 1 : 0));
16654 }
16655 }
16656 /* Allow scalar loads to/from the traditional floating point
16657 registers, even if VSX memory is set. */
16658 else if ((rclass == FLOAT_REGS || rclass == NO_REGS)
16659 && (GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
16660 && (legitimate_indirect_address_p (addr, false)
16662 || rs6000_legitimate_offset_address_p (mode, addr,
16663 false, true)))
16664
16665 ;
16666 /* Loads to and stores from vector registers can only do reg+reg
16667 addressing. Altivec registers can also do (reg+reg)&(-16). Allow
16668 scalar modes loading up the traditional floating point registers
16669 to use offset addresses. */
16670 else if (rclass == VSX_REGS || rclass == ALTIVEC_REGS
16671 || rclass == FLOAT_REGS || rclass == NO_REGS)
16672 {
16673 if (!VECTOR_MEM_ALTIVEC_P (mode)
16674 && GET_CODE (addr) == AND
16675 && GET_CODE (XEXP (addr, 1)) == CONST_INT
16676 && INTVAL (XEXP (addr, 1)) == -16
16677 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
16678 || legitimate_indexed_address_p (XEXP (addr, 0), false)))
16679 {
16680 sri->icode = icode;
16681 sri->extra_cost = ((GET_CODE (XEXP (addr, 0)) == PLUS)
16682 ? 2 : 1);
16683 }
16684 else if (!legitimate_indirect_address_p (addr, false)
16685 && (rclass == NO_REGS
16686 || !legitimate_indexed_address_p (addr, false)))
16687 {
16688 sri->icode = icode;
16689 sri->extra_cost = 1;
16690 }
16691 else
16692 icode = CODE_FOR_nothing;
16693 }
16694 /* Any other loads, including to pseudo registers which haven't been
16695 assigned to a register yet, default to require a scratch
16696 register. */
16697 else
16698 {
16699 sri->icode = icode;
16700 sri->extra_cost = 2;
16701 }
16702 }
16703 else if (REG_P (x))
16704 {
16705 int regno = true_regnum (x);
16706
16707 icode = CODE_FOR_nothing;
16708 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
16709 default_p = true;
16710 else
16711 {
16712 enum reg_class xclass = REGNO_REG_CLASS (regno);
16713 enum rs6000_reg_type rtype1 = reg_class_to_reg_type[(int)rclass];
16714 enum rs6000_reg_type rtype2 = reg_class_to_reg_type[(int)xclass];
16715
16716 /* If memory is needed, use default_secondary_reload to create the
16717 stack slot. */
16718 if (rtype1 != rtype2 || !IS_STD_REG_TYPE (rtype1))
16719 default_p = true;
16720 else
16721 ret = NO_REGS;
16722 }
16723 }
16724 else
16725 default_p = true;
16726 }
16727 else if (TARGET_POWERPC64
16728 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
16729 && MEM_P (x)
16730 && GET_MODE_SIZE (GET_MODE (x)) >= UNITS_PER_WORD)
16731 {
16732 rtx addr = XEXP (x, 0);
16733 rtx off = address_offset (addr);
16734
16735 if (off != NULL_RTX)
16736 {
16737 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
16738 unsigned HOST_WIDE_INT offset = INTVAL (off);
16739
16740 /* We need a secondary reload when our legitimate_address_p
16741 says the address is good (as otherwise the entire address
16742 will be reloaded), and the offset is not a multiple of
16743 four or we have an address wrap. Address wrap will only
16744 occur for LO_SUMs since legitimate_offset_address_p
16745 rejects addresses for 16-byte mems that will wrap. */
16746 if (GET_CODE (addr) == LO_SUM
16747 ? (1 /* legitimate_address_p allows any offset for lo_sum */
16748 && ((offset & 3) != 0
16749 || ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra))
16750 : (offset + 0x8000 < 0x10000 - extra /* legitimate_address_p */
16751 && (offset & 3) != 0))
16752 {
16753 if (in_p)
16754 sri->icode = CODE_FOR_reload_di_load;
16755 else
16756 sri->icode = CODE_FOR_reload_di_store;
16757 sri->extra_cost = 2;
16758 ret = NO_REGS;
16759 }
16760 else
16761 default_p = true;
16762 }
16763 else
16764 default_p = true;
16765 }
16766 else if (!TARGET_POWERPC64
16767 && reg_class_to_reg_type[(int)rclass] == GPR_REG_TYPE
16768 && MEM_P (x)
16769 && GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD)
16770 {
16771 rtx addr = XEXP (x, 0);
16772 rtx off = address_offset (addr);
16773
16774 if (off != NULL_RTX)
16775 {
16776 unsigned int extra = GET_MODE_SIZE (GET_MODE (x)) - UNITS_PER_WORD;
16777 unsigned HOST_WIDE_INT offset = INTVAL (off);
16778
16779 /* We need a secondary reload when our legitimate_address_p
16780 says the address is good (as otherwise the entire address
16781 will be reloaded), and we have a wrap.
16782
16783 legitimate_lo_sum_address_p allows LO_SUM addresses to
16784 have any offset so test for wrap in the low 16 bits.
16785
16786 legitimate_offset_address_p checks for the range
16787 [-0x8000,0x7fff] for mode size of 8 and [-0x8000,0x7ff7]
16788 for mode size of 16. We wrap at [0x7ffc,0x7fff] and
16789 [0x7ff4,0x7fff] respectively, so test for the
16790 intersection of these ranges, [0x7ffc,0x7fff] and
16791 [0x7ff4,0x7ff7] respectively.
16792
16793 Note that the address we see here may have been
16794 manipulated by legitimize_reload_address. */
16795 if (GET_CODE (addr) == LO_SUM
16796 ? ((offset & 0xffff) ^ 0x8000) >= 0x10000 - extra
16797 : offset - (0x8000 - extra) < UNITS_PER_WORD)
16798 {
16799 if (in_p)
16800 sri->icode = CODE_FOR_reload_si_load;
16801 else
16802 sri->icode = CODE_FOR_reload_si_store;
16803 sri->extra_cost = 2;
16804 ret = NO_REGS;
16805 }
16806 else
16807 default_p = true;
16808 }
16809 else
16810 default_p = true;
16811 }
16812 else
16813 default_p = true;
16814
16815 if (default_p)
16816 ret = default_secondary_reload (in_p, x, rclass, mode, sri);
16817
16818 gcc_assert (ret != ALL_REGS);
16819
16820 if (TARGET_DEBUG_ADDR)
16821 {
16822 fprintf (stderr,
16823 "\nrs6000_secondary_reload, return %s, in_p = %s, rclass = %s, "
16824 "mode = %s",
16825 reg_class_names[ret],
16826 in_p ? "true" : "false",
16827 reg_class_names[rclass],
16828 GET_MODE_NAME (mode));
16829
16830 if (default_p)
16831 fprintf (stderr, ", default secondary reload");
16832
16833 if (sri->icode != CODE_FOR_nothing)
16834 fprintf (stderr, ", reload func = %s, extra cost = %d\n",
16835 insn_data[sri->icode].name, sri->extra_cost);
16836 else
16837 fprintf (stderr, "\n");
16838
16839 debug_rtx (x);
16840 }
16841
16842 return ret;
16843 }
16844
16845 /* Better tracing for rs6000_secondary_reload_inner. */
16846
16847 static void
16848 rs6000_secondary_reload_trace (int line, rtx reg, rtx mem, rtx scratch,
16849 bool store_p)
16850 {
16851 rtx set, clobber;
16852
16853 gcc_assert (reg != NULL_RTX && mem != NULL_RTX && scratch != NULL_RTX);
16854
16855 fprintf (stderr, "rs6000_secondary_reload_inner:%d, type = %s\n", line,
16856 store_p ? "store" : "load");
16857
16858 if (store_p)
16859 set = gen_rtx_SET (VOIDmode, mem, reg);
16860 else
16861 set = gen_rtx_SET (VOIDmode, reg, mem);
16862
16863 clobber = gen_rtx_CLOBBER (VOIDmode, scratch);
16864 debug_rtx (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, set, clobber)));
16865 }
16866
16867 static void
16868 rs6000_secondary_reload_fail (int line, rtx reg, rtx mem, rtx scratch,
16869 bool store_p)
16870 {
16871 rs6000_secondary_reload_trace (line, reg, mem, scratch, store_p);
16872 gcc_unreachable ();
16873 }
16874
16875 /* Fixup reload addresses for Altivec or VSX loads/stores to change SP+offset
16876 to SP+reg addressing. */
16877
16878 void
16879 rs6000_secondary_reload_inner (rtx reg, rtx mem, rtx scratch, bool store_p)
16880 {
16881 int regno = true_regnum (reg);
16882 enum machine_mode mode = GET_MODE (reg);
16883 enum reg_class rclass;
16884 rtx addr;
16885 rtx and_op2 = NULL_RTX;
16886 rtx addr_op1;
16887 rtx addr_op2;
16888 rtx scratch_or_premodify = scratch;
16889 rtx and_rtx;
16890 rtx cc_clobber;
16891
16892 if (TARGET_DEBUG_ADDR)
16893 rs6000_secondary_reload_trace (__LINE__, reg, mem, scratch, store_p);
16894
16895 if (regno < 0 || regno >= FIRST_PSEUDO_REGISTER)
16896 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16897
16898 if (GET_CODE (mem) != MEM)
16899 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16900
16901 rclass = REGNO_REG_CLASS (regno);
16902 addr = find_replacement (&XEXP (mem, 0));
16903
16904 switch (rclass)
16905 {
16906 /* GPRs can handle reg + small constant, all other addresses need to use
16907 the scratch register. */
16908 case GENERAL_REGS:
16909 case BASE_REGS:
16910 if (GET_CODE (addr) == AND)
16911 {
16912 and_op2 = XEXP (addr, 1);
16913 addr = find_replacement (&XEXP (addr, 0));
16914 }
16915
16916 if (GET_CODE (addr) == PRE_MODIFY)
16917 {
16918 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
16919 if (!REG_P (scratch_or_premodify))
16920 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16921
16922 addr = find_replacement (&XEXP (addr, 1));
16923 if (GET_CODE (addr) != PLUS)
16924 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16925 }
16926
16927 if (GET_CODE (addr) == PLUS
16928 && (and_op2 != NULL_RTX
16929 || !rs6000_legitimate_offset_address_p (PTImode, addr,
16930 false, true)))
16931 {
16932 /* find_replacement already recurses into both operands of
16933 PLUS so we don't need to call it here. */
16934 addr_op1 = XEXP (addr, 0);
16935 addr_op2 = XEXP (addr, 1);
16936 if (!legitimate_indirect_address_p (addr_op1, false))
16937 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
16938
16939 if (!REG_P (addr_op2)
16940 && (GET_CODE (addr_op2) != CONST_INT
16941 || !satisfies_constraint_I (addr_op2)))
16942 {
16943 if (TARGET_DEBUG_ADDR)
16944 {
16945 fprintf (stderr,
16946 "\nMove plus addr to register %s, mode = %s: ",
16947 rs6000_reg_names[REGNO (scratch)],
16948 GET_MODE_NAME (mode));
16949 debug_rtx (addr_op2);
16950 }
16951 rs6000_emit_move (scratch, addr_op2, Pmode);
16952 addr_op2 = scratch;
16953 }
16954
16955 emit_insn (gen_rtx_SET (VOIDmode,
16956 scratch_or_premodify,
16957 gen_rtx_PLUS (Pmode,
16958 addr_op1,
16959 addr_op2)));
16960
16961 addr = scratch_or_premodify;
16962 scratch_or_premodify = scratch;
16963 }
16964 else if (!legitimate_indirect_address_p (addr, false)
16965 && !rs6000_legitimate_offset_address_p (PTImode, addr,
16966 false, true))
16967 {
16968 if (TARGET_DEBUG_ADDR)
16969 {
16970 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
16971 rs6000_reg_names[REGNO (scratch_or_premodify)],
16972 GET_MODE_NAME (mode));
16973 debug_rtx (addr);
16974 }
16975 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
16976 addr = scratch_or_premodify;
16977 scratch_or_premodify = scratch;
16978 }
16979 break;
16980
16981 /* Float registers can do offset+reg addressing for scalar types. */
16982 case FLOAT_REGS:
16983 if (legitimate_indirect_address_p (addr, false) /* reg */
16984 || legitimate_indexed_address_p (addr, false) /* reg+reg */
16985 || ((GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8)
16986 && and_op2 == NULL_RTX
16987 && scratch_or_premodify == scratch
16988 && rs6000_legitimate_offset_address_p (mode, addr, false, false)))
16989 break;
16990
16991 /* If this isn't a legacy floating point load/store, fall through to the
16992 VSX defaults. */
16993
16994 /* VSX/Altivec registers can only handle reg+reg addressing. Move other
16995 addresses into a scratch register. */
16996 case VSX_REGS:
16997 case ALTIVEC_REGS:
16998
16999 /* With float regs, we need to handle the AND ourselves, since we can't
17000 use the Altivec instruction with an implicit AND -16. Allow scalar
17001 loads to float registers to use reg+offset even if VSX. */
17002 if (GET_CODE (addr) == AND
17003 && (rclass != ALTIVEC_REGS || GET_MODE_SIZE (mode) != 16
17004 || GET_CODE (XEXP (addr, 1)) != CONST_INT
17005 || INTVAL (XEXP (addr, 1)) != -16
17006 || !VECTOR_MEM_ALTIVEC_P (mode)))
17007 {
17008 and_op2 = XEXP (addr, 1);
17009 addr = find_replacement (&XEXP (addr, 0));
17010 }
17011
17012 /* If we aren't using a VSX load, save the PRE_MODIFY register and use it
17013 as the address later. */
17014 if (GET_CODE (addr) == PRE_MODIFY
17015 && ((ALTIVEC_OR_VSX_VECTOR_MODE (mode)
17016 && (rclass != FLOAT_REGS
17017 || (GET_MODE_SIZE (mode) != 4 && GET_MODE_SIZE (mode) != 8)))
17018 || and_op2 != NULL_RTX
17019 || !legitimate_indexed_address_p (XEXP (addr, 1), false)))
17020 {
17021 scratch_or_premodify = find_replacement (&XEXP (addr, 0));
17022 if (!legitimate_indirect_address_p (scratch_or_premodify, false))
17023 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17024
17025 addr = find_replacement (&XEXP (addr, 1));
17026 if (GET_CODE (addr) != PLUS)
17027 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17028 }
17029
17030 if (legitimate_indirect_address_p (addr, false) /* reg */
17031 || legitimate_indexed_address_p (addr, false) /* reg+reg */
17032 || (GET_CODE (addr) == AND /* Altivec memory */
17033 && rclass == ALTIVEC_REGS
17034 && GET_CODE (XEXP (addr, 1)) == CONST_INT
17035 && INTVAL (XEXP (addr, 1)) == -16
17036 && (legitimate_indirect_address_p (XEXP (addr, 0), false)
17037 || legitimate_indexed_address_p (XEXP (addr, 0), false))))
17038 ;
17039
17040 else if (GET_CODE (addr) == PLUS)
17041 {
17042 addr_op1 = XEXP (addr, 0);
17043 addr_op2 = XEXP (addr, 1);
17044 if (!REG_P (addr_op1))
17045 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17046
17047 if (TARGET_DEBUG_ADDR)
17048 {
17049 fprintf (stderr, "\nMove plus addr to register %s, mode = %s: ",
17050 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
17051 debug_rtx (addr_op2);
17052 }
17053 rs6000_emit_move (scratch, addr_op2, Pmode);
17054 emit_insn (gen_rtx_SET (VOIDmode,
17055 scratch_or_premodify,
17056 gen_rtx_PLUS (Pmode,
17057 addr_op1,
17058 scratch)));
17059 addr = scratch_or_premodify;
17060 scratch_or_premodify = scratch;
17061 }
17062
17063 else if (GET_CODE (addr) == SYMBOL_REF || GET_CODE (addr) == CONST
17064 || GET_CODE (addr) == CONST_INT || GET_CODE (addr) == LO_SUM
17065 || REG_P (addr))
17066 {
17067 if (TARGET_DEBUG_ADDR)
17068 {
17069 fprintf (stderr, "\nMove addr to register %s, mode = %s: ",
17070 rs6000_reg_names[REGNO (scratch_or_premodify)],
17071 GET_MODE_NAME (mode));
17072 debug_rtx (addr);
17073 }
17074
17075 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
17076 addr = scratch_or_premodify;
17077 scratch_or_premodify = scratch;
17078 }
17079
17080 else
17081 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17082
17083 break;
17084
17085 default:
17086 rs6000_secondary_reload_fail (__LINE__, reg, mem, scratch, store_p);
17087 }
17088
17089 /* If the original address involved a pre-modify that we couldn't use with the
17090 VSX memory instruction with update, and we haven't already taken care of
17091 it, store the address in the pre-modify register and use that as the
17092 address. */
17093 if (scratch_or_premodify != scratch && scratch_or_premodify != addr)
17094 {
17095 emit_insn (gen_rtx_SET (VOIDmode, scratch_or_premodify, addr));
17096 addr = scratch_or_premodify;
17097 }
17098
17099 /* If the original address involved an AND -16 and we couldn't use an Altivec
17100 memory instruction, recreate the AND now, including the clobber that is
17101 generated by the general ANDSI3/ANDDI3 patterns for the
17102 andi. instruction. */
17103 if (and_op2 != NULL_RTX)
17104 {
17105 if (! legitimate_indirect_address_p (addr, false))
17106 {
17107 emit_insn (gen_rtx_SET (VOIDmode, scratch, addr));
17108 addr = scratch;
17109 }
17110
17111 if (TARGET_DEBUG_ADDR)
17112 {
17113 fprintf (stderr, "\nAnd addr to register %s, mode = %s: ",
17114 rs6000_reg_names[REGNO (scratch)], GET_MODE_NAME (mode));
17115 debug_rtx (and_op2);
17116 }
17117
17118 and_rtx = gen_rtx_SET (VOIDmode,
17119 scratch,
17120 gen_rtx_AND (Pmode,
17121 addr,
17122 and_op2));
17123
17124 cc_clobber = gen_rtx_CLOBBER (CCmode, gen_rtx_SCRATCH (CCmode));
17125 emit_insn (gen_rtx_PARALLEL (VOIDmode,
17126 gen_rtvec (2, and_rtx, cc_clobber)));
17127 addr = scratch;
17128 }
17129
17130 /* Adjust the address if it changed. */
17131 if (addr != XEXP (mem, 0))
17132 {
17133 mem = replace_equiv_address_nv (mem, addr);
17134 if (TARGET_DEBUG_ADDR)
17135 fprintf (stderr, "\nrs6000_secondary_reload_inner, mem adjusted.\n");
17136 }
17137
17138 /* Now create the move. */
17139 if (store_p)
17140 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
17141 else
17142 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
17143
17144 return;
17145 }
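/* Illustrative sketch (not part of GCC): the kind of rewrite the function
above performs for a VSX load whose address is the stack pointer plus an
offset. The offset, function name, and prototype are made up for the
example; the calls mirror the PLUS branch of the VSX/Altivec case above. */
#if 0
static void
example_vsx_reload_fixup (rtx mem, rtx scratch)
{
/* Before: (mem:V2DF (plus (reg 1) (const_int 32752))) -- not valid for
VSX, which handles only reg and reg+reg addressing. */
rs6000_emit_move (scratch, GEN_INT (32752), Pmode);
emit_insn (gen_rtx_SET (VOIDmode, scratch,
gen_rtx_PLUS (Pmode, stack_pointer_rtx, scratch)));
mem = replace_equiv_address_nv (mem, scratch);
/* After: (mem:V2DF (reg scratch)) -- a plain indirect address that
lxvd2x can use. */
}
#endif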
17146
17147 /* Convert reloads involving 64-bit gprs and misaligned offset
17148 addressing, or multiple 32-bit gprs and offsets that are too large,
17149 to use indirect addressing. */
17150
17151 void
17152 rs6000_secondary_reload_gpr (rtx reg, rtx mem, rtx scratch, bool store_p)
17153 {
17154 int regno = true_regnum (reg);
17155 enum reg_class rclass;
17156 rtx addr;
17157 rtx scratch_or_premodify = scratch;
17158
17159 if (TARGET_DEBUG_ADDR)
17160 {
17161 fprintf (stderr, "\nrs6000_secondary_reload_gpr, type = %s\n",
17162 store_p ? "store" : "load");
17163 fprintf (stderr, "reg:\n");
17164 debug_rtx (reg);
17165 fprintf (stderr, "mem:\n");
17166 debug_rtx (mem);
17167 fprintf (stderr, "scratch:\n");
17168 debug_rtx (scratch);
17169 }
17170
17171 gcc_assert (regno >= 0 && regno < FIRST_PSEUDO_REGISTER);
17172 gcc_assert (GET_CODE (mem) == MEM);
17173 rclass = REGNO_REG_CLASS (regno);
17174 gcc_assert (rclass == GENERAL_REGS || rclass == BASE_REGS);
17175 addr = XEXP (mem, 0);
17176
17177 if (GET_CODE (addr) == PRE_MODIFY)
17178 {
17179 scratch_or_premodify = XEXP (addr, 0);
17180 gcc_assert (REG_P (scratch_or_premodify));
17181 addr = XEXP (addr, 1);
17182 }
17183 gcc_assert (GET_CODE (addr) == PLUS || GET_CODE (addr) == LO_SUM);
17184
17185 rs6000_emit_move (scratch_or_premodify, addr, Pmode);
17186
17187 mem = replace_equiv_address_nv (mem, scratch_or_premodify);
17188
17189 /* Now create the move. */
17190 if (store_p)
17191 emit_insn (gen_rtx_SET (VOIDmode, mem, reg));
17192 else
17193 emit_insn (gen_rtx_SET (VOIDmode, reg, mem));
17194
17195 return;
17196 }
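/* Illustrative sketch (not part of GCC): with a made-up offset, the
function above rewrites

(mem:DI (plus (reg 1) (const_int 32761))) ; misaligned for DS-form ld/std

by materializing the whole address in the scratch register,

rs6000_emit_move (scratch, addr, Pmode);
mem = replace_equiv_address_nv (mem, scratch);

leaving (mem:DI (reg scratch)), which a plain indirect load can handle. */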
17197
17198 /* Allocate a 64-bit stack slot to be used for copying SDmode values through if
17199 this function has any SDmode references. If we are on a power7 or later, we
17200 don't need the 64-bit stack slot since the LFIWZX and STFIWX instructions
17201 can load/store the value. */
17202
17203 static void
17204 rs6000_alloc_sdmode_stack_slot (void)
17205 {
17206 tree t;
17207 basic_block bb;
17208 gimple_stmt_iterator gsi;
17209
17210 gcc_assert (cfun->machine->sdmode_stack_slot == NULL_RTX);
17211 /* We use a different approach for dealing with the secondary
17212 memory in LRA. */
17213 if (ira_use_lra_p)
17214 return;
17215
17216 if (TARGET_NO_SDMODE_STACK)
17217 return;
17218
17219 FOR_EACH_BB_FN (bb, cfun)
17220 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
17221 {
17222 tree ret = walk_gimple_op (gsi_stmt (gsi), rs6000_check_sdmode, NULL);
17223 if (ret)
17224 {
17225 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
17226 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
17227 SDmode, 0);
17228 return;
17229 }
17230 }
17231
17232 /* Check for any SDmode parameters of the function. */
17233 for (t = DECL_ARGUMENTS (cfun->decl); t; t = DECL_CHAIN (t))
17234 {
17235 if (TREE_TYPE (t) == error_mark_node)
17236 continue;
17237
17238 if (TYPE_MODE (TREE_TYPE (t)) == SDmode
17239 || TYPE_MODE (DECL_ARG_TYPE (t)) == SDmode)
17240 {
17241 rtx stack = assign_stack_local (DDmode, GET_MODE_SIZE (DDmode), 0);
17242 cfun->machine->sdmode_stack_slot = adjust_address_nv (stack,
17243 SDmode, 0);
17244 return;
17245 }
17246 }
17247 }
17248
17249 static void
17250 rs6000_instantiate_decls (void)
17251 {
17252 if (cfun->machine->sdmode_stack_slot != NULL_RTX)
17253 instantiate_decl_rtl (cfun->machine->sdmode_stack_slot);
17254 }
17255
17256 /* Given an rtx X being reloaded into a reg required to be
17257 in class CLASS, return the class of reg to actually use.
17258 In general this is just CLASS; but on some machines
17259 in some cases it is preferable to use a more restrictive class.
17260
17261 On the RS/6000, we have to return NO_REGS when we want to reload a
17262 floating-point CONST_DOUBLE to force it to be copied to memory.
17263
17264 We also don't want to reload integer values into floating-point
17265 registers if we can at all help it. In fact, this can
17266 cause reload to die if it tries to generate a reload of CTR
17267 into a FP register and discovers it doesn't have the memory location
17268 required.
17269
17270 ??? Would it be a good idea to have reload do the converse, that is
17271 try to reload floating modes into FP registers if possible?
17272 */
17273
17274 static enum reg_class
17275 rs6000_preferred_reload_class (rtx x, enum reg_class rclass)
17276 {
17277 enum machine_mode mode = GET_MODE (x);
17278
17279 if (TARGET_VSX && x == CONST0_RTX (mode) && VSX_REG_CLASS_P (rclass))
17280 return rclass;
17281
17282 if (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
17283 && (rclass == ALTIVEC_REGS || rclass == VSX_REGS)
17284 && easy_vector_constant (x, mode))
17285 return ALTIVEC_REGS;
17286
17287 if ((CONSTANT_P (x) || GET_CODE (x) == PLUS))
17288 {
17289 if (reg_class_subset_p (GENERAL_REGS, rclass))
17290 return GENERAL_REGS;
17291 if (reg_class_subset_p (BASE_REGS, rclass))
17292 return BASE_REGS;
17293 return NO_REGS;
17294 }
17295
17296 if (GET_MODE_CLASS (mode) == MODE_INT && rclass == NON_SPECIAL_REGS)
17297 return GENERAL_REGS;
17298
17299 /* For VSX, prefer the traditional registers for 64-bit values because we can
17300 use the non-VSX loads. Prefer the Altivec registers if Altivec is
17301 handling the vector operations (i.e. V16QI, V8HI, and V4SI), or if we
17302 prefer Altivec loads. */
17303 if (rclass == VSX_REGS)
17304 {
17305 if (MEM_P (x) && reg_addr[mode].scalar_in_vmx_p)
17306 {
17307 rtx addr = XEXP (x, 0);
17308 if (rs6000_legitimate_offset_address_p (mode, addr, false, true)
17309 || legitimate_lo_sum_address_p (mode, addr, false))
17310 return FLOAT_REGS;
17311 }
17312 else if (GET_MODE_SIZE (mode) <= 8 && !reg_addr[mode].scalar_in_vmx_p)
17313 return FLOAT_REGS;
17314
17315 if (VECTOR_UNIT_ALTIVEC_P (mode) || VECTOR_MEM_ALTIVEC_P (mode)
17316 || mode == V1TImode)
17317 return ALTIVEC_REGS;
17318
17319 return rclass;
17320 }
17321
17322 return rclass;
17323 }
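/* Worked example (illustrative): with -mvsx, reloading the V4SImode zero
vector into VSX_REGS returns VSX_REGS unchanged (the first test above;
xxlxor can synthesize it), while an easy non-zero splat constant falls to
the second test and returns ALTIVEC_REGS, where vspltisw can build it. */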
17324
17325 /* Debug version of rs6000_preferred_reload_class. */
17326 static enum reg_class
17327 rs6000_debug_preferred_reload_class (rtx x, enum reg_class rclass)
17328 {
17329 enum reg_class ret = rs6000_preferred_reload_class (x, rclass);
17330
17331 fprintf (stderr,
17332 "\nrs6000_preferred_reload_class, return %s, rclass = %s, "
17333 "mode = %s, x:\n",
17334 reg_class_names[ret], reg_class_names[rclass],
17335 GET_MODE_NAME (GET_MODE (x)));
17336 debug_rtx (x);
17337
17338 return ret;
17339 }
17340
17341 /* If we are copying between FP or AltiVec registers and anything else, we need
17342 a memory location. The exception is when we are targeting ppc64 and the
17343 instructions that move directly between FPRs and GPRs are available. Also, under VSX, you
17344 can copy vector registers from the FP register set to the Altivec register
17345 set and vice versa. */
17346
17347 static bool
17348 rs6000_secondary_memory_needed (enum reg_class from_class,
17349 enum reg_class to_class,
17350 enum machine_mode mode)
17351 {
17352 enum rs6000_reg_type from_type, to_type;
17353 bool altivec_p = ((from_class == ALTIVEC_REGS)
17354 || (to_class == ALTIVEC_REGS));
17355
17356 /* If a simple/direct move is available, we don't need secondary memory. */
17357 from_type = reg_class_to_reg_type[(int)from_class];
17358 to_type = reg_class_to_reg_type[(int)to_class];
17359
17360 if (rs6000_secondary_reload_move (to_type, from_type, mode,
17361 (secondary_reload_info *)0, altivec_p))
17362 return false;
17363
17364 /* If we have a floating point or vector register class, we need to use
17365 memory to transfer the data. */
17366 if (IS_FP_VECT_REG_TYPE (from_type) || IS_FP_VECT_REG_TYPE (to_type))
17367 return true;
17368
17369 return false;
17370 }
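/* Worked example (illustrative): copying DImode between GENERAL_REGS and
FLOAT_REGS on a target without the power8 direct-move instructions returns
true -- there is no single fpr<->gpr copy insn, so the value must bounce
through a stack slot. */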
17371
17372 /* Debug version of rs6000_secondary_memory_needed. */
17373 static bool
17374 rs6000_debug_secondary_memory_needed (enum reg_class from_class,
17375 enum reg_class to_class,
17376 enum machine_mode mode)
17377 {
17378 bool ret = rs6000_secondary_memory_needed (from_class, to_class, mode);
17379
17380 fprintf (stderr,
17381 "rs6000_secondary_memory_needed, return: %s, from_class = %s, "
17382 "to_class = %s, mode = %s\n",
17383 ret ? "true" : "false",
17384 reg_class_names[from_class],
17385 reg_class_names[to_class],
17386 GET_MODE_NAME (mode));
17387
17388 return ret;
17389 }
17390
17391 /* Return the register class of a scratch register needed to copy IN into
17392 or out of a register in RCLASS in MODE. If it can be done directly,
17393 NO_REGS is returned. */
17394
17395 static enum reg_class
17396 rs6000_secondary_reload_class (enum reg_class rclass, enum machine_mode mode,
17397 rtx in)
17398 {
17399 int regno;
17400
17401 if (TARGET_ELF || (DEFAULT_ABI == ABI_DARWIN
17402 #if TARGET_MACHO
17403 && MACHOPIC_INDIRECT
17404 #endif
17405 ))
17406 {
17407 /* We cannot copy a symbolic operand directly into anything
17408 other than BASE_REGS for TARGET_ELF. So indicate that a
17409 register from BASE_REGS is needed as an intermediate
17410 register.
17411
17412 On Darwin, pic addresses require a load from memory, which
17413 needs a base register. */
17414 if (rclass != BASE_REGS
17415 && (GET_CODE (in) == SYMBOL_REF
17416 || GET_CODE (in) == HIGH
17417 || GET_CODE (in) == LABEL_REF
17418 || GET_CODE (in) == CONST))
17419 return BASE_REGS;
17420 }
17421
17422 if (GET_CODE (in) == REG)
17423 {
17424 regno = REGNO (in);
17425 if (regno >= FIRST_PSEUDO_REGISTER)
17426 {
17427 regno = true_regnum (in);
17428 if (regno >= FIRST_PSEUDO_REGISTER)
17429 regno = -1;
17430 }
17431 }
17432 else if (GET_CODE (in) == SUBREG)
17433 {
17434 regno = true_regnum (in);
17435 if (regno >= FIRST_PSEUDO_REGISTER)
17436 regno = -1;
17437 }
17438 else
17439 regno = -1;
17440
17441 /* We can place anything into GENERAL_REGS and can put GENERAL_REGS
17442 into anything. */
17443 if (rclass == GENERAL_REGS || rclass == BASE_REGS
17444 || (regno >= 0 && INT_REGNO_P (regno)))
17445 return NO_REGS;
17446
17447 /* Constants, memory, and FP registers can go into FP registers. */
17448 if ((regno == -1 || FP_REGNO_P (regno))
17449 && (rclass == FLOAT_REGS || rclass == NON_SPECIAL_REGS))
17450 return (mode != SDmode || lra_in_progress) ? NO_REGS : GENERAL_REGS;
17451
17452 /* Memory and FP/Altivec registers can go into FP/Altivec registers under
17453 VSX. However, for scalar variables, use the traditional floating point
17454 registers so that we can use offset+register addressing. */
17455 if (TARGET_VSX
17456 && (regno == -1 || VSX_REGNO_P (regno))
17457 && VSX_REG_CLASS_P (rclass))
17458 {
17459 if (GET_MODE_SIZE (mode) < 16)
17460 return FLOAT_REGS;
17461
17462 return NO_REGS;
17463 }
17464
17465 /* Memory and AltiVec registers can go into AltiVec registers. */
17466 if ((regno == -1 || ALTIVEC_REGNO_P (regno))
17467 && rclass == ALTIVEC_REGS)
17468 return NO_REGS;
17469
17470 /* We can copy among the CR registers. */
17471 if ((rclass == CR_REGS || rclass == CR0_REGS)
17472 && regno >= 0 && CR_REGNO_P (regno))
17473 return NO_REGS;
17474
17475 /* Otherwise, we need GENERAL_REGS. */
17476 return GENERAL_REGS;
17477 }
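/* Worked example (illustrative): under TARGET_ELF, reloading a SYMBOL_REF
into GENERAL_REGS returns BASE_REGS, because the address arithmetic that
builds the symbol's value needs a register usable as a base (r0 in the
base slot is read as literal zero). */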
17478
17479 /* Debug version of rs6000_secondary_reload_class. */
17480 static enum reg_class
17481 rs6000_debug_secondary_reload_class (enum reg_class rclass,
17482 enum machine_mode mode, rtx in)
17483 {
17484 enum reg_class ret = rs6000_secondary_reload_class (rclass, mode, in);
17485 fprintf (stderr,
17486 "\nrs6000_secondary_reload_class, return %s, rclass = %s, "
17487 "mode = %s, input rtx:\n",
17488 reg_class_names[ret], reg_class_names[rclass],
17489 GET_MODE_NAME (mode));
17490 debug_rtx (in);
17491
17492 return ret;
17493 }
17494
17495 /* Return nonzero if for CLASS a mode change from FROM to TO is invalid. */
17496
17497 static bool
17498 rs6000_cannot_change_mode_class (enum machine_mode from,
17499 enum machine_mode to,
17500 enum reg_class rclass)
17501 {
17502 unsigned from_size = GET_MODE_SIZE (from);
17503 unsigned to_size = GET_MODE_SIZE (to);
17504
17505 if (from_size != to_size)
17506 {
17507 enum reg_class xclass = (TARGET_VSX) ? VSX_REGS : FLOAT_REGS;
17508
17509 if (reg_classes_intersect_p (xclass, rclass))
17510 {
17511 unsigned to_nregs = hard_regno_nregs[FIRST_FPR_REGNO][to];
17512 unsigned from_nregs = hard_regno_nregs[FIRST_FPR_REGNO][from];
17513
17514 /* Don't allow 64-bit types to overlap with 128-bit types that take a
17515 single register under VSX because the scalar part of the register
17516 is in the upper 64 bits, and not the lower 64 bits. Types like
17517 TFmode/TDmode that take 2 scalar registers can overlap. 128-bit
17518 IEEE floating point can't overlap, and neither can small
17519 values. */
17520
17521 if (TARGET_IEEEQUAD && (to == TFmode || from == TFmode))
17522 return true;
17523
17524 /* TDmode in floating-point registers must always go into a register
17525 pair with the most significant word in the even-numbered register
17526 to match ISA requirements. In little-endian mode, this does not
17527 match subreg numbering, so we cannot allow subregs. */
17528 if (!BYTES_BIG_ENDIAN && (to == TDmode || from == TDmode))
17529 return true;
17530
17531 if (from_size < 8 || to_size < 8)
17532 return true;
17533
17534 if (from_size == 8 && (8 * to_nregs) != to_size)
17535 return true;
17536
17537 if (to_size == 8 && (8 * from_nregs) != from_size)
17538 return true;
17539
17540 return false;
17541 }
17542 else
17543 return false;
17544 }
17545
17546 if (TARGET_E500_DOUBLE
17547 && ((((to) == DFmode) + ((from) == DFmode)) == 1
17548 || (((to) == TFmode) + ((from) == TFmode)) == 1
17549 || (((to) == DDmode) + ((from) == DDmode)) == 1
17550 || (((to) == TDmode) + ((from) == TDmode)) == 1
17551 || (((to) == DImode) + ((from) == DImode)) == 1))
17552 return true;
17553
17554 /* Since the VSX register set includes traditional floating point registers
17555 and altivec registers, just check for the size being different instead of
17556 trying to check whether the modes are vector modes. Otherwise it won't
17557 allow, say, DF and DI to change classes. For types like TFmode and TDmode
17558 that take 2 64-bit registers, rather than a single 128-bit register, don't
17559 allow subregs of those types to other 128-bit types. */
17560 if (TARGET_VSX && VSX_REG_CLASS_P (rclass))
17561 {
17562 unsigned num_regs = (from_size + 15) / 16;
17563 if (hard_regno_nregs[FIRST_FPR_REGNO][to] > num_regs
17564 || hard_regno_nregs[FIRST_FPR_REGNO][from] > num_regs)
17565 return true;
17566
17567 return (from_size != 8 && from_size != 16);
17568 }
17569
17570 if (TARGET_ALTIVEC && rclass == ALTIVEC_REGS
17571 && (ALTIVEC_VECTOR_MODE (from) + ALTIVEC_VECTOR_MODE (to)) == 1)
17572 return true;
17573
17574 if (TARGET_SPE && (SPE_VECTOR_MODE (from) + SPE_VECTOR_MODE (to)) == 1
17575 && reg_classes_intersect_p (GENERAL_REGS, rclass))
17576 return true;
17577
17578 return false;
17579 }
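/* Worked example (illustrative): under VSX, (subreg:DF (reg:V2DF ...) 0)
is rejected by the size checks above -- to_size == 8 while V2DF occupies a
single 16-byte register, and the DF scalar lives in the upper 64 bits of
that register, not where subreg numbering would look for it. */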
17580
17581 /* Debug version of rs6000_cannot_change_mode_class. */
17582 static bool
17583 rs6000_debug_cannot_change_mode_class (enum machine_mode from,
17584 enum machine_mode to,
17585 enum reg_class rclass)
17586 {
17587 bool ret = rs6000_cannot_change_mode_class (from, to, rclass);
17588
17589 fprintf (stderr,
17590 "rs6000_cannot_change_mode_class, return %s, from = %s, "
17591 "to = %s, rclass = %s\n",
17592 ret ? "true" : "false",
17593 GET_MODE_NAME (from), GET_MODE_NAME (to),
17594 reg_class_names[rclass]);
17595
17596 return ret;
17597 }
17598 \f
17599 /* Return a string to do a move operation of 128 bits of data. */
17600
17601 const char *
17602 rs6000_output_move_128bit (rtx operands[])
17603 {
17604 rtx dest = operands[0];
17605 rtx src = operands[1];
17606 enum machine_mode mode = GET_MODE (dest);
17607 int dest_regno;
17608 int src_regno;
17609 bool dest_gpr_p, dest_fp_p, dest_vmx_p, dest_vsx_p;
17610 bool src_gpr_p, src_fp_p, src_vmx_p, src_vsx_p;
17611
17612 if (REG_P (dest))
17613 {
17614 dest_regno = REGNO (dest);
17615 dest_gpr_p = INT_REGNO_P (dest_regno);
17616 dest_fp_p = FP_REGNO_P (dest_regno);
17617 dest_vmx_p = ALTIVEC_REGNO_P (dest_regno);
17618 dest_vsx_p = dest_fp_p | dest_vmx_p;
17619 }
17620 else
17621 {
17622 dest_regno = -1;
17623 dest_gpr_p = dest_fp_p = dest_vmx_p = dest_vsx_p = false;
17624 }
17625
17626 if (REG_P (src))
17627 {
17628 src_regno = REGNO (src);
17629 src_gpr_p = INT_REGNO_P (src_regno);
17630 src_fp_p = FP_REGNO_P (src_regno);
17631 src_vmx_p = ALTIVEC_REGNO_P (src_regno);
17632 src_vsx_p = src_fp_p | src_vmx_p;
17633 }
17634 else
17635 {
17636 src_regno = -1;
17637 src_gpr_p = src_fp_p = src_vmx_p = src_vsx_p = false;
17638 }
17639
17640 /* Register moves. */
17641 if (dest_regno >= 0 && src_regno >= 0)
17642 {
17643 if (dest_gpr_p)
17644 {
17645 if (src_gpr_p)
17646 return "#";
17647
17648 else if (TARGET_VSX && TARGET_DIRECT_MOVE && src_vsx_p)
17649 return "#";
17650 }
17651
17652 else if (TARGET_VSX && dest_vsx_p)
17653 {
17654 if (src_vsx_p)
17655 return "xxlor %x0,%x1,%x1";
17656
17657 else if (TARGET_DIRECT_MOVE && src_gpr_p)
17658 return "#";
17659 }
17660
17661 else if (TARGET_ALTIVEC && dest_vmx_p && src_vmx_p)
17662 return "vor %0,%1,%1";
17663
17664 else if (dest_fp_p && src_fp_p)
17665 return "#";
17666 }
17667
17668 /* Loads. */
17669 else if (dest_regno >= 0 && MEM_P (src))
17670 {
17671 if (dest_gpr_p)
17672 {
17673 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
17674 return "lq %0,%1";
17675 else
17676 return "#";
17677 }
17678
17679 else if (TARGET_ALTIVEC && dest_vmx_p
17680 && altivec_indexed_or_indirect_operand (src, mode))
17681 return "lvx %0,%y1";
17682
17683 else if (TARGET_VSX && dest_vsx_p)
17684 {
17685 if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
17686 return "lxvw4x %x0,%y1";
17687 else
17688 return "lxvd2x %x0,%y1";
17689 }
17690
17691 else if (TARGET_ALTIVEC && dest_vmx_p)
17692 return "lvx %0,%y1";
17693
17694 else if (dest_fp_p)
17695 return "#";
17696 }
17697
17698 /* Stores. */
17699 else if (src_regno >= 0 && MEM_P (dest))
17700 {
17701 if (src_gpr_p)
17702 {
17703 if (TARGET_QUAD_MEMORY && quad_load_store_p (dest, src))
17704 return "stq %1,%0";
17705 else
17706 return "#";
17707 }
17708
17709 else if (TARGET_ALTIVEC && src_vmx_p
17710 && altivec_indexed_or_indirect_operand (src, mode))
17711 return "stvx %1,%y0";
17712
17713 else if (TARGET_VSX && src_vsx_p)
17714 {
17715 if (mode == V16QImode || mode == V8HImode || mode == V4SImode)
17716 return "stxvw4x %x1,%y0";
17717 else
17718 return "stxvd2x %x1,%y0";
17719 }
17720
17721 else if (TARGET_ALTIVEC && src_vmx_p)
17722 return "stvx %1,%y0";
17723
17724 else if (src_fp_p)
17725 return "#";
17726 }
17727
17728 /* Constants. */
17729 else if (dest_regno >= 0
17730 && (GET_CODE (src) == CONST_INT
17731 || GET_CODE (src) == CONST_WIDE_INT
17732 || GET_CODE (src) == CONST_DOUBLE
17733 || GET_CODE (src) == CONST_VECTOR))
17734 {
17735 if (dest_gpr_p)
17736 return "#";
17737
17738 else if (TARGET_VSX && dest_vsx_p && zero_constant (src, mode))
17739 return "xxlxor %x0,%x0,%x0";
17740
17741 else if (TARGET_ALTIVEC && dest_vmx_p)
17742 return output_vec_const_move (operands);
17743 }
17744
17745 if (TARGET_DEBUG_ADDR)
17746 {
17747 fprintf (stderr, "\n===== Bad 128 bit move:\n");
17748 debug_rtx (gen_rtx_SET (VOIDmode, dest, src));
17749 }
17750
17751 gcc_unreachable ();
17752 }
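/* Worked examples (illustrative): a V4SImode load into a VSX register
(when the Altivec lvx case above does not apply) returns
"lxvw4x %x0,%y1", while a TImode GPR-to-GPR move returns "#", which tells
the output machinery the insn must be split into word moves after
reload. */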
17753
17754 /* Validate a 128-bit move. */
17755 bool
17756 rs6000_move_128bit_ok_p (rtx operands[])
17757 {
17758 enum machine_mode mode = GET_MODE (operands[0]);
17759 return (gpc_reg_operand (operands[0], mode)
17760 || gpc_reg_operand (operands[1], mode));
17761 }
17762
17763 /* Return true if a 128-bit move needs to be split. */
17764 bool
17765 rs6000_split_128bit_ok_p (rtx operands[])
17766 {
17767 if (!reload_completed)
17768 return false;
17769
17770 if (!gpr_or_gpr_p (operands[0], operands[1]))
17771 return false;
17772
17773 if (quad_load_store_p (operands[0], operands[1]))
17774 return false;
17775
17776 return true;
17777 }
17778
17779 \f
17780 /* Given a comparison operation, return the bit number in CCR to test. We
17781 know this is a valid comparison.
17782
17783 SCC_P is 1 if this is for an scc. That means that %D will have been
17784 used instead of %C, so the bits will be in different places.
17785
17786 Return -1 if OP isn't a valid comparison for some reason. */
17787
17788 int
17789 ccr_bit (rtx op, int scc_p)
17790 {
17791 enum rtx_code code = GET_CODE (op);
17792 enum machine_mode cc_mode;
17793 int cc_regnum;
17794 int base_bit;
17795 rtx reg;
17796
17797 if (!COMPARISON_P (op))
17798 return -1;
17799
17800 reg = XEXP (op, 0);
17801
17802 gcc_assert (GET_CODE (reg) == REG && CR_REGNO_P (REGNO (reg)));
17803
17804 cc_mode = GET_MODE (reg);
17805 cc_regnum = REGNO (reg);
17806 base_bit = 4 * (cc_regnum - CR0_REGNO);
17807
17808 validate_condition_mode (code, cc_mode);
17809
17810 /* When generating a sCOND operation, only positive conditions are
17811 allowed. */
17812 gcc_assert (!scc_p
17813 || code == EQ || code == GT || code == LT || code == UNORDERED
17814 || code == GTU || code == LTU);
17815
17816 switch (code)
17817 {
17818 case NE:
17819 return scc_p ? base_bit + 3 : base_bit + 2;
17820 case EQ:
17821 return base_bit + 2;
17822 case GT: case GTU: case UNLE:
17823 return base_bit + 1;
17824 case LT: case LTU: case UNGE:
17825 return base_bit;
17826 case ORDERED: case UNORDERED:
17827 return base_bit + 3;
17828
17829 case GE: case GEU:
17830 /* If scc, we will have done a cror to put the bit in the
17831 unordered position. So test that bit. For integer, this is ! LT
17832 unless this is an scc insn. */
17833 return scc_p ? base_bit + 3 : base_bit;
17834
17835 case LE: case LEU:
17836 return scc_p ? base_bit + 3 : base_bit + 1;
17837
17838 default:
17839 gcc_unreachable ();
17840 }
17841 }
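/* Worked example (illustrative): for (gt (reg:CC cr2) (const_int 0)),
base_bit = 4 * (cr2 - cr0) = 8 and GT tests base_bit + 1, so ccr_bit
returns 9 -- the GT bit of CR field 2 within the 32-bit condition
register. */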
17842 \f
17843 /* Return the GOT register. */
17844
17845 rtx
17846 rs6000_got_register (rtx value ATTRIBUTE_UNUSED)
17847 {
17848 /* The second flow pass currently (June 1999) can't update
17849 regs_ever_live without disturbing other parts of the compiler, so
17850 update it here to make the prolog/epilogue code happy. */
17851 if (!can_create_pseudo_p ()
17852 && !df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
17853 df_set_regs_ever_live (RS6000_PIC_OFFSET_TABLE_REGNUM, true);
17854
17855 crtl->uses_pic_offset_table = 1;
17856
17857 return pic_offset_table_rtx;
17858 }
17859 \f
17860 static rs6000_stack_t stack_info;
17861
17862 /* Function to init struct machine_function.
17863 This will be called, via a pointer variable,
17864 from push_function_context. */
17865
17866 static struct machine_function *
17867 rs6000_init_machine_status (void)
17868 {
17869 stack_info.reload_completed = 0;
17870 return ggc_cleared_alloc<machine_function> ();
17871 }
17872 \f
17873 #define INT_P(X) (GET_CODE (X) == CONST_INT && GET_MODE (X) == VOIDmode)
17874
17875 int
17876 extract_MB (rtx op)
17877 {
17878 int i;
17879 unsigned long val = INTVAL (op);
17880
17881 /* If the high bit is zero, the value is the position of the first 1 bit
17882 we find from the left. */
17883 if ((val & 0x80000000) == 0)
17884 {
17885 gcc_assert (val & 0xffffffff);
17886
17887 i = 1;
17888 while (((val <<= 1) & 0x80000000) == 0)
17889 ++i;
17890 return i;
17891 }
17892
17893 /* If the high bit is set and the low bit is not, or the mask is all
17894 1's, the value is zero. */
17895 if ((val & 1) == 0 || (val & 0xffffffff) == 0xffffffff)
17896 return 0;
17897
17898 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
17899 from the right. */
17900 i = 31;
17901 while (((val >>= 1) & 1) != 0)
17902 --i;
17903
17904 return i;
17905 }
17906
17907 int
17908 extract_ME (rtx op)
17909 {
17910 int i;
17911 unsigned long val = INTVAL (op);
17912
17913 /* If the low bit is zero, the value is the position of the first 1 bit
17914 we find from the right. */
17915 if ((val & 1) == 0)
17916 {
17917 gcc_assert (val & 0xffffffff);
17918
17919 i = 30;
17920 while (((val >>= 1) & 1) == 0)
17921 --i;
17922
17923 return i;
17924 }
17925
17926 /* If the low bit is set and the high bit is not, or the mask is all
17927 1's, the value is 31. */
17928 if ((val & 0x80000000) == 0 || (val & 0xffffffff) == 0xffffffff)
17929 return 31;
17930
17931 /* Otherwise we have a wrap-around mask. Look for the first 0 bit
17932 from the left. */
17933 i = 0;
17934 while (((val <<= 1) & 0x80000000) != 0)
17935 ++i;
17936
17937 return i;
17938 }
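/* Worked example (illustrative): for the mask 0x00ffff00 the 1 bits run
from IBM bit 8 (bit 0 is the MSB) through bit 23, so extract_MB returns 8
and extract_ME returns 23 -- the MB/ME fields of an rlwinm that keeps
exactly those bits. */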
17939
17940 /* Locate some local-dynamic symbol still in use by this function
17941 so that we can print its name in some tls_ld pattern. */
17942
17943 static const char *
17944 rs6000_get_some_local_dynamic_name (void)
17945 {
17946 rtx_insn *insn;
17947
17948 if (cfun->machine->some_ld_name)
17949 return cfun->machine->some_ld_name;
17950
17951 for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
17952 if (INSN_P (insn)
17953 && for_each_rtx (&PATTERN (insn),
17954 rs6000_get_some_local_dynamic_name_1, 0))
17955 return cfun->machine->some_ld_name;
17956
17957 gcc_unreachable ();
17958 }
17959
17960 /* Helper function for rs6000_get_some_local_dynamic_name. */
17961
17962 static int
17963 rs6000_get_some_local_dynamic_name_1 (rtx *px, void *data ATTRIBUTE_UNUSED)
17964 {
17965 rtx x = *px;
17966
17967 if (GET_CODE (x) == SYMBOL_REF)
17968 {
17969 const char *str = XSTR (x, 0);
17970 if (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC)
17971 {
17972 cfun->machine->some_ld_name = str;
17973 return 1;
17974 }
17975 }
17976
17977 return 0;
17978 }
17979
17980 /* Write out a function code label. */
17981
17982 void
17983 rs6000_output_function_entry (FILE *file, const char *fname)
17984 {
17985 if (fname[0] != '.')
17986 {
17987 switch (DEFAULT_ABI)
17988 {
17989 default:
17990 gcc_unreachable ();
17991
17992 case ABI_AIX:
17993 if (DOT_SYMBOLS)
17994 putc ('.', file);
17995 else
17996 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "L.");
17997 break;
17998
17999 case ABI_ELFv2:
18000 case ABI_V4:
18001 case ABI_DARWIN:
18002 break;
18003 }
18004 }
18005
18006 RS6000_OUTPUT_BASENAME (file, fname);
18007 }
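/* Worked example (illustrative): under ABI_AIX with dot symbols, calling
this with fname "foo" writes ".foo", the code entry point, while the
undotted "foo" names the function descriptor. */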
18008
18009 /* Print an operand. Recognize special options, documented below. */
18010
18011 #if TARGET_ELF
18012 #define SMALL_DATA_RELOC ((rs6000_sdata == SDATA_EABI) ? "sda21" : "sdarel")
18013 #define SMALL_DATA_REG ((rs6000_sdata == SDATA_EABI) ? 0 : 13)
18014 #else
18015 #define SMALL_DATA_RELOC "sda21"
18016 #define SMALL_DATA_REG 0
18017 #endif
18018
18019 void
18020 print_operand (FILE *file, rtx x, int code)
18021 {
18022 int i;
18023 unsigned HOST_WIDE_INT uval;
18024
18025 switch (code)
18026 {
18027 /* %a is output_address. */
18028
18029 case 'b':
18030 /* If constant, low-order 16 bits of constant, unsigned.
18031 Otherwise, write normally. */
18032 if (INT_P (x))
18033 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 0xffff);
18034 else
18035 print_operand (file, x, 0);
18036 return;
18037
18038 case 'B':
18039 /* If the low-order bit is zero, write 'r'; otherwise, write 'l'
18040 for 64-bit mask direction. */
18041 putc (((INTVAL (x) & 1) == 0 ? 'r' : 'l'), file);
18042 return;
18043
18044 /* %c is output_addr_const if a CONSTANT_ADDRESS_P, otherwise
18045 output_operand. */
18046
18047 case 'D':
18048 /* Like 'J' but get to the GT bit only. */
18049 gcc_assert (REG_P (x));
18050
18051 /* Bit 1 is GT bit. */
18052 i = 4 * (REGNO (x) - CR0_REGNO) + 1;
18053
18054 /* Add one for shift count in rlinm for scc. */
18055 fprintf (file, "%d", i + 1);
18056 return;
18057
18058 case 'e':
18059 /* If the low 16 bits are 0, but some other bit is set, write 's'. */
18060 if (! INT_P (x))
18061 {
18062 output_operand_lossage ("invalid %%e value");
18063 return;
18064 }
18065
18066 uval = INTVAL (x);
18067 if ((uval & 0xffff) == 0 && uval != 0)
18068 putc ('s', file);
18069 return;
18070
18071 case 'E':
18072 /* X is a CR register. Print the number of the EQ bit of the CR. */
18073 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
18074 output_operand_lossage ("invalid %%E value");
18075 else
18076 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO) + 2);
18077 return;
18078
18079 case 'f':
18080 /* X is a CR register. Print the shift count needed to move it
18081 to the high-order four bits. */
18082 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
18083 output_operand_lossage ("invalid %%f value");
18084 else
18085 fprintf (file, "%d", 4 * (REGNO (x) - CR0_REGNO));
18086 return;
18087
18088 case 'F':
18089 /* Similar, but print the count for the rotate in the opposite
18090 direction. */
18091 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
18092 output_operand_lossage ("invalid %%F value");
18093 else
18094 fprintf (file, "%d", 32 - 4 * (REGNO (x) - CR0_REGNO));
18095 return;
18096
18097 case 'G':
18098 /* X is a constant integer. If it is negative, print "m",
18099 otherwise print "z". This is to make an aze or ame insn. */
18100 if (GET_CODE (x) != CONST_INT)
18101 output_operand_lossage ("invalid %%G value");
18102 else if (INTVAL (x) >= 0)
18103 putc ('z', file);
18104 else
18105 putc ('m', file);
18106 return;
18107
18108 case 'h':
18109 /* If constant, output low-order five bits. Otherwise, write
18110 normally. */
18111 if (INT_P (x))
18112 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 31);
18113 else
18114 print_operand (file, x, 0);
18115 return;
18116
18117 case 'H':
18118 /* If constant, output low-order six bits. Otherwise, write
18119 normally. */
18120 if (INT_P (x))
18121 fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 63);
18122 else
18123 print_operand (file, x, 0);
18124 return;
18125
18126 case 'I':
18127 /* Print `i' if this is a constant, else nothing. */
18128 if (INT_P (x))
18129 putc ('i', file);
18130 return;
18131
18132 case 'j':
18133 /* Write the bit number in CCR for jump. */
18134 i = ccr_bit (x, 0);
18135 if (i == -1)
18136 output_operand_lossage ("invalid %%j code");
18137 else
18138 fprintf (file, "%d", i);
18139 return;
18140
18141 case 'J':
18142 /* Similar, but add one for shift count in rlinm for scc and pass
18143 scc flag to `ccr_bit'. */
18144 i = ccr_bit (x, 1);
18145 if (i == -1)
18146 output_operand_lossage ("invalid %%J code");
18147 else
18148 /* If we want bit 31, write a shift count of zero, not 32. */
18149 fprintf (file, "%d", i == 31 ? 0 : i + 1);
18150 return;
18151
18152 case 'k':
18153 /* X must be a constant. Write the 1's complement of the
18154 constant. */
18155 if (! INT_P (x))
18156 output_operand_lossage ("invalid %%k value");
18157 else
18158 fprintf (file, HOST_WIDE_INT_PRINT_DEC, ~ INTVAL (x));
18159 return;
18160
18161 case 'K':
18162 /* X must be a symbolic constant on ELF. Write an
18163 expression suitable for an 'addi' that adds in the low 16
18164 bits of the MEM. */
18165 if (GET_CODE (x) == CONST)
18166 {
18167 if (GET_CODE (XEXP (x, 0)) != PLUS
18168 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
18169 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
18170 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
18171 output_operand_lossage ("invalid %%K value");
18172 }
18173 print_operand_address (file, x);
18174 fputs ("@l", file);
18175 return;
18176
18177 /* %l is output_asm_label. */
18178
18179 case 'L':
18180 /* Write second word of DImode or DFmode reference. Works on register
18181 or non-indexed memory only. */
18182 if (REG_P (x))
18183 fputs (reg_names[REGNO (x) + 1], file);
18184 else if (MEM_P (x))
18185 {
18186 /* Handle possible auto-increment. Since it is pre-increment and
18187 we have already done it, we can just use an offset of one word. */
18188 if (GET_CODE (XEXP (x, 0)) == PRE_INC
18189 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
18190 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
18191 UNITS_PER_WORD));
18192 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
18193 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0),
18194 UNITS_PER_WORD));
18195 else
18196 output_address (XEXP (adjust_address_nv (x, SImode,
18197 UNITS_PER_WORD),
18198 0));
18199
18200 if (small_data_operand (x, GET_MODE (x)))
18201 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
18202 reg_names[SMALL_DATA_REG]);
18203 }
18204 return;
18205
18206 case 'm':
18207 /* MB value for a mask operand. */
18208 if (! mask_operand (x, SImode))
18209 output_operand_lossage ("invalid %%m value");
18210
18211 fprintf (file, "%d", extract_MB (x));
18212 return;
18213
18214 case 'M':
18215 /* ME value for a mask operand. */
18216 if (! mask_operand (x, SImode))
18217 output_operand_lossage ("invalid %%M value");
18218
18219 fprintf (file, "%d", extract_ME (x));
18220 return;
18221
18222 /* %n outputs the negative of its operand. */
18223
18224 case 'N':
18225 /* Write the number of elements in the vector times 4. */
18226 if (GET_CODE (x) != PARALLEL)
18227 output_operand_lossage ("invalid %%N value");
18228 else
18229 fprintf (file, "%d", XVECLEN (x, 0) * 4);
18230 return;
18231
18232 case 'O':
18233 /* Similar, but subtract 1 first. */
18234 if (GET_CODE (x) != PARALLEL)
18235 output_operand_lossage ("invalid %%O value");
18236 else
18237 fprintf (file, "%d", (XVECLEN (x, 0) - 1) * 4);
18238 return;
18239
18240 case 'p':
18241 /* X is a CONST_INT that is a power of two. Output the logarithm. */
18242 if (! INT_P (x)
18243 || INTVAL (x) < 0
18244 || (i = exact_log2 (INTVAL (x))) < 0)
18245 output_operand_lossage ("invalid %%p value");
18246 else
18247 fprintf (file, "%d", i);
18248 return;
18249
18250 case 'P':
18251 /* The operand must be an indirect memory reference. The result
18252 is the register name. */
18253 if (GET_CODE (x) != MEM || GET_CODE (XEXP (x, 0)) != REG
18254 || REGNO (XEXP (x, 0)) >= 32)
18255 output_operand_lossage ("invalid %%P value");
18256 else
18257 fputs (reg_names[REGNO (XEXP (x, 0))], file);
18258 return;
18259
18260 case 'q':
18261 /* This outputs the logical code corresponding to a boolean
18262 expression. The expression may have one or both operands
18263 negated (if one, only the first one). For condition register
18264 logical operations, it will also treat the negated
18265 CR codes as NOTs, but not handle NOTs of them. */
18266 {
18267 const char *const *t = 0;
18268 const char *s;
18269 enum rtx_code code = GET_CODE (x);
18270 static const char * const tbl[3][3] = {
18271 { "and", "andc", "nor" },
18272 { "or", "orc", "nand" },
18273 { "xor", "eqv", "xor" } };
18274
18275 if (code == AND)
18276 t = tbl[0];
18277 else if (code == IOR)
18278 t = tbl[1];
18279 else if (code == XOR)
18280 t = tbl[2];
18281 else
18282 output_operand_lossage ("invalid %%q value");
18283
18284 if (GET_CODE (XEXP (x, 0)) != NOT)
18285 s = t[0];
18286 else
18287 {
18288 if (GET_CODE (XEXP (x, 1)) == NOT)
18289 s = t[2];
18290 else
18291 s = t[1];
18292 }
18293
18294 fputs (s, file);
18295 }
18296 return;
18297
18298 case 'Q':
18299 if (! TARGET_MFCRF)
18300 return;
18301 fputc (',', file);
18302 /* FALLTHRU */
18303
18304 case 'R':
18305 /* X is a CR register. Print the mask for `mtcrf'. */
18306 if (GET_CODE (x) != REG || ! CR_REGNO_P (REGNO (x)))
18307 output_operand_lossage ("invalid %%R value");
18308 else
18309 fprintf (file, "%d", 128 >> (REGNO (x) - CR0_REGNO));
18310 return;
18311
18312 case 's':
18313 /* Low 5 bits of 32 - value. */
18314 if (! INT_P (x))
18315 output_operand_lossage ("invalid %%s value");
18316 else
18317 fprintf (file, HOST_WIDE_INT_PRINT_DEC, (32 - INTVAL (x)) & 31);
18318 return;
18319
18320 case 'S':
18321 /* PowerPC64 mask position. All 0's is excluded.
18322 CONST_INT 32-bit mask is considered sign-extended so any
18323 transition must occur within the CONST_INT, not on the boundary. */
18324 if (! mask64_operand (x, DImode))
18325 output_operand_lossage ("invalid %%S value");
18326
18327 uval = INTVAL (x);
18328
18329 if (uval & 1) /* Clear Left */
18330 {
18331 #if HOST_BITS_PER_WIDE_INT > 64
18332 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
18333 #endif
18334 i = 64;
18335 }
18336 else /* Clear Right */
18337 {
18338 uval = ~uval;
18339 #if HOST_BITS_PER_WIDE_INT > 64
18340 uval &= ((unsigned HOST_WIDE_INT) 1 << 64) - 1;
18341 #endif
18342 i = 63;
18343 }
18344 while (uval != 0)
18345 --i, uval >>= 1;
18346 gcc_assert (i >= 0);
18347 fprintf (file, "%d", i);
18348 return;
18349
18350 case 't':
18351 /* Like 'J' but get to the OVERFLOW/UNORDERED bit. */
18352 gcc_assert (REG_P (x) && GET_MODE (x) == CCmode);
18353
18354 /* Bit 3 is OV bit. */
18355 i = 4 * (REGNO (x) - CR0_REGNO) + 3;
18356
18357 /* If we want bit 31, write a shift count of zero, not 32. */
18358 fprintf (file, "%d", i == 31 ? 0 : i + 1);
18359 return;
18360
18361 case 'T':
18362 /* Print the symbolic name of a branch target register. */
18363 if (GET_CODE (x) != REG || (REGNO (x) != LR_REGNO
18364 && REGNO (x) != CTR_REGNO))
18365 output_operand_lossage ("invalid %%T value");
18366 else if (REGNO (x) == LR_REGNO)
18367 fputs ("lr", file);
18368 else
18369 fputs ("ctr", file);
18370 return;
18371
18372 case 'u':
18373 /* High-order or low-order 16 bits of constant, whichever is non-zero,
18374 for use in unsigned operand. */
18375 if (! INT_P (x))
18376 {
18377 output_operand_lossage ("invalid %%u value");
18378 return;
18379 }
18380
18381 uval = INTVAL (x);
18382 if ((uval & 0xffff) == 0)
18383 uval >>= 16;
18384
18385 fprintf (file, HOST_WIDE_INT_PRINT_HEX, uval & 0xffff);
18386 return;
18387
18388 case 'v':
18389 /* High-order 16 bits of constant for use in signed operand. */
18390 if (! INT_P (x))
18391 output_operand_lossage ("invalid %%v value");
18392 else
18393 fprintf (file, HOST_WIDE_INT_PRINT_HEX,
18394 (INTVAL (x) >> 16) & 0xffff);
18395 return;
18396
18397 case 'U':
18398 /* Print `u' if this has an auto-increment or auto-decrement. */
18399 if (MEM_P (x)
18400 && (GET_CODE (XEXP (x, 0)) == PRE_INC
18401 || GET_CODE (XEXP (x, 0)) == PRE_DEC
18402 || GET_CODE (XEXP (x, 0)) == PRE_MODIFY))
18403 putc ('u', file);
18404 return;
18405
18406 case 'V':
18407 /* Print the trap code for this operand. */
18408 switch (GET_CODE (x))
18409 {
18410 case EQ:
18411 fputs ("eq", file); /* 4 */
18412 break;
18413 case NE:
18414 fputs ("ne", file); /* 24 */
18415 break;
18416 case LT:
18417 fputs ("lt", file); /* 16 */
18418 break;
18419 case LE:
18420 fputs ("le", file); /* 20 */
18421 break;
18422 case GT:
18423 fputs ("gt", file); /* 8 */
18424 break;
18425 case GE:
18426 fputs ("ge", file); /* 12 */
18427 break;
18428 case LTU:
18429 fputs ("llt", file); /* 2 */
18430 break;
18431 case LEU:
18432 fputs ("lle", file); /* 6 */
18433 break;
18434 case GTU:
18435 fputs ("lgt", file); /* 1 */
18436 break;
18437 case GEU:
18438 fputs ("lge", file); /* 5 */
18439 break;
18440 default:
18441 gcc_unreachable ();
18442 }
18443 break;
18444
18445 case 'w':
18446 /* If constant, low-order 16 bits of constant, signed. Otherwise, write
18447 normally. */
18448 if (INT_P (x))
18449 fprintf (file, HOST_WIDE_INT_PRINT_DEC,
18450 ((INTVAL (x) & 0xffff) ^ 0x8000) - 0x8000);
18451 else
18452 print_operand (file, x, 0);
18453 return;
18454
18455 case 'W':
18456 /* MB value for a PowerPC64 rldic operand. */
18457 i = clz_hwi (INTVAL (x));
18458
18459 fprintf (file, "%d", i);
18460 return;
18461
18462 case 'x':
18463 /* X is a FPR or Altivec register used in a VSX context. */
18464 if (GET_CODE (x) != REG || !VSX_REGNO_P (REGNO (x)))
18465 output_operand_lossage ("invalid %%x value");
18466 else
18467 {
18468 int reg = REGNO (x);
18469 int vsx_reg = (FP_REGNO_P (reg)
18470 ? reg - 32
18471 : reg - FIRST_ALTIVEC_REGNO + 32);
18472
18473 #ifdef TARGET_REGNAMES
18474 if (TARGET_REGNAMES)
18475 fprintf (file, "%%vs%d", vsx_reg);
18476 else
18477 #endif
18478 fprintf (file, "%d", vsx_reg);
18479 }
18480 return;
18481
18482 case 'X':
18483 if (MEM_P (x)
18484 && (legitimate_indexed_address_p (XEXP (x, 0), 0)
18485 || (GET_CODE (XEXP (x, 0)) == PRE_MODIFY
18486 && legitimate_indexed_address_p (XEXP (XEXP (x, 0), 1), 0))))
18487 putc ('x', file);
18488 return;
18489
18490 case 'Y':
18491 /* Like 'L', for third word of TImode/PTImode. */
18492 if (REG_P (x))
18493 fputs (reg_names[REGNO (x) + 2], file);
18494 else if (MEM_P (x))
18495 {
18496 if (GET_CODE (XEXP (x, 0)) == PRE_INC
18497 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
18498 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
18499 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
18500 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 8));
18501 else
18502 output_address (XEXP (adjust_address_nv (x, SImode, 8), 0));
18503 if (small_data_operand (x, GET_MODE (x)))
18504 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
18505 reg_names[SMALL_DATA_REG]);
18506 }
18507 return;
18508
18509 case 'z':
18510 /* X is a SYMBOL_REF. Write out the name preceded by a
18511 period and without any trailing data in brackets. Used for function
18512 names. If we are configured for System V (or the embedded ABI) on
18513 the PowerPC, do not emit the period, since those systems do not use
18514 TOCs and the like. */
18515 gcc_assert (GET_CODE (x) == SYMBOL_REF);
18516
18517 /* For macho, check to see if we need a stub. */
18518 if (TARGET_MACHO)
18519 {
18520 const char *name = XSTR (x, 0);
18521 #if TARGET_MACHO
18522 if (darwin_emit_branch_islands
18523 && MACHOPIC_INDIRECT
18524 && machopic_classify_symbol (x) == MACHOPIC_UNDEFINED_FUNCTION)
18525 name = machopic_indirection_name (x, /*stub_p=*/true);
18526 #endif
18527 assemble_name (file, name);
18528 }
18529 else if (!DOT_SYMBOLS)
18530 assemble_name (file, XSTR (x, 0));
18531 else
18532 rs6000_output_function_entry (file, XSTR (x, 0));
18533 return;
18534
18535 case 'Z':
18536 /* Like 'L', for last word of TImode/PTImode. */
18537 if (REG_P (x))
18538 fputs (reg_names[REGNO (x) + 3], file);
18539 else if (MEM_P (x))
18540 {
18541 if (GET_CODE (XEXP (x, 0)) == PRE_INC
18542 || GET_CODE (XEXP (x, 0)) == PRE_DEC)
18543 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
18544 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
18545 output_address (plus_constant (Pmode, XEXP (XEXP (x, 0), 0), 12));
18546 else
18547 output_address (XEXP (adjust_address_nv (x, SImode, 12), 0));
18548 if (small_data_operand (x, GET_MODE (x)))
18549 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
18550 reg_names[SMALL_DATA_REG]);
18551 }
18552 return;
18553
18554 /* Print AltiVec or SPE memory operand. */
18555 case 'y':
18556 {
18557 rtx tmp;
18558
18559 gcc_assert (MEM_P (x));
18560
18561 tmp = XEXP (x, 0);
18562
18563 /* Ugly hack because %y is overloaded. */
18564 if ((TARGET_SPE || TARGET_E500_DOUBLE)
18565 && (GET_MODE_SIZE (GET_MODE (x)) == 8
18566 || GET_MODE (x) == TFmode
18567 || GET_MODE (x) == TImode
18568 || GET_MODE (x) == PTImode))
18569 {
18570 /* Handle [reg]. */
18571 if (REG_P (tmp))
18572 {
18573 fprintf (file, "0(%s)", reg_names[REGNO (tmp)]);
18574 break;
18575 }
18576 /* Handle [reg+UIMM]. */
18577 else if (GET_CODE (tmp) == PLUS
18578 && GET_CODE (XEXP (tmp, 1)) == CONST_INT)
18579 {
18580 int offset;
18581
18582 gcc_assert (REG_P (XEXP (tmp, 0)));
18583
18584 offset = INTVAL (XEXP (tmp, 1));
18585 fprintf (file, "%d(%s)", offset, reg_names[REGNO (XEXP (tmp, 0))]);
18586 break;
18587 }
18588
18589 /* Fall through. Must be [reg+reg]. */
18590 }
18591 if (VECTOR_MEM_ALTIVEC_P (GET_MODE (x))
18592 && GET_CODE (tmp) == AND
18593 && GET_CODE (XEXP (tmp, 1)) == CONST_INT
18594 && INTVAL (XEXP (tmp, 1)) == -16)
18595 tmp = XEXP (tmp, 0);
18596 else if (VECTOR_MEM_VSX_P (GET_MODE (x))
18597 && GET_CODE (tmp) == PRE_MODIFY)
18598 tmp = XEXP (tmp, 1);
18599 if (REG_P (tmp))
18600 fprintf (file, "0,%s", reg_names[REGNO (tmp)]);
18601 else
18602 {
18603 if (GET_CODE (tmp) != PLUS
18604 || !REG_P (XEXP (tmp, 0))
18605 || !REG_P (XEXP (tmp, 1)))
18606 {
18607 output_operand_lossage ("invalid %%y value, try using the 'Z' constraint");
18608 break;
18609 }
18610
18611 if (REGNO (XEXP (tmp, 0)) == 0)
18612 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 1)) ],
18613 reg_names[ REGNO (XEXP (tmp, 0)) ]);
18614 else
18615 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (tmp, 0)) ],
18616 reg_names[ REGNO (XEXP (tmp, 1)) ]);
18617 }
18618 break;
18619 }
18620
18621 case 0:
18622 if (REG_P (x))
18623 fprintf (file, "%s", reg_names[REGNO (x)]);
18624 else if (MEM_P (x))
18625 {
18626 /* We need to handle PRE_INC and PRE_DEC here, since we need to
18627 know the width from the mode. */
18628 if (GET_CODE (XEXP (x, 0)) == PRE_INC)
18629 fprintf (file, "%d(%s)", GET_MODE_SIZE (GET_MODE (x)),
18630 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
18631 else if (GET_CODE (XEXP (x, 0)) == PRE_DEC)
18632 fprintf (file, "%d(%s)", - GET_MODE_SIZE (GET_MODE (x)),
18633 reg_names[REGNO (XEXP (XEXP (x, 0), 0))]);
18634 else if (GET_CODE (XEXP (x, 0)) == PRE_MODIFY)
18635 output_address (XEXP (XEXP (x, 0), 1));
18636 else
18637 output_address (XEXP (x, 0));
18638 }
18639 else
18640 {
18641 if (toc_relative_expr_p (x, false))
18642 /* This hack along with a corresponding hack in
18643 rs6000_output_addr_const_extra arranges to output addends
18644 where the assembler expects to find them. eg.
18645 (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 4)
18646 without this hack would be output as "x@toc+4". We
18647 want "x+4@toc". */
18648 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
18649 else
18650 output_addr_const (file, x);
18651 }
18652 return;
18653
18654 case '&':
18655 assemble_name (file, rs6000_get_some_local_dynamic_name ());
18656 return;
18657
18658 default:
18659 output_operand_lossage ("invalid %%xn code");
18660 }
18661 }
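/* Worked example (illustrative): in an asm template, "%w0" applied to
(const_int 0x12348765) prints -30875 (the low 16 bits, sign-extended),
while "%u0" prints 0x8765 (the non-zero 16-bit half, in hex). */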
18662 \f
18663 /* Print the address of an operand. */
18664
18665 void
18666 print_operand_address (FILE *file, rtx x)
18667 {
18668 if (REG_P (x))
18669 fprintf (file, "0(%s)", reg_names[ REGNO (x) ]);
18670 else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == CONST
18671 || GET_CODE (x) == LABEL_REF)
18672 {
18673 output_addr_const (file, x);
18674 if (small_data_operand (x, GET_MODE (x)))
18675 fprintf (file, "@%s(%s)", SMALL_DATA_RELOC,
18676 reg_names[SMALL_DATA_REG]);
18677 else
18678 gcc_assert (!TARGET_TOC);
18679 }
18680 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
18681 && REG_P (XEXP (x, 1)))
18682 {
18683 if (REGNO (XEXP (x, 0)) == 0)
18684 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 1)) ],
18685 reg_names[ REGNO (XEXP (x, 0)) ]);
18686 else
18687 fprintf (file, "%s,%s", reg_names[ REGNO (XEXP (x, 0)) ],
18688 reg_names[ REGNO (XEXP (x, 1)) ]);
18689 }
18690 else if (GET_CODE (x) == PLUS && REG_P (XEXP (x, 0))
18691 && GET_CODE (XEXP (x, 1)) == CONST_INT)
18692 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
18693 INTVAL (XEXP (x, 1)), reg_names[ REGNO (XEXP (x, 0)) ]);
18694 #if TARGET_MACHO
18695 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
18696 && CONSTANT_P (XEXP (x, 1)))
18697 {
18698 fprintf (file, "lo16(");
18699 output_addr_const (file, XEXP (x, 1));
18700 fprintf (file, ")(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
18701 }
18702 #endif
18703 #if TARGET_ELF
18704 else if (GET_CODE (x) == LO_SUM && REG_P (XEXP (x, 0))
18705 && CONSTANT_P (XEXP (x, 1)))
18706 {
18707 output_addr_const (file, XEXP (x, 1));
18708 fprintf (file, "@l(%s)", reg_names[ REGNO (XEXP (x, 0)) ]);
18709 }
18710 #endif
18711 else if (toc_relative_expr_p (x, false))
18712 {
18713 /* This hack along with a corresponding hack in
18714 rs6000_output_addr_const_extra arranges to output addends
18715 where the assembler expects to find them. eg.
18716 (lo_sum (reg 9)
18717 . (plus (unspec [(symbol_ref ("x")) (reg 2)] tocrel) 8))
18718 without this hack would be output as "x@toc+8@l(9)". We
18719 want "x+8@toc@l(9)". */
18720 output_addr_const (file, CONST_CAST_RTX (tocrel_base));
18721 if (GET_CODE (x) == LO_SUM)
18722 fprintf (file, "@l(%s)", reg_names[REGNO (XEXP (x, 0))]);
18723 else
18724 fprintf (file, "(%s)", reg_names[REGNO (XVECEXP (tocrel_base, 0, 1))]);
18725 }
18726 else
18727 gcc_unreachable ();
18728 }
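/* Worked examples (illustrative, with plain register names): (reg 9)
prints as "0(9)", (plus (reg 9) (const_int 16)) as "16(9)", and the
indexed (plus (reg 0) (reg 10)) as "10,0" -- r0 is moved out of the base
slot because a base of r0 is read as literal zero. */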
18729 \f
18730 /* Implement TARGET_OUTPUT_ADDR_CONST_EXTRA. */
18731
18732 static bool
18733 rs6000_output_addr_const_extra (FILE *file, rtx x)
18734 {
18735 if (GET_CODE (x) == UNSPEC)
18736 switch (XINT (x, 1))
18737 {
18738 case UNSPEC_TOCREL:
18739 gcc_checking_assert (GET_CODE (XVECEXP (x, 0, 0)) == SYMBOL_REF
18740 && REG_P (XVECEXP (x, 0, 1))
18741 && REGNO (XVECEXP (x, 0, 1)) == TOC_REGISTER);
18742 output_addr_const (file, XVECEXP (x, 0, 0));
18743 if (x == tocrel_base && tocrel_offset != const0_rtx)
18744 {
18745 if (INTVAL (tocrel_offset) >= 0)
18746 fprintf (file, "+");
18747 output_addr_const (file, CONST_CAST_RTX (tocrel_offset));
18748 }
18749 if (!TARGET_AIX || (TARGET_ELF && TARGET_MINIMAL_TOC))
18750 {
18751 putc ('-', file);
18752 assemble_name (file, toc_label_name);
18753 }
18754 else if (TARGET_ELF)
18755 fputs ("@toc", file);
18756 return true;
18757
18758 #if TARGET_MACHO
18759 case UNSPEC_MACHOPIC_OFFSET:
18760 output_addr_const (file, XVECEXP (x, 0, 0));
18761 putc ('-', file);
18762 machopic_output_function_base_name (file);
18763 return true;
18764 #endif
18765 }
18766 return false;
18767 }
18768 \f
18769 /* Target hook for assembling integer objects. The PowerPC version has
18770 to handle fixup entries for relocatable code if RELOCATABLE_NEEDS_FIXUP
18771 is defined. It also needs to handle DI-mode objects on 64-bit
18772 targets. */
18773
18774 static bool
18775 rs6000_assemble_integer (rtx x, unsigned int size, int aligned_p)
18776 {
18777 #ifdef RELOCATABLE_NEEDS_FIXUP
18778 /* Special handling for SI values. */
18779 if (RELOCATABLE_NEEDS_FIXUP && size == 4 && aligned_p)
18780 {
18781 static int recurse = 0;
18782
18783 /* For -mrelocatable, we mark all addresses that need to be fixed up in
18784 the .fixup section. Since the TOC section is already relocated, we
18785 don't need to mark it here. We used to skip the text section, but it
18786 should never be valid for relocated addresses to be placed in the text
18787 section. */
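/* Schematically (a sketch; the actual label number comes from
   fixuplabelno), the code below emits:
	.LCP<N>:
		.long	(<constant>)@fixup
		.section ".fixup","aw"
		.align	2
		.long	.LCP<N>
		.previous  */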
18788 if (TARGET_RELOCATABLE
18789 && in_section != toc_section
18790 && !recurse
18791 && !CONST_SCALAR_INT_P (x)
18792 && CONSTANT_P (x))
18793 {
18794 char buf[256];
18795
18796 recurse = 1;
18797 ASM_GENERATE_INTERNAL_LABEL (buf, "LCP", fixuplabelno);
18798 fixuplabelno++;
18799 ASM_OUTPUT_LABEL (asm_out_file, buf);
18800 fprintf (asm_out_file, "\t.long\t(");
18801 output_addr_const (asm_out_file, x);
18802 fprintf (asm_out_file, ")@fixup\n");
18803 fprintf (asm_out_file, "\t.section\t\".fixup\",\"aw\"\n");
18804 ASM_OUTPUT_ALIGN (asm_out_file, 2);
18805 fprintf (asm_out_file, "\t.long\t");
18806 assemble_name (asm_out_file, buf);
18807 fprintf (asm_out_file, "\n\t.previous\n");
18808 recurse = 0;
18809 return true;
18810 }
18811 /* Remove initial .'s to turn a -mcall-aixdesc function
18812 address into the address of the descriptor, not the function
18813 itself. */
18814 else if (GET_CODE (x) == SYMBOL_REF
18815 && XSTR (x, 0)[0] == '.'
18816 && DEFAULT_ABI == ABI_AIX)
18817 {
18818 const char *name = XSTR (x, 0);
18819 while (*name == '.')
18820 name++;
18821
18822 fprintf (asm_out_file, "\t.long\t%s\n", name);
18823 return true;
18824 }
18825 }
18826 #endif /* RELOCATABLE_NEEDS_FIXUP */
18827 return default_assemble_integer (x, size, aligned_p);
18828 }
18829
18830 #if defined (HAVE_GAS_HIDDEN) && !TARGET_MACHO
18831 /* Emit an assembler directive to set symbol visibility for DECL to
18832 VISIBILITY_TYPE. */
18833
18834 static void
18835 rs6000_assemble_visibility (tree decl, int vis)
18836 {
18837 if (TARGET_XCOFF)
18838 return;
18839
18840 /* Functions need to have their entry point symbol visibility set as
18841 well as their descriptor symbol visibility. */
18842 if (DEFAULT_ABI == ABI_AIX
18843 && DOT_SYMBOLS
18844 && TREE_CODE (decl) == FUNCTION_DECL)
18845 {
18846 static const char * const visibility_types[] = {
18847 NULL, "internal", "hidden", "protected"
18848 };
18849
18850 const char *name, *type;
18851
18852 name = ((* targetm.strip_name_encoding)
18853 (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl))));
18854 type = visibility_types[vis];
18855
18856 fprintf (asm_out_file, "\t.%s\t%s\n", type, name);
18857 fprintf (asm_out_file, "\t.%s\t.%s\n", type, name);
18858 }
18859 else
18860 default_assemble_visibility (decl, vis);
18861 }
18862 #endif
18863 \f
18864 enum rtx_code
18865 rs6000_reverse_condition (enum machine_mode mode, enum rtx_code code)
18866 {
18867 /* Reversal of FP compares needs care -- an ordered compare
18868 becomes an unordered compare and vice versa. */
18869 if (mode == CCFPmode
18870 && (!flag_finite_math_only
18871 || code == UNLT || code == UNLE || code == UNGT || code == UNGE
18872 || code == UNEQ || code == LTGT))
18873 return reverse_condition_maybe_unordered (code);
18874 else
18875 return reverse_condition (code);
18876 }
18877
18878 /* Generate a compare for the comparison CMP in mode MODE. Return a
18879 brand-new rtx that represents the result of the compare. */
18880
18881 static rtx
18882 rs6000_generate_compare (rtx cmp, enum machine_mode mode)
18883 {
18884 enum machine_mode comp_mode;
18885 rtx compare_result;
18886 enum rtx_code code = GET_CODE (cmp);
18887 rtx op0 = XEXP (cmp, 0);
18888 rtx op1 = XEXP (cmp, 1);
18889
18890 if (FLOAT_MODE_P (mode))
18891 comp_mode = CCFPmode;
18892 else if (code == GTU || code == LTU
18893 || code == GEU || code == LEU)
18894 comp_mode = CCUNSmode;
18895 else if ((code == EQ || code == NE)
18896 && unsigned_reg_p (op0)
18897 && (unsigned_reg_p (op1)
18898 || (CONST_INT_P (op1) && INTVAL (op1) != 0)))
18899 /* These are unsigned values, perhaps there will be a later
18900 ordering compare that can be shared with this one. */
18901 comp_mode = CCUNSmode;
18902 else
18903 comp_mode = CCmode;
18904
18905 /* If we have an unsigned compare, make sure we don't have a signed value as
18906 an immediate. */
18907 if (comp_mode == CCUNSmode && GET_CODE (op1) == CONST_INT
18908 && INTVAL (op1) < 0)
18909 {
18910 op0 = copy_rtx_if_shared (op0);
18911 op1 = force_reg (GET_MODE (op0), op1);
18912 cmp = gen_rtx_fmt_ee (code, GET_MODE (cmp), op0, op1);
18913 }
18914
18915 /* First, the compare. */
18916 compare_result = gen_reg_rtx (comp_mode);
18917
18918 /* E500 FP compare instructions on the GPRs. Yuck! */
18919 if ((!TARGET_FPRS && TARGET_HARD_FLOAT)
18920 && FLOAT_MODE_P (mode))
18921 {
18922 rtx cmp, or_result, compare_result2;
18923 enum machine_mode op_mode = GET_MODE (op0);
18924 bool reverse_p;
18925
18926 if (op_mode == VOIDmode)
18927 op_mode = GET_MODE (op1);
18928
18929 /* First reverse the condition codes that aren't directly supported. */
18930 switch (code)
18931 {
18932 case NE:
18933 case UNLT:
18934 case UNLE:
18935 case UNGT:
18936 case UNGE:
18937 code = reverse_condition_maybe_unordered (code);
18938 reverse_p = true;
18939 break;
18940
18941 case EQ:
18942 case LT:
18943 case LE:
18944 case GT:
18945 case GE:
18946 reverse_p = false;
18947 break;
18948
18949 default:
18950 gcc_unreachable ();
18951 }
18952
18953 /* The E500 FP compare instructions toggle the GT bit (CR bit 1) only.
18954 This explains the following mess. */
18955
18956 switch (code)
18957 {
18958 case EQ:
18959 switch (op_mode)
18960 {
18961 case SFmode:
18962 cmp = (flag_finite_math_only && !flag_trapping_math)
18963 ? gen_tstsfeq_gpr (compare_result, op0, op1)
18964 : gen_cmpsfeq_gpr (compare_result, op0, op1);
18965 break;
18966
18967 case DFmode:
18968 cmp = (flag_finite_math_only && !flag_trapping_math)
18969 ? gen_tstdfeq_gpr (compare_result, op0, op1)
18970 : gen_cmpdfeq_gpr (compare_result, op0, op1);
18971 break;
18972
18973 case TFmode:
18974 cmp = (flag_finite_math_only && !flag_trapping_math)
18975 ? gen_tsttfeq_gpr (compare_result, op0, op1)
18976 : gen_cmptfeq_gpr (compare_result, op0, op1);
18977 break;
18978
18979 default:
18980 gcc_unreachable ();
18981 }
18982 break;
18983
18984 case GT:
18985 case GE:
18986 switch (op_mode)
18987 {
18988 case SFmode:
18989 cmp = (flag_finite_math_only && !flag_trapping_math)
18990 ? gen_tstsfgt_gpr (compare_result, op0, op1)
18991 : gen_cmpsfgt_gpr (compare_result, op0, op1);
18992 break;
18993
18994 case DFmode:
18995 cmp = (flag_finite_math_only && !flag_trapping_math)
18996 ? gen_tstdfgt_gpr (compare_result, op0, op1)
18997 : gen_cmpdfgt_gpr (compare_result, op0, op1);
18998 break;
18999
19000 case TFmode:
19001 cmp = (flag_finite_math_only && !flag_trapping_math)
19002 ? gen_tsttfgt_gpr (compare_result, op0, op1)
19003 : gen_cmptfgt_gpr (compare_result, op0, op1);
19004 break;
19005
19006 default:
19007 gcc_unreachable ();
19008 }
19009 break;
19010
19011 case LT:
19012 case LE:
19013 switch (op_mode)
19014 {
19015 case SFmode:
19016 cmp = (flag_finite_math_only && !flag_trapping_math)
19017 ? gen_tstsflt_gpr (compare_result, op0, op1)
19018 : gen_cmpsflt_gpr (compare_result, op0, op1);
19019 break;
19020
19021 case DFmode:
19022 cmp = (flag_finite_math_only && !flag_trapping_math)
19023 ? gen_tstdflt_gpr (compare_result, op0, op1)
19024 : gen_cmpdflt_gpr (compare_result, op0, op1);
19025 break;
19026
19027 case TFmode:
19028 cmp = (flag_finite_math_only && !flag_trapping_math)
19029 ? gen_tsttflt_gpr (compare_result, op0, op1)
19030 : gen_cmptflt_gpr (compare_result, op0, op1);
19031 break;
19032
19033 default:
19034 gcc_unreachable ();
19035 }
19036 break;
19037
19038 default:
19039 gcc_unreachable ();
19040 }
19041
19042 /* Synthesize LE and GE from LT/GT || EQ. */
19043 if (code == LE || code == GE)
19044 {
19045 emit_insn (cmp);
19046
19047 compare_result2 = gen_reg_rtx (CCFPmode);
19048
19049 /* Do the EQ. */
19050 switch (op_mode)
19051 {
19052 case SFmode:
19053 cmp = (flag_finite_math_only && !flag_trapping_math)
19054 ? gen_tstsfeq_gpr (compare_result2, op0, op1)
19055 : gen_cmpsfeq_gpr (compare_result2, op0, op1);
19056 break;
19057
19058 case DFmode:
19059 cmp = (flag_finite_math_only && !flag_trapping_math)
19060 ? gen_tstdfeq_gpr (compare_result2, op0, op1)
19061 : gen_cmpdfeq_gpr (compare_result2, op0, op1);
19062 break;
19063
19064 case TFmode:
19065 cmp = (flag_finite_math_only && !flag_trapping_math)
19066 ? gen_tsttfeq_gpr (compare_result2, op0, op1)
19067 : gen_cmptfeq_gpr (compare_result2, op0, op1);
19068 break;
19069
19070 default:
19071 gcc_unreachable ();
19072 }
19073
19074 emit_insn (cmp);
19075
19076 /* OR them together. */
19077 or_result = gen_reg_rtx (CCFPmode);
19078 cmp = gen_e500_cr_ior_compare (or_result, compare_result,
19079 compare_result2);
19080 compare_result = or_result;
19081 }
19082
19083 code = reverse_p ? NE : EQ;
19084
19085 emit_insn (cmp);
19086 }
19087 else
19088 {
19089 /* Generate XLC-compatible TFmode compare as PARALLEL with extra
19090 CLOBBERs to match cmptf_internal2 pattern. */
19091 if (comp_mode == CCFPmode && TARGET_XL_COMPAT
19092 && GET_MODE (op0) == TFmode
19093 && !TARGET_IEEEQUAD
19094 && TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_LONG_DOUBLE_128)
19095 emit_insn (gen_rtx_PARALLEL (VOIDmode,
19096 gen_rtvec (10,
19097 gen_rtx_SET (VOIDmode,
19098 compare_result,
19099 gen_rtx_COMPARE (comp_mode, op0, op1)),
19100 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19101 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19102 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19103 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19104 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19105 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19106 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19107 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (DFmode)),
19108 gen_rtx_CLOBBER (VOIDmode, gen_rtx_SCRATCH (Pmode)))));
19109 else if (GET_CODE (op1) == UNSPEC
19110 && XINT (op1, 1) == UNSPEC_SP_TEST)
19111 {
19112 rtx op1b = XVECEXP (op1, 0, 0);
19113 comp_mode = CCEQmode;
19114 compare_result = gen_reg_rtx (CCEQmode);
19115 if (TARGET_64BIT)
19116 emit_insn (gen_stack_protect_testdi (compare_result, op0, op1b));
19117 else
19118 emit_insn (gen_stack_protect_testsi (compare_result, op0, op1b));
19119 }
19120 else
19121 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
19122 gen_rtx_COMPARE (comp_mode, op0, op1)));
19123 }
19124
19125 /* Some kinds of FP comparisons need an OR operation;
19126 under flag_finite_math_only we don't bother. */
19127 if (FLOAT_MODE_P (mode)
19128 && !flag_finite_math_only
19129 && !(TARGET_HARD_FLOAT && !TARGET_FPRS)
19130 && (code == LE || code == GE
19131 || code == UNEQ || code == LTGT
19132 || code == UNGT || code == UNLT))
19133 {
19134 enum rtx_code or1, or2;
19135 rtx or1_rtx, or2_rtx, compare2_rtx;
19136 rtx or_result = gen_reg_rtx (CCEQmode);
19137
19138 switch (code)
19139 {
19140 case LE: or1 = LT; or2 = EQ; break;
19141 case GE: or1 = GT; or2 = EQ; break;
19142 case UNEQ: or1 = UNORDERED; or2 = EQ; break;
19143 case LTGT: or1 = LT; or2 = GT; break;
19144 case UNGT: or1 = UNORDERED; or2 = GT; break;
19145 case UNLT: or1 = UNORDERED; or2 = LT; break;
19146 default: gcc_unreachable ();
19147 }
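/* e.g. LE is synthesized here as (LT | EQ) on the CR bits; the IOR
   compared against const_true in CCEQmode is matched by a CR-logical
   pattern (typically a cror instruction).  */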
19148 validate_condition_mode (or1, comp_mode);
19149 validate_condition_mode (or2, comp_mode);
19150 or1_rtx = gen_rtx_fmt_ee (or1, SImode, compare_result, const0_rtx);
19151 or2_rtx = gen_rtx_fmt_ee (or2, SImode, compare_result, const0_rtx);
19152 compare2_rtx = gen_rtx_COMPARE (CCEQmode,
19153 gen_rtx_IOR (SImode, or1_rtx, or2_rtx),
19154 const_true_rtx);
19155 emit_insn (gen_rtx_SET (VOIDmode, or_result, compare2_rtx));
19156
19157 compare_result = or_result;
19158 code = EQ;
19159 }
19160
19161 validate_condition_mode (code, GET_MODE (compare_result));
19162
19163 return gen_rtx_fmt_ee (code, VOIDmode, compare_result, const0_rtx);
19164 }
19165
19166
19167 /* Emit the RTL for an sISEL pattern. */
19168
19169 void
19170 rs6000_emit_sISEL (enum machine_mode mode ATTRIBUTE_UNUSED, rtx operands[])
19171 {
19172 rs6000_emit_int_cmove (operands[0], operands[1], const1_rtx, const0_rtx);
19173 }
19174
19175 void
19176 rs6000_emit_sCOND (enum machine_mode mode, rtx operands[])
19177 {
19178 rtx condition_rtx;
19179 enum machine_mode op_mode;
19180 enum rtx_code cond_code;
19181 rtx result = operands[0];
19182
19183 if (TARGET_ISEL && (mode == SImode || mode == DImode))
19184 {
19185 rs6000_emit_sISEL (mode, operands);
19186 return;
19187 }
19188
19189 condition_rtx = rs6000_generate_compare (operands[1], mode);
19190 cond_code = GET_CODE (condition_rtx);
19191
19192 if (FLOAT_MODE_P (mode)
19193 && !TARGET_FPRS && TARGET_HARD_FLOAT)
19194 {
19195 rtx t;
19196
19197 PUT_MODE (condition_rtx, SImode);
19198 t = XEXP (condition_rtx, 0);
19199
19200 gcc_assert (cond_code == NE || cond_code == EQ);
19201
19202 if (cond_code == NE)
19203 emit_insn (gen_e500_flip_gt_bit (t, t));
19204
19205 emit_insn (gen_move_from_CR_gt_bit (result, t));
19206 return;
19207 }
19208
19209 if (cond_code == NE
19210 || cond_code == GE || cond_code == LE
19211 || cond_code == GEU || cond_code == LEU
19212 || cond_code == ORDERED || cond_code == UNGE || cond_code == UNLE)
19213 {
19214 rtx not_result = gen_reg_rtx (CCEQmode);
19215 rtx not_op, rev_cond_rtx;
19216 enum machine_mode cc_mode;
19217
19218 cc_mode = GET_MODE (XEXP (condition_rtx, 0));
19219
19220 rev_cond_rtx = gen_rtx_fmt_ee (rs6000_reverse_condition (cc_mode, cond_code),
19221 SImode, XEXP (condition_rtx, 0), const0_rtx);
19222 not_op = gen_rtx_COMPARE (CCEQmode, rev_cond_rtx, const0_rtx);
19223 emit_insn (gen_rtx_SET (VOIDmode, not_result, not_op));
19224 condition_rtx = gen_rtx_EQ (VOIDmode, not_result, const0_rtx);
19225 }
19226
19227 op_mode = GET_MODE (XEXP (operands[1], 0));
19228 if (op_mode == VOIDmode)
19229 op_mode = GET_MODE (XEXP (operands[1], 1));
19230
19231 if (TARGET_POWERPC64 && (op_mode == DImode || FLOAT_MODE_P (mode)))
19232 {
19233 PUT_MODE (condition_rtx, DImode);
19234 convert_move (result, condition_rtx, 0);
19235 }
19236 else
19237 {
19238 PUT_MODE (condition_rtx, SImode);
19239 emit_insn (gen_rtx_SET (VOIDmode, result, condition_rtx));
19240 }
19241 }
19242
19243 /* Emit a conditional branch: test the comparison OPERANDS[0] in MODE and branch to the label OPERANDS[3]. */
19244
19245 void
19246 rs6000_emit_cbranch (enum machine_mode mode, rtx operands[])
19247 {
19248 rtx condition_rtx, loc_ref;
19249
19250 condition_rtx = rs6000_generate_compare (operands[0], mode);
19251 loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
19252 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
19253 gen_rtx_IF_THEN_ELSE (VOIDmode, condition_rtx,
19254 loc_ref, pc_rtx)));
19255 }
19256
19257 /* Return the string to output a conditional branch to LABEL, which is
19258 the operand template of the label, or NULL if the branch is really a
19259 conditional return.
19260
19261 OP is the conditional expression. XEXP (OP, 0) is assumed to be a
19262 condition code register and its mode specifies what kind of
19263 comparison we made.
19264
19265 REVERSED is nonzero if we should reverse the sense of the comparison.
19266
19267 INSN is the insn. */
19268
19269 char *
19270 output_cbranch (rtx op, const char *label, int reversed, rtx_insn *insn)
19271 {
19272 static char string[64];
19273 enum rtx_code code = GET_CODE (op);
19274 rtx cc_reg = XEXP (op, 0);
19275 enum machine_mode mode = GET_MODE (cc_reg);
19276 int cc_regno = REGNO (cc_reg) - CR0_REGNO;
19277 int need_longbranch = label != NULL && get_attr_length (insn) == 8;
19278 int really_reversed = reversed ^ need_longbranch;
19279 char *s = string;
19280 const char *ccode;
19281 const char *pred;
19282 rtx note;
19283
19284 validate_condition_mode (code, mode);
19285
19286 /* Work out which way this really branches. We could use
19287 reverse_condition_maybe_unordered here always but this
19288 makes the resulting assembler clearer. */
19289 if (really_reversed)
19290 {
19291 /* Reversal of FP compares needs care -- an ordered compare
19292 becomes an unordered compare and vice versa. */
19293 if (mode == CCFPmode)
19294 code = reverse_condition_maybe_unordered (code);
19295 else
19296 code = reverse_condition (code);
19297 }
19298
19299 if ((!TARGET_FPRS && TARGET_HARD_FLOAT) && mode == CCFPmode)
19300 {
19301 /* The efscmp/tst* instructions twiddle bit 2, which maps nicely
19302 to the GT bit. */
19303 switch (code)
19304 {
19305 case EQ:
19306 /* Opposite of GT. */
19307 code = GT;
19308 break;
19309
19310 case NE:
19311 code = UNLE;
19312 break;
19313
19314 default:
19315 gcc_unreachable ();
19316 }
19317 }
19318
19319 switch (code)
19320 {
19321 /* Not all of these are actually distinct opcodes, but
19322 we distinguish them for clarity of the resulting assembler. */
19323 case NE: case LTGT:
19324 ccode = "ne"; break;
19325 case EQ: case UNEQ:
19326 ccode = "eq"; break;
19327 case GE: case GEU:
19328 ccode = "ge"; break;
19329 case GT: case GTU: case UNGT:
19330 ccode = "gt"; break;
19331 case LE: case LEU:
19332 ccode = "le"; break;
19333 case LT: case LTU: case UNLT:
19334 ccode = "lt"; break;
19335 case UNORDERED: ccode = "un"; break;
19336 case ORDERED: ccode = "nu"; break;
19337 case UNGE: ccode = "nl"; break;
19338 case UNLE: ccode = "ng"; break;
19339 default:
19340 gcc_unreachable ();
19341 }
19342
19343 /* Maybe we have a guess as to how likely the branch is. */
19344 pred = "";
19345 note = find_reg_note (insn, REG_BR_PROB, NULL_RTX);
19346 if (note != NULL_RTX)
19347 {
19348 /* PROB is the difference from 50%. */
19349 int prob = XINT (note, 0) - REG_BR_PROB_BASE / 2;
19350
19351 /* Only hint for highly probable/improbable branches on newer
19352 cpus as static prediction overrides processor dynamic
19353 prediction. For older cpus we may as well always hint, but
19354 assume not taken for branches that are very close to 50% as a
19355 mispredicted taken branch is more expensive than a
19356 mispredicted not-taken branch. */
19357 if (rs6000_always_hint
19358 || (abs (prob) > REG_BR_PROB_BASE / 100 * 48
19359 && br_prob_note_reliable_p (note)))
19360 {
19361 if (abs (prob) > REG_BR_PROB_BASE / 20
19362 && ((prob > 0) ^ need_longbranch))
19363 pred = "+";
19364 else
19365 pred = "-";
19366 }
19367 }
19368
19369 if (label == NULL)
19370 s += sprintf (s, "b%slr%s ", ccode, pred);
19371 else
19372 s += sprintf (s, "b%s%s ", ccode, pred);
19373
19374 /* We need to escape any '%' characters in the reg_names string.
19375 Assume they'd only be the first character.... */
19376 if (reg_names[cc_regno + CR0_REGNO][0] == '%')
19377 *s++ = '%';
19378 s += sprintf (s, "%s", reg_names[cc_regno + CR0_REGNO]);
19379
19380 if (label != NULL)
19381 {
19382 /* If the branch distance was too far, we may have to use an
19383 unconditional branch to go the distance. */
19384 if (need_longbranch)
19385 s += sprintf (s, ",$+8\n\tb %s", label);
19386 else
19387 s += sprintf (s, ",%s", label);
19388 }
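/* Schematically: a short likely branch comes out as "beq+ <cr>,<label>",
   while an out-of-range beq becomes "bne <cr>,$+8" followed by an
   unconditional "b <label>" (note the reversed condition).  */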
19389
19390 return string;
19391 }
19392
19393 /* Return the string to flip the GT bit on a CR. */
19394 char *
19395 output_e500_flip_gt_bit (rtx dst, rtx src)
19396 {
19397 static char string[64];
19398 int a, b;
19399
19400 gcc_assert (GET_CODE (dst) == REG && CR_REGNO_P (REGNO (dst))
19401 && GET_CODE (src) == REG && CR_REGNO_P (REGNO (src)));
19402
19403 /* GT bit. */
19404 a = 4 * (REGNO (dst) - CR0_REGNO) + 1;
19405 b = 4 * (REGNO (src) - CR0_REGNO) + 1;
19406
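/* e.g. with DST and SRC both CR6 this returns "crnot 25,25", since the
   GT bit of CR6 is CR bit 4*6+1 = 25.  */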
19407 sprintf (string, "crnot %d,%d", a, b);
19408 return string;
19409 }
19410
19411 /* Return insn for VSX or Altivec comparisons. */
19412
19413 static rtx
19414 rs6000_emit_vector_compare_inner (enum rtx_code code, rtx op0, rtx op1)
19415 {
19416 rtx mask;
19417 enum machine_mode mode = GET_MODE (op0);
19418
19419 switch (code)
19420 {
19421 default:
19422 break;
19423
19424 case GE:
19425 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
19426 return NULL_RTX;
19427
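/* Fall through: FP vector GE has a direct instruction, so only the
   integer-vector case had to bail out above.  */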
19428 case EQ:
19429 case GT:
19430 case GTU:
19431 case ORDERED:
19432 case UNORDERED:
19433 case UNEQ:
19434 case LTGT:
19435 mask = gen_reg_rtx (mode);
19436 emit_insn (gen_rtx_SET (VOIDmode,
19437 mask,
19438 gen_rtx_fmt_ee (code, mode, op0, op1)));
19439 return mask;
19440 }
19441
19442 return NULL_RTX;
19443 }
19444
19445 /* Emit vector compare for operands OP0 and OP1 using code RCODE.
19446 DMODE is the expected destination mode. This is a recursive function. */
19447
19448 static rtx
19449 rs6000_emit_vector_compare (enum rtx_code rcode,
19450 rtx op0, rtx op1,
19451 enum machine_mode dmode)
19452 {
19453 rtx mask;
19454 bool swap_operands = false;
19455 bool try_again = false;
19456
19457 gcc_assert (VECTOR_UNIT_ALTIVEC_OR_VSX_P (dmode));
19458 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
19459
19460 /* See if the comparison works as is. */
19461 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
19462 if (mask)
19463 return mask;
19464
19465 switch (rcode)
19466 {
19467 case LT:
19468 rcode = GT;
19469 swap_operands = true;
19470 try_again = true;
19471 break;
19472 case LTU:
19473 rcode = GTU;
19474 swap_operands = true;
19475 try_again = true;
19476 break;
19477 case NE:
19478 case UNLE:
19479 case UNLT:
19480 case UNGE:
19481 case UNGT:
19482 /* Invert condition and try again.
19483 e.g., A != B becomes ~(A==B). */
19484 {
19485 enum rtx_code rev_code;
19486 enum insn_code nor_code;
19487 rtx mask2;
19488
19489 rev_code = reverse_condition_maybe_unordered (rcode);
19490 if (rev_code == UNKNOWN)
19491 return NULL_RTX;
19492
19493 nor_code = optab_handler (one_cmpl_optab, dmode);
19494 if (nor_code == CODE_FOR_nothing)
19495 return NULL_RTX;
19496
19497 mask2 = rs6000_emit_vector_compare (rev_code, op0, op1, dmode);
19498 if (!mask2)
19499 return NULL_RTX;
19500
19501 mask = gen_reg_rtx (dmode);
19502 emit_insn (GEN_FCN (nor_code) (mask, mask2));
19503 return mask;
19504 }
19505 break;
19506 case GE:
19507 case GEU:
19508 case LE:
19509 case LEU:
19510 /* Try GT/GTU/LT/LTU ORed with EQ; e.g. a >= b becomes (a > b) | (a == b). */
19511 {
19512 rtx c_rtx, eq_rtx;
19513 enum insn_code ior_code;
19514 enum rtx_code new_code;
19515
19516 switch (rcode)
19517 {
19518 case GE:
19519 new_code = GT;
19520 break;
19521
19522 case GEU:
19523 new_code = GTU;
19524 break;
19525
19526 case LE:
19527 new_code = LT;
19528 break;
19529
19530 case LEU:
19531 new_code = LTU;
19532 break;
19533
19534 default:
19535 gcc_unreachable ();
19536 }
19537
19538 ior_code = optab_handler (ior_optab, dmode);
19539 if (ior_code == CODE_FOR_nothing)
19540 return NULL_RTX;
19541
19542 c_rtx = rs6000_emit_vector_compare (new_code, op0, op1, dmode);
19543 if (!c_rtx)
19544 return NULL_RTX;
19545
19546 eq_rtx = rs6000_emit_vector_compare (EQ, op0, op1, dmode);
19547 if (!eq_rtx)
19548 return NULL_RTX;
19549
19550 mask = gen_reg_rtx (dmode);
19551 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
19552 return mask;
19553 }
19554 break;
19555 default:
19556 return NULL_RTX;
19557 }
19558
19559 if (try_again)
19560 {
19561 if (swap_operands)
19562 {
19563 rtx tmp;
19564 tmp = op0;
19565 op0 = op1;
19566 op1 = tmp;
19567 }
19568
19569 mask = rs6000_emit_vector_compare_inner (rcode, op0, op1);
19570 if (mask)
19571 return mask;
19572 }
19573
19574 /* You only get two chances. */
19575 return NULL_RTX;
19576 }
19577
19578 /* Emit vector conditional expression. DEST is destination. OP_TRUE and
19579 OP_FALSE are two VEC_COND_EXPR operands. CC_OP0 and CC_OP1 are the two
19580 operands for the relation operation COND. */
19581
19582 int
19583 rs6000_emit_vector_cond_expr (rtx dest, rtx op_true, rtx op_false,
19584 rtx cond, rtx cc_op0, rtx cc_op1)
19585 {
19586 enum machine_mode dest_mode = GET_MODE (dest);
19587 enum machine_mode mask_mode = GET_MODE (cc_op0);
19588 enum rtx_code rcode = GET_CODE (cond);
19589 enum machine_mode cc_mode = CCmode;
19590 rtx mask;
19591 rtx cond2;
19592 rtx tmp;
19593 bool invert_move = false;
19594
19595 if (VECTOR_UNIT_NONE_P (dest_mode))
19596 return 0;
19597
19598 gcc_assert (GET_MODE_SIZE (dest_mode) == GET_MODE_SIZE (mask_mode)
19599 && GET_MODE_NUNITS (dest_mode) == GET_MODE_NUNITS (mask_mode));
19600
19601 switch (rcode)
19602 {
19603 /* Swap operands if we can, and fall back to doing the operation as
19604 specified, and doing a NOR to invert the test. */
19605 case NE:
19606 case UNLE:
19607 case UNLT:
19608 case UNGE:
19609 case UNGT:
19610 /* Invert condition and try again.
19611 e.g., A = (B != C) ? D : E becomes A = (B == C) ? E : D. */
19612 invert_move = true;
19613 rcode = reverse_condition_maybe_unordered (rcode);
19614 if (rcode == UNKNOWN)
19615 return 0;
19616 break;
19617
19618 /* Mark unsigned tests with CCUNSmode. */
19619 case GTU:
19620 case GEU:
19621 case LTU:
19622 case LEU:
19623 cc_mode = CCUNSmode;
19624 break;
19625
19626 default:
19627 break;
19628 }
19629
19630 /* Get the vector mask for the given relational operations. */
19631 mask = rs6000_emit_vector_compare (rcode, cc_op0, cc_op1, mask_mode);
19632
19633 if (!mask)
19634 return 0;
19635
19636 if (invert_move)
19637 {
19638 tmp = op_true;
19639 op_true = op_false;
19640 op_false = tmp;
19641 }
19642
19643 cond2 = gen_rtx_fmt_ee (NE, cc_mode, gen_lowpart (dest_mode, mask),
19644 CONST0_RTX (dest_mode));
19645 emit_insn (gen_rtx_SET (VOIDmode,
19646 dest,
19647 gen_rtx_IF_THEN_ELSE (dest_mode,
19648 cond2,
19649 op_true,
19650 op_false)));
19651 return 1;
19652 }
19653
19654 /* Emit a conditional move: move TRUE_COND to DEST if OP of the
19655 operands of the last comparison is nonzero/true, FALSE_COND if it
19656 is zero/false. Return 0 if the hardware has no such operation. */
19657
19658 int
19659 rs6000_emit_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
19660 {
19661 enum rtx_code code = GET_CODE (op);
19662 rtx op0 = XEXP (op, 0);
19663 rtx op1 = XEXP (op, 1);
19664 REAL_VALUE_TYPE c1;
19665 enum machine_mode compare_mode = GET_MODE (op0);
19666 enum machine_mode result_mode = GET_MODE (dest);
19667 rtx temp;
19668 bool is_against_zero;
19669
19670 /* These modes should always match. */
19671 if (GET_MODE (op1) != compare_mode
19672 /* In the isel case however, we can use a compare immediate, so
19673 op1 may be a small constant. */
19674 && (!TARGET_ISEL || !short_cint_operand (op1, VOIDmode)))
19675 return 0;
19676 if (GET_MODE (true_cond) != result_mode)
19677 return 0;
19678 if (GET_MODE (false_cond) != result_mode)
19679 return 0;
19680
19681 /* Don't allow using floating point comparisons for integer results for
19682 now. */
19683 if (FLOAT_MODE_P (compare_mode) && !FLOAT_MODE_P (result_mode))
19684 return 0;
19685
19686 /* First, work out if the hardware can do this at all, or
19687 if it's too slow.... */
19688 if (!FLOAT_MODE_P (compare_mode))
19689 {
19690 if (TARGET_ISEL)
19691 return rs6000_emit_int_cmove (dest, op, true_cond, false_cond);
19692 return 0;
19693 }
19694 else if (TARGET_HARD_FLOAT && !TARGET_FPRS
19695 && SCALAR_FLOAT_MODE_P (compare_mode))
19696 return 0;
19697
19698 is_against_zero = op1 == CONST0_RTX (compare_mode);
19699
19700 /* A floating-point subtract might overflow, underflow, or produce
19701 an inexact result, thus changing the floating-point flags, so it
19702 can't be generated if we care about that. It's safe if one side
19703 of the construct is zero, since then no subtract will be
19704 generated. */
19705 if (SCALAR_FLOAT_MODE_P (compare_mode)
19706 && flag_trapping_math && ! is_against_zero)
19707 return 0;
19708
19709 /* Eliminate half of the comparisons by switching operands; this
19710 makes the remaining code simpler. */
19711 if (code == UNLT || code == UNGT || code == UNORDERED || code == NE
19712 || code == LTGT || code == LT || code == UNLE)
19713 {
19714 code = reverse_condition_maybe_unordered (code);
19715 temp = true_cond;
19716 true_cond = false_cond;
19717 false_cond = temp;
19718 }
19719
19720 /* UNEQ and LTGT take four instructions for a comparison with zero;
19721 it'll probably be faster to use a branch here too. */
19722 if (code == UNEQ && HONOR_NANS (compare_mode))
19723 return 0;
19724
19725 if (GET_CODE (op1) == CONST_DOUBLE)
19726 REAL_VALUE_FROM_CONST_DOUBLE (c1, op1);
19727
19728 /* We're going to try to implement comparisons by performing
19729 a subtract, then comparing against zero. Unfortunately,
19730 Inf - Inf is NaN which is not zero, and so if we don't
19731 know that the operand is finite and the comparison
19732 would treat EQ different to UNORDERED, we can't do it. */
19733 if (HONOR_INFINITIES (compare_mode)
19734 && code != GT && code != UNGE
19735 && (GET_CODE (op1) != CONST_DOUBLE || real_isinf (&c1))
19736 /* Constructs of the form (a OP b ? a : b) are safe. */
19737 && ((! rtx_equal_p (op0, false_cond) && ! rtx_equal_p (op1, false_cond))
19738 || (! rtx_equal_p (op0, true_cond)
19739 && ! rtx_equal_p (op1, true_cond))))
19740 return 0;
19741
19742 /* At this point we know we can use fsel. */
19743
19744 /* Reduce the comparison to a comparison against zero. */
19745 if (! is_against_zero)
19746 {
19747 temp = gen_reg_rtx (compare_mode);
19748 emit_insn (gen_rtx_SET (VOIDmode, temp,
19749 gen_rtx_MINUS (compare_mode, op0, op1)));
19750 op0 = temp;
19751 op1 = CONST0_RTX (compare_mode);
19752 }
19753
19754 /* If we don't care about NaNs we can reduce some of the comparisons
19755 down to faster ones. */
19756 if (! HONOR_NANS (compare_mode))
19757 switch (code)
19758 {
19759 case GT:
19760 code = LE;
19761 temp = true_cond;
19762 true_cond = false_cond;
19763 false_cond = temp;
19764 break;
19765 case UNGE:
19766 code = GE;
19767 break;
19768 case UNEQ:
19769 code = EQ;
19770 break;
19771 default:
19772 break;
19773 }
19774
19775 /* Now, reduce everything down to a GE. */
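/* A sketch of the identities used below (fsel selects on op0 >= 0,
   treating NaN as false): op0 <= 0 iff -op0 >= 0; ORDERED (op0) iff
   fabs (op0) >= 0; op0 == 0 iff -fabs (op0) >= 0.  UNGE and GT each
   need a second conditional move.  */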
19776 switch (code)
19777 {
19778 case GE:
19779 break;
19780
19781 case LE:
19782 temp = gen_reg_rtx (compare_mode);
19783 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
19784 op0 = temp;
19785 break;
19786
19787 case ORDERED:
19788 temp = gen_reg_rtx (compare_mode);
19789 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_ABS (compare_mode, op0)));
19790 op0 = temp;
19791 break;
19792
19793 case EQ:
19794 temp = gen_reg_rtx (compare_mode);
19795 emit_insn (gen_rtx_SET (VOIDmode, temp,
19796 gen_rtx_NEG (compare_mode,
19797 gen_rtx_ABS (compare_mode, op0))));
19798 op0 = temp;
19799 break;
19800
19801 case UNGE:
19802 /* a UNGE 0 <-> (a GE 0 || -a UNLT 0) */
19803 temp = gen_reg_rtx (result_mode);
19804 emit_insn (gen_rtx_SET (VOIDmode, temp,
19805 gen_rtx_IF_THEN_ELSE (result_mode,
19806 gen_rtx_GE (VOIDmode,
19807 op0, op1),
19808 true_cond, false_cond)));
19809 false_cond = true_cond;
19810 true_cond = temp;
19811
19812 temp = gen_reg_rtx (compare_mode);
19813 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
19814 op0 = temp;
19815 break;
19816
19817 case GT:
19818 /* a GT 0 <-> (a GE 0 && -a UNLT 0) */
19819 temp = gen_reg_rtx (result_mode);
19820 emit_insn (gen_rtx_SET (VOIDmode, temp,
19821 gen_rtx_IF_THEN_ELSE (result_mode,
19822 gen_rtx_GE (VOIDmode,
19823 op0, op1),
19824 true_cond, false_cond)));
19825 true_cond = false_cond;
19826 false_cond = temp;
19827
19828 temp = gen_reg_rtx (compare_mode);
19829 emit_insn (gen_rtx_SET (VOIDmode, temp, gen_rtx_NEG (compare_mode, op0)));
19830 op0 = temp;
19831 break;
19832
19833 default:
19834 gcc_unreachable ();
19835 }
19836
19837 emit_insn (gen_rtx_SET (VOIDmode, dest,
19838 gen_rtx_IF_THEN_ELSE (result_mode,
19839 gen_rtx_GE (VOIDmode,
19840 op0, op1),
19841 true_cond, false_cond)));
19842 return 1;
19843 }
19844
19845 /* Same as above, but for ints (isel). */
19846
19847 static int
19848 rs6000_emit_int_cmove (rtx dest, rtx op, rtx true_cond, rtx false_cond)
19849 {
19850 rtx condition_rtx, cr;
19851 enum machine_mode mode = GET_MODE (dest);
19852 enum rtx_code cond_code;
19853 rtx (*isel_func) (rtx, rtx, rtx, rtx, rtx);
19854 bool signedp;
19855
19856 if (mode != SImode && (!TARGET_POWERPC64 || mode != DImode))
19857 return 0;
19858
19859 /* We still have to do the compare, because isel doesn't do a
19860 compare; it just looks at the CRx bits set by a previous compare
19861 instruction. */
19862 condition_rtx = rs6000_generate_compare (op, mode);
19863 cond_code = GET_CODE (condition_rtx);
19864 cr = XEXP (condition_rtx, 0);
19865 signedp = GET_MODE (cr) == CCmode;
19866
19867 isel_func = (mode == SImode
19868 ? (signedp ? gen_isel_signed_si : gen_isel_unsigned_si)
19869 : (signedp ? gen_isel_signed_di : gen_isel_unsigned_di));
19870
19871 switch (cond_code)
19872 {
19873 case LT: case GT: case LTU: case GTU: case EQ:
19874 /* isel handles these directly. */
19875 break;
19876
19877 default:
19878 /* We need to swap the sense of the comparison. */
19879 {
19880 rtx t = true_cond;
19881 true_cond = false_cond;
19882 false_cond = t;
19883 PUT_CODE (condition_rtx, reverse_condition (cond_code));
19884 }
19885 break;
19886 }
19887
19888 false_cond = force_reg (mode, false_cond);
19889 if (true_cond != const0_rtx)
19890 true_cond = force_reg (mode, true_cond);
19891
19892 emit_insn (isel_func (dest, condition_rtx, true_cond, false_cond, cr));
19893
19894 return 1;
19895 }
19896
19897 const char *
19898 output_isel (rtx *operands)
19899 {
19900 enum rtx_code code;
19901
19902 code = GET_CODE (operands[1]);
19903
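/* isel picks operand 2 when the tested CR bit is set and operand 3
   otherwise; conditions isel cannot test directly are handled by
   reversing the condition and swapping the two arms.  */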
19904 if (code == GE || code == GEU || code == LE || code == LEU || code == NE)
19905 {
19906 gcc_assert (GET_CODE (operands[2]) == REG
19907 && GET_CODE (operands[3]) == REG);
19908 PUT_CODE (operands[1], reverse_condition (code));
19909 return "isel %0,%3,%2,%j1";
19910 }
19911
19912 return "isel %0,%2,%3,%j1";
19913 }
19914
19915 void
19916 rs6000_emit_minmax (rtx dest, enum rtx_code code, rtx op0, rtx op1)
19917 {
19918 enum machine_mode mode = GET_MODE (op0);
19919 enum rtx_code c;
19920 rtx target;
19921
19922 /* VSX/altivec have direct min/max insns. */
19923 if ((code == SMAX || code == SMIN)
19924 && (VECTOR_UNIT_ALTIVEC_OR_VSX_P (mode)
19925 || (mode == SFmode && VECTOR_UNIT_VSX_P (DFmode))))
19926 {
19927 emit_insn (gen_rtx_SET (VOIDmode,
19928 dest,
19929 gen_rtx_fmt_ee (code, mode, op0, op1)));
19930 return;
19931 }
19932
19933 if (code == SMAX || code == SMIN)
19934 c = GE;
19935 else
19936 c = GEU;
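/* min/max as a conditional move: SMAX (a, b) = (a >= b ? a : b) and
   SMIN (a, b) = (a >= b ? b : a), and likewise unsigned with GEU.  */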
19937
19938 if (code == SMAX || code == UMAX)
19939 target = emit_conditional_move (dest, c, op0, op1, mode,
19940 op0, op1, mode, 0);
19941 else
19942 target = emit_conditional_move (dest, c, op0, op1, mode,
19943 op1, op0, mode, 0);
19944 gcc_assert (target);
19945 if (target != dest)
19946 emit_move_insn (dest, target);
19947 }
19948
19949 /* A subroutine of the atomic operation splitters. Jump to LABEL if
19950 COND is true. Mark the jump as unlikely to be taken. */
19951
19952 static void
19953 emit_unlikely_jump (rtx cond, rtx label)
19954 {
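/* REG_BR_PROB_BASE / 100 - 1 encodes a taken probability of just
   under 1%.  */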
19955 int very_unlikely = REG_BR_PROB_BASE / 100 - 1;
19956 rtx x;
19957
19958 x = gen_rtx_IF_THEN_ELSE (VOIDmode, cond, label, pc_rtx);
19959 x = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, x));
19960 add_int_reg_note (x, REG_BR_PROB, very_unlikely);
19961 }
19962
19963 /* A subroutine of the atomic operation splitters. Emit a load-locked
19964 instruction in MODE. For QI/HImode, possibly use a pattern that includes
19965 the zero_extend operation. */
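/* (These map onto the PowerPC larx family -- lbarx/lharx/lwarx/ldarx,
   and lqarx for TImode; the byte and halfword forms require a
   TARGET_SYNC_HI_QI cpu.)  */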
19966
19967 static void
19968 emit_load_locked (enum machine_mode mode, rtx reg, rtx mem)
19969 {
19970 rtx (*fn) (rtx, rtx) = NULL;
19971
19972 switch (mode)
19973 {
19974 case QImode:
19975 fn = gen_load_lockedqi;
19976 break;
19977 case HImode:
19978 fn = gen_load_lockedhi;
19979 break;
19980 case SImode:
19981 if (GET_MODE (mem) == QImode)
19982 fn = gen_load_lockedqi_si;
19983 else if (GET_MODE (mem) == HImode)
19984 fn = gen_load_lockedhi_si;
19985 else
19986 fn = gen_load_lockedsi;
19987 break;
19988 case DImode:
19989 fn = gen_load_lockeddi;
19990 break;
19991 case TImode:
19992 fn = gen_load_lockedti;
19993 break;
19994 default:
19995 gcc_unreachable ();
19996 }
19997 emit_insn (fn (reg, mem));
19998 }
19999
20000 /* A subroutine of the atomic operation splitters. Emit a store-conditional
20001 instruction in MODE. */
20002
20003 static void
20004 emit_store_conditional (enum machine_mode mode, rtx res, rtx mem, rtx val)
20005 {
20006 rtx (*fn) (rtx, rtx, rtx) = NULL;
20007
20008 switch (mode)
20009 {
20010 case QImode:
20011 fn = gen_store_conditionalqi;
20012 break;
20013 case HImode:
20014 fn = gen_store_conditionalhi;
20015 break;
20016 case SImode:
20017 fn = gen_store_conditionalsi;
20018 break;
20019 case DImode:
20020 fn = gen_store_conditionaldi;
20021 break;
20022 case TImode:
20023 fn = gen_store_conditionalti;
20024 break;
20025 default:
20026 gcc_unreachable ();
20027 }
20028
20029 /* Emit sync before stwcx. to address PPC405 erratum 77. */
20030 if (PPC405_ERRATUM77)
20031 emit_insn (gen_hwsync ());
20032
20033 emit_insn (fn (res, mem, val));
20034 }
20035
20036 /* Expand barriers before and after a load_locked/store_cond sequence. */
20037
20038 static rtx
20039 rs6000_pre_atomic_barrier (rtx mem, enum memmodel model)
20040 {
20041 rtx addr = XEXP (mem, 0);
20042 int strict_p = (reload_in_progress || reload_completed);
20043
20044 if (!legitimate_indirect_address_p (addr, strict_p)
20045 && !legitimate_indexed_address_p (addr, strict_p))
20046 {
20047 addr = force_reg (Pmode, addr);
20048 mem = replace_equiv_address_nv (mem, addr);
20049 }
20050
20051 switch (model)
20052 {
20053 case MEMMODEL_RELAXED:
20054 case MEMMODEL_CONSUME:
20055 case MEMMODEL_ACQUIRE:
20056 break;
20057 case MEMMODEL_RELEASE:
20058 case MEMMODEL_ACQ_REL:
20059 emit_insn (gen_lwsync ());
20060 break;
20061 case MEMMODEL_SEQ_CST:
20062 emit_insn (gen_hwsync ());
20063 break;
20064 default:
20065 gcc_unreachable ();
20066 }
20067 return mem;
20068 }
20069
20070 static void
20071 rs6000_post_atomic_barrier (enum memmodel model)
20072 {
20073 switch (model)
20074 {
20075 case MEMMODEL_RELAXED:
20076 case MEMMODEL_CONSUME:
20077 case MEMMODEL_RELEASE:
20078 break;
20079 case MEMMODEL_ACQUIRE:
20080 case MEMMODEL_ACQ_REL:
20081 case MEMMODEL_SEQ_CST:
20082 emit_insn (gen_isync ());
20083 break;
20084 default:
20085 gcc_unreachable ();
20086 }
20087 }
20088
20089 /* A subroutine of the various atomic expanders. For sub-word operations,
20090 we must adjust things to operate on SImode. Given the original MEM,
20091 return a new aligned memory. Also build and return the quantities by
20092 which to shift and mask. */
20093
20094 static rtx
20095 rs6000_adjust_atomic_subword (rtx orig_mem, rtx *pshift, rtx *pmask)
20096 {
20097 rtx addr, align, shift, mask, mem;
20098 HOST_WIDE_INT shift_mask;
20099 enum machine_mode mode = GET_MODE (orig_mem);
20100
20101 /* For smaller modes, we have to implement this via SImode. */
20102 shift_mask = (mode == QImode ? 0x18 : 0x10);
20103
20104 addr = XEXP (orig_mem, 0);
20105 addr = force_reg (GET_MODE (addr), addr);
20106
20107 /* Aligned memory containing the subword. Generate a new MEM. We
20108 do not want any of the existing MEM_ATTR data, as we're now
20109 accessing memory outside the original object. */
20110 align = expand_simple_binop (Pmode, AND, addr, GEN_INT (-4),
20111 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20112 mem = gen_rtx_MEM (SImode, align);
20113 MEM_VOLATILE_P (mem) = MEM_VOLATILE_P (orig_mem);
20114 if (MEM_ALIAS_SET (orig_mem) == ALIAS_SET_MEMORY_BARRIER)
20115 set_mem_alias_set (mem, ALIAS_SET_MEMORY_BARRIER);
20116
20117 /* Shift amount for subword relative to aligned word. */
20118 shift = gen_reg_rtx (SImode);
20119 addr = gen_lowpart (SImode, addr);
20120 emit_insn (gen_rlwinm (shift, addr, GEN_INT (3), GEN_INT (shift_mask)));
20121 if (BYTES_BIG_ENDIAN)
20122 shift = expand_simple_binop (SImode, XOR, shift, GEN_INT (shift_mask),
20123 shift, 1, OPTAB_LIB_WIDEN);
20124 *pshift = shift;
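/* Worked example (a sketch): for a QImode access at byte offset 1,
   the rlwinm above yields shift = 8 on little-endian; big-endian then
   XORs with 0x18, giving shift = 16, since the subword sits at the
   opposite end of the aligned word.  */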
20125
20126 /* Mask for insertion. */
20127 mask = expand_simple_binop (SImode, ASHIFT, GEN_INT (GET_MODE_MASK (mode)),
20128 shift, NULL_RTX, 1, OPTAB_LIB_WIDEN);
20129 *pmask = mask;
20130
20131 return mem;
20132 }
20133
20134 /* A subroutine of the various atomic expanders. For sub-word operands,
20135 combine OLDVAL and NEWVAL via MASK. Returns a new pseudo. */
20136
20137 static rtx
20138 rs6000_mask_atomic_subword (rtx oldval, rtx newval, rtx mask)
20139 {
20140 rtx x;
20141
20142 x = gen_reg_rtx (SImode);
20143 emit_insn (gen_rtx_SET (VOIDmode, x,
20144 gen_rtx_AND (SImode,
20145 gen_rtx_NOT (SImode, mask),
20146 oldval)));
20147
20148 x = expand_simple_binop (SImode, IOR, newval, x, x, 1, OPTAB_LIB_WIDEN);
20149
20150 return x;
20151 }
20152
20153 /* A subroutine of the various atomic expanders. For sub-word operands,
20154 extract WIDE to NARROW via SHIFT. */
20155
20156 static void
20157 rs6000_finish_atomic_subword (rtx narrow, rtx wide, rtx shift)
20158 {
20159 wide = expand_simple_binop (SImode, LSHIFTRT, wide, shift,
20160 wide, 1, OPTAB_LIB_WIDEN);
20161 emit_move_insn (narrow, gen_lowpart (GET_MODE (narrow), wide));
20162 }
20163
20164 /* Expand an atomic compare and swap operation. */
20165
20166 void
20167 rs6000_expand_atomic_compare_and_swap (rtx operands[])
20168 {
20169 rtx boolval, retval, mem, oldval, newval, cond;
20170 rtx label1, label2, x, mask, shift;
20171 enum machine_mode mode, orig_mode;
20172 enum memmodel mod_s, mod_f;
20173 bool is_weak;
20174
20175 boolval = operands[0];
20176 retval = operands[1];
20177 mem = operands[2];
20178 oldval = operands[3];
20179 newval = operands[4];
20180 is_weak = (INTVAL (operands[5]) != 0);
20181 mod_s = (enum memmodel) INTVAL (operands[6]);
20182 mod_f = (enum memmodel) INTVAL (operands[7]);
20183 orig_mode = mode = GET_MODE (mem);
20184
20185 mask = shift = NULL_RTX;
20186 if (mode == QImode || mode == HImode)
20187 {
20188 /* Before power8, we didn't have access to lbarx/lharx, so generate
20189 lwarx and shift/mask operations. With power8, we need to do the
20190 comparison in SImode, but the store is still done in QI/HImode. */
20191 oldval = convert_modes (SImode, mode, oldval, 1);
20192
20193 if (!TARGET_SYNC_HI_QI)
20194 {
20195 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
20196
20197 /* Shift and mask OLDVAL into position within the word. */
20198 oldval = expand_simple_binop (SImode, ASHIFT, oldval, shift,
20199 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20200
20201 /* Shift and mask NEWVAL into position within the word. */
20202 newval = convert_modes (SImode, mode, newval, 1);
20203 newval = expand_simple_binop (SImode, ASHIFT, newval, shift,
20204 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20205 }
20206
20207 /* Prepare to adjust the return value. */
20208 retval = gen_reg_rtx (SImode);
20209 mode = SImode;
20210 }
20211 else if (reg_overlap_mentioned_p (retval, oldval))
20212 oldval = copy_to_reg (oldval);
20213
20214 mem = rs6000_pre_atomic_barrier (mem, mod_s);
20215
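/* What follows expands, schematically, to this loop for the strong,
   word-sized case (64-bit uses ldarx/stdcx.):
	1:	lwarx	ret,0,mem
		cmpw	ret,oldval
		bne-	2f
		stwcx.	newval,0,mem
		bne-	1b
	2:  */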
20216 label1 = NULL_RTX;
20217 if (!is_weak)
20218 {
20219 label1 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
20220 emit_label (XEXP (label1, 0));
20221 }
20222 label2 = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
20223
20224 emit_load_locked (mode, retval, mem);
20225
20226 x = retval;
20227 if (mask)
20228 {
20229 x = expand_simple_binop (SImode, AND, retval, mask,
20230 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20231 }
20232
20233 cond = gen_reg_rtx (CCmode);
20234 /* If we have TImode, synthesize an equality test: XOR the doubleword halves pairwise, OR the results, and compare that with zero. */
20235 if (mode != TImode)
20236 x = gen_rtx_COMPARE (CCmode, x, oldval);
20237 else
20238 {
20239 rtx xor1_result = gen_reg_rtx (DImode);
20240 rtx xor2_result = gen_reg_rtx (DImode);
20241 rtx or_result = gen_reg_rtx (DImode);
20242 rtx new_word0 = simplify_gen_subreg (DImode, x, TImode, 0);
20243 rtx new_word1 = simplify_gen_subreg (DImode, x, TImode, 8);
20244 rtx old_word0 = simplify_gen_subreg (DImode, oldval, TImode, 0);
20245 rtx old_word1 = simplify_gen_subreg (DImode, oldval, TImode, 8);
20246
20247 emit_insn (gen_xordi3 (xor1_result, new_word0, old_word0));
20248 emit_insn (gen_xordi3 (xor2_result, new_word1, old_word1));
20249 emit_insn (gen_iordi3 (or_result, xor1_result, xor2_result));
20250 x = gen_rtx_COMPARE (CCmode, or_result, const0_rtx);
20251 }
20252
20253 emit_insn (gen_rtx_SET (VOIDmode, cond, x));
20254
20255 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20256 emit_unlikely_jump (x, label2);
20257
20258 x = newval;
20259 if (mask)
20260 x = rs6000_mask_atomic_subword (retval, newval, mask);
20261
20262 emit_store_conditional (orig_mode, cond, mem, x);
20263
20264 if (!is_weak)
20265 {
20266 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20267 emit_unlikely_jump (x, label1);
20268 }
20269
20270 if (mod_f != MEMMODEL_RELAXED)
20271 emit_label (XEXP (label2, 0));
20272
20273 rs6000_post_atomic_barrier (mod_s);
20274
20275 if (mod_f == MEMMODEL_RELAXED)
20276 emit_label (XEXP (label2, 0));
20277
20278 if (shift)
20279 rs6000_finish_atomic_subword (operands[1], retval, shift);
20280 else if (mode != GET_MODE (operands[1]))
20281 convert_move (operands[1], retval, 1);
20282
20283 /* In all cases, CR0 contains EQ on success, and NE on failure. */
20284 x = gen_rtx_EQ (SImode, cond, const0_rtx);
20285 emit_insn (gen_rtx_SET (VOIDmode, boolval, x));
20286 }
20287
20288 /* Expand an atomic exchange operation. */
20289
20290 void
20291 rs6000_expand_atomic_exchange (rtx operands[])
20292 {
20293 rtx retval, mem, val, cond;
20294 enum machine_mode mode;
20295 enum memmodel model;
20296 rtx label, x, mask, shift;
20297
20298 retval = operands[0];
20299 mem = operands[1];
20300 val = operands[2];
20301 model = (enum memmodel) INTVAL (operands[3]);
20302 mode = GET_MODE (mem);
20303
20304 mask = shift = NULL_RTX;
20305 if (!TARGET_SYNC_HI_QI && (mode == QImode || mode == HImode))
20306 {
20307 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
20308
20309 /* Shift and mask VAL into position within the word. */
20310 val = convert_modes (SImode, mode, val, 1);
20311 val = expand_simple_binop (SImode, ASHIFT, val, shift,
20312 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20313
20314 /* Prepare to adjust the return value. */
20315 retval = gen_reg_rtx (SImode);
20316 mode = SImode;
20317 }
20318
20319 mem = rs6000_pre_atomic_barrier (mem, model);
20320
20321 label = gen_rtx_LABEL_REF (VOIDmode, gen_label_rtx ());
20322 emit_label (XEXP (label, 0));
20323
20324 emit_load_locked (mode, retval, mem);
20325
20326 x = val;
20327 if (mask)
20328 x = rs6000_mask_atomic_subword (retval, val, mask);
20329
20330 cond = gen_reg_rtx (CCmode);
20331 emit_store_conditional (mode, cond, mem, x);
20332
20333 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20334 emit_unlikely_jump (x, label);
20335
20336 rs6000_post_atomic_barrier (model);
20337
20338 if (shift)
20339 rs6000_finish_atomic_subword (operands[0], retval, shift);
20340 }
20341
20342 /* Expand an atomic fetch-and-operate pattern. CODE is the binary operation
20343 to perform. MEM is the memory on which to operate. VAL is the second
20344 operand of the binary operator. BEFORE and AFTER are optional locations to
20345 return the value of MEM either before or after the operation. MODEL_RTX
20346 is a CONST_INT containing the memory model to use. */
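/* Schematically the expansion is a larx/stcx. loop (word case shown):
	1:	lwarx	before,0,mem
		<op>	after,before,val
		stwcx.	after,0,mem
		bne-	1b  */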
20347
20348 void
20349 rs6000_expand_atomic_op (enum rtx_code code, rtx mem, rtx val,
20350 rtx orig_before, rtx orig_after, rtx model_rtx)
20351 {
20352 enum memmodel model = (enum memmodel) INTVAL (model_rtx);
20353 enum machine_mode mode = GET_MODE (mem);
20354 enum machine_mode store_mode = mode;
20355 rtx label, x, cond, mask, shift;
20356 rtx before = orig_before, after = orig_after;
20357
20358 mask = shift = NULL_RTX;
20359 /* On power8, we want to use SImode for the operation while lbarx/lharx
20360 do the subword access natively. On previous systems, operate on the
20361 containing aligned word and shift/mask to get the proper byte or halfword. */
20362 if (mode == QImode || mode == HImode)
20363 {
20364 if (TARGET_SYNC_HI_QI)
20365 {
20366 val = convert_modes (SImode, mode, val, 1);
20367
20368 /* Prepare to adjust the return value. */
20369 before = gen_reg_rtx (SImode);
20370 if (after)
20371 after = gen_reg_rtx (SImode);
20372 mode = SImode;
20373 }
20374 else
20375 {
20376 mem = rs6000_adjust_atomic_subword (mem, &shift, &mask);
20377
20378 /* Shift and mask VAL into position within the word. */
20379 val = convert_modes (SImode, mode, val, 1);
20380 val = expand_simple_binop (SImode, ASHIFT, val, shift,
20381 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20382
20383 switch (code)
20384 {
20385 case IOR:
20386 case XOR:
20387 /* We've already zero-extended VAL. That is sufficient to
20388 make certain that it does not affect other bits. */
20389 mask = NULL;
20390 break;
20391
20392 case AND:
20393 /* If we make certain that all of the other bits in VAL are
20394 set, that will be sufficient to not affect other bits. */
20395 x = gen_rtx_NOT (SImode, mask);
20396 x = gen_rtx_IOR (SImode, x, val);
20397 emit_insn (gen_rtx_SET (VOIDmode, val, x));
20398 mask = NULL;
20399 break;
20400
20401 case NOT:
20402 case PLUS:
20403 case MINUS:
20404 /* These will all affect bits outside the field and need
20405 adjustment via MASK within the loop. */
20406 break;
20407
20408 default:
20409 gcc_unreachable ();
20410 }
20411
20412 /* Prepare to adjust the return value. */
20413 before = gen_reg_rtx (SImode);
20414 if (after)
20415 after = gen_reg_rtx (SImode);
20416 store_mode = mode = SImode;
20417 }
20418 }
20419
20420 mem = rs6000_pre_atomic_barrier (mem, model);
20421
20422 label = gen_label_rtx ();
20423 emit_label (label);
20424 label = gen_rtx_LABEL_REF (VOIDmode, label);
20425
20426 if (before == NULL_RTX)
20427 before = gen_reg_rtx (mode);
20428
20429 emit_load_locked (mode, before, mem);
20430
20431 if (code == NOT)
20432 {
20433 x = expand_simple_binop (mode, AND, before, val,
20434 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20435 after = expand_simple_unop (mode, NOT, x, after, 1);
20436 }
20437 else
20438 {
20439 after = expand_simple_binop (mode, code, before, val,
20440 after, 1, OPTAB_LIB_WIDEN);
20441 }
20442
20443 x = after;
20444 if (mask)
20445 {
20446 x = expand_simple_binop (SImode, AND, after, mask,
20447 NULL_RTX, 1, OPTAB_LIB_WIDEN);
20448 x = rs6000_mask_atomic_subword (before, x, mask);
20449 }
20450 else if (store_mode != mode)
20451 x = convert_modes (store_mode, mode, x, 1);
20452
20453 cond = gen_reg_rtx (CCmode);
20454 emit_store_conditional (store_mode, cond, mem, x);
20455
20456 x = gen_rtx_NE (VOIDmode, cond, const0_rtx);
20457 emit_unlikely_jump (x, label);
20458
20459 rs6000_post_atomic_barrier (model);
20460
20461 if (shift)
20462 {
20463 /* QImode/HImode on machines without lbarx/lharx where we do a lwarx and
20464 then do the calculations in an SImode register. */
20465 if (orig_before)
20466 rs6000_finish_atomic_subword (orig_before, before, shift);
20467 if (orig_after)
20468 rs6000_finish_atomic_subword (orig_after, after, shift);
20469 }
20470 else if (store_mode != mode)
20471 {
20472 /* QImode/HImode on machines with lbarx/lharx where we do the native
20473 operation and then do the calculations in an SImode register. */
20474 if (orig_before)
20475 convert_move (orig_before, before, 1);
20476 if (orig_after)
20477 convert_move (orig_after, after, 1);
20478 }
20479 else if (orig_after && after != orig_after)
20480 emit_move_insn (orig_after, after);
20481 }
20482
20483 /* Emit instructions to move SRC to DST. Called by splitters for
20484 multi-register moves. It will emit at most one instruction for
20485 each register that is accessed; that is, it won't emit li/lis pairs
20486 (or equivalent for 64-bit code). One of SRC or DST must be a hard
20487 register. */
20488
20489 void
20490 rs6000_split_multireg_move (rtx dst, rtx src)
20491 {
20492 /* The register number of the first register being moved. */
20493 int reg;
20494 /* The mode that is to be moved. */
20495 enum machine_mode mode;
20496 /* The mode that the move is being done in, and its size. */
20497 enum machine_mode reg_mode;
20498 int reg_mode_size;
20499 /* The number of registers that will be moved. */
20500 int nregs;
20501
20502 reg = REG_P (dst) ? REGNO (dst) : REGNO (src);
20503 mode = GET_MODE (dst);
20504 nregs = hard_regno_nregs[reg][mode];
20505 if (FP_REGNO_P (reg))
20506 reg_mode = DECIMAL_FLOAT_MODE_P (mode) ? DDmode :
20507 ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT) ? DFmode : SFmode);
20508 else if (ALTIVEC_REGNO_P (reg))
20509 reg_mode = V16QImode;
20510 else if (TARGET_E500_DOUBLE && mode == TFmode)
20511 reg_mode = DFmode;
20512 else
20513 reg_mode = word_mode;
20514 reg_mode_size = GET_MODE_SIZE (reg_mode);
20515
20516 gcc_assert (reg_mode_size * nregs == GET_MODE_SIZE (mode));
20517
20518 /* TDmode residing in FP registers is special, since the ISA requires that
20519 the lower-numbered word of a register pair is always the most significant
20520 word, even in little-endian mode. This does not match the usual subreg
20521 semantics, so we cannot use simplify_gen_subreg in those cases. Access
20522 the appropriate constituent registers "by hand" in little-endian mode.
20523
20524 Note we do not need to check for destructive overlap here since TDmode
20525 can only reside in even/odd register pairs. */
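/* For example, moving a TDmode value from (f10,f11) to (f12,f13) in
   little-endian mode copies f11 to f13 first, then f10 to f12.  */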
20526 if (FP_REGNO_P (reg) && DECIMAL_FLOAT_MODE_P (mode) && !BYTES_BIG_ENDIAN)
20527 {
20528 rtx p_src, p_dst;
20529 int i;
20530
20531 for (i = 0; i < nregs; i++)
20532 {
20533 if (REG_P (src) && FP_REGNO_P (REGNO (src)))
20534 p_src = gen_rtx_REG (reg_mode, REGNO (src) + nregs - 1 - i);
20535 else
20536 p_src = simplify_gen_subreg (reg_mode, src, mode,
20537 i * reg_mode_size);
20538
20539 if (REG_P (dst) && FP_REGNO_P (REGNO (dst)))
20540 p_dst = gen_rtx_REG (reg_mode, REGNO (dst) + nregs - 1 - i);
20541 else
20542 p_dst = simplify_gen_subreg (reg_mode, dst, mode,
20543 i * reg_mode_size);
20544
20545 emit_insn (gen_rtx_SET (VOIDmode, p_dst, p_src));
20546 }
20547
20548 return;
20549 }
20550
20551 if (REG_P (src) && REG_P (dst) && (REGNO (src) < REGNO (dst)))
20552 {
20553 /* Move register range backwards, if we might have destructive
20554 overlap. */
20555 int i;
20556 for (i = nregs - 1; i >= 0; i--)
20557 emit_insn (gen_rtx_SET (VOIDmode,
20558 simplify_gen_subreg (reg_mode, dst, mode,
20559 i * reg_mode_size),
20560 simplify_gen_subreg (reg_mode, src, mode,
20561 i * reg_mode_size)));
20562 }
20563 else
20564 {
20565 int i;
20566 int j = -1;
20567 bool used_update = false;
20568 rtx restore_basereg = NULL_RTX;
20569
20570 if (MEM_P (src) && INT_REGNO_P (reg))
20571 {
20572 rtx breg;
20573
20574 if (GET_CODE (XEXP (src, 0)) == PRE_INC
20575 || GET_CODE (XEXP (src, 0)) == PRE_DEC)
20576 {
20577 rtx delta_rtx;
20578 breg = XEXP (XEXP (src, 0), 0);
20579 delta_rtx = (GET_CODE (XEXP (src, 0)) == PRE_INC
20580 ? GEN_INT (GET_MODE_SIZE (GET_MODE (src)))
20581 : GEN_INT (-GET_MODE_SIZE (GET_MODE (src))));
20582 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
20583 src = replace_equiv_address (src, breg);
20584 }
20585 else if (! rs6000_offsettable_memref_p (src, reg_mode))
20586 {
20587 if (GET_CODE (XEXP (src, 0)) == PRE_MODIFY)
20588 {
20589 rtx basereg = XEXP (XEXP (src, 0), 0);
20590 if (TARGET_UPDATE)
20591 {
20592 rtx ndst = simplify_gen_subreg (reg_mode, dst, mode, 0);
20593 emit_insn (gen_rtx_SET (VOIDmode, ndst,
20594 gen_rtx_MEM (reg_mode, XEXP (src, 0))));
20595 used_update = true;
20596 }
20597 else
20598 emit_insn (gen_rtx_SET (VOIDmode, basereg,
20599 XEXP (XEXP (src, 0), 1)));
20600 src = replace_equiv_address (src, basereg);
20601 }
20602 else
20603 {
20604 rtx basereg = gen_rtx_REG (Pmode, reg);
20605 emit_insn (gen_rtx_SET (VOIDmode, basereg, XEXP (src, 0)));
20606 src = replace_equiv_address (src, basereg);
20607 }
20608 }
20609
20610 breg = XEXP (src, 0);
20611 if (GET_CODE (breg) == PLUS || GET_CODE (breg) == LO_SUM)
20612 breg = XEXP (breg, 0);
20613
20614 /* If the base register we are using to address memory is
20615 also a destination reg, then change that register last. */
20616 if (REG_P (breg)
20617 && REGNO (breg) >= REGNO (dst)
20618 && REGNO (breg) < REGNO (dst) + nregs)
20619 j = REGNO (breg) - REGNO (dst);
20620 }
20621 else if (MEM_P (dst) && INT_REGNO_P (reg))
20622 {
20623 rtx breg;
20624
20625 if (GET_CODE (XEXP (dst, 0)) == PRE_INC
20626 || GET_CODE (XEXP (dst, 0)) == PRE_DEC)
20627 {
20628 rtx delta_rtx;
20629 breg = XEXP (XEXP (dst, 0), 0);
20630 delta_rtx = (GET_CODE (XEXP (dst, 0)) == PRE_INC
20631 ? GEN_INT (GET_MODE_SIZE (GET_MODE (dst)))
20632 : GEN_INT (-GET_MODE_SIZE (GET_MODE (dst))));
20633
20634 /* We have to update the breg before doing the store.
20635 Use store with update, if available. */
20636
20637 if (TARGET_UPDATE)
20638 {
20639 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
20640 emit_insn (TARGET_32BIT
20641 ? (TARGET_POWERPC64
20642 ? gen_movdi_si_update (breg, breg, delta_rtx, nsrc)
20643 : gen_movsi_update (breg, breg, delta_rtx, nsrc))
20644 : gen_movdi_di_update (breg, breg, delta_rtx, nsrc));
20645 used_update = true;
20646 }
20647 else
20648 emit_insn (gen_add3_insn (breg, breg, delta_rtx));
20649 dst = replace_equiv_address (dst, breg);
20650 }
20651 else if (!rs6000_offsettable_memref_p (dst, reg_mode)
20652 && GET_CODE (XEXP (dst, 0)) != LO_SUM)
20653 {
20654 if (GET_CODE (XEXP (dst, 0)) == PRE_MODIFY)
20655 {
20656 rtx basereg = XEXP (XEXP (dst, 0), 0);
20657 if (TARGET_UPDATE)
20658 {
20659 rtx nsrc = simplify_gen_subreg (reg_mode, src, mode, 0);
20660 emit_insn (gen_rtx_SET (VOIDmode,
20661 gen_rtx_MEM (reg_mode, XEXP (dst, 0)), nsrc));
20662 used_update = true;
20663 }
20664 else
20665 emit_insn (gen_rtx_SET (VOIDmode, basereg,
20666 XEXP (XEXP (dst, 0), 1)));
20667 dst = replace_equiv_address (dst, basereg);
20668 }
20669 else
20670 {
20671 rtx basereg = XEXP (XEXP (dst, 0), 0);
20672 rtx offsetreg = XEXP (XEXP (dst, 0), 1);
20673 gcc_assert (GET_CODE (XEXP (dst, 0)) == PLUS
20674 && REG_P (basereg)
20675 && REG_P (offsetreg)
20676 && REGNO (basereg) != REGNO (offsetreg));
20677 if (REGNO (basereg) == 0)
20678 {
20679 rtx tmp = offsetreg;
20680 offsetreg = basereg;
20681 basereg = tmp;
20682 }
20683 emit_insn (gen_add3_insn (basereg, basereg, offsetreg));
20684 restore_basereg = gen_sub3_insn (basereg, basereg, offsetreg);
20685 dst = replace_equiv_address (dst, basereg);
20686 }
20687 }
20688 else if (GET_CODE (XEXP (dst, 0)) != LO_SUM)
20689 gcc_assert (rs6000_offsettable_memref_p (dst, reg_mode));
20690 }
20691
20692 for (i = 0; i < nregs; i++)
20693 {
20694 /* Calculate index to next subword. */
20695 ++j;
20696 if (j == nregs)
20697 j = 0;
20698
20699 /* If the compiler has already emitted the move of the first
20700 word by a store with update, there is no need to do anything. */
20701 if (j == 0 && used_update)
20702 continue;
20703
20704 emit_insn (gen_rtx_SET (VOIDmode,
20705 simplify_gen_subreg (reg_mode, dst, mode,
20706 j * reg_mode_size),
20707 simplify_gen_subreg (reg_mode, src, mode,
20708 j * reg_mode_size)));
20709 }
20710 if (restore_basereg != NULL_RTX)
20711 emit_insn (restore_basereg);
20712 }
20713 }
20714
20715 \f
20716 /* This page contains routines that are used to determine what the
20717 function prologue and epilogue code will do and write them out. */
20718
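/* Return true if register R is call-saved and live somewhere in the
   current function, i.e. the prologue must save it.  */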
20719 static inline bool
20720 save_reg_p (int r)
20721 {
20722 return !call_used_regs[r] && df_regs_ever_live_p (r);
20723 }
20724
20725 /* Return the first fixed-point register that is required to be
20726 saved. 32 if none. */
20727
20728 int
20729 first_reg_to_save (void)
20730 {
20731 int first_reg;
20732
20733 /* Find lowest numbered live register. */
20734 for (first_reg = 13; first_reg <= 31; first_reg++)
20735 if (save_reg_p (first_reg))
20736 break;
20737
20738 if (first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM
20739 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
20740 || (DEFAULT_ABI == ABI_DARWIN && flag_pic)
20741 || (TARGET_TOC && TARGET_MINIMAL_TOC))
20742 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))
20743 first_reg = RS6000_PIC_OFFSET_TABLE_REGNUM;
20744
20745 #if TARGET_MACHO
20746 if (flag_pic
20747 && crtl->uses_pic_offset_table
20748 && first_reg > RS6000_PIC_OFFSET_TABLE_REGNUM)
20749 return RS6000_PIC_OFFSET_TABLE_REGNUM;
20750 #endif
20751
20752 return first_reg;
20753 }
20754
20755 /* Similar, for FP regs. */
20756
20757 int
20758 first_fp_reg_to_save (void)
20759 {
20760 int first_reg;
20761
20762 /* Find lowest numbered live register. */
20763 for (first_reg = 14 + 32; first_reg <= 63; first_reg++)
20764 if (save_reg_p (first_reg))
20765 break;
20766
20767 return first_reg;
20768 }
20769
20770 /* Similar, for AltiVec regs. */
20771
20772 static int
20773 first_altivec_reg_to_save (void)
20774 {
20775 int i;
20776
20777 /* Stack frame remains as is unless we are in AltiVec ABI. */
20778 if (! TARGET_ALTIVEC_ABI)
20779 return LAST_ALTIVEC_REGNO + 1;
20780
20781 /* On Darwin, the unwind routines are compiled without
20782 TARGET_ALTIVEC, and use save_world to save/restore the
20783 altivec registers when necessary. */
20784 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
20785 && ! TARGET_ALTIVEC)
20786 return FIRST_ALTIVEC_REGNO + 20;
20787
20788 /* Find lowest numbered live register. */
20789 for (i = FIRST_ALTIVEC_REGNO + 20; i <= LAST_ALTIVEC_REGNO; ++i)
20790 if (save_reg_p (i))
20791 break;
20792
20793 return i;
20794 }
20795
20796 /* Return a 32-bit mask of the AltiVec registers we need to set in
20797 VRSAVE. Bit n of the return value is 1 if Vn is live; bit 0 is
20798 the most significant bit of the 32-bit word (IBM bit numbering). */
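/* For example, assuming the usual definition of ALTIVEC_REG_BIT
   (0x80000000 >> (REGNO - FIRST_ALTIVEC_REGNO)), a function in which
   only V20 is live yields a mask of 0x80000000 >> 20 == 0x00000800.  */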
20799
20800 static unsigned int
20801 compute_vrsave_mask (void)
20802 {
20803 unsigned int i, mask = 0;
20804
20805 /* On Darwin, the unwind routines are compiled without
20806 TARGET_ALTIVEC, and use save_world to save/restore the
20807 call-saved altivec registers when necessary. */
20808 if (DEFAULT_ABI == ABI_DARWIN && crtl->calls_eh_return
20809 && ! TARGET_ALTIVEC)
20810 mask |= 0xFFF;
20811
20812 /* First, find out if we use _any_ altivec registers. */
20813 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
20814 if (df_regs_ever_live_p (i))
20815 mask |= ALTIVEC_REG_BIT (i);
20816
20817 if (mask == 0)
20818 return mask;
20819
20820 /* Next, remove the argument registers from the set. These must
20821 be in the VRSAVE mask set by the caller, so we don't need to add
20822 them in again. More importantly, the mask we compute here is
20823 used to generate CLOBBERs in the set_vrsave insn, and we do not
20824 wish the argument registers to die. */
20825 for (i = crtl->args.info.vregno - 1; i >= ALTIVEC_ARG_MIN_REG; --i)
20826 mask &= ~ALTIVEC_REG_BIT (i);
20827
20828 /* Similarly, remove the return value from the set. */
20829 {
20830 bool yes = false;
20831 diddle_return_value (is_altivec_return_reg, &yes);
20832 if (yes)
20833 mask &= ~ALTIVEC_REG_BIT (ALTIVEC_ARG_RETURN);
20834 }
20835
20836 return mask;
20837 }
20838
20839 /* For a very restricted set of circumstances, we can cut down the
20840 size of prologues/epilogues by calling our own save/restore-the-world
20841 routines. */
20842
20843 static void
20844 compute_save_world_info (rs6000_stack_t *info_ptr)
20845 {
20846 info_ptr->world_save_p = 1;
20847 info_ptr->world_save_p
20848 = (WORLD_SAVE_P (info_ptr)
20849 && DEFAULT_ABI == ABI_DARWIN
20850 && !cfun->has_nonlocal_label
20851 && info_ptr->first_fp_reg_save == FIRST_SAVED_FP_REGNO
20852 && info_ptr->first_gp_reg_save == FIRST_SAVED_GP_REGNO
20853 && info_ptr->first_altivec_reg_save == FIRST_SAVED_ALTIVEC_REGNO
20854 && info_ptr->cr_save_p);
20855
20856 /* This will not work in conjunction with sibcalls. Make sure there
20857 are none. (This check is expensive, but seldom executed.) */
20858 if (WORLD_SAVE_P (info_ptr))
20859 {
20860 rtx insn;
20861 for (insn = get_last_insn_anywhere (); insn; insn = PREV_INSN (insn))
20862 if (CALL_P (insn) && SIBLING_CALL_P (insn))
20863 {
20864 info_ptr->world_save_p = 0;
20865 break;
20866 }
20867 }
20868
20869 if (WORLD_SAVE_P (info_ptr))
20870 {
20871 /* Even if we're not touching VRsave, make sure there's room on the
20872 stack for it, if it looks like we're calling SAVE_WORLD, which
20873 will attempt to save it. */
20874 info_ptr->vrsave_size = 4;
20875
20876 /* If we are going to save the world, we need to save the link register too. */
20877 info_ptr->lr_save_p = 1;
20878
20879 /* "Save" the VRsave register too if we're saving the world. */
20880 if (info_ptr->vrsave_mask == 0)
20881 info_ptr->vrsave_mask = compute_vrsave_mask ();
20882
20883 /* Because the Darwin register save/restore routines only handle
20884 F14 .. F31 and V20 .. V31 as per the ABI, perform a consistency
20885 check. */
20886 gcc_assert (info_ptr->first_fp_reg_save >= FIRST_SAVED_FP_REGNO
20887 && (info_ptr->first_altivec_reg_save
20888 >= FIRST_SAVED_ALTIVEC_REGNO));
20889 }
20890 return;
20891 }
20892
20893
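/* Callback for diddle_return_value: set *XYES (a bool) if REG is the
   AltiVec return-value register.  */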
20894 static void
20895 is_altivec_return_reg (rtx reg, void *xyes)
20896 {
20897 bool *yes = (bool *) xyes;
20898 if (REGNO (reg) == ALTIVEC_ARG_RETURN)
20899 *yes = true;
20900 }
20901
20902 \f
20903 /* Look for user-defined global regs in the range FIRST to LAST-1.
20904 We should not restore these, and so cannot use lmw or out-of-line
20905 restore functions if there are any. We also can't save them
20906 (well, emit frame notes for them), because frame unwinding during
20907 exception handling will restore saved registers. */
20908
20909 static bool
20910 global_regs_p (unsigned first, unsigned last)
20911 {
20912 while (first < last)
20913 if (global_regs[first++])
20914 return true;
20915 return false;
20916 }
20917
20918 /* Determine the strategy for saving/restoring registers. */
20919
20920 enum {
20921 SAVRES_MULTIPLE = 0x1,
20922 SAVE_INLINE_FPRS = 0x2,
20923 SAVE_INLINE_GPRS = 0x4,
20924 REST_INLINE_FPRS = 0x8,
20925 REST_INLINE_GPRS = 0x10,
20926 SAVE_NOINLINE_GPRS_SAVES_LR = 0x20,
20927 SAVE_NOINLINE_FPRS_SAVES_LR = 0x40,
20928 REST_NOINLINE_FPRS_DOESNT_RESTORE_LR = 0x80,
20929 SAVE_INLINE_VRS = 0x100,
20930 REST_INLINE_VRS = 0x200
20931 };
20932
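/* Return a mask of the strategy bits above for the current function.
   INFO describes the frame being built; USING_STATIC_CHAIN_P says
   whether r11 is occupied by the static chain.  */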
20933 static int
20934 rs6000_savres_strategy (rs6000_stack_t *info,
20935 bool using_static_chain_p)
20936 {
20937 int strategy = 0;
20938 bool lr_save_p;
20939
20940 if (TARGET_MULTIPLE
20941 && !TARGET_POWERPC64
20942 && !(TARGET_SPE_ABI && info->spe_64bit_regs_used)
20943 && info->first_gp_reg_save < 31
20944 && !global_regs_p (info->first_gp_reg_save, 32))
20945 strategy |= SAVRES_MULTIPLE;
20946
20947 if (crtl->calls_eh_return
20948 || cfun->machine->ra_need_lr)
20949 strategy |= (SAVE_INLINE_FPRS | REST_INLINE_FPRS
20950 | SAVE_INLINE_GPRS | REST_INLINE_GPRS
20951 | SAVE_INLINE_VRS | REST_INLINE_VRS);
20952
20953 if (info->first_fp_reg_save == 64
20954 /* The out-of-line FP routines use double-precision stores;
20955 we can't use those routines if we don't have such stores. */
20956 || (TARGET_HARD_FLOAT && !TARGET_DOUBLE_FLOAT)
20957 || global_regs_p (info->first_fp_reg_save, 64))
20958 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
20959
20960 if (info->first_gp_reg_save == 32
20961 || (!(strategy & SAVRES_MULTIPLE)
20962 && global_regs_p (info->first_gp_reg_save, 32)))
20963 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
20964
20965 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO + 1
20966 || global_regs_p (info->first_altivec_reg_save, LAST_ALTIVEC_REGNO + 1))
20967 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
20968
20969 /* Define cutoff for using out-of-line functions to save registers. */
20970 if (DEFAULT_ABI == ABI_V4 || TARGET_ELF)
20971 {
20972 if (!optimize_size)
20973 {
20974 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
20975 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
20976 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
20977 }
20978 else
20979 {
20980 /* Prefer out-of-line restore if it will exit. */
20981 if (info->first_fp_reg_save > 61)
20982 strategy |= SAVE_INLINE_FPRS;
20983 if (info->first_gp_reg_save > 29)
20984 {
20985 if (info->first_fp_reg_save == 64)
20986 strategy |= SAVE_INLINE_GPRS;
20987 else
20988 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
20989 }
20990 if (info->first_altivec_reg_save == LAST_ALTIVEC_REGNO)
20991 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
20992 }
20993 }
20994 else if (DEFAULT_ABI == ABI_DARWIN)
20995 {
20996 if (info->first_fp_reg_save > 60)
20997 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
20998 if (info->first_gp_reg_save > 29)
20999 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
21000 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
21001 }
21002 else
21003 {
21004 gcc_checking_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
21005 if (info->first_fp_reg_save > 61)
21006 strategy |= SAVE_INLINE_FPRS | REST_INLINE_FPRS;
21007 strategy |= SAVE_INLINE_GPRS | REST_INLINE_GPRS;
21008 strategy |= SAVE_INLINE_VRS | REST_INLINE_VRS;
21009 }
21010
21011 /* Don't bother to try to save things out-of-line if r11 is occupied
21012 by the static chain. It would require too much fiddling and the
21013 static chain is rarely used anyway. FPRs are saved w.r.t. the stack
21014 pointer on Darwin, and AIX uses r1 or r12. */
21015 if (using_static_chain_p
21016 && (DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN))
21017 strategy |= ((DEFAULT_ABI == ABI_DARWIN ? 0 : SAVE_INLINE_FPRS)
21018 | SAVE_INLINE_GPRS
21019 | SAVE_INLINE_VRS | REST_INLINE_VRS);
21020
21021 /* We can only use the out-of-line routines to restore if we've
21022 saved all the registers from first_fp_reg_save in the prologue.
21023 Otherwise, we risk loading garbage. */
21024 if ((strategy & (SAVE_INLINE_FPRS | REST_INLINE_FPRS)) == SAVE_INLINE_FPRS)
21025 {
21026 int i;
21027
21028 for (i = info->first_fp_reg_save; i < 64; i++)
21029 if (!save_reg_p (i))
21030 {
21031 strategy |= REST_INLINE_FPRS;
21032 break;
21033 }
21034 }
21035
21036 /* If we are going to use store multiple, then don't even bother
21037 with the out-of-line routines, since the store-multiple
21038 instruction will always be smaller. */
21039 if ((strategy & SAVRES_MULTIPLE))
21040 strategy |= SAVE_INLINE_GPRS;
21041
21042 /* info->lr_save_p isn't yet set if the only reason lr needs to be
21043 saved is an out-of-line save or restore. Set up the value for
21044 the next test (excluding out-of-line gpr restore). */
21045 lr_save_p = (info->lr_save_p
21046 || !(strategy & SAVE_INLINE_GPRS)
21047 || !(strategy & SAVE_INLINE_FPRS)
21048 || !(strategy & SAVE_INLINE_VRS)
21049 || !(strategy & REST_INLINE_FPRS)
21050 || !(strategy & REST_INLINE_VRS));
21051
21052 /* The situation is more complicated with load multiple. We'd
21053 prefer to use the out-of-line routines for restores, since the
21054 "exit" out-of-line routines can handle the restore of LR and the
21055 frame teardown. However, it doesn't make sense to use the
21056 out-of-line routine if that is the only reason we'd need to save
21057 LR, and we can't use the "exit" out-of-line gpr restore if we
21058 have saved some fprs; in those cases it is advantageous to use
21059 load multiple when available. */
21060 if ((strategy & SAVRES_MULTIPLE)
21061 && (!lr_save_p
21062 || info->first_fp_reg_save != 64))
21063 strategy |= REST_INLINE_GPRS;
21064
21065 /* Saving CR interferes with the exit routines used on the SPE, so
21066 just punt here. */
21067 if (TARGET_SPE_ABI
21068 && info->spe_64bit_regs_used
21069 && info->cr_save_p)
21070 strategy |= REST_INLINE_GPRS;
21071
21072 /* We can only use load multiple or the out-of-line routines to
21073 restore if we've used store multiple or out-of-line routines
21074 in the prologue, i.e. if we've saved all the registers from
21075 first_gp_reg_save. Otherwise, we risk loading garbage. */
21076 if ((strategy & (SAVE_INLINE_GPRS | REST_INLINE_GPRS | SAVRES_MULTIPLE))
21077 == SAVE_INLINE_GPRS)
21078 {
21079 int i;
21080
21081 for (i = info->first_gp_reg_save; i < 32; i++)
21082 if (!save_reg_p (i))
21083 {
21084 strategy |= REST_INLINE_GPRS;
21085 break;
21086 }
21087 }
21088
21089 if (TARGET_ELF && TARGET_64BIT)
21090 {
21091 if (!(strategy & SAVE_INLINE_FPRS))
21092 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
21093 else if (!(strategy & SAVE_INLINE_GPRS)
21094 && info->first_fp_reg_save == 64)
21095 strategy |= SAVE_NOINLINE_GPRS_SAVES_LR;
21096 }
21097 else if (TARGET_AIX && !(strategy & REST_INLINE_FPRS))
21098 strategy |= REST_NOINLINE_FPRS_DOESNT_RESTORE_LR;
21099
21100 if (TARGET_MACHO && !(strategy & SAVE_INLINE_FPRS))
21101 strategy |= SAVE_NOINLINE_FPRS_SAVES_LR;
21102
21103 return strategy;
21104 }
21105
21106 /* Calculate the stack information for the current function. This is
21107 complicated by having two separate calling sequences, the AIX calling
21108 sequence and the V.4 calling sequence.
21109
21110 AIX (and Darwin/Mac OS X) stack frames look like:
21111 32-bit 64-bit
21112 SP----> +---------------------------------------+
21113 | back chain to caller | 0 0
21114 +---------------------------------------+
21115 | saved CR | 4 8 (8-11)
21116 +---------------------------------------+
21117 | saved LR | 8 16
21118 +---------------------------------------+
21119 | reserved for compilers | 12 24
21120 +---------------------------------------+
21121 | reserved for binders | 16 32
21122 +---------------------------------------+
21123 | saved TOC pointer | 20 40
21124 +---------------------------------------+
21125 | Parameter save area (P) | 24 48
21126 +---------------------------------------+
21127 | Alloca space (A) | 24+P etc.
21128 +---------------------------------------+
21129 | Local variable space (L) | 24+P+A
21130 +---------------------------------------+
21131 | Float/int conversion temporary (X) | 24+P+A+L
21132 +---------------------------------------+
21133 | Save area for AltiVec registers (W) | 24+P+A+L+X
21134 +---------------------------------------+
21135 | AltiVec alignment padding (Y) | 24+P+A+L+X+W
21136 +---------------------------------------+
21137 | Save area for VRSAVE register (Z) | 24+P+A+L+X+W+Y
21138 +---------------------------------------+
21139 | Save area for GP registers (G) | 24+P+A+L+X+W+Y+Z
21140 +---------------------------------------+
21141 | Save area for FP registers (F) | 24+P+A+L+X+W+Y+Z+G
21142 +---------------------------------------+
21143 old SP->| back chain to caller's caller |
21144 +---------------------------------------+
21145
21146 The required alignment for AIX configurations is two words (i.e., 8
21147 or 16 bytes).
21148
21149 The ELFv2 ABI is a variant of the AIX ABI. Stack frames look like:
21150
21151 SP----> +---------------------------------------+
21152 | Back chain to caller | 0
21153 +---------------------------------------+
21154 | Save area for CR | 8
21155 +---------------------------------------+
21156 | Saved LR | 16
21157 +---------------------------------------+
21158 | Saved TOC pointer | 24
21159 +---------------------------------------+
21160 | Parameter save area (P) | 32
21161 +---------------------------------------+
21162 | Alloca space (A) | 32+P
21163 +---------------------------------------+
21164 | Local variable space (L) | 32+P+A
21165 +---------------------------------------+
21166 | Save area for AltiVec registers (W) | 32+P+A+L
21167 +---------------------------------------+
21168 | AltiVec alignment padding (Y) | 32+P+A+L+W
21169 +---------------------------------------+
21170 | Save area for GP registers (G) | 32+P+A+L+W+Y
21171 +---------------------------------------+
21172 | Save area for FP registers (F) | 32+P+A+L+W+Y+G
21173 +---------------------------------------+
21174 old SP->| back chain to caller's caller | 32+P+A+L+W+Y+G+F
21175 +---------------------------------------+
21176
21177
21178 V.4 stack frames look like:
21179
21180 SP----> +---------------------------------------+
21181 | back chain to caller | 0
21182 +---------------------------------------+
21183 | caller's saved LR | 4
21184 +---------------------------------------+
21185 | Parameter save area (P) | 8
21186 +---------------------------------------+
21187 | Alloca space (A) | 8+P
21188 +---------------------------------------+
21189 | Varargs save area (V) | 8+P+A
21190 +---------------------------------------+
21191 | Local variable space (L) | 8+P+A+V
21192 +---------------------------------------+
21193 | Float/int conversion temporary (X) | 8+P+A+V+L
21194 +---------------------------------------+
21195 | Save area for AltiVec registers (W) | 8+P+A+V+L+X
21196 +---------------------------------------+
21197 | AltiVec alignment padding (Y) | 8+P+A+V+L+X+W
21198 +---------------------------------------+
21199 | Save area for VRSAVE register (Z) | 8+P+A+V+L+X+W+Y
21200 +---------------------------------------+
21201 | SPE: area for 64-bit GP registers |
21202 +---------------------------------------+
21203 | SPE alignment padding |
21204 +---------------------------------------+
21205 | saved CR (C) | 8+P+A+V+L+X+W+Y+Z
21206 +---------------------------------------+
21207 | Save area for GP registers (G) | 8+P+A+V+L+X+W+Y+Z+C
21208 +---------------------------------------+
21209 | Save area for FP registers (F) | 8+P+A+V+L+X+W+Y+Z+C+G
21210 +---------------------------------------+
21211 old SP->| back chain to caller's caller |
21212 +---------------------------------------+
21213
21214 The required alignment for V.4 is 16 bytes, or 8 bytes if -meabi is
21215 given. (But note below and in sysv4.h that we require only 8 and
21216 may round up the size of our stack frame anyway. The historical
21217 reason is early versions of powerpc-linux which didn't properly
21218 align the stack at program startup. A happy side-effect is that
21219 -mno-eabi libraries can be used with -meabi programs.)
21220
21221 The EABI configuration defaults to the V.4 layout. However,
21222 the stack alignment requirements may differ. If -mno-eabi is not
21223 given, the required stack alignment is 8 bytes; if -mno-eabi is
21224 given, the required alignment is 16 bytes. (But see V.4 comment
21225 above.) */
21226
21227 #ifndef ABI_STACK_BOUNDARY
21228 #define ABI_STACK_BOUNDARY STACK_BOUNDARY
21229 #endif
21230
21231 static rs6000_stack_t *
21232 rs6000_stack_info (void)
21233 {
21234 rs6000_stack_t *info_ptr = &stack_info;
21235 int reg_size = TARGET_32BIT ? 4 : 8;
21236 int ehrd_size;
21237 int ehcr_size;
21238 int save_align;
21239 int first_gp;
21240 HOST_WIDE_INT non_fixed_size;
21241 bool using_static_chain_p;
21242
21243 if (reload_completed && info_ptr->reload_completed)
21244 return info_ptr;
21245
21246 memset (info_ptr, 0, sizeof (*info_ptr));
21247 info_ptr->reload_completed = reload_completed;
21248
21249 if (TARGET_SPE)
21250 {
21251 /* Cache value so we don't rescan instruction chain over and over. */
21252 if (cfun->machine->insn_chain_scanned_p == 0)
21253 cfun->machine->insn_chain_scanned_p
21254 = spe_func_has_64bit_regs_p () + 1;
21255 info_ptr->spe_64bit_regs_used = cfun->machine->insn_chain_scanned_p - 1;
21256 }
21257
21258 /* Select which calling sequence. */
21259 info_ptr->abi = DEFAULT_ABI;
21260
21261 /* Calculate which registers need to be saved & save area size. */
21262 info_ptr->first_gp_reg_save = first_reg_to_save ();
21263 /* Assume that we will have to save RS6000_PIC_OFFSET_TABLE_REGNUM,
21264 even if it currently looks like we won't. Reload may need it to
21265 get at a constant; if so, it will have already created a constant
21266 pool entry for it. */
21267 if (((TARGET_TOC && TARGET_MINIMAL_TOC)
21268 || (flag_pic == 1 && DEFAULT_ABI == ABI_V4)
21269 || (flag_pic && DEFAULT_ABI == ABI_DARWIN))
21270 && crtl->uses_const_pool
21271 && info_ptr->first_gp_reg_save > RS6000_PIC_OFFSET_TABLE_REGNUM)
21272 first_gp = RS6000_PIC_OFFSET_TABLE_REGNUM;
21273 else
21274 first_gp = info_ptr->first_gp_reg_save;
21275
21276 info_ptr->gp_size = reg_size * (32 - first_gp);
21277
21278 /* For the SPE, we have an additional upper 32-bits on each GPR.
21279 Ideally we should save the entire 64-bits only when the upper
21280 half is used in SIMD instructions. Since we only record
21281 registers live (not the size they are used in), this proves
21282 difficult because we'd have to traverse the instruction chain at
21283 the right time, taking reload into account. This is a real pain,
21284 so we opt to always save the GPRs in 64 bits if even one register
21285 gets used in 64 bits. Otherwise, all the registers in the frame
21286 get saved in 32 bits.
21287
21288 So, since we save all GPRs (except the SP) in 64 bits in that
21289 case, the traditional GP save area will be empty.
21290 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
21291 info_ptr->gp_size = 0;
21292
21293 info_ptr->first_fp_reg_save = first_fp_reg_to_save ();
21294 info_ptr->fp_size = 8 * (64 - info_ptr->first_fp_reg_save);
21295
21296 info_ptr->first_altivec_reg_save = first_altivec_reg_to_save ();
21297 info_ptr->altivec_size = 16 * (LAST_ALTIVEC_REGNO + 1
21298 - info_ptr->first_altivec_reg_save);
21299
21300 /* Does this function call anything? */
21301 info_ptr->calls_p = (! crtl->is_leaf
21302 || cfun->machine->ra_needs_full_frame);
21303
21304 /* Determine if we need to save the condition code registers. */
21305 if (df_regs_ever_live_p (CR2_REGNO)
21306 || df_regs_ever_live_p (CR3_REGNO)
21307 || df_regs_ever_live_p (CR4_REGNO))
21308 {
21309 info_ptr->cr_save_p = 1;
21310 if (DEFAULT_ABI == ABI_V4)
21311 info_ptr->cr_size = reg_size;
21312 }
21313
21314 /* If the current function calls __builtin_eh_return, then we need
21315 to allocate stack space for registers that will hold data for
21316 the exception handler. */
21317 if (crtl->calls_eh_return)
21318 {
21319 unsigned int i;
21320 for (i = 0; EH_RETURN_DATA_REGNO (i) != INVALID_REGNUM; ++i)
21321 continue;
21322
21323 /* SPE saves EH registers in 64-bits. */
21324 ehrd_size = i * (TARGET_SPE_ABI
21325 && info_ptr->spe_64bit_regs_used != 0
21326 ? UNITS_PER_SPE_WORD : UNITS_PER_WORD);
21327 }
21328 else
21329 ehrd_size = 0;
21330
21331 /* In the ELFv2 ABI, we also need to allocate space for separate
21332 CR field save areas if the function calls __builtin_eh_return. */
21333 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
21334 {
21335 /* This hard-codes that we have three call-saved CR fields. */
21336 ehcr_size = 3 * reg_size;
21337 /* We do *not* use the regular CR save mechanism. */
21338 info_ptr->cr_save_p = 0;
21339 }
21340 else
21341 ehcr_size = 0;
21342
21343 /* Determine various sizes. */
21344 info_ptr->reg_size = reg_size;
21345 info_ptr->fixed_size = RS6000_SAVE_AREA;
21346 info_ptr->vars_size = RS6000_ALIGN (get_frame_size (), 8);
21347 info_ptr->parm_size = RS6000_ALIGN (crtl->outgoing_args_size,
21348 TARGET_ALTIVEC ? 16 : 8);
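/* For illustration: RS6000_ALIGN rounds its first argument up to a
   multiple of the second, so e.g. RS6000_ALIGN (20, 8) == 24
   (assuming the usual round-up-and-mask definition).  */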
21349 if (FRAME_GROWS_DOWNWARD)
21350 info_ptr->vars_size
21351 += RS6000_ALIGN (info_ptr->fixed_size + info_ptr->vars_size
21352 + info_ptr->parm_size,
21353 ABI_STACK_BOUNDARY / BITS_PER_UNIT)
21354 - (info_ptr->fixed_size + info_ptr->vars_size
21355 + info_ptr->parm_size);
21356
21357 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
21358 info_ptr->spe_gp_size = 8 * (32 - first_gp);
21359 else
21360 info_ptr->spe_gp_size = 0;
21361
21362 if (TARGET_ALTIVEC_ABI)
21363 info_ptr->vrsave_mask = compute_vrsave_mask ();
21364 else
21365 info_ptr->vrsave_mask = 0;
21366
21367 if (TARGET_ALTIVEC_VRSAVE && info_ptr->vrsave_mask)
21368 info_ptr->vrsave_size = 4;
21369 else
21370 info_ptr->vrsave_size = 0;
21371
21372 compute_save_world_info (info_ptr);
21373
21374 /* Calculate the offsets. */
21375 switch (DEFAULT_ABI)
21376 {
21377 case ABI_NONE:
21378 default:
21379 gcc_unreachable ();
21380
21381 case ABI_AIX:
21382 case ABI_ELFv2:
21383 case ABI_DARWIN:
21384 info_ptr->fp_save_offset = - info_ptr->fp_size;
21385 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
21386
21387 if (TARGET_ALTIVEC_ABI)
21388 {
21389 info_ptr->vrsave_save_offset
21390 = info_ptr->gp_save_offset - info_ptr->vrsave_size;
21391
21392 /* Align stack so vector save area is on a quadword boundary.
21393 The padding goes above the vectors. */
21394 if (info_ptr->altivec_size != 0)
21395 info_ptr->altivec_padding_size
21396 = info_ptr->vrsave_save_offset & 0xF;
21397 else
21398 info_ptr->altivec_padding_size = 0;
21399
21400 info_ptr->altivec_save_offset
21401 = info_ptr->vrsave_save_offset
21402 - info_ptr->altivec_padding_size
21403 - info_ptr->altivec_size;
21404 gcc_assert (info_ptr->altivec_size == 0
21405 || info_ptr->altivec_save_offset % 16 == 0);
21406
21407 /* Adjust for AltiVec case. */
21408 info_ptr->ehrd_offset = info_ptr->altivec_save_offset - ehrd_size;
21409 }
21410 else
21411 info_ptr->ehrd_offset = info_ptr->gp_save_offset - ehrd_size;
21412
21413 info_ptr->ehcr_offset = info_ptr->ehrd_offset - ehcr_size;
21414 info_ptr->cr_save_offset = reg_size; /* first word when 64-bit. */
21415 info_ptr->lr_save_offset = 2*reg_size;
21416 break;
21417
21418 case ABI_V4:
21419 info_ptr->fp_save_offset = - info_ptr->fp_size;
21420 info_ptr->gp_save_offset = info_ptr->fp_save_offset - info_ptr->gp_size;
21421 info_ptr->cr_save_offset = info_ptr->gp_save_offset - info_ptr->cr_size;
21422
21423 if (TARGET_SPE_ABI && info_ptr->spe_64bit_regs_used != 0)
21424 {
21425 /* Align stack so SPE GPR save area is aligned on a
21426 double-word boundary. */
21427 if (info_ptr->spe_gp_size != 0 && info_ptr->cr_save_offset != 0)
21428 info_ptr->spe_padding_size
21429 = 8 - (-info_ptr->cr_save_offset % 8);
21430 else
21431 info_ptr->spe_padding_size = 0;
21432
21433 info_ptr->spe_gp_save_offset
21434 = info_ptr->cr_save_offset
21435 - info_ptr->spe_padding_size
21436 - info_ptr->spe_gp_size;
21437
21438 /* Adjust for SPE case. */
21439 info_ptr->ehrd_offset = info_ptr->spe_gp_save_offset;
21440 }
21441 else if (TARGET_ALTIVEC_ABI)
21442 {
21443 info_ptr->vrsave_save_offset
21444 = info_ptr->cr_save_offset - info_ptr->vrsave_size;
21445
21446 /* Align stack so vector save area is on a quadword boundary. */
21447 if (info_ptr->altivec_size != 0)
21448 info_ptr->altivec_padding_size
21449 = 16 - (-info_ptr->vrsave_save_offset % 16);
21450 else
21451 info_ptr->altivec_padding_size = 0;
21452
21453 info_ptr->altivec_save_offset
21454 = info_ptr->vrsave_save_offset
21455 - info_ptr->altivec_padding_size
21456 - info_ptr->altivec_size;
21457
21458 /* Adjust for AltiVec case. */
21459 info_ptr->ehrd_offset = info_ptr->altivec_save_offset;
21460 }
21461 else
21462 info_ptr->ehrd_offset = info_ptr->cr_save_offset;
21463 info_ptr->ehrd_offset -= ehrd_size;
21464 info_ptr->lr_save_offset = reg_size;
21465 break;
21466 }
21467
21468 save_align = (TARGET_ALTIVEC_ABI || DEFAULT_ABI == ABI_DARWIN) ? 16 : 8;
21469 info_ptr->save_size = RS6000_ALIGN (info_ptr->fp_size
21470 + info_ptr->gp_size
21471 + info_ptr->altivec_size
21472 + info_ptr->altivec_padding_size
21473 + info_ptr->spe_gp_size
21474 + info_ptr->spe_padding_size
21475 + ehrd_size
21476 + ehcr_size
21477 + info_ptr->cr_size
21478 + info_ptr->vrsave_size,
21479 save_align);
21480
21481 non_fixed_size = (info_ptr->vars_size
21482 + info_ptr->parm_size
21483 + info_ptr->save_size);
21484
21485 info_ptr->total_size = RS6000_ALIGN (non_fixed_size + info_ptr->fixed_size,
21486 ABI_STACK_BOUNDARY / BITS_PER_UNIT);
21487
21488 /* Determine if we need to save the link register. */
21489 if (info_ptr->calls_p
21490 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21491 && crtl->profile
21492 && !TARGET_PROFILE_KERNEL)
21493 || (DEFAULT_ABI == ABI_V4 && cfun->calls_alloca)
21494 #ifdef TARGET_RELOCATABLE
21495 || (TARGET_RELOCATABLE && (get_pool_size () != 0))
21496 #endif
21497 || rs6000_ra_ever_killed ())
21498 info_ptr->lr_save_p = 1;
21499
21500 using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
21501 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
21502 && call_used_regs[STATIC_CHAIN_REGNUM]);
21503 info_ptr->savres_strategy = rs6000_savres_strategy (info_ptr,
21504 using_static_chain_p);
21505
21506 if (!(info_ptr->savres_strategy & SAVE_INLINE_GPRS)
21507 || !(info_ptr->savres_strategy & SAVE_INLINE_FPRS)
21508 || !(info_ptr->savres_strategy & SAVE_INLINE_VRS)
21509 || !(info_ptr->savres_strategy & REST_INLINE_GPRS)
21510 || !(info_ptr->savres_strategy & REST_INLINE_FPRS)
21511 || !(info_ptr->savres_strategy & REST_INLINE_VRS))
21512 info_ptr->lr_save_p = 1;
21513
21514 if (info_ptr->lr_save_p)
21515 df_set_regs_ever_live (LR_REGNO, true);
21516
21517 /* Determine if we need to allocate any stack frame:
21518
21519 For AIX we need to push the stack if a frame pointer is needed
21520 (because the stack might be dynamically adjusted), if we are
21521 debugging, if we make calls, or if the sum of fp_save, gp_save,
21522 and local variables is more than the space needed to save all
21523 non-volatile registers: 32-bit: 18*8 + 19*4 = 220 or 64-bit: 18*8
21524 + 18*8 = 288 (GPR13 reserved).
21525
21526 For V.4 we don't have the stack cushion that AIX uses, but assume
21527 that the debugger can handle stackless frames. */
21528
21529 if (info_ptr->calls_p)
21530 info_ptr->push_p = 1;
21531
21532 else if (DEFAULT_ABI == ABI_V4)
21533 info_ptr->push_p = non_fixed_size != 0;
21534
21535 else if (frame_pointer_needed)
21536 info_ptr->push_p = 1;
21537
21538 else if (TARGET_XCOFF && write_symbols != NO_DEBUG)
21539 info_ptr->push_p = 1;
21540
21541 else
21542 info_ptr->push_p = non_fixed_size > (TARGET_32BIT ? 220 : 288);
21543
21544 /* Zero offsets if we're not saving those registers. */
21545 if (info_ptr->fp_size == 0)
21546 info_ptr->fp_save_offset = 0;
21547
21548 if (info_ptr->gp_size == 0)
21549 info_ptr->gp_save_offset = 0;
21550
21551 if (! TARGET_ALTIVEC_ABI || info_ptr->altivec_size == 0)
21552 info_ptr->altivec_save_offset = 0;
21553
21554 /* Zero VRSAVE offset if not saved and restored. */
21555 if (! TARGET_ALTIVEC_VRSAVE || info_ptr->vrsave_mask == 0)
21556 info_ptr->vrsave_save_offset = 0;
21557
21558 if (! TARGET_SPE_ABI
21559 || info_ptr->spe_64bit_regs_used == 0
21560 || info_ptr->spe_gp_size == 0)
21561 info_ptr->spe_gp_save_offset = 0;
21562
21563 if (! info_ptr->lr_save_p)
21564 info_ptr->lr_save_offset = 0;
21565
21566 if (! info_ptr->cr_save_p)
21567 info_ptr->cr_save_offset = 0;
21568
21569 return info_ptr;
21570 }
21571
21572 /* Return true if the current function uses any GPRs in 64-bit SIMD
21573 mode. */
21574
21575 static bool
21576 spe_func_has_64bit_regs_p (void)
21577 {
21578 rtx_insn *insns, *insn;
21579
21580 /* Functions that save and restore all the call-saved registers will
21581 need to save/restore the registers in 64-bits. */
21582 if (crtl->calls_eh_return
21583 || cfun->calls_setjmp
21584 || crtl->has_nonlocal_goto)
21585 return true;
21586
21587 insns = get_insns ();
21588
21589 for (insn = NEXT_INSN (insns); insn != NULL_RTX; insn = NEXT_INSN (insn))
21590 {
21591 if (INSN_P (insn))
21592 {
21593 rtx i;
21594
21595 /* FIXME: This should be implemented with attributes...
21596
21597 (set_attr "spe64" "true")....then,
21598 if (get_spe64(insn)) return true;
21599
21600 It's the only reliable way to do the stuff below. */
21601
21602 i = PATTERN (insn);
21603 if (GET_CODE (i) == SET)
21604 {
21605 enum machine_mode mode = GET_MODE (SET_SRC (i));
21606
21607 if (SPE_VECTOR_MODE (mode))
21608 return true;
21609 if (TARGET_E500_DOUBLE && (mode == DFmode || mode == TFmode))
21610 return true;
21611 }
21612 }
21613 }
21614
21615 return false;
21616 }
21617
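/* Dump the stack information in INFO to stderr; if INFO is null,
   compute it for the current function first.  */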
21618 static void
21619 debug_stack_info (rs6000_stack_t *info)
21620 {
21621 const char *abi_string;
21622
21623 if (! info)
21624 info = rs6000_stack_info ();
21625
21626 fprintf (stderr, "\nStack information for function %s:\n",
21627 ((current_function_decl && DECL_NAME (current_function_decl))
21628 ? IDENTIFIER_POINTER (DECL_NAME (current_function_decl))
21629 : "<unknown>"));
21630
21631 switch (info->abi)
21632 {
21633 default: abi_string = "Unknown"; break;
21634 case ABI_NONE: abi_string = "NONE"; break;
21635 case ABI_AIX: abi_string = "AIX"; break;
21636 case ABI_ELFv2: abi_string = "ELFv2"; break;
21637 case ABI_DARWIN: abi_string = "Darwin"; break;
21638 case ABI_V4: abi_string = "V.4"; break;
21639 }
21640
21641 fprintf (stderr, "\tABI = %5s\n", abi_string);
21642
21643 if (TARGET_ALTIVEC_ABI)
21644 fprintf (stderr, "\tALTIVEC ABI extensions enabled.\n");
21645
21646 if (TARGET_SPE_ABI)
21647 fprintf (stderr, "\tSPE ABI extensions enabled.\n");
21648
21649 if (info->first_gp_reg_save != 32)
21650 fprintf (stderr, "\tfirst_gp_reg_save = %5d\n", info->first_gp_reg_save);
21651
21652 if (info->first_fp_reg_save != 64)
21653 fprintf (stderr, "\tfirst_fp_reg_save = %5d\n", info->first_fp_reg_save);
21654
21655 if (info->first_altivec_reg_save <= LAST_ALTIVEC_REGNO)
21656 fprintf (stderr, "\tfirst_altivec_reg_save = %5d\n",
21657 info->first_altivec_reg_save);
21658
21659 if (info->lr_save_p)
21660 fprintf (stderr, "\tlr_save_p = %5d\n", info->lr_save_p);
21661
21662 if (info->cr_save_p)
21663 fprintf (stderr, "\tcr_save_p = %5d\n", info->cr_save_p);
21664
21665 if (info->vrsave_mask)
21666 fprintf (stderr, "\tvrsave_mask = 0x%x\n", info->vrsave_mask);
21667
21668 if (info->push_p)
21669 fprintf (stderr, "\tpush_p = %5d\n", info->push_p);
21670
21671 if (info->calls_p)
21672 fprintf (stderr, "\tcalls_p = %5d\n", info->calls_p);
21673
21674 if (info->gp_save_offset)
21675 fprintf (stderr, "\tgp_save_offset = %5d\n", info->gp_save_offset);
21676
21677 if (info->fp_save_offset)
21678 fprintf (stderr, "\tfp_save_offset = %5d\n", info->fp_save_offset);
21679
21680 if (info->altivec_save_offset)
21681 fprintf (stderr, "\taltivec_save_offset = %5d\n",
21682 info->altivec_save_offset);
21683
21684 if (info->spe_gp_save_offset)
21685 fprintf (stderr, "\tspe_gp_save_offset = %5d\n",
21686 info->spe_gp_save_offset);
21687
21688 if (info->vrsave_save_offset)
21689 fprintf (stderr, "\tvrsave_save_offset = %5d\n",
21690 info->vrsave_save_offset);
21691
21692 if (info->lr_save_offset)
21693 fprintf (stderr, "\tlr_save_offset = %5d\n", info->lr_save_offset);
21694
21695 if (info->cr_save_offset)
21696 fprintf (stderr, "\tcr_save_offset = %5d\n", info->cr_save_offset);
21697
21698 if (info->varargs_save_offset)
21699 fprintf (stderr, "\tvarargs_save_offset = %5d\n", info->varargs_save_offset);
21700
21701 if (info->total_size)
21702 fprintf (stderr, "\ttotal_size = "HOST_WIDE_INT_PRINT_DEC"\n",
21703 info->total_size);
21704
21705 if (info->vars_size)
21706 fprintf (stderr, "\tvars_size = "HOST_WIDE_INT_PRINT_DEC"\n",
21707 info->vars_size);
21708
21709 if (info->parm_size)
21710 fprintf (stderr, "\tparm_size = %5d\n", info->parm_size);
21711
21712 if (info->fixed_size)
21713 fprintf (stderr, "\tfixed_size = %5d\n", info->fixed_size);
21714
21715 if (info->gp_size)
21716 fprintf (stderr, "\tgp_size = %5d\n", info->gp_size);
21717
21718 if (info->spe_gp_size)
21719 fprintf (stderr, "\tspe_gp_size = %5d\n", info->spe_gp_size);
21720
21721 if (info->fp_size)
21722 fprintf (stderr, "\tfp_size = %5d\n", info->fp_size);
21723
21724 if (info->altivec_size)
21725 fprintf (stderr, "\taltivec_size = %5d\n", info->altivec_size);
21726
21727 if (info->vrsave_size)
21728 fprintf (stderr, "\tvrsave_size = %5d\n", info->vrsave_size);
21729
21730 if (info->altivec_padding_size)
21731 fprintf (stderr, "\taltivec_padding_size= %5d\n",
21732 info->altivec_padding_size);
21733
21734 if (info->spe_padding_size)
21735 fprintf (stderr, "\tspe_padding_size = %5d\n",
21736 info->spe_padding_size);
21737
21738 if (info->cr_size)
21739 fprintf (stderr, "\tcr_size = %5d\n", info->cr_size);
21740
21741 if (info->save_size)
21742 fprintf (stderr, "\tsave_size = %5d\n", info->save_size);
21743
21744 if (info->reg_size != 4)
21745 fprintf (stderr, "\treg_size = %5d\n", info->reg_size);
21746
21747 fprintf (stderr, "\tsave-strategy = %04x\n", info->savres_strategy);
21748
21749 fprintf (stderr, "\n");
21750 }
21751
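/* Return the rtx for the return address COUNT frames up from FRAME,
   as for __builtin_return_address (this backs the RETURN_ADDR_RTX
   macro).  */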
21752 rtx
21753 rs6000_return_addr (int count, rtx frame)
21754 {
21755 /* Currently we don't optimize very well between prologue and body
21756 code, and for PIC code the result can actually be quite bad, so
21757 don't try to be too clever here. */
21758 if (count != 0
21759 || ((DEFAULT_ABI == ABI_V4 || DEFAULT_ABI == ABI_DARWIN) && flag_pic))
21760 {
21761 cfun->machine->ra_needs_full_frame = 1;
21762
21763 return
21764 gen_rtx_MEM
21765 (Pmode,
21766 memory_address
21767 (Pmode,
21768 plus_constant (Pmode,
21769 copy_to_reg
21770 (gen_rtx_MEM (Pmode,
21771 memory_address (Pmode, frame))),
21772 RETURN_ADDRESS_OFFSET)));
21773 }
21774
21775 cfun->machine->ra_need_lr = 1;
21776 return get_hard_reg_initial_val (Pmode, LR_REGNO);
21777 }
21778
21779 /* Say whether a function is a candidate for sibcall handling or not. */
21780
21781 static bool
21782 rs6000_function_ok_for_sibcall (tree decl, tree exp)
21783 {
21784 tree fntype;
21785
21786 if (decl)
21787 fntype = TREE_TYPE (decl);
21788 else
21789 fntype = TREE_TYPE (TREE_TYPE (CALL_EXPR_FN (exp)));
21790
21791 /* We can't do it if the called function has more vector parameters
21792 than the current function; there's nowhere to put the VRsave code. */
21793 if (TARGET_ALTIVEC_ABI
21794 && TARGET_ALTIVEC_VRSAVE
21795 && !(decl && decl == current_function_decl))
21796 {
21797 function_args_iterator args_iter;
21798 tree type;
21799 int nvreg = 0;
21800
21801 /* Functions with vector parameters are required to have a
21802 prototype, so the argument type info must be available
21803 here. */
21804 FOREACH_FUNCTION_ARGS(fntype, type, args_iter)
21805 if (TREE_CODE (type) == VECTOR_TYPE
21806 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
21807 nvreg++;
21808
21809 FOREACH_FUNCTION_ARGS(TREE_TYPE (current_function_decl), type, args_iter)
21810 if (TREE_CODE (type) == VECTOR_TYPE
21811 && ALTIVEC_OR_VSX_VECTOR_MODE (TYPE_MODE (type)))
21812 nvreg--;
21813
21814 if (nvreg > 0)
21815 return false;
21816 }
21817
21818 /* Under the AIX or ELFv2 ABIs we can't allow calls to non-local
21819 functions, because the callee may have a different TOC pointer from
21820 the caller's, and there's no way to ensure we restore the TOC when
21821 we return. With the secure-plt SYSV ABI we can't make non-local
21822 calls when -fpic/PIC because the plt call stubs use r30. */
21823 if (DEFAULT_ABI == ABI_DARWIN
21824 || ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
21825 && decl
21826 && !DECL_EXTERNAL (decl)
21827 && (*targetm.binds_local_p) (decl))
21828 || (DEFAULT_ABI == ABI_V4
21829 && (!TARGET_SECURE_PLT
21830 || !flag_pic
21831 || (decl
21832 && (*targetm.binds_local_p) (decl)))))
21833 {
21834 tree attr_list = TYPE_ATTRIBUTES (fntype);
21835
21836 if (!lookup_attribute ("longcall", attr_list)
21837 || lookup_attribute ("shortcall", attr_list))
21838 return true;
21839 }
21840
21841 return false;
21842 }
21843
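/* Return nonzero if the link register is stored or clobbered
   somewhere other than the prologue/epilogue, so that the return
   address must be saved.  */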
21844 static int
21845 rs6000_ra_ever_killed (void)
21846 {
21847 rtx_insn *top;
21848 rtx reg;
21849 rtx_insn *insn;
21850
21851 if (cfun->is_thunk)
21852 return 0;
21853
21854 if (cfun->machine->lr_save_state)
21855 return cfun->machine->lr_save_state - 1;
21856
21857 /* regs_ever_live has LR marked as used if any sibcalls are present,
21858 but this should not force saving and restoring in the
21859 pro/epilogue. Likewise, reg_set_between_p thinks a sibcall
21860 clobbers LR, so that is inappropriate. */
21861
21862 /* Also, the prologue can generate a store into LR that
21863 doesn't really count, like this:
21864
21865 move LR->R0
21866 bcl to set PIC register
21867 move LR->R31
21868 move R0->LR
21869
21870 When we're called from the epilogue, we need to avoid counting
21871 this as a store. */
21872
21873 push_topmost_sequence ();
21874 top = get_insns ();
21875 pop_topmost_sequence ();
21876 reg = gen_rtx_REG (Pmode, LR_REGNO);
21877
21878 for (insn = NEXT_INSN (top); insn != NULL_RTX; insn = NEXT_INSN (insn))
21879 {
21880 if (INSN_P (insn))
21881 {
21882 if (CALL_P (insn))
21883 {
21884 if (!SIBLING_CALL_P (insn))
21885 return 1;
21886 }
21887 else if (find_regno_note (insn, REG_INC, LR_REGNO))
21888 return 1;
21889 else if (set_of (reg, insn) != NULL_RTX
21890 && !prologue_epilogue_contains (insn))
21891 return 1;
21892 }
21893 }
21894 return 0;
21895 }
21896 \f
21897 /* Emit instructions needed to load the TOC register.
21898 This is only needed when TARGET_TOC and TARGET_MINIMAL_TOC are set
21899 and there is a constant pool; or for SVR4 -fpic. */
21900
21901 void
21902 rs6000_emit_load_toc_table (int fromprolog)
21903 {
21904 rtx dest;
21905 dest = gen_rtx_REG (Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
21906
21907 if (TARGET_ELF && TARGET_SECURE_PLT && DEFAULT_ABI == ABI_V4 && flag_pic)
21908 {
21909 char buf[30];
21910 rtx lab, tmp1, tmp2, got;
21911
21912 lab = gen_label_rtx ();
21913 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (lab));
21914 lab = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
21915 if (flag_pic == 2)
21916 got = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
21917 else
21918 got = rs6000_got_sym ();
21919 tmp1 = tmp2 = dest;
21920 if (!fromprolog)
21921 {
21922 tmp1 = gen_reg_rtx (Pmode);
21923 tmp2 = gen_reg_rtx (Pmode);
21924 }
21925 emit_insn (gen_load_toc_v4_PIC_1 (lab));
21926 emit_move_insn (tmp1, gen_rtx_REG (Pmode, LR_REGNO));
21927 emit_insn (gen_load_toc_v4_PIC_3b (tmp2, tmp1, got, lab));
21928 emit_insn (gen_load_toc_v4_PIC_3c (dest, tmp2, got, lab));
21929 }
21930 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 1)
21931 {
21932 emit_insn (gen_load_toc_v4_pic_si ());
21933 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
21934 }
21935 else if (TARGET_ELF && DEFAULT_ABI == ABI_V4 && flag_pic == 2)
21936 {
21937 char buf[30];
21938 rtx temp0 = (fromprolog
21939 ? gen_rtx_REG (Pmode, 0)
21940 : gen_reg_rtx (Pmode));
21941
21942 if (fromprolog)
21943 {
21944 rtx symF, symL;
21945
21946 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
21947 symF = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
21948
21949 ASM_GENERATE_INTERNAL_LABEL (buf, "LCL", rs6000_pic_labelno);
21950 symL = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
21951
21952 emit_insn (gen_load_toc_v4_PIC_1 (symF));
21953 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
21954 emit_insn (gen_load_toc_v4_PIC_2 (temp0, dest, symL, symF));
21955 }
21956 else
21957 {
21958 rtx tocsym, lab;
21959
21960 tocsym = gen_rtx_SYMBOL_REF (Pmode, toc_label_name);
21961 lab = gen_label_rtx ();
21962 emit_insn (gen_load_toc_v4_PIC_1b (tocsym, lab));
21963 emit_move_insn (dest, gen_rtx_REG (Pmode, LR_REGNO));
21964 if (TARGET_LINK_STACK)
21965 emit_insn (gen_addsi3 (dest, dest, GEN_INT (4)));
21966 emit_move_insn (temp0, gen_rtx_MEM (Pmode, dest));
21967 }
21968 emit_insn (gen_addsi3 (dest, temp0, dest));
21969 }
21970 else if (TARGET_ELF && !TARGET_AIX && flag_pic == 0 && TARGET_MINIMAL_TOC)
21971 {
21972 /* This is for AIX code running in non-PIC ELF32. */
21973 char buf[30];
21974 rtx realsym;
21975 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
21976 realsym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
21977
21978 emit_insn (gen_elf_high (dest, realsym));
21979 emit_insn (gen_elf_low (dest, dest, realsym));
21980 }
21981 else
21982 {
21983 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
21984
21985 if (TARGET_32BIT)
21986 emit_insn (gen_load_toc_aix_si (dest));
21987 else
21988 emit_insn (gen_load_toc_aix_di (dest));
21989 }
21990 }
21991
21992 /* Emit instructions to restore the link register after determining where
21993 its value has been stored. */
21994
21995 void
21996 rs6000_emit_eh_reg_restore (rtx source, rtx scratch)
21997 {
21998 rs6000_stack_t *info = rs6000_stack_info ();
21999 rtx operands[2];
22000
22001 operands[0] = source;
22002 operands[1] = scratch;
22003
22004 if (info->lr_save_p)
22005 {
22006 rtx frame_rtx = stack_pointer_rtx;
22007 HOST_WIDE_INT sp_offset = 0;
22008 rtx tmp;
22009
22010 if (frame_pointer_needed
22011 || cfun->calls_alloca
22012 || info->total_size > 32767)
22013 {
22014 tmp = gen_frame_mem (Pmode, frame_rtx);
22015 emit_move_insn (operands[1], tmp);
22016 frame_rtx = operands[1];
22017 }
22018 else if (info->push_p)
22019 sp_offset = info->total_size;
22020
22021 tmp = plus_constant (Pmode, frame_rtx,
22022 info->lr_save_offset + sp_offset);
22023 tmp = gen_frame_mem (Pmode, tmp);
22024 emit_move_insn (tmp, operands[0]);
22025 }
22026 else
22027 emit_move_insn (gen_rtx_REG (Pmode, LR_REGNO), operands[0]);
22028
22029 /* Freeze lr_save_p. We've just emitted rtl that depends on the
22030 state of lr_save_p so any change from here on would be a bug. In
22031 particular, stop rs6000_ra_ever_killed from considering the SET
22032 of lr we may have added just above. */
22033 cfun->machine->lr_save_state = info->lr_save_p + 1;
22034 }
22035
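/* Alias set for accesses through the TOC, created lazily by
   get_TOC_alias_set below.  */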
22036 static GTY(()) alias_set_type set = -1;
22037
22038 alias_set_type
22039 get_TOC_alias_set (void)
22040 {
22041 if (set == -1)
22042 set = new_alias_set ();
22043 return set;
22044 }
22045
22046 /* This returns nonzero if the current function uses the TOC. This is
22047 determined by the presence of (use (unspec ... UNSPEC_TOC)), which
22048 is generated by the ABI_V4 load_toc_* patterns. */
22049 #if TARGET_ELF
22050 static int
22051 uses_TOC (void)
22052 {
22053 rtx insn;
22054
22055 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
22056 if (INSN_P (insn))
22057 {
22058 rtx pat = PATTERN (insn);
22059 int i;
22060
22061 if (GET_CODE (pat) == PARALLEL)
22062 for (i = 0; i < XVECLEN (pat, 0); i++)
22063 {
22064 rtx sub = XVECEXP (pat, 0, i);
22065 if (GET_CODE (sub) == USE)
22066 {
22067 sub = XEXP (sub, 0);
22068 if (GET_CODE (sub) == UNSPEC
22069 && XINT (sub, 1) == UNSPEC_TOC)
22070 return 1;
22071 }
22072 }
22073 }
22074 return 0;
22075 }
22076 #endif
22077
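/* Return an rtx describing a TOC-relative reference to SYMBOL. Under
   the small code model the UNSPEC_TOCREL is used directly; otherwise
   a HIGH/LO_SUM pair is built, with the HIGH part moved into
   LARGETOC_REG when that register is supplied.  */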
22078 rtx
22079 create_TOC_reference (rtx symbol, rtx largetoc_reg)
22080 {
22081 rtx tocrel, tocreg, hi;
22082
22083 if (TARGET_DEBUG_ADDR)
22084 {
22085 if (GET_CODE (symbol) == SYMBOL_REF)
22086 fprintf (stderr, "\ncreate_TOC_reference, (symbol_ref %s)\n",
22087 XSTR (symbol, 0));
22088 else
22089 {
22090 fprintf (stderr, "\ncreate_TOC_reference, code %s:\n",
22091 GET_RTX_NAME (GET_CODE (symbol)));
22092 debug_rtx (symbol);
22093 }
22094 }
22095
22096 if (!can_create_pseudo_p ())
22097 df_set_regs_ever_live (TOC_REGISTER, true);
22098
22099 tocreg = gen_rtx_REG (Pmode, TOC_REGISTER);
22100 tocrel = gen_rtx_UNSPEC (Pmode, gen_rtvec (2, symbol, tocreg), UNSPEC_TOCREL);
22101 if (TARGET_CMODEL == CMODEL_SMALL || can_create_pseudo_p ())
22102 return tocrel;
22103
22104 hi = gen_rtx_HIGH (Pmode, copy_rtx (tocrel));
22105 if (largetoc_reg != NULL)
22106 {
22107 emit_move_insn (largetoc_reg, hi);
22108 hi = largetoc_reg;
22109 }
22110 return gen_rtx_LO_SUM (Pmode, hi, tocrel);
22111 }
22112
22113 /* Issue assembly directives that create a reference to the given DWARF
22114 FRAME_TABLE_LABEL from the current function section. */
22115 void
22116 rs6000_aix_asm_output_dwarf_table_ref (char * frame_table_label)
22117 {
22118 fprintf (asm_out_file, "\t.ref %s\n",
22119 (* targetm.strip_name_encoding) (frame_table_label));
22120 }
22121 \f
22122 /* This ties together stack memory (MEM with an alias set of frame_alias_set)
22123 and the change to the stack pointer. */
22124
22125 static void
22126 rs6000_emit_stack_tie (rtx fp, bool hard_frame_needed)
22127 {
22128 rtvec p;
22129 int i;
22130 rtx regs[3];
22131
22132 i = 0;
22133 regs[i++] = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
22134 if (hard_frame_needed)
22135 regs[i++] = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
22136 if (!(REGNO (fp) == STACK_POINTER_REGNUM
22137 || (hard_frame_needed
22138 && REGNO (fp) == HARD_FRAME_POINTER_REGNUM)))
22139 regs[i++] = fp;
22140
22141 p = rtvec_alloc (i);
22142 while (--i >= 0)
22143 {
22144 rtx mem = gen_frame_mem (BLKmode, regs[i]);
22145 RTVEC_ELT (p, i) = gen_rtx_SET (VOIDmode, mem, const0_rtx);
22146 }
22147
22148 emit_insn (gen_stack_tie (gen_rtx_PARALLEL (VOIDmode, p)));
22149 }
22150
22151 /* Emit the correct code for allocating stack space, as insns.
22152 If COPY_REG, make sure a copy of the old frame is left there.
22153 The generated code may use hard register 0 as a temporary. */
22154
22155 static void
22156 rs6000_emit_allocate_stack (HOST_WIDE_INT size, rtx copy_reg, int copy_off)
22157 {
22158 rtx_insn *insn;
22159 rtx stack_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
22160 rtx tmp_reg = gen_rtx_REG (Pmode, 0);
22161 rtx todec = gen_int_mode (-size, Pmode);
22162 rtx par, set, mem;
22163
22164 if (INTVAL (todec) != -size)
22165 {
22166 warning (0, "stack frame too large");
22167 emit_insn (gen_trap ());
22168 return;
22169 }
22170
22171 if (crtl->limit_stack)
22172 {
22173 if (REG_P (stack_limit_rtx)
22174 && REGNO (stack_limit_rtx) > 1
22175 && REGNO (stack_limit_rtx) <= 31)
22176 {
22177 emit_insn (gen_add3_insn (tmp_reg, stack_limit_rtx, GEN_INT (size)));
22178 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
22179 const0_rtx));
22180 }
22181 else if (GET_CODE (stack_limit_rtx) == SYMBOL_REF
22182 && TARGET_32BIT
22183 && DEFAULT_ABI == ABI_V4)
22184 {
22185 rtx toload = gen_rtx_CONST (VOIDmode,
22186 gen_rtx_PLUS (Pmode,
22187 stack_limit_rtx,
22188 GEN_INT (size)));
22189
22190 emit_insn (gen_elf_high (tmp_reg, toload));
22191 emit_insn (gen_elf_low (tmp_reg, tmp_reg, toload));
22192 emit_insn (gen_cond_trap (LTU, stack_reg, tmp_reg,
22193 const0_rtx));
22194 }
22195 else
22196 warning (0, "stack limit expression is not supported");
22197 }
22198
22199 if (copy_reg)
22200 {
22201 if (copy_off != 0)
22202 emit_insn (gen_add3_insn (copy_reg, stack_reg, GEN_INT (copy_off)));
22203 else
22204 emit_move_insn (copy_reg, stack_reg);
22205 }
22206
22207 if (size > 32767)
22208 {
22209 /* Need a note here so that try_split doesn't get confused. */
22210 if (get_last_insn () == NULL_RTX)
22211 emit_note (NOTE_INSN_DELETED);
22212 insn = emit_move_insn (tmp_reg, todec);
22213 try_split (PATTERN (insn), insn, 0);
22214 todec = tmp_reg;
22215 }
22216
22217 insn = emit_insn (TARGET_32BIT
22218 ? gen_movsi_update_stack (stack_reg, stack_reg,
22219 todec, stack_reg)
22220 : gen_movdi_di_update_stack (stack_reg, stack_reg,
22221 todec, stack_reg));
22222 /* Since we didn't use gen_frame_mem to generate the MEM, grab
22223 it now and set the alias set/attributes. The above gen_*_update
22224 calls will generate a PARALLEL with the MEM set being the first
22225 operation. */
22226 par = PATTERN (insn);
22227 gcc_assert (GET_CODE (par) == PARALLEL);
22228 set = XVECEXP (par, 0, 0);
22229 gcc_assert (GET_CODE (set) == SET);
22230 mem = SET_DEST (set);
22231 gcc_assert (MEM_P (mem));
22232 MEM_NOTRAP_P (mem) = 1;
22233 set_mem_alias_set (mem, get_frame_alias_set ());
22234
22235 RTX_FRAME_RELATED_P (insn) = 1;
22236 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
22237 gen_rtx_SET (VOIDmode, stack_reg,
22238 gen_rtx_PLUS (Pmode, stack_reg,
22239 GEN_INT (-size))));
22240 }
22241
22242 #define PROBE_INTERVAL (1 << STACK_CHECK_PROBE_INTERVAL_EXP)
22243
22244 #if PROBE_INTERVAL > 32768
22245 #error Cannot use indexed addressing mode for stack probing
22246 #endif
22247
22248 /* Emit code to probe a range of stack addresses from FIRST to FIRST+SIZE,
22249 inclusive. These are offsets from the current stack pointer. */
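/* For example, with FIRST == 16 and SIZE == 12288, and assuming the
   default PROBE_INTERVAL of 4096, the small-count path below probes
   at SP-4112, SP-8208 and SP-12304.  */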
22250
22251 static void
22252 rs6000_emit_probe_stack_range (HOST_WIDE_INT first, HOST_WIDE_INT size)
22253 {
22254 /* See if we have a constant small number of probes to generate. If so,
22255 that's the easy case. */
22256 if (first + size <= 32768)
22257 {
22258 HOST_WIDE_INT i;
22259
22260 /* Probe at FIRST + N * PROBE_INTERVAL for values of N from 1 until
22261 it exceeds SIZE. If only one probe is needed, this will not
22262 generate any code. Then probe at FIRST + SIZE. */
22263 for (i = PROBE_INTERVAL; i < size; i += PROBE_INTERVAL)
22264 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
22265 -(first + i)));
22266
22267 emit_stack_probe (plus_constant (Pmode, stack_pointer_rtx,
22268 -(first + size)));
22269 }
22270
22271 /* Otherwise, do the same as above, but in a loop. Note that we must be
22272 extra careful with variables wrapping around because we might be at
22273 the very top (or the very bottom) of the address space and we have
22274 to be able to handle this case properly; in particular, we use an
22275 equality test for the loop condition. */
22276 else
22277 {
22278 HOST_WIDE_INT rounded_size;
22279 rtx r12 = gen_rtx_REG (Pmode, 12);
22280 rtx r0 = gen_rtx_REG (Pmode, 0);
22281
22282 /* Sanity check for the addressing mode we're going to use. */
22283 gcc_assert (first <= 32768);
22284
22285 /* Step 1: round SIZE to the previous multiple of the interval. */
22286
22287 rounded_size = size & -PROBE_INTERVAL;
22288
22289
22290 /* Step 2: compute initial and final value of the loop counter. */
22291
22292 /* TEST_ADDR = SP + FIRST. */
22293 emit_insn (gen_rtx_SET (VOIDmode, r12,
22294 plus_constant (Pmode, stack_pointer_rtx,
22295 -first)));
22296
22297 /* LAST_ADDR = SP + FIRST + ROUNDED_SIZE. */
22298 if (rounded_size > 32768)
22299 {
22300 emit_move_insn (r0, GEN_INT (-rounded_size));
22301 emit_insn (gen_rtx_SET (VOIDmode, r0,
22302 gen_rtx_PLUS (Pmode, r12, r0)));
22303 }
22304 else
22305 emit_insn (gen_rtx_SET (VOIDmode, r0,
22306 plus_constant (Pmode, r12, -rounded_size)));
22307
22308
22309 /* Step 3: the loop
22310
22311 while (TEST_ADDR != LAST_ADDR)
22312 {
22313 TEST_ADDR = TEST_ADDR + PROBE_INTERVAL
22314 probe at TEST_ADDR
22315 }
22316
22317 probes at FIRST + N * PROBE_INTERVAL for values of N from 1
22318 until it is equal to ROUNDED_SIZE. */
22319
22320 if (TARGET_64BIT)
22321 emit_insn (gen_probe_stack_rangedi (r12, r12, r0));
22322 else
22323 emit_insn (gen_probe_stack_rangesi (r12, r12, r0));
22324
22325
22326 /* Step 4: probe at FIRST + SIZE if we cannot assert at compile-time
22327 that SIZE is equal to ROUNDED_SIZE. */
22328
22329 if (size != rounded_size)
22330 emit_stack_probe (plus_constant (Pmode, r12, rounded_size - size));
22331 }
22332 }
22333
22334 /* Probe a range of stack addresses from REG1 to REG2 inclusive. These are
22335 absolute addresses. */
22336
22337 const char *
22338 output_probe_stack_range (rtx reg1, rtx reg2)
22339 {
22340 static int labelno = 0;
22341 char loop_lab[32], end_lab[32];
22342 rtx xops[2];
22343
22344 ASM_GENERATE_INTERNAL_LABEL (loop_lab, "LPSRL", labelno);
22345 ASM_GENERATE_INTERNAL_LABEL (end_lab, "LPSRE", labelno++);
22346
22347 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, loop_lab);
22348
22349 /* Jump to END_LAB if TEST_ADDR == LAST_ADDR. */
22350 xops[0] = reg1;
22351 xops[1] = reg2;
22352 if (TARGET_64BIT)
22353 output_asm_insn ("cmpd 0,%0,%1", xops);
22354 else
22355 output_asm_insn ("cmpw 0,%0,%1", xops);
22356
22357 fputs ("\tbeq 0,", asm_out_file);
22358 assemble_name_raw (asm_out_file, end_lab);
22359 fputc ('\n', asm_out_file);
22360
22361 /* TEST_ADDR = TEST_ADDR + PROBE_INTERVAL. */
22362 xops[1] = GEN_INT (-PROBE_INTERVAL);
22363 output_asm_insn ("addi %0,%0,%1", xops);
22364
22365 /* Probe at TEST_ADDR and branch. */
22366 xops[1] = gen_rtx_REG (Pmode, 0);
22367 output_asm_insn ("stw %1,0(%0)", xops);
22368 fprintf (asm_out_file, "\tb ");
22369 assemble_name_raw (asm_out_file, loop_lab);
22370 fputc ('\n', asm_out_file);
22371
22372 ASM_OUTPUT_INTERNAL_LABEL (asm_out_file, end_lab);
22373
22374 return "";
22375 }
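/* Schematically, with PROBE_INTERVAL == 4096 and the register choices
   made by rs6000_emit_probe_stack_range above (r12 = TEST_ADDR,
   r0 = LAST_ADDR), the 32-bit loop emitted here looks like:

       LPSRL0:
               cmpw 0,12,0          # TEST_ADDR == LAST_ADDR ?
               beq 0,LPSRE0
               addi 12,12,-4096     # TEST_ADDR -= PROBE_INTERVAL
               stw 0,0(12)          # probe at TEST_ADDR
               b LPSRL0
       LPSRE0:

   The exact label spelling is target-dependent.  */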
22376
22377 /* Add to 'insn' a note which is PATTERN (INSN) but with REG replaced
22378 with (plus:P (reg 1) VAL), and with REG2 replaced with RREG if REG2
22379 is not NULL. It would be nice if dwarf2out_frame_debug_expr could
22380 deduce these equivalences by itself so it wasn't necessary to hold
22381 its hand so much. Don't be tempted to always supply d2_f_d_e with
22382 the actual cfa register, i.e. r31 when we are using a hard frame
22383 pointer. That fails when saving regs off r1, and sched moves the
22384 r31 setup past the reg saves. */
22385
22386 static rtx
22387 rs6000_frame_related (rtx insn, rtx reg, HOST_WIDE_INT val,
22388 rtx reg2, rtx rreg, rtx split_reg)
22389 {
22390 rtx real, temp;
22391
22392 if (REGNO (reg) == STACK_POINTER_REGNUM && reg2 == NULL_RTX)
22393 {
22394 /* No need for any replacement. Just set RTX_FRAME_RELATED_P. */
22395 int i;
22396
22397 gcc_checking_assert (val == 0);
22398 real = PATTERN (insn);
22399 if (GET_CODE (real) == PARALLEL)
22400 for (i = 0; i < XVECLEN (real, 0); i++)
22401 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
22402 {
22403 rtx set = XVECEXP (real, 0, i);
22404
22405 RTX_FRAME_RELATED_P (set) = 1;
22406 }
22407 RTX_FRAME_RELATED_P (insn) = 1;
22408 return insn;
22409 }
22410
22411 /* copy_rtx will not make unique copies of registers, so we need to
22412 ensure we don't have unwanted sharing here. */
22413 if (reg == reg2)
22414 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
22415
22416 if (reg == rreg)
22417 reg = gen_raw_REG (GET_MODE (reg), REGNO (reg));
22418
22419 real = copy_rtx (PATTERN (insn));
22420
22421 if (reg2 != NULL_RTX)
22422 real = replace_rtx (real, reg2, rreg);
22423
22424 if (REGNO (reg) == STACK_POINTER_REGNUM)
22425 gcc_checking_assert (val == 0);
22426 else
22427 real = replace_rtx (real, reg,
22428 gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode,
22429 STACK_POINTER_REGNUM),
22430 GEN_INT (val)));
22431
22432 /* We expect that 'real' is either a SET or a PARALLEL containing
22433 SETs (and possibly other stuff). In a PARALLEL, all the SETs
22434 are important so they all have to be marked RTX_FRAME_RELATED_P. */
22435
22436 if (GET_CODE (real) == SET)
22437 {
22438 rtx set = real;
22439
22440 temp = simplify_rtx (SET_SRC (set));
22441 if (temp)
22442 SET_SRC (set) = temp;
22443 temp = simplify_rtx (SET_DEST (set));
22444 if (temp)
22445 SET_DEST (set) = temp;
22446 if (GET_CODE (SET_DEST (set)) == MEM)
22447 {
22448 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
22449 if (temp)
22450 XEXP (SET_DEST (set), 0) = temp;
22451 }
22452 }
22453 else
22454 {
22455 int i;
22456
22457 gcc_assert (GET_CODE (real) == PARALLEL);
22458 for (i = 0; i < XVECLEN (real, 0); i++)
22459 if (GET_CODE (XVECEXP (real, 0, i)) == SET)
22460 {
22461 rtx set = XVECEXP (real, 0, i);
22462
22463 temp = simplify_rtx (SET_SRC (set));
22464 if (temp)
22465 SET_SRC (set) = temp;
22466 temp = simplify_rtx (SET_DEST (set));
22467 if (temp)
22468 SET_DEST (set) = temp;
22469 if (GET_CODE (SET_DEST (set)) == MEM)
22470 {
22471 temp = simplify_rtx (XEXP (SET_DEST (set), 0));
22472 if (temp)
22473 XEXP (SET_DEST (set), 0) = temp;
22474 }
22475 RTX_FRAME_RELATED_P (set) = 1;
22476 }
22477 }
22478
22479 /* If a store insn has been split into multiple insns, the
22480 true source register is given by split_reg. */
22481 if (split_reg != NULL_RTX)
22482 real = gen_rtx_SET (VOIDmode, SET_DEST (real), split_reg);
22483
22484 RTX_FRAME_RELATED_P (insn) = 1;
22485 add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);
22486
22487 return insn;
22488 }
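/* A worked example of the substitution performed above, with invented
   offsets: if INSN is

       (set (mem (plus (reg 11) (const_int 8))) (reg 30))

   and r11 was set up as sp + 32000, then calling
   rs6000_frame_related (insn, r11, 32000, NULL_RTX, NULL_RTX, NULL_RTX)
   attaches a REG_FRAME_RELATED_EXPR note that, after replace_rtx and
   simplify_rtx, reads

       (set (mem (plus (reg 1) (const_int 32008))) (reg 30))

   so the DWARF machinery sees the save at a stack-pointer-relative
   address.  */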
22489
22490 /* Returns an insn that has a vrsave set operation with the
22491 appropriate CLOBBERs. */
22492
22493 static rtx
22494 generate_set_vrsave (rtx reg, rs6000_stack_t *info, int epiloguep)
22495 {
22496 int nclobs, i;
22497 rtx insn, clobs[TOTAL_ALTIVEC_REGS + 1];
22498 rtx vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
22499
22500 clobs[0]
22501 = gen_rtx_SET (VOIDmode,
22502 vrsave,
22503 gen_rtx_UNSPEC_VOLATILE (SImode,
22504 gen_rtvec (2, reg, vrsave),
22505 UNSPECV_SET_VRSAVE));
22506
22507 nclobs = 1;
22508
22509 /* We need to clobber the registers in the mask so the scheduler
22510 does not move sets to VRSAVE before sets of AltiVec registers.
22511
22512 However, if the function receives nonlocal gotos, reload will set
22513 all call saved registers live. We will end up with:
22514
22515 (set (reg 999) (mem))
22516 (parallel [ (set (reg vrsave) (unspec blah))
22517 (clobber (reg 999))])
22518
22519 The clobber will cause the store into reg 999 to be dead, and
22520 flow will attempt to delete an epilogue insn. In this case, we
22521 need an unspec use/set of the register. */
22522
22523 for (i = FIRST_ALTIVEC_REGNO; i <= LAST_ALTIVEC_REGNO; ++i)
22524 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
22525 {
22526 if (!epiloguep || call_used_regs [i])
22527 clobs[nclobs++] = gen_rtx_CLOBBER (VOIDmode,
22528 gen_rtx_REG (V4SImode, i));
22529 else
22530 {
22531 rtx reg = gen_rtx_REG (V4SImode, i);
22532
22533 clobs[nclobs++]
22534 = gen_rtx_SET (VOIDmode,
22535 reg,
22536 gen_rtx_UNSPEC (V4SImode,
22537 gen_rtvec (1, reg), 27));
22538 }
22539 }
22540
22541 insn = gen_rtx_PARALLEL (VOIDmode, rtvec_alloc (nclobs));
22542
22543 for (i = 0; i < nclobs; ++i)
22544 XVECEXP (insn, 0, i) = clobs[i];
22545
22546 return insn;
22547 }
22548
22549 static rtx
22550 gen_frame_set (rtx reg, rtx frame_reg, int offset, bool store)
22551 {
22552 rtx addr, mem;
22553
22554 addr = gen_rtx_PLUS (Pmode, frame_reg, GEN_INT (offset));
22555 mem = gen_frame_mem (GET_MODE (reg), addr);
22556 return gen_rtx_SET (VOIDmode, store ? mem : reg, store ? reg : mem);
22557 }
22558
22559 static rtx
22560 gen_frame_load (rtx reg, rtx frame_reg, int offset)
22561 {
22562 return gen_frame_set (reg, frame_reg, offset, false);
22563 }
22564
22565 static rtx
22566 gen_frame_store (rtx reg, rtx frame_reg, int offset)
22567 {
22568 return gen_frame_set (reg, frame_reg, offset, true);
22569 }
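/* For example, on a 32-bit target gen_frame_store (fpreg, sp_reg_rtx, 16)
   with fpreg = (reg:DF 63) yields

       (set (mem:DF (plus:SI (reg:SI 1) (const_int 16))) (reg:DF 63))

   with the MEM marked as frame memory; gen_frame_load simply swaps
   the two operands.  */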
22570
22571 /* Save a register into the frame, and emit RTX_FRAME_RELATED_P notes.
22572 Save REGNO into [FRAME_REG + OFFSET] in mode MODE. */
22573
22574 static rtx
22575 emit_frame_save (rtx frame_reg, enum machine_mode mode,
22576 unsigned int regno, int offset, HOST_WIDE_INT frame_reg_to_sp)
22577 {
22578 rtx reg, insn;
22579
22580 /* Assert that none of the cases needing register indexed addressing apply. */
22581 gcc_checking_assert (!((TARGET_ALTIVEC_ABI && ALTIVEC_VECTOR_MODE (mode))
22582 || (TARGET_VSX && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
22583 || (TARGET_E500_DOUBLE && mode == DFmode)
22584 || (TARGET_SPE_ABI
22585 && SPE_VECTOR_MODE (mode)
22586 && !SPE_CONST_OFFSET_OK (offset))));
22587
22588 reg = gen_rtx_REG (mode, regno);
22589 insn = emit_insn (gen_frame_store (reg, frame_reg, offset));
22590 return rs6000_frame_related (insn, frame_reg, frame_reg_to_sp,
22591 NULL_RTX, NULL_RTX, NULL_RTX);
22592 }
22593
22594 /* Emit an offset memory reference suitable for a frame store, while
22595 converting to a valid addressing mode. */
22596
22597 static rtx
22598 gen_frame_mem_offset (enum machine_mode mode, rtx reg, int offset)
22599 {
22600 rtx int_rtx, offset_rtx;
22601
22602 int_rtx = GEN_INT (offset);
22603
22604 if ((TARGET_SPE_ABI && SPE_VECTOR_MODE (mode) && !SPE_CONST_OFFSET_OK (offset))
22605 || (TARGET_E500_DOUBLE && mode == DFmode))
22606 {
22607 offset_rtx = gen_rtx_REG (Pmode, FIXED_SCRATCH);
22608 emit_move_insn (offset_rtx, int_rtx);
22609 }
22610 else
22611 offset_rtx = int_rtx;
22612
22613 return gen_frame_mem (mode, gen_rtx_PLUS (Pmode, reg, offset_rtx));
22614 }
22615
22616 #ifndef TARGET_FIX_AND_CONTINUE
22617 #define TARGET_FIX_AND_CONTINUE 0
22618 #endif
22619
22620 /* It's really GPR 13 or 14, FPR 14 and VR 20. We need the smallest. */
22621 #define FIRST_SAVRES_REGISTER FIRST_SAVED_GP_REGNO
22622 #define LAST_SAVRES_REGISTER 31
22623 #define N_SAVRES_REGISTERS (LAST_SAVRES_REGISTER - FIRST_SAVRES_REGISTER + 1)
22624
22625 enum {
22626 SAVRES_LR = 0x1,
22627 SAVRES_SAVE = 0x2,
22628 SAVRES_REG = 0x0c,
22629 SAVRES_GPR = 0,
22630 SAVRES_FPR = 4,
22631 SAVRES_VR = 8
22632 };
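/* The bits compose in the obvious way; e.g.
   (SAVRES_SAVE | SAVRES_FPR | SAVRES_LR) selects the FPR save routine
   that also handles LR, and (sel & SAVRES_REG) extracts the two-bit
   GPR/FPR/VR field.  */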
22633
22634 static GTY(()) rtx savres_routine_syms[N_SAVRES_REGISTERS][12];
22635
22636 /* Temporary holding space for an out-of-line register save/restore
22637 routine name. */
22638 static char savres_routine_name[30];
22639
22640 /* Return the name for an out-of-line register save/restore routine.
22641 SEL selects save or restore, the register class (GPR/FPR/VR), and the LR variant; REGNO is the first register handled. */
22642
22643 static char *
22644 rs6000_savres_routine_name (rs6000_stack_t *info, int regno, int sel)
22645 {
22646 const char *prefix = "";
22647 const char *suffix = "";
22648
22649 /* Different targets are supposed to define
22650 {SAVE,RESTORE}_FP_{PREFIX,SUFFIX} with the idea that the needed
22651 routine name could be defined with:
22652
22653 sprintf (name, "%s%d%s", SAVE_FP_PREFIX, regno, SAVE_FP_SUFFIX)
22654
22655 This is a nice idea in theory, but in reality, things are
22656 complicated in several ways:
22657
22658 - ELF targets have save/restore routines for GPRs.
22659
22660 - SPE targets use different prefixes for 32/64-bit registers, and
22661 neither of them fits neatly in the FOO_{PREFIX,SUFFIX} regimen.
22662
22663 - PPC64 ELF targets have routines for save/restore of GPRs that
22664 differ in what they do with the link register, so having a set
22665 prefix doesn't work. (We only use one of the save routines at
22666 the moment, though.)
22667
22668 - PPC32 elf targets have "exit" versions of the restore routines
22669 that restore the link register and can save some extra space.
22670 These require an extra suffix. (There are also "tail" versions
22671 of the restore routines and "GOT" versions of the save routines,
22672 but we don't generate those at present. Same problems apply,
22673 though.)
22674
22675 We deal with all this by synthesizing our own prefix/suffix and
22676 using that for the simple sprintf call shown above. */
22677 if (TARGET_SPE)
22678 {
22679 /* No floating point saves on the SPE. */
22680 gcc_assert ((sel & SAVRES_REG) == SAVRES_GPR);
22681
22682 if ((sel & SAVRES_SAVE))
22683 prefix = info->spe_64bit_regs_used ? "_save64gpr_" : "_save32gpr_";
22684 else
22685 prefix = info->spe_64bit_regs_used ? "_rest64gpr_" : "_rest32gpr_";
22686
22687 if ((sel & SAVRES_LR))
22688 suffix = "_x";
22689 }
22690 else if (DEFAULT_ABI == ABI_V4)
22691 {
22692 if (TARGET_64BIT)
22693 goto aix_names;
22694
22695 if ((sel & SAVRES_REG) == SAVRES_GPR)
22696 prefix = (sel & SAVRES_SAVE) ? "_savegpr_" : "_restgpr_";
22697 else if ((sel & SAVRES_REG) == SAVRES_FPR)
22698 prefix = (sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_";
22699 else if ((sel & SAVRES_REG) == SAVRES_VR)
22700 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
22701 else
22702 abort ();
22703
22704 if ((sel & SAVRES_LR))
22705 suffix = "_x";
22706 }
22707 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
22708 {
22709 #if !defined (POWERPC_LINUX) && !defined (POWERPC_FREEBSD)
22710 /* No out-of-line save/restore routines for GPRs on AIX. */
22711 gcc_assert (!TARGET_AIX || (sel & SAVRES_REG) != SAVRES_GPR);
22712 #endif
22713
22714 aix_names:
22715 if ((sel & SAVRES_REG) == SAVRES_GPR)
22716 prefix = ((sel & SAVRES_SAVE)
22717 ? ((sel & SAVRES_LR) ? "_savegpr0_" : "_savegpr1_")
22718 : ((sel & SAVRES_LR) ? "_restgpr0_" : "_restgpr1_"));
22719 else if ((sel & SAVRES_REG) == SAVRES_FPR)
22720 {
22721 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
22722 if ((sel & SAVRES_LR))
22723 prefix = ((sel & SAVRES_SAVE) ? "_savefpr_" : "_restfpr_");
22724 else
22725 #endif
22726 {
22727 prefix = (sel & SAVRES_SAVE) ? SAVE_FP_PREFIX : RESTORE_FP_PREFIX;
22728 suffix = (sel & SAVRES_SAVE) ? SAVE_FP_SUFFIX : RESTORE_FP_SUFFIX;
22729 }
22730 }
22731 else if ((sel & SAVRES_REG) == SAVRES_VR)
22732 prefix = (sel & SAVRES_SAVE) ? "_savevr_" : "_restvr_";
22733 else
22734 abort ();
22735 }
22736
22737 if (DEFAULT_ABI == ABI_DARWIN)
22738 {
22739 /* The Darwin approach is (slightly) different, in order to be
22740 compatible with code generated by the system toolchain. There is a
22741 single symbol for the start of the save sequence, and the code here
22742 embeds an offset into that code on the basis of the first register
22743 to be saved. */
22744 prefix = (sel & SAVRES_SAVE) ? "save" : "rest" ;
22745 if ((sel & SAVRES_REG) == SAVRES_GPR)
22746 sprintf (savres_routine_name, "*%sGPR%s%s%.0d ; %s r%d-r31", prefix,
22747 ((sel & SAVRES_LR) ? "x" : ""), (regno == 13 ? "" : "+"),
22748 (regno - 13) * 4, prefix, regno);
22749 else if ((sel & SAVRES_REG) == SAVRES_FPR)
22750 sprintf (savres_routine_name, "*%sFP%s%.0d ; %s f%d-f31", prefix,
22751 (regno == 14 ? "" : "+"), (regno - 14) * 4, prefix, regno);
22752 else if ((sel & SAVRES_REG) == SAVRES_VR)
22753 sprintf (savres_routine_name, "*%sVEC%s%.0d ; %s v%d-v31", prefix,
22754 (regno == 20 ? "" : "+"), (regno - 20) * 8, prefix, regno);
22755 else
22756 abort ();
22757 }
22758 else
22759 sprintf (savres_routine_name, "%s%d%s", prefix, regno, suffix);
22760
22761 return savres_routine_name;
22762 }
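/* A few sample results, for concreteness (regno 29, chosen arbitrarily):
   a 32-bit V.4 GPR save with the LR variant gives "_savegpr_29_x";
   a 64-bit ELF GPR save with LR gives "_savegpr0_29"; Darwin instead
   encodes an offset into a single save sequence, e.g.
   "*saveGPR+64 ; save r29-r31".  */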
22763
22764 /* Return an RTL SYMBOL_REF for an out-of-line register save/restore routine.
22765 SEL selects save or restore, the register class, and the LR variant. */
22766
22767 static rtx
22768 rs6000_savres_routine_sym (rs6000_stack_t *info, int sel)
22769 {
22770 int regno = ((sel & SAVRES_REG) == SAVRES_GPR
22771 ? info->first_gp_reg_save
22772 : (sel & SAVRES_REG) == SAVRES_FPR
22773 ? info->first_fp_reg_save - 32
22774 : (sel & SAVRES_REG) == SAVRES_VR
22775 ? info->first_altivec_reg_save - FIRST_ALTIVEC_REGNO
22776 : -1);
22777 rtx sym;
22778 int select = sel;
22779
22780 /* On the SPE, we never have any FPRs, but we do have 32/64-bit
22781 versions of the gpr routines. */
22782 if (TARGET_SPE_ABI && (sel & SAVRES_REG) == SAVRES_GPR
22783 && info->spe_64bit_regs_used)
22784 select ^= SAVRES_FPR ^ SAVRES_GPR;
22785
22786 /* Don't generate bogus routine names. */
22787 gcc_assert (FIRST_SAVRES_REGISTER <= regno
22788 && regno <= LAST_SAVRES_REGISTER
22789 && select >= 0 && select <= 12);
22790
22791 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select];
22792
22793 if (sym == NULL)
22794 {
22795 char *name;
22796
22797 name = rs6000_savres_routine_name (info, regno, sel);
22798
22799 sym = savres_routine_syms[regno-FIRST_SAVRES_REGISTER][select]
22800 = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (name));
22801 SYMBOL_REF_FLAGS (sym) |= SYMBOL_FLAG_FUNCTION;
22802 }
22803
22804 return sym;
22805 }
22806
22807 /* Emit a sequence of insns, including a stack tie if needed, for
22808 resetting the stack pointer. If UPDT_REGNO is not 1, then don't
22809 reset the stack pointer, but move the base of the frame into
22810 reg UPDT_REGNO for use by out-of-line register restore routines. */
22811
22812 static rtx
22813 rs6000_emit_stack_reset (rs6000_stack_t *info,
22814 rtx frame_reg_rtx, HOST_WIDE_INT frame_off,
22815 unsigned updt_regno)
22816 {
22817 rtx updt_reg_rtx;
22818
22819 /* This blockage is needed so that sched doesn't decide to move
22820 the sp change before the register restores. */
22821 if (DEFAULT_ABI == ABI_V4
22822 || (TARGET_SPE_ABI
22823 && info->spe_64bit_regs_used != 0
22824 && info->first_gp_reg_save != 32))
22825 rs6000_emit_stack_tie (frame_reg_rtx, frame_pointer_needed);
22826
22827 /* If we are restoring registers out-of-line, we will be using the
22828 "exit" variants of the restore routines, which will reset the
22829 stack for us. But we do need to point updt_reg into the
22830 right place for those routines. */
22831 updt_reg_rtx = gen_rtx_REG (Pmode, updt_regno);
22832
22833 if (frame_off != 0)
22834 return emit_insn (gen_add3_insn (updt_reg_rtx,
22835 frame_reg_rtx, GEN_INT (frame_off)));
22836 else if (REGNO (frame_reg_rtx) != updt_regno)
22837 return emit_move_insn (updt_reg_rtx, frame_reg_rtx);
22838
22839 return NULL_RTX;
22840 }
22841
22842 /* Return the register number used as a pointer by out-of-line
22843 save/restore functions. */
22844
22845 static inline unsigned
22846 ptr_regno_for_savres (int sel)
22847 {
22848 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
22849 return (sel & SAVRES_REG) == SAVRES_FPR || (sel & SAVRES_LR) ? 1 : 12;
22850 return DEFAULT_ABI == ABI_DARWIN && (sel & SAVRES_REG) == SAVRES_FPR ? 1 : 11;
22851 }
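/* In tabular form, as a quick reference (derived from the two returns
   above):

       ABI          FPR routines   others w/ LR   others w/o LR
       AIX/ELFv2    r1             r1             r12
       Darwin       r1             r11            r11
       V.4          r11            r11            r11
   */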
22852
22853 /* Construct a parallel rtx describing the effect of a call to an
22854 out-of-line register save/restore routine, and emit the insn
22855 or jump_insn as appropriate. */
22856
22857 static rtx
22858 rs6000_emit_savres_rtx (rs6000_stack_t *info,
22859 rtx frame_reg_rtx, int save_area_offset, int lr_offset,
22860 enum machine_mode reg_mode, int sel)
22861 {
22862 int i;
22863 int offset, start_reg, end_reg, n_regs, use_reg;
22864 int reg_size = GET_MODE_SIZE (reg_mode);
22865 rtx sym;
22866 rtvec p;
22867 rtx par, insn;
22868
22869 offset = 0;
22870 start_reg = ((sel & SAVRES_REG) == SAVRES_GPR
22871 ? info->first_gp_reg_save
22872 : (sel & SAVRES_REG) == SAVRES_FPR
22873 ? info->first_fp_reg_save
22874 : (sel & SAVRES_REG) == SAVRES_VR
22875 ? info->first_altivec_reg_save
22876 : -1);
22877 end_reg = ((sel & SAVRES_REG) == SAVRES_GPR
22878 ? 32
22879 : (sel & SAVRES_REG) == SAVRES_FPR
22880 ? 64
22881 : (sel & SAVRES_REG) == SAVRES_VR
22882 ? LAST_ALTIVEC_REGNO + 1
22883 : -1);
22884 n_regs = end_reg - start_reg;
22885 p = rtvec_alloc (3 + ((sel & SAVRES_LR) ? 1 : 0)
22886 + ((sel & SAVRES_REG) == SAVRES_VR ? 1 : 0)
22887 + n_regs);
22888
22889 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
22890 RTVEC_ELT (p, offset++) = ret_rtx;
22891
22892 RTVEC_ELT (p, offset++)
22893 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
22894
22895 sym = rs6000_savres_routine_sym (info, sel);
22896 RTVEC_ELT (p, offset++) = gen_rtx_USE (VOIDmode, sym);
22897
22898 use_reg = ptr_regno_for_savres (sel);
22899 if ((sel & SAVRES_REG) == SAVRES_VR)
22900 {
22901 /* Vector regs are saved/restored using [reg+reg] addressing. */
22902 RTVEC_ELT (p, offset++)
22903 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, use_reg));
22904 RTVEC_ELT (p, offset++)
22905 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 0));
22906 }
22907 else
22908 RTVEC_ELT (p, offset++)
22909 = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, use_reg));
22910
22911 for (i = 0; i < end_reg - start_reg; i++)
22912 RTVEC_ELT (p, i + offset)
22913 = gen_frame_set (gen_rtx_REG (reg_mode, start_reg + i),
22914 frame_reg_rtx, save_area_offset + reg_size * i,
22915 (sel & SAVRES_SAVE) != 0);
22916
22917 if ((sel & SAVRES_SAVE) && (sel & SAVRES_LR))
22918 RTVEC_ELT (p, i + offset)
22919 = gen_frame_store (gen_rtx_REG (Pmode, 0), frame_reg_rtx, lr_offset);
22920
22921 par = gen_rtx_PARALLEL (VOIDmode, p);
22922
22923 if (!(sel & SAVRES_SAVE) && (sel & SAVRES_LR))
22924 {
22925 insn = emit_jump_insn (par);
22926 JUMP_LABEL (insn) = ret_rtx;
22927 }
22928 else
22929 insn = emit_insn (par);
22930 return insn;
22931 }
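/* A sketch of the PARALLEL built above for a 32-bit V.4 GPR save with
   the LR variant, assuming first_gp_reg_save == 28 and r11 as the
   frame/pointer register (offsets elided):

       (parallel [(clobber (reg LR))
                  (use (symbol_ref "_savegpr_28_x"))
                  (use (reg 11))
                  (set (mem (plus (reg 11) ...)) (reg 28))
                  ...
                  (set (mem (plus (reg 11) ...)) (reg 31))
                  (set (mem (plus (reg 11) ...)) (reg 0))])  ; LR copy

   For a restore with LR, a (return) element comes first as well, and
   the insn is emitted as a jump_insn.  */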
22932
22933 /* Emit code to store CR fields that need to be saved into REG. */
22934
22935 static void
22936 rs6000_emit_move_from_cr (rtx reg)
22937 {
22938 /* Only the ELFv2 ABI allows storing only selected fields. */
22939 if (DEFAULT_ABI == ABI_ELFv2 && TARGET_MFCRF)
22940 {
22941 int i, cr_reg[8], count = 0;
22942
22943 /* Collect CR fields that must be saved. */
22944 for (i = 0; i < 8; i++)
22945 if (save_reg_p (CR0_REGNO + i))
22946 cr_reg[count++] = i;
22947
22948 /* If it's just a single one, use mfcrf. */
22949 if (count == 1)
22950 {
22951 rtvec p = rtvec_alloc (1);
22952 rtvec r = rtvec_alloc (2);
22953 RTVEC_ELT (r, 0) = gen_rtx_REG (CCmode, CR0_REGNO + cr_reg[0]);
22954 RTVEC_ELT (r, 1) = GEN_INT (1 << (7 - cr_reg[0]));
22955 RTVEC_ELT (p, 0)
22956 = gen_rtx_SET (VOIDmode, reg,
22957 gen_rtx_UNSPEC (SImode, r, UNSPEC_MOVESI_FROM_CR));
22958
22959 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
22960 return;
22961 }
22962
22963 /* ??? It might be better to handle count == 2 / 3 cases here
22964 as well, using logical operations to combine the values. */
22965 }
22966
22967 emit_insn (gen_movesi_from_cr (reg));
22968 }
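/* For instance, if CR2 is the only field that must be saved, the
   PARALLEL above matches the mfcrf pattern and emits roughly

       mfcrf Rn,0x20        # FXM mask is 1 << (7 - 2)

   whereas the fallback emits a full "mfcr Rn".  */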
22969
22970 /* Determine whether GP register REG is really used. */
22971
22972 static bool
22973 rs6000_reg_live_or_pic_offset_p (int reg)
22974 {
22975 /* If the function calls eh_return, claim used all the registers that would
22976 be checked for liveness otherwise. This is required for the PIC offset
22977 register with -mminimal-toc on AIX, as it is advertised as "fixed" for
22978 register allocation purposes in this case. */
22979
22980 return (((crtl->calls_eh_return || df_regs_ever_live_p (reg))
22981 && (!call_used_regs[reg]
22982 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
22983 && !TARGET_SINGLE_PIC_BASE
22984 && TARGET_TOC && TARGET_MINIMAL_TOC)))
22985 || (reg == RS6000_PIC_OFFSET_TABLE_REGNUM
22986 && !TARGET_SINGLE_PIC_BASE
22987 && ((DEFAULT_ABI == ABI_V4 && flag_pic != 0)
22988 || (DEFAULT_ABI == ABI_DARWIN && flag_pic))));
22989 }
22990
22991 /* Emit function prologue as insns. */
22992
22993 void
22994 rs6000_emit_prologue (void)
22995 {
22996 rs6000_stack_t *info = rs6000_stack_info ();
22997 enum machine_mode reg_mode = Pmode;
22998 int reg_size = TARGET_32BIT ? 4 : 8;
22999 rtx sp_reg_rtx = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
23000 rtx frame_reg_rtx = sp_reg_rtx;
23001 unsigned int cr_save_regno;
23002 rtx cr_save_rtx = NULL_RTX;
23003 rtx insn;
23004 int strategy;
23005 int using_static_chain_p = (cfun->static_chain_decl != NULL_TREE
23006 && df_regs_ever_live_p (STATIC_CHAIN_REGNUM)
23007 && call_used_regs[STATIC_CHAIN_REGNUM]);
23008 /* Offset to top of frame for frame_reg and sp respectively. */
23009 HOST_WIDE_INT frame_off = 0;
23010 HOST_WIDE_INT sp_off = 0;
23011
23012 #ifdef ENABLE_CHECKING
23013 /* Track and check usage of r0, r11, r12. */
23014 int reg_inuse = using_static_chain_p ? 1 << 11 : 0;
23015 #define START_USE(R) do \
23016 { \
23017 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
23018 reg_inuse |= 1 << (R); \
23019 } while (0)
23020 #define END_USE(R) do \
23021 { \
23022 gcc_assert ((reg_inuse & (1 << (R))) != 0); \
23023 reg_inuse &= ~(1 << (R)); \
23024 } while (0)
23025 #define NOT_INUSE(R) do \
23026 { \
23027 gcc_assert ((reg_inuse & (1 << (R))) == 0); \
23028 } while (0)
23029 #else
23030 #define START_USE(R) do {} while (0)
23031 #define END_USE(R) do {} while (0)
23032 #define NOT_INUSE(R) do {} while (0)
23033 #endif
23034
23035 if (DEFAULT_ABI == ABI_ELFv2)
23036 {
23037 cfun->machine->r2_setup_needed = df_regs_ever_live_p (TOC_REGNUM);
23038
23039 /* With -mminimal-toc we may generate an extra use of r2 below. */
23040 if (!TARGET_SINGLE_PIC_BASE
23041 && TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
23042 cfun->machine->r2_setup_needed = true;
23043 }
23044
23045
23046 if (flag_stack_usage_info)
23047 current_function_static_stack_size = info->total_size;
23048
23049 if (flag_stack_check == STATIC_BUILTIN_STACK_CHECK)
23050 {
23051 HOST_WIDE_INT size = info->total_size;
23052
23053 if (crtl->is_leaf && !cfun->calls_alloca)
23054 {
23055 if (size > PROBE_INTERVAL && size > STACK_CHECK_PROTECT)
23056 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT,
23057 size - STACK_CHECK_PROTECT);
23058 }
23059 else if (size > 0)
23060 rs6000_emit_probe_stack_range (STACK_CHECK_PROTECT, size);
23061 }
23062
23063 if (TARGET_FIX_AND_CONTINUE)
23064 {
23065 /* gdb on darwin arranges to forward a function from the old
23066 address by modifying the first 5 instructions of the function
23067 to branch to the overriding function. This is necessary to
23068 permit function pointers that point to the old function to
23069 actually forward to the new function. */
23070 emit_insn (gen_nop ());
23071 emit_insn (gen_nop ());
23072 emit_insn (gen_nop ());
23073 emit_insn (gen_nop ());
23074 emit_insn (gen_nop ());
23075 }
23076
23077 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
23078 {
23079 reg_mode = V2SImode;
23080 reg_size = 8;
23081 }
23082
23083 /* Handle world saves specially here. */
23084 if (WORLD_SAVE_P (info))
23085 {
23086 int i, j, sz;
23087 rtx treg;
23088 rtvec p;
23089 rtx reg0;
23090
23091 /* save_world expects lr in r0. */
23092 reg0 = gen_rtx_REG (Pmode, 0);
23093 if (info->lr_save_p)
23094 {
23095 insn = emit_move_insn (reg0,
23096 gen_rtx_REG (Pmode, LR_REGNO));
23097 RTX_FRAME_RELATED_P (insn) = 1;
23098 }
23099
23100 /* The SAVE_WORLD and RESTORE_WORLD routines make a number of
23101 assumptions about the offsets of various bits of the stack
23102 frame. */
23103 gcc_assert (info->gp_save_offset == -220
23104 && info->fp_save_offset == -144
23105 && info->lr_save_offset == 8
23106 && info->cr_save_offset == 4
23107 && info->push_p
23108 && info->lr_save_p
23109 && (!crtl->calls_eh_return
23110 || info->ehrd_offset == -432)
23111 && info->vrsave_save_offset == -224
23112 && info->altivec_save_offset == -416);
23113
23114 treg = gen_rtx_REG (SImode, 11);
23115 emit_move_insn (treg, GEN_INT (-info->total_size));
23116
23117 /* SAVE_WORLD takes the caller's LR in R0 and the frame size
23118 in R11. It also clobbers R12, so beware! */
23119
23120 /* Preserve CR2 for save_world prologues. */
23121 sz = 5;
23122 sz += 32 - info->first_gp_reg_save;
23123 sz += 64 - info->first_fp_reg_save;
23124 sz += LAST_ALTIVEC_REGNO - info->first_altivec_reg_save + 1;
23125 p = rtvec_alloc (sz);
23126 j = 0;
23127 RTVEC_ELT (p, j++) = gen_rtx_CLOBBER (VOIDmode,
23128 gen_rtx_REG (SImode,
23129 LR_REGNO));
23130 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
23131 gen_rtx_SYMBOL_REF (Pmode,
23132 "*save_world"));
23133 /* We do floats first so that the instruction pattern matches
23134 properly. */
23135 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
23136 RTVEC_ELT (p, j++)
23137 = gen_frame_store (gen_rtx_REG (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
23138 ? DFmode : SFmode,
23139 info->first_fp_reg_save + i),
23140 frame_reg_rtx,
23141 info->fp_save_offset + frame_off + 8 * i);
23142 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
23143 RTVEC_ELT (p, j++)
23144 = gen_frame_store (gen_rtx_REG (V4SImode,
23145 info->first_altivec_reg_save + i),
23146 frame_reg_rtx,
23147 info->altivec_save_offset + frame_off + 16 * i);
23148 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23149 RTVEC_ELT (p, j++)
23150 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
23151 frame_reg_rtx,
23152 info->gp_save_offset + frame_off + reg_size * i);
23153
23154 /* CR register traditionally saved as CR2. */
23155 RTVEC_ELT (p, j++)
23156 = gen_frame_store (gen_rtx_REG (SImode, CR2_REGNO),
23157 frame_reg_rtx, info->cr_save_offset + frame_off);
23158 /* Explain the use of R0. */
23159 if (info->lr_save_p)
23160 RTVEC_ELT (p, j++)
23161 = gen_frame_store (reg0,
23162 frame_reg_rtx, info->lr_save_offset + frame_off);
23163 /* Explain what happens to the stack pointer. */
23164 {
23165 rtx newval = gen_rtx_PLUS (Pmode, sp_reg_rtx, treg);
23166 RTVEC_ELT (p, j++) = gen_rtx_SET (VOIDmode, sp_reg_rtx, newval);
23167 }
23168
23169 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
23170 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
23171 treg, GEN_INT (-info->total_size), NULL_RTX);
23172 sp_off = frame_off = info->total_size;
23173 }
23174
23175 strategy = info->savres_strategy;
23176
23177 /* For V.4, update stack before we do any saving and set back pointer. */
23178 if (! WORLD_SAVE_P (info)
23179 && info->push_p
23180 && (DEFAULT_ABI == ABI_V4
23181 || crtl->calls_eh_return))
23182 {
23183 bool need_r11 = (TARGET_SPE
23184 ? (!(strategy & SAVE_INLINE_GPRS)
23185 && info->spe_64bit_regs_used == 0)
23186 : (!(strategy & SAVE_INLINE_FPRS)
23187 || !(strategy & SAVE_INLINE_GPRS)
23188 || !(strategy & SAVE_INLINE_VRS)));
23189 int ptr_regno = -1;
23190 rtx ptr_reg = NULL_RTX;
23191 int ptr_off = 0;
23192
23193 if (info->total_size < 32767)
23194 frame_off = info->total_size;
23195 else if (need_r11)
23196 ptr_regno = 11;
23197 else if (info->cr_save_p
23198 || info->lr_save_p
23199 || info->first_fp_reg_save < 64
23200 || info->first_gp_reg_save < 32
23201 || info->altivec_size != 0
23202 || info->vrsave_mask != 0
23203 || crtl->calls_eh_return)
23204 ptr_regno = 12;
23205 else
23206 {
23207 /* The prologue won't be saving any regs so there is no need
23208 to set up a frame register to access any frame save area.
23209 We also won't be using frame_off anywhere below, but set
23210 the correct value anyway to protect against future
23211 changes to this function. */
23212 frame_off = info->total_size;
23213 }
23214 if (ptr_regno != -1)
23215 {
23216 /* Set up the frame offset to that needed by the first
23217 out-of-line save function. */
23218 START_USE (ptr_regno);
23219 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
23220 frame_reg_rtx = ptr_reg;
23221 if (!(strategy & SAVE_INLINE_FPRS) && info->fp_size != 0)
23222 gcc_checking_assert (info->fp_save_offset + info->fp_size == 0);
23223 else if (!(strategy & SAVE_INLINE_GPRS) && info->first_gp_reg_save < 32)
23224 ptr_off = info->gp_save_offset + info->gp_size;
23225 else if (!(strategy & SAVE_INLINE_VRS) && info->altivec_size != 0)
23226 ptr_off = info->altivec_save_offset + info->altivec_size;
23227 frame_off = -ptr_off;
23228 }
23229 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
23230 sp_off = info->total_size;
23231 if (frame_reg_rtx != sp_reg_rtx)
23232 rs6000_emit_stack_tie (frame_reg_rtx, false);
23233 }
23234
23235 /* If we use the link register, get it into r0. */
23236 if (!WORLD_SAVE_P (info) && info->lr_save_p)
23237 {
23238 rtx addr, reg, mem;
23239
23240 reg = gen_rtx_REG (Pmode, 0);
23241 START_USE (0);
23242 insn = emit_move_insn (reg, gen_rtx_REG (Pmode, LR_REGNO));
23243 RTX_FRAME_RELATED_P (insn) = 1;
23244
23245 if (!(strategy & (SAVE_NOINLINE_GPRS_SAVES_LR
23246 | SAVE_NOINLINE_FPRS_SAVES_LR)))
23247 {
23248 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
23249 GEN_INT (info->lr_save_offset + frame_off));
23250 mem = gen_rtx_MEM (Pmode, addr);
23251 /* This should not be of rs6000_sr_alias_set, because of
23252 __builtin_return_address. */
23253
23254 insn = emit_move_insn (mem, reg);
23255 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
23256 NULL_RTX, NULL_RTX, NULL_RTX);
23257 END_USE (0);
23258 }
23259 }
23260
23261 /* If we need to save CR, put it into r12 or r11. Choose r12 except when
23262 r12 will be needed by out-of-line gpr restore. */
23263 cr_save_regno = ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
23264 && !(strategy & (SAVE_INLINE_GPRS
23265 | SAVE_NOINLINE_GPRS_SAVES_LR))
23266 ? 11 : 12);
23267 if (!WORLD_SAVE_P (info)
23268 && info->cr_save_p
23269 && REGNO (frame_reg_rtx) != cr_save_regno
23270 && !(using_static_chain_p && cr_save_regno == 11))
23271 {
23272 cr_save_rtx = gen_rtx_REG (SImode, cr_save_regno);
23273 START_USE (cr_save_regno);
23274 rs6000_emit_move_from_cr (cr_save_rtx);
23275 }
23276
23277 /* Do any required saving of FPRs. If there are only one or two to
23278 save, do it inline. Otherwise, call an out-of-line routine. */
23279 if (!WORLD_SAVE_P (info) && (strategy & SAVE_INLINE_FPRS))
23280 {
23281 int i;
23282 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
23283 if (save_reg_p (info->first_fp_reg_save + i))
23284 emit_frame_save (frame_reg_rtx,
23285 (TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
23286 ? DFmode : SFmode),
23287 info->first_fp_reg_save + i,
23288 info->fp_save_offset + frame_off + 8 * i,
23289 sp_off - frame_off);
23290 }
23291 else if (!WORLD_SAVE_P (info) && info->first_fp_reg_save != 64)
23292 {
23293 bool lr = (strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
23294 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
23295 unsigned ptr_regno = ptr_regno_for_savres (sel);
23296 rtx ptr_reg = frame_reg_rtx;
23297
23298 if (REGNO (frame_reg_rtx) == ptr_regno)
23299 gcc_checking_assert (frame_off == 0);
23300 else
23301 {
23302 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
23303 NOT_INUSE (ptr_regno);
23304 emit_insn (gen_add3_insn (ptr_reg,
23305 frame_reg_rtx, GEN_INT (frame_off)));
23306 }
23307 insn = rs6000_emit_savres_rtx (info, ptr_reg,
23308 info->fp_save_offset,
23309 info->lr_save_offset,
23310 DFmode, sel);
23311 rs6000_frame_related (insn, ptr_reg, sp_off,
23312 NULL_RTX, NULL_RTX, NULL_RTX);
23313 if (lr)
23314 END_USE (0);
23315 }
23316
23317 /* Save GPRs. This is done as a PARALLEL if we are using
23318 the store-multiple instructions. */
23319 if (!WORLD_SAVE_P (info)
23320 && TARGET_SPE_ABI
23321 && info->spe_64bit_regs_used != 0
23322 && info->first_gp_reg_save != 32)
23323 {
23324 int i;
23325 rtx spe_save_area_ptr;
23326 HOST_WIDE_INT save_off;
23327 int ool_adjust = 0;
23328
23329 /* Determine whether we can address all of the registers that need
23330 to be saved with an offset from frame_reg_rtx that fits in
23331 the small const field for SPE memory instructions. */
23332 int spe_regs_addressable
23333 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
23334 + reg_size * (32 - info->first_gp_reg_save - 1))
23335 && (strategy & SAVE_INLINE_GPRS));
23336
23337 if (spe_regs_addressable)
23338 {
23339 spe_save_area_ptr = frame_reg_rtx;
23340 save_off = frame_off;
23341 }
23342 else
23343 {
23344 /* Make r11 point to the start of the SPE save area. We need
23345 to be careful here if r11 is holding the static chain. If
23346 it is, then temporarily save it in r0. */
23347 HOST_WIDE_INT offset;
23348
23349 if (!(strategy & SAVE_INLINE_GPRS))
23350 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
23351 offset = info->spe_gp_save_offset + frame_off - ool_adjust;
23352 spe_save_area_ptr = gen_rtx_REG (Pmode, 11);
23353 save_off = frame_off - offset;
23354
23355 if (using_static_chain_p)
23356 {
23357 rtx r0 = gen_rtx_REG (Pmode, 0);
23358
23359 START_USE (0);
23360 gcc_assert (info->first_gp_reg_save > 11);
23361
23362 emit_move_insn (r0, spe_save_area_ptr);
23363 }
23364 else if (REGNO (frame_reg_rtx) != 11)
23365 START_USE (11);
23366
23367 emit_insn (gen_addsi3 (spe_save_area_ptr,
23368 frame_reg_rtx, GEN_INT (offset)));
23369 if (!using_static_chain_p && REGNO (frame_reg_rtx) == 11)
23370 frame_off = -info->spe_gp_save_offset + ool_adjust;
23371 }
23372
23373 if ((strategy & SAVE_INLINE_GPRS))
23374 {
23375 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23376 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
23377 emit_frame_save (spe_save_area_ptr, reg_mode,
23378 info->first_gp_reg_save + i,
23379 (info->spe_gp_save_offset + save_off
23380 + reg_size * i),
23381 sp_off - save_off);
23382 }
23383 else
23384 {
23385 insn = rs6000_emit_savres_rtx (info, spe_save_area_ptr,
23386 info->spe_gp_save_offset + save_off,
23387 0, reg_mode,
23388 SAVRES_SAVE | SAVRES_GPR);
23389
23390 rs6000_frame_related (insn, spe_save_area_ptr, sp_off - save_off,
23391 NULL_RTX, NULL_RTX, NULL_RTX);
23392 }
23393
23394 /* Move the static chain pointer back. */
23395 if (!spe_regs_addressable)
23396 {
23397 if (using_static_chain_p)
23398 {
23399 emit_move_insn (spe_save_area_ptr, gen_rtx_REG (Pmode, 0));
23400 END_USE (0);
23401 }
23402 else if (REGNO (frame_reg_rtx) != 11)
23403 END_USE (11);
23404 }
23405 }
23406 else if (!WORLD_SAVE_P (info) && !(strategy & SAVE_INLINE_GPRS))
23407 {
23408 bool lr = (strategy & SAVE_NOINLINE_GPRS_SAVES_LR) != 0;
23409 int sel = SAVRES_SAVE | SAVRES_GPR | (lr ? SAVRES_LR : 0);
23410 unsigned ptr_regno = ptr_regno_for_savres (sel);
23411 rtx ptr_reg = frame_reg_rtx;
23412 bool ptr_set_up = REGNO (ptr_reg) == ptr_regno;
23413 int end_save = info->gp_save_offset + info->gp_size;
23414 int ptr_off;
23415
23416 if (!ptr_set_up)
23417 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
23418
23419 /* Need to adjust r11 (r12) if we saved any FPRs. */
23420 if (end_save + frame_off != 0)
23421 {
23422 rtx offset = GEN_INT (end_save + frame_off);
23423
23424 if (ptr_set_up)
23425 frame_off = -end_save;
23426 else
23427 NOT_INUSE (ptr_regno);
23428 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
23429 }
23430 else if (!ptr_set_up)
23431 {
23432 NOT_INUSE (ptr_regno);
23433 emit_move_insn (ptr_reg, frame_reg_rtx);
23434 }
23435 ptr_off = -end_save;
23436 insn = rs6000_emit_savres_rtx (info, ptr_reg,
23437 info->gp_save_offset + ptr_off,
23438 info->lr_save_offset + ptr_off,
23439 reg_mode, sel);
23440 rs6000_frame_related (insn, ptr_reg, sp_off - ptr_off,
23441 NULL_RTX, NULL_RTX, NULL_RTX);
23442 if (lr)
23443 END_USE (0);
23444 }
23445 else if (!WORLD_SAVE_P (info) && (strategy & SAVRES_MULTIPLE))
23446 {
23447 rtvec p;
23448 int i;
23449 p = rtvec_alloc (32 - info->first_gp_reg_save);
23450 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23451 RTVEC_ELT (p, i)
23452 = gen_frame_store (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
23453 frame_reg_rtx,
23454 info->gp_save_offset + frame_off + reg_size * i);
23455 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
23456 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
23457 NULL_RTX, NULL_RTX, NULL_RTX);
23458 }
23459 else if (!WORLD_SAVE_P (info))
23460 {
23461 int i;
23462 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
23463 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
23464 emit_frame_save (frame_reg_rtx, reg_mode,
23465 info->first_gp_reg_save + i,
23466 info->gp_save_offset + frame_off + reg_size * i,
23467 sp_off - frame_off);
23468 }
23469
23470 if (crtl->calls_eh_return)
23471 {
23472 unsigned int i;
23473 rtvec p;
23474
23475 for (i = 0; ; ++i)
23476 {
23477 unsigned int regno = EH_RETURN_DATA_REGNO (i);
23478 if (regno == INVALID_REGNUM)
23479 break;
23480 }
23481
23482 p = rtvec_alloc (i);
23483
23484 for (i = 0; ; ++i)
23485 {
23486 unsigned int regno = EH_RETURN_DATA_REGNO (i);
23487 if (regno == INVALID_REGNUM)
23488 break;
23489
23490 insn
23491 = gen_frame_store (gen_rtx_REG (reg_mode, regno),
23492 sp_reg_rtx,
23493 info->ehrd_offset + sp_off + reg_size * (int) i);
23494 RTVEC_ELT (p, i) = insn;
23495 RTX_FRAME_RELATED_P (insn) = 1;
23496 }
23497
23498 insn = emit_insn (gen_blockage ());
23499 RTX_FRAME_RELATED_P (insn) = 1;
23500 add_reg_note (insn, REG_FRAME_RELATED_EXPR, gen_rtx_PARALLEL (VOIDmode, p));
23501 }
23502
23503 /* In AIX ABI we need to make sure r2 is really saved. */
23504 if (TARGET_AIX && crtl->calls_eh_return)
23505 {
23506 rtx tmp_reg, tmp_reg_si, hi, lo, compare_result, toc_save_done, jump;
23507 rtx save_insn, join_insn, note;
23508 long toc_restore_insn;
23509
23510 tmp_reg = gen_rtx_REG (Pmode, 11);
23511 tmp_reg_si = gen_rtx_REG (SImode, 11);
23512 if (using_static_chain_p)
23513 {
23514 START_USE (0);
23515 emit_move_insn (gen_rtx_REG (Pmode, 0), tmp_reg);
23516 }
23517 else
23518 START_USE (11);
23519 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, LR_REGNO));
23520 /* Peek at the instruction to which this function returns. If it's
23521 restoring r2, then we know we've already saved r2. We can't
23522 unconditionally save r2 because the value we have will already
23523 be updated if we arrived at this function via a plt call or
23524 toc adjusting stub. */
23525 emit_move_insn (tmp_reg_si, gen_rtx_MEM (SImode, tmp_reg));
23526 toc_restore_insn = ((TARGET_32BIT ? 0x80410000 : 0xE8410000)
23527 + RS6000_TOC_SAVE_SLOT);
23528 hi = gen_int_mode (toc_restore_insn & ~0xffff, SImode);
23529 emit_insn (gen_xorsi3 (tmp_reg_si, tmp_reg_si, hi));
23530 compare_result = gen_rtx_REG (CCUNSmode, CR0_REGNO);
23531 validate_condition_mode (EQ, CCUNSmode);
23532 lo = gen_int_mode (toc_restore_insn & 0xffff, SImode);
23533 emit_insn (gen_rtx_SET (VOIDmode, compare_result,
23534 gen_rtx_COMPARE (CCUNSmode, tmp_reg_si, lo)));
23535 toc_save_done = gen_label_rtx ();
23536 jump = gen_rtx_IF_THEN_ELSE (VOIDmode,
23537 gen_rtx_EQ (VOIDmode, compare_result,
23538 const0_rtx),
23539 gen_rtx_LABEL_REF (VOIDmode, toc_save_done),
23540 pc_rtx);
23541 jump = emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx, jump));
23542 JUMP_LABEL (jump) = toc_save_done;
23543 LABEL_NUSES (toc_save_done) += 1;
23544
23545 save_insn = emit_frame_save (frame_reg_rtx, reg_mode,
23546 TOC_REGNUM, frame_off + RS6000_TOC_SAVE_SLOT,
23547 sp_off - frame_off);
23548
23549 emit_label (toc_save_done);
23550
23551 /* ??? If we leave SAVE_INSN marked as saving R2, then we'll
23552 have a CFG that has different saves along different paths.
23553 Move the note to a dummy blockage insn, which describes that
23554 R2 is unconditionally saved after the label. */
23555 /* ??? An alternate representation might be a special insn pattern
23556 containing both the branch and the store. That might give the
23557 code that minimizes the number of DW_CFA_advance opcodes more
23558 freedom in placing the annotations. */
23559 note = find_reg_note (save_insn, REG_FRAME_RELATED_EXPR, NULL);
23560 if (note)
23561 remove_note (save_insn, note);
23562 else
23563 note = alloc_reg_note (REG_FRAME_RELATED_EXPR,
23564 copy_rtx (PATTERN (save_insn)), NULL_RTX);
23565 RTX_FRAME_RELATED_P (save_insn) = 0;
23566
23567 join_insn = emit_insn (gen_blockage ());
23568 REG_NOTES (join_insn) = note;
23569 RTX_FRAME_RELATED_P (join_insn) = 1;
23570
23571 if (using_static_chain_p)
23572 {
23573 emit_move_insn (tmp_reg, gen_rtx_REG (Pmode, 0));
23574 END_USE (0);
23575 }
23576 else
23577 END_USE (11);
23578 }
23579
23580 /* Save CR if we use any that must be preserved. */
23581 if (!WORLD_SAVE_P (info) && info->cr_save_p)
23582 {
23583 rtx addr = gen_rtx_PLUS (Pmode, frame_reg_rtx,
23584 GEN_INT (info->cr_save_offset + frame_off));
23585 rtx mem = gen_frame_mem (SImode, addr);
23586
23587 /* If we didn't copy cr before, do so now using r0. */
23588 if (cr_save_rtx == NULL_RTX)
23589 {
23590 START_USE (0);
23591 cr_save_rtx = gen_rtx_REG (SImode, 0);
23592 rs6000_emit_move_from_cr (cr_save_rtx);
23593 }
23594
23595 /* Saving CR requires a two-instruction sequence: one instruction
23596 to move the CR to a general-purpose register, and a second
23597 instruction that stores the GPR to memory.
23598
23599 We do not emit any DWARF CFI records for the first of these,
23600 because we cannot properly represent the fact that CR is saved in
23601 a register. One reason is that we cannot express that multiple
23602 CR fields are saved; another reason is that on 64-bit, the size
23603 of the CR register in DWARF (4 bytes) differs from the size of
23604 a general-purpose register.
23605
23606 This means if any intervening instruction were to clobber one of
23607 the call-saved CR fields, we'd have incorrect CFI. To prevent
23608 this from happening, we mark the store to memory as a use of
23609 those CR fields, which prevents any such instruction from being
23610 scheduled in between the two instructions. */
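/* Schematically, if CR2 and CR3 must be saved, the insn built just
   below is (with CR0_REGNO == 68, so CR2 is reg 70):

       (parallel [(set (mem:SI ...) (reg:SI cr_save_rtx))
                  (use (reg:CC 70))
                  (use (reg:CC 71))])

   and the USEs pin the store after the move from CR.  */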
23611 rtx crsave_v[9];
23612 int n_crsave = 0;
23613 int i;
23614
23615 crsave_v[n_crsave++] = gen_rtx_SET (VOIDmode, mem, cr_save_rtx);
23616 for (i = 0; i < 8; i++)
23617 if (save_reg_p (CR0_REGNO + i))
23618 crsave_v[n_crsave++]
23619 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
23620
23621 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode,
23622 gen_rtvec_v (n_crsave, crsave_v)));
23623 END_USE (REGNO (cr_save_rtx));
23624
23625 /* Now, there's no way that dwarf2out_frame_debug_expr is going to
23626 understand '(unspec:SI [(reg:CC 68) ...] UNSPEC_MOVESI_FROM_CR)',
23627 so we need to construct a frame expression manually. */
23628 RTX_FRAME_RELATED_P (insn) = 1;
23629
23630 /* Update address to be stack-pointer relative, like
23631 rs6000_frame_related would do. */
23632 addr = gen_rtx_PLUS (Pmode, gen_rtx_REG (Pmode, STACK_POINTER_REGNUM),
23633 GEN_INT (info->cr_save_offset + sp_off));
23634 mem = gen_frame_mem (SImode, addr);
23635
23636 if (DEFAULT_ABI == ABI_ELFv2)
23637 {
23638 /* In the ELFv2 ABI we generate separate CFI records for each
23639 CR field that was actually saved. They all point to the
23640 same 32-bit stack slot. */
23641 rtx crframe[8];
23642 int n_crframe = 0;
23643
23644 for (i = 0; i < 8; i++)
23645 if (save_reg_p (CR0_REGNO + i))
23646 {
23647 crframe[n_crframe]
23648 = gen_rtx_SET (VOIDmode, mem,
23649 gen_rtx_REG (SImode, CR0_REGNO + i));
23650
23651 RTX_FRAME_RELATED_P (crframe[n_crframe]) = 1;
23652 n_crframe++;
23653 }
23654
23655 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
23656 gen_rtx_PARALLEL (VOIDmode,
23657 gen_rtvec_v (n_crframe, crframe)));
23658 }
23659 else
23660 {
23661 /* In other ABIs, by convention, we use a single CR regnum to
23662 represent the fact that all call-saved CR fields are saved.
23663 We use CR2_REGNO to be compatible with gcc-2.95 on Linux. */
23664 rtx set = gen_rtx_SET (VOIDmode, mem,
23665 gen_rtx_REG (SImode, CR2_REGNO));
23666 add_reg_note (insn, REG_FRAME_RELATED_EXPR, set);
23667 }
23668 }
23669
23670 /* In the ELFv2 ABI we need to save all call-saved CR fields into
23671 *separate* slots if the routine calls __builtin_eh_return, so
23672 that they can be independently restored by the unwinder. */
23673 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
23674 {
23675 int i, cr_off = info->ehcr_offset;
23676 rtx crsave;
23677
23678 /* ??? We might get better performance by using multiple mfocrf
23679 instructions. */
23680 crsave = gen_rtx_REG (SImode, 0);
23681 emit_insn (gen_movesi_from_cr (crsave));
23682
23683 for (i = 0; i < 8; i++)
23684 if (!call_used_regs[CR0_REGNO + i])
23685 {
23686 rtvec p = rtvec_alloc (2);
23687 RTVEC_ELT (p, 0)
23688 = gen_frame_store (crsave, frame_reg_rtx, cr_off + frame_off);
23689 RTVEC_ELT (p, 1)
23690 = gen_rtx_USE (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i));
23691
23692 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
23693
23694 RTX_FRAME_RELATED_P (insn) = 1;
23695 add_reg_note (insn, REG_FRAME_RELATED_EXPR,
23696 gen_frame_store (gen_rtx_REG (SImode, CR0_REGNO + i),
23697 sp_reg_rtx, cr_off + sp_off));
23698
23699 cr_off += reg_size;
23700 }
23701 }
23702
23703 /* Update stack and set back pointer unless this is V.4,
23704 for which it was done previously. */
23705 if (!WORLD_SAVE_P (info) && info->push_p
23706 && !(DEFAULT_ABI == ABI_V4 || crtl->calls_eh_return))
23707 {
23708 rtx ptr_reg = NULL;
23709 int ptr_off = 0;
23710
23711 /* If saving altivec regs we need to be able to address all save
23712 locations using a 16-bit offset. */
23713 if ((strategy & SAVE_INLINE_VRS) == 0
23714 || (info->altivec_size != 0
23715 && (info->altivec_save_offset + info->altivec_size - 16
23716 + info->total_size - frame_off) > 32767)
23717 || (info->vrsave_size != 0
23718 && (info->vrsave_save_offset
23719 + info->total_size - frame_off) > 32767))
23720 {
23721 int sel = SAVRES_SAVE | SAVRES_VR;
23722 unsigned ptr_regno = ptr_regno_for_savres (sel);
23723
23724 if (using_static_chain_p
23725 && ptr_regno == STATIC_CHAIN_REGNUM)
23726 ptr_regno = 12;
23727 if (REGNO (frame_reg_rtx) != ptr_regno)
23728 START_USE (ptr_regno);
23729 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
23730 frame_reg_rtx = ptr_reg;
23731 ptr_off = info->altivec_save_offset + info->altivec_size;
23732 frame_off = -ptr_off;
23733 }
23734 else if (REGNO (frame_reg_rtx) == 1)
23735 frame_off = info->total_size;
23736 rs6000_emit_allocate_stack (info->total_size, ptr_reg, ptr_off);
23737 sp_off = info->total_size;
23738 if (frame_reg_rtx != sp_reg_rtx)
23739 rs6000_emit_stack_tie (frame_reg_rtx, false);
23740 }
23741
23742 /* Set frame pointer, if needed. */
23743 if (frame_pointer_needed)
23744 {
23745 insn = emit_move_insn (gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM),
23746 sp_reg_rtx);
23747 RTX_FRAME_RELATED_P (insn) = 1;
23748 }
23749
23750 /* Save AltiVec registers if needed. Save here because the red zone does
23751 not always include AltiVec registers. */
23752 if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
23753 && info->altivec_size != 0 && (strategy & SAVE_INLINE_VRS) == 0)
23754 {
23755 int end_save = info->altivec_save_offset + info->altivec_size;
23756 int ptr_off;
23757 /* Oddly, the vector save/restore functions point r0 at the end
23758 of the save area, then use r11 or r12 to load offsets for
23759 [reg+reg] addressing. */
23760 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
23761 int scratch_regno = ptr_regno_for_savres (SAVRES_SAVE | SAVRES_VR);
23762 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
23763
23764 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
23765 NOT_INUSE (0);
23766 if (end_save + frame_off != 0)
23767 {
23768 rtx offset = GEN_INT (end_save + frame_off);
23769
23770 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
23771 }
23772 else
23773 emit_move_insn (ptr_reg, frame_reg_rtx);
23774
23775 ptr_off = -end_save;
23776 insn = rs6000_emit_savres_rtx (info, scratch_reg,
23777 info->altivec_save_offset + ptr_off,
23778 0, V4SImode, SAVRES_SAVE | SAVRES_VR);
23779 rs6000_frame_related (insn, scratch_reg, sp_off - ptr_off,
23780 NULL_RTX, NULL_RTX, NULL_RTX);
23781 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
23782 {
23783 /* The oddity mentioned above clobbered our frame reg. */
23784 emit_move_insn (frame_reg_rtx, ptr_reg);
23785 frame_off = ptr_off;
23786 }
23787 }
23788 else if (!WORLD_SAVE_P (info) && TARGET_ALTIVEC_ABI
23789 && info->altivec_size != 0)
23790 {
23791 int i;
23792
23793 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
23794 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
23795 {
23796 rtx areg, savereg, mem, split_reg;
23797 int offset;
23798
23799 offset = (info->altivec_save_offset + frame_off
23800 + 16 * (i - info->first_altivec_reg_save));
23801
23802 savereg = gen_rtx_REG (V4SImode, i);
23803
23804 NOT_INUSE (0);
23805 areg = gen_rtx_REG (Pmode, 0);
23806 emit_move_insn (areg, GEN_INT (offset));
23807
23808 /* AltiVec addressing mode is [reg+reg]. */
23809 mem = gen_frame_mem (V4SImode,
23810 gen_rtx_PLUS (Pmode, frame_reg_rtx, areg));
23811
23812 insn = emit_move_insn (mem, savereg);
23813
23814 /* When we split a VSX store into two insns, we need to make
23815 sure the DWARF info knows which register we are storing.
23816 Pass it in to be used on the appropriate note. */
23817 if (!BYTES_BIG_ENDIAN
23818 && GET_CODE (PATTERN (insn)) == SET
23819 && GET_CODE (SET_SRC (PATTERN (insn))) == VEC_SELECT)
23820 split_reg = savereg;
23821 else
23822 split_reg = NULL_RTX;
23823
23824 rs6000_frame_related (insn, frame_reg_rtx, sp_off - frame_off,
23825 areg, GEN_INT (offset), split_reg);
23826 }
23827 }
23828
23829 /* VRSAVE is a bit vector representing which AltiVec registers
23830 are used. The OS uses this to determine which vector
23831 registers to save on a context switch. We need to save
23832 VRSAVE on the stack frame, add whatever AltiVec registers we
23833 used in this function, and do the corresponding magic in the
23834 epilogue. */
23835
23836 if (!WORLD_SAVE_P (info)
23837 && TARGET_ALTIVEC
23838 && TARGET_ALTIVEC_VRSAVE
23839 && info->vrsave_mask != 0)
23840 {
23841 rtx reg, vrsave;
23842 int offset;
23843 int save_regno;
23844
23845 /* Get VRSAVE onto a GPR. Note that ABI_V4 and ABI_DARWIN might
23846 be using r12 as frame_reg_rtx and r11 as the static chain
23847 pointer for nested functions. */
23848 save_regno = 12;
23849 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
23850 && !using_static_chain_p)
23851 save_regno = 11;
23852 else if (REGNO (frame_reg_rtx) == 12)
23853 {
23854 save_regno = 11;
23855 if (using_static_chain_p)
23856 save_regno = 0;
23857 }
23858
23859 NOT_INUSE (save_regno);
23860 reg = gen_rtx_REG (SImode, save_regno);
23861 vrsave = gen_rtx_REG (SImode, VRSAVE_REGNO);
23862 if (TARGET_MACHO)
23863 emit_insn (gen_get_vrsave_internal (reg));
23864 else
23865 emit_insn (gen_rtx_SET (VOIDmode, reg, vrsave));
23866
23867 /* Save VRSAVE. */
23868 offset = info->vrsave_save_offset + frame_off;
23869 insn = emit_insn (gen_frame_store (reg, frame_reg_rtx, offset));
23870
23871 /* Include the registers in the mask. */
23872 emit_insn (gen_iorsi3 (reg, reg, GEN_INT ((int) info->vrsave_mask)));
23873
23874 insn = emit_insn (generate_set_vrsave (reg, info, 0));
23875 }
23876
23877 /* If we are using RS6000_PIC_OFFSET_TABLE_REGNUM, we need to set it up. */
23878 if (!TARGET_SINGLE_PIC_BASE
23879 && ((TARGET_TOC && TARGET_MINIMAL_TOC && get_pool_size () != 0)
23880 || (DEFAULT_ABI == ABI_V4
23881 && (flag_pic == 1 || (flag_pic && TARGET_SECURE_PLT))
23882 && df_regs_ever_live_p (RS6000_PIC_OFFSET_TABLE_REGNUM))))
23883 {
23884 /* If emit_load_toc_table will use the link register, we need to save
23885 it. We use R12 for this purpose because emit_load_toc_table
23886 can use register 0. This allows us to use a plain 'blr' to return
23887 from the procedure more often. */
23888 int save_LR_around_toc_setup = (TARGET_ELF
23889 && DEFAULT_ABI == ABI_V4
23890 && flag_pic
23891 && ! info->lr_save_p
23892 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds) > 0);
23893 if (save_LR_around_toc_setup)
23894 {
23895 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
23896 rtx tmp = gen_rtx_REG (Pmode, 12);
23897
23898 insn = emit_move_insn (tmp, lr);
23899 RTX_FRAME_RELATED_P (insn) = 1;
23900
23901 rs6000_emit_load_toc_table (TRUE);
23902
23903 insn = emit_move_insn (lr, tmp);
23904 add_reg_note (insn, REG_CFA_RESTORE, lr);
23905 RTX_FRAME_RELATED_P (insn) = 1;
23906 }
23907 else
23908 rs6000_emit_load_toc_table (TRUE);
23909 }
23910
23911 #if TARGET_MACHO
23912 if (!TARGET_SINGLE_PIC_BASE
23913 && DEFAULT_ABI == ABI_DARWIN
23914 && flag_pic && crtl->uses_pic_offset_table)
23915 {
23916 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
23917 rtx src = gen_rtx_SYMBOL_REF (Pmode, MACHOPIC_FUNCTION_BASE_NAME);
23918
23919 /* Save and restore LR locally around this call (in R0). */
23920 if (!info->lr_save_p)
23921 emit_move_insn (gen_rtx_REG (Pmode, 0), lr);
23922
23923 emit_insn (gen_load_macho_picbase (src));
23924
23925 emit_move_insn (gen_rtx_REG (Pmode,
23926 RS6000_PIC_OFFSET_TABLE_REGNUM),
23927 lr);
23928
23929 if (!info->lr_save_p)
23930 emit_move_insn (lr, gen_rtx_REG (Pmode, 0));
23931 }
23932 #endif
23933
23934 /* If we need to, save the TOC register after doing the stack setup.
23935 Do not emit eh frame info for this save. The unwinder wants info,
23936 conceptually attached to instructions in this function, about
23937 register values in the caller of this function. This R2 may have
23938 already been changed from the value in the caller.
23939 We don't attempt to write accurate DWARF EH frame info for R2
23940 because code emitted by gcc for a (non-pointer) function call
23941 doesn't save and restore R2. Instead, R2 is managed out-of-line
23942 by a linker generated plt call stub when the function resides in
23943 a shared library. This behaviour is costly to describe in DWARF,
23944 both in terms of the size of DWARF info and the time taken in the
23945 unwinder to interpret it. R2 changes, apart from the
23946 calls_eh_return case earlier in this function, are handled by
23947 linux-unwind.h frob_update_context. */
23948 if (rs6000_save_toc_in_prologue_p ())
23949 {
23950 rtx reg = gen_rtx_REG (reg_mode, TOC_REGNUM);
23951 emit_insn (gen_frame_store (reg, sp_reg_rtx, RS6000_TOC_SAVE_SLOT));
23952 }
23953 }
23954
23955 /* Write function prologue. */
23956
23957 static void
23958 rs6000_output_function_prologue (FILE *file,
23959 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
23960 {
23961 rs6000_stack_t *info = rs6000_stack_info ();
23962
23963 if (TARGET_DEBUG_STACK)
23964 debug_stack_info (info);
23965
23966 /* Write .extern for any function we will call to save and restore
23967 fp values. */
23968 if (info->first_fp_reg_save < 64
23969 && !TARGET_MACHO
23970 && !TARGET_ELF)
23971 {
23972 char *name;
23973 int regno = info->first_fp_reg_save - 32;
23974
23975 if ((info->savres_strategy & SAVE_INLINE_FPRS) == 0)
23976 {
23977 bool lr = (info->savres_strategy & SAVE_NOINLINE_FPRS_SAVES_LR) != 0;
23978 int sel = SAVRES_SAVE | SAVRES_FPR | (lr ? SAVRES_LR : 0);
23979 name = rs6000_savres_routine_name (info, regno, sel);
23980 fprintf (file, "\t.extern %s\n", name);
23981 }
23982 if ((info->savres_strategy & REST_INLINE_FPRS) == 0)
23983 {
23984 bool lr = (info->savres_strategy
23985 & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
23986 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
23987 name = rs6000_savres_routine_name (info, regno, sel);
23988 fprintf (file, "\t.extern %s\n", name);
23989 }
23990 }
23991
23992 /* ELFv2 ABI r2 setup code and local entry point. This must follow
23993 immediately after the global entry point label. */
23994 if (DEFAULT_ABI == ABI_ELFv2 && cfun->machine->r2_setup_needed)
23995 {
23996 const char *name = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
23997
23998 fprintf (file, "0:\taddis 2,12,.TOC.-0b@ha\n");
23999 fprintf (file, "\taddi 2,2,.TOC.-0b@l\n");
24000
24001 fputs ("\t.localentry\t", file);
24002 assemble_name (file, name);
24003 fputs (",.-", file);
24004 assemble_name (file, name);
24005 fputs ("\n", file);
24006 }
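/* Illustrative sketch of what the block above prints for a function
"foo" (label number and name assumed):

0:	addis 2,12,.TOC.-0b@ha
	addi 2,2,.TOC.-0b@l
	.localentry	foo,.-foo

r2 is rebuilt from r12, which the ELFv2 ABI guarantees holds the
global entry point address on entry.  */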
24007
24008 /* Output -mprofile-kernel code. This needs to be done here instead of
24009 in output_function_profile since it must go after the ELFv2 ABI
24010 local entry point. */
24011 if (TARGET_PROFILE_KERNEL && crtl->profile)
24012 {
24013 gcc_assert (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2);
24014 gcc_assert (!TARGET_32BIT);
24015
24016 asm_fprintf (file, "\tmflr %s\n", reg_names[0]);
24017 asm_fprintf (file, "\tstd %s,16(%s)\n", reg_names[0], reg_names[1]);
24018
24019 /* In the ELFv2 ABI we have no compiler stack word. It must be
24020 the responsibility of _mcount to preserve the static chain
24021 register if required. */
24022 if (DEFAULT_ABI != ABI_ELFv2
24023 && cfun->static_chain_decl != NULL)
24024 {
24025 asm_fprintf (file, "\tstd %s,24(%s)\n",
24026 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
24027 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
24028 asm_fprintf (file, "\tld %s,24(%s)\n",
24029 reg_names[STATIC_CHAIN_REGNUM], reg_names[1]);
24030 }
24031 else
24032 fprintf (file, "\tbl %s\n", RS6000_MCOUNT);
24033 }
24034
24035 rs6000_pic_labelno++;
24036 }
24037
24038 /* Non-zero if vmx regs are restored before the frame pop, zero if
24039 we restore after the pop when possible. */
24040 #define ALWAYS_RESTORE_ALTIVEC_BEFORE_POP 0
24041
24042 /* Restoring cr is a two-step process: loading a reg from the frame
24043 save, then moving the reg to cr. For ABI_V4 we must let the
24044 unwinder know that the stack location is no longer valid at or
24045 before the stack deallocation, but we can't emit a cfa_restore for
24046 cr at the stack deallocation like we do for other registers.
24047 The trouble is that it is possible for the move to cr to be
24048 scheduled after the stack deallocation. So say exactly where cr
24049 is located on each of the two insns. */
24050
24051 static rtx
24052 load_cr_save (int regno, rtx frame_reg_rtx, int offset, bool exit_func)
24053 {
24054 rtx mem = gen_frame_mem_offset (SImode, frame_reg_rtx, offset);
24055 rtx reg = gen_rtx_REG (SImode, regno);
24056 rtx_insn *insn = emit_move_insn (reg, mem);
24057
24058 if (!exit_func && DEFAULT_ABI == ABI_V4)
24059 {
24060 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
24061 rtx set = gen_rtx_SET (VOIDmode, reg, cr);
24062
24063 add_reg_note (insn, REG_CFA_REGISTER, set);
24064 RTX_FRAME_RELATED_P (insn) = 1;
24065 }
24066 return reg;
24067 }
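/* Illustrative sketch (ABI_V4, !exit_func; OFF is a placeholder for
the CR save offset): the load above might assemble to "lwz 12,OFF(1)",
and the REG_CFA_REGISTER note records (set (reg:SI 12) (reg cr2)),
telling the unwinder that cr2 temporarily lives in r12 until
restore_saved_cr moves it back.  */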
24068
24069 /* Reload CR from REG. */
24070
24071 static void
24072 restore_saved_cr (rtx reg, int using_mfcr_multiple, bool exit_func)
24073 {
24074 int count = 0;
24075 int i;
24076
24077 if (using_mfcr_multiple)
24078 {
24079 for (i = 0; i < 8; i++)
24080 if (save_reg_p (CR0_REGNO + i))
24081 count++;
24082 gcc_assert (count);
24083 }
24084
24085 if (using_mfcr_multiple && count > 1)
24086 {
24087 rtx_insn *insn;
24088 rtvec p;
24089 int ndx;
24090
24091 p = rtvec_alloc (count);
24092
24093 ndx = 0;
24094 for (i = 0; i < 8; i++)
24095 if (save_reg_p (CR0_REGNO + i))
24096 {
24097 rtvec r = rtvec_alloc (2);
24098 RTVEC_ELT (r, 0) = reg;
24099 RTVEC_ELT (r, 1) = GEN_INT (1 << (7-i));
24100 RTVEC_ELT (p, ndx) =
24101 gen_rtx_SET (VOIDmode, gen_rtx_REG (CCmode, CR0_REGNO + i),
24102 gen_rtx_UNSPEC (CCmode, r, UNSPEC_MOVESI_TO_CR));
24103 ndx++;
24104 }
24105 insn = emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
24106 gcc_assert (ndx == count);
24107
24108 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
24109 CR field separately. */
24110 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
24111 {
24112 for (i = 0; i < 8; i++)
24113 if (save_reg_p (CR0_REGNO + i))
24114 add_reg_note (insn, REG_CFA_RESTORE,
24115 gen_rtx_REG (SImode, CR0_REGNO + i));
24116
24117 RTX_FRAME_RELATED_P (insn) = 1;
24118 }
24119 }
24120 else
24121 for (i = 0; i < 8; i++)
24122 if (save_reg_p (CR0_REGNO + i))
24123 {
24124 rtx insn = emit_insn (gen_movsi_to_cr_one
24125 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
24126
24127 /* For the ELFv2 ABI we generate a CFA_RESTORE for each
24128 CR field separately, attached to the insn that in fact
24129 restores this particular CR field. */
24130 if (!exit_func && DEFAULT_ABI == ABI_ELFv2 && flag_shrink_wrap)
24131 {
24132 add_reg_note (insn, REG_CFA_RESTORE,
24133 gen_rtx_REG (SImode, CR0_REGNO + i));
24134
24135 RTX_FRAME_RELATED_P (insn) = 1;
24136 }
24137 }
24138
24139 /* For other ABIs, we just generate a single CFA_RESTORE for CR2. */
24140 if (!exit_func && DEFAULT_ABI != ABI_ELFv2
24141 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
24142 {
24143 rtx_insn *insn = get_last_insn ();
24144 rtx cr = gen_rtx_REG (SImode, CR2_REGNO);
24145
24146 add_reg_note (insn, REG_CFA_RESTORE, cr);
24147 RTX_FRAME_RELATED_P (insn) = 1;
24148 }
24149 }
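/* Worked example (illustrative register choice): if CR2, CR3 and CR4
were saved, the PARALLEL above assembles to a single "mtcrf 56,12",
the mask being 1<<(7-2) | 1<<(7-3) | 1<<(7-4) = 0x38 = 56; the
non-multiple path would emit one mtcrf per CR field instead.  */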
24150
24151 /* Like cr, the move to lr instruction can be scheduled after the
24152 stack deallocation, but unlike cr, its stack frame save is still
24153 valid. So we only need to emit the cfa_restore on the correct
24154 instruction. */
24155
24156 static void
24157 load_lr_save (int regno, rtx frame_reg_rtx, int offset)
24158 {
24159 rtx mem = gen_frame_mem_offset (Pmode, frame_reg_rtx, offset);
24160 rtx reg = gen_rtx_REG (Pmode, regno);
24161
24162 emit_move_insn (reg, mem);
24163 }
24164
24165 static void
24166 restore_saved_lr (int regno, bool exit_func)
24167 {
24168 rtx reg = gen_rtx_REG (Pmode, regno);
24169 rtx lr = gen_rtx_REG (Pmode, LR_REGNO);
24170 rtx_insn *insn = emit_move_insn (lr, reg);
24171
24172 if (!exit_func && flag_shrink_wrap)
24173 {
24174 add_reg_note (insn, REG_CFA_RESTORE, lr);
24175 RTX_FRAME_RELATED_P (insn) = 1;
24176 }
24177 }
24178
24179 static rtx
24180 add_crlr_cfa_restore (const rs6000_stack_t *info, rtx cfa_restores)
24181 {
24182 if (DEFAULT_ABI == ABI_ELFv2)
24183 {
24184 int i;
24185 for (i = 0; i < 8; i++)
24186 if (save_reg_p (CR0_REGNO + i))
24187 {
24188 rtx cr = gen_rtx_REG (SImode, CR0_REGNO + i);
24189 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, cr,
24190 cfa_restores);
24191 }
24192 }
24193 else if (info->cr_save_p)
24194 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
24195 gen_rtx_REG (SImode, CR2_REGNO),
24196 cfa_restores);
24197
24198 if (info->lr_save_p)
24199 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
24200 gen_rtx_REG (Pmode, LR_REGNO),
24201 cfa_restores);
24202 return cfa_restores;
24203 }
24204
24205 /* Return true if OFFSET from stack pointer can be clobbered by signals.
24206 V.4 doesn't have any stack cushion; the AIX ABIs have 220 or 288
24207 bytes below the stack pointer that are not clobbered by signals. */
24208
24209 static inline bool
24210 offset_below_red_zone_p (HOST_WIDE_INT offset)
24211 {
24212 return offset < (DEFAULT_ABI == ABI_V4
24213 ? 0
24214 : TARGET_32BIT ? -220 : -288);
24215 }
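/* For example: with the 64-bit 288-byte red zone,
offset_below_red_zone_p (-288) is false (still protected) while
offset_below_red_zone_p (-289) is true; under ABI_V4 any negative
offset qualifies, there being no red zone at all.  */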
24216
24217 /* Append CFA_RESTORES to any existing REG_NOTES on the last insn. */
24218
24219 static void
24220 emit_cfa_restores (rtx cfa_restores)
24221 {
24222 rtx_insn *insn = get_last_insn ();
24223 rtx *loc = &REG_NOTES (insn);
24224
24225 while (*loc)
24226 loc = &XEXP (*loc, 1);
24227 *loc = cfa_restores;
24228 RTX_FRAME_RELATED_P (insn) = 1;
24229 }
24230
24231 /* Emit function epilogue as insns. */
24232
24233 void
24234 rs6000_emit_epilogue (int sibcall)
24235 {
24236 rs6000_stack_t *info;
24237 int restoring_GPRs_inline;
24238 int restoring_FPRs_inline;
24239 int using_load_multiple;
24240 int using_mtcr_multiple;
24241 int use_backchain_to_restore_sp;
24242 int restore_lr;
24243 int strategy;
24244 HOST_WIDE_INT frame_off = 0;
24245 rtx sp_reg_rtx = gen_rtx_REG (Pmode, 1);
24246 rtx frame_reg_rtx = sp_reg_rtx;
24247 rtx cfa_restores = NULL_RTX;
24248 rtx insn;
24249 rtx cr_save_reg = NULL_RTX;
24250 enum machine_mode reg_mode = Pmode;
24251 int reg_size = TARGET_32BIT ? 4 : 8;
24252 int i;
24253 bool exit_func;
24254 unsigned ptr_regno;
24255
24256 info = rs6000_stack_info ();
24257
24258 if (TARGET_SPE_ABI && info->spe_64bit_regs_used != 0)
24259 {
24260 reg_mode = V2SImode;
24261 reg_size = 8;
24262 }
24263
24264 strategy = info->savres_strategy;
24265 using_load_multiple = strategy & SAVRES_MULTIPLE;
24266 restoring_FPRs_inline = sibcall || (strategy & REST_INLINE_FPRS);
24267 restoring_GPRs_inline = sibcall || (strategy & REST_INLINE_GPRS);
24268 using_mtcr_multiple = (rs6000_cpu == PROCESSOR_PPC601
24269 || rs6000_cpu == PROCESSOR_PPC603
24270 || rs6000_cpu == PROCESSOR_PPC750
24271 || optimize_size);
24272 /* Restore via the backchain when we have a large frame, since this
24273 is more efficient than an addis, addi pair. The second condition
24274 here will not trigger at the moment; we don't actually need a
24275 frame pointer for alloca, but the generic parts of the compiler
24276 give us one anyway. */
24277 use_backchain_to_restore_sp = (info->total_size > 32767 - info->lr_save_offset
24278 || (cfun->calls_alloca
24279 && !frame_pointer_needed));
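/* Illustrative asm for the two cases (64-bit; SIZE is a placeholder):
a small frame pops with a single "addi 1,1,SIZE", whereas a frame
beyond the 16-bit displacement range would need an addis/addi pair,
so we instead reload the old sp from the backchain word at offset 0:
"ld 1,0(1)".  */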
24280 restore_lr = (info->lr_save_p
24281 && (restoring_FPRs_inline
24282 || (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR))
24283 && (restoring_GPRs_inline
24284 || info->first_fp_reg_save < 64));
24285
24286 if (WORLD_SAVE_P (info))
24287 {
24288 int i, j;
24289 char rname[30];
24290 const char *alloc_rname;
24291 rtvec p;
24292
24293 /* eh_rest_world_r10 will return to the location saved in the LR
24294 stack slot (which is not likely to be our caller).
24295 Input: R10 -- stack adjustment. Clobbers R0, R11, R12, R7, R8.
24296 rest_world is similar, except any R10 parameter is ignored.
24297 The exception-handling stuff that was here in 2.95 is no
24298 longer necessary. */
24299
24300 p = rtvec_alloc (9
24301 + 1
24302 + 32 - info->first_gp_reg_save
24303 + LAST_ALTIVEC_REGNO + 1 - info->first_altivec_reg_save
24304 + 63 + 1 - info->first_fp_reg_save);
24305
24306 strcpy (rname, ((crtl->calls_eh_return) ?
24307 "*eh_rest_world_r10" : "*rest_world"));
24308 alloc_rname = ggc_strdup (rname);
24309
24310 j = 0;
24311 RTVEC_ELT (p, j++) = ret_rtx;
24312 RTVEC_ELT (p, j++) = gen_rtx_USE (VOIDmode,
24313 gen_rtx_REG (Pmode,
24314 LR_REGNO));
24315 RTVEC_ELT (p, j++)
24316 = gen_rtx_USE (VOIDmode, gen_rtx_SYMBOL_REF (Pmode, alloc_rname));
24317 /* The instruction pattern requires a clobber here;
24318 it is shared with the restVEC helper. */
24319 RTVEC_ELT (p, j++)
24320 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 11));
24321
24322 {
24323 /* CR register traditionally saved as CR2. */
24324 rtx reg = gen_rtx_REG (SImode, CR2_REGNO);
24325 RTVEC_ELT (p, j++)
24326 = gen_frame_load (reg, frame_reg_rtx, info->cr_save_offset);
24327 if (flag_shrink_wrap)
24328 {
24329 cfa_restores = alloc_reg_note (REG_CFA_RESTORE,
24330 gen_rtx_REG (Pmode, LR_REGNO),
24331 cfa_restores);
24332 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24333 }
24334 }
24335
24336 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
24337 {
24338 rtx reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
24339 RTVEC_ELT (p, j++)
24340 = gen_frame_load (reg,
24341 frame_reg_rtx, info->gp_save_offset + reg_size * i);
24342 if (flag_shrink_wrap)
24343 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24344 }
24345 for (i = 0; info->first_altivec_reg_save + i <= LAST_ALTIVEC_REGNO; i++)
24346 {
24347 rtx reg = gen_rtx_REG (V4SImode, info->first_altivec_reg_save + i);
24348 RTVEC_ELT (p, j++)
24349 = gen_frame_load (reg,
24350 frame_reg_rtx, info->altivec_save_offset + 16 * i);
24351 if (flag_shrink_wrap)
24352 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24353 }
24354 for (i = 0; info->first_fp_reg_save + i <= 63; i++)
24355 {
24356 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
24357 ? DFmode : SFmode),
24358 info->first_fp_reg_save + i);
24359 RTVEC_ELT (p, j++)
24360 = gen_frame_load (reg, frame_reg_rtx, info->fp_save_offset + 8 * i);
24361 if (flag_shrink_wrap)
24362 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24363 }
24364 RTVEC_ELT (p, j++)
24365 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, 0));
24366 RTVEC_ELT (p, j++)
24367 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 12));
24368 RTVEC_ELT (p, j++)
24369 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 7));
24370 RTVEC_ELT (p, j++)
24371 = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (SImode, 8));
24372 RTVEC_ELT (p, j++)
24373 = gen_rtx_USE (VOIDmode, gen_rtx_REG (SImode, 10));
24374 insn = emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
24375
24376 if (flag_shrink_wrap)
24377 {
24378 REG_NOTES (insn) = cfa_restores;
24379 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
24380 RTX_FRAME_RELATED_P (insn) = 1;
24381 }
24382 return;
24383 }
24384
24385 /* frame_reg_rtx + frame_off points to the top of this stack frame. */
24386 if (info->push_p)
24387 frame_off = info->total_size;
24388
24389 /* Restore AltiVec registers if we must do so before adjusting the
24390 stack. */
24391 if (TARGET_ALTIVEC_ABI
24392 && info->altivec_size != 0
24393 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24394 || (DEFAULT_ABI != ABI_V4
24395 && offset_below_red_zone_p (info->altivec_save_offset))))
24396 {
24397 int i;
24398 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
24399
24400 gcc_checking_assert (scratch_regno == 11 || scratch_regno == 12);
24401 if (use_backchain_to_restore_sp)
24402 {
24403 int frame_regno = 11;
24404
24405 if ((strategy & REST_INLINE_VRS) == 0)
24406 {
24407 /* Of r11 and r12, select the one not clobbered by an
24408 out-of-line restore function for the frame register. */
24409 frame_regno = 11 + 12 - scratch_regno;
24410 }
24411 frame_reg_rtx = gen_rtx_REG (Pmode, frame_regno);
24412 emit_move_insn (frame_reg_rtx,
24413 gen_rtx_MEM (Pmode, sp_reg_rtx));
24414 frame_off = 0;
24415 }
24416 else if (frame_pointer_needed)
24417 frame_reg_rtx = hard_frame_pointer_rtx;
24418
24419 if ((strategy & REST_INLINE_VRS) == 0)
24420 {
24421 int end_save = info->altivec_save_offset + info->altivec_size;
24422 int ptr_off;
24423 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
24424 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
24425
24426 if (end_save + frame_off != 0)
24427 {
24428 rtx offset = GEN_INT (end_save + frame_off);
24429
24430 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
24431 }
24432 else
24433 emit_move_insn (ptr_reg, frame_reg_rtx);
24434
24435 ptr_off = -end_save;
24436 insn = rs6000_emit_savres_rtx (info, scratch_reg,
24437 info->altivec_save_offset + ptr_off,
24438 0, V4SImode, SAVRES_VR);
24439 }
24440 else
24441 {
24442 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24443 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
24444 {
24445 rtx addr, areg, mem, reg;
24446
24447 areg = gen_rtx_REG (Pmode, 0);
24448 emit_move_insn
24449 (areg, GEN_INT (info->altivec_save_offset
24450 + frame_off
24451 + 16 * (i - info->first_altivec_reg_save)));
24452
24453 /* AltiVec addressing mode is [reg+reg]. */
24454 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
24455 mem = gen_frame_mem (V4SImode, addr);
24456
24457 reg = gen_rtx_REG (V4SImode, i);
24458 emit_move_insn (reg, mem);
24459 }
24460 }
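/* Illustrative sequence for one inline vector reload (register
numbers and OFFSET assumed): "li 0,OFFSET" then "lvx 20,11,0".
AltiVec loads have no reg+displacement form, so the constant is
materialized in r0 and used as the index register.  */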
24461
24462 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24463 if (((strategy & REST_INLINE_VRS) == 0
24464 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
24465 && (flag_shrink_wrap
24466 || (offset_below_red_zone_p
24467 (info->altivec_save_offset
24468 + 16 * (i - info->first_altivec_reg_save)))))
24469 {
24470 rtx reg = gen_rtx_REG (V4SImode, i);
24471 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24472 }
24473 }
24474
24475 /* Restore VRSAVE if we must do so before adjusting the stack. */
24476 if (TARGET_ALTIVEC
24477 && TARGET_ALTIVEC_VRSAVE
24478 && info->vrsave_mask != 0
24479 && (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24480 || (DEFAULT_ABI != ABI_V4
24481 && offset_below_red_zone_p (info->vrsave_save_offset))))
24482 {
24483 rtx reg;
24484
24485 if (frame_reg_rtx == sp_reg_rtx)
24486 {
24487 if (use_backchain_to_restore_sp)
24488 {
24489 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24490 emit_move_insn (frame_reg_rtx,
24491 gen_rtx_MEM (Pmode, sp_reg_rtx));
24492 frame_off = 0;
24493 }
24494 else if (frame_pointer_needed)
24495 frame_reg_rtx = hard_frame_pointer_rtx;
24496 }
24497
24498 reg = gen_rtx_REG (SImode, 12);
24499 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24500 info->vrsave_save_offset + frame_off));
24501
24502 emit_insn (generate_set_vrsave (reg, info, 1));
24503 }
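/* Illustrative asm (OFF assumed): "lwz 12,OFF(11)" followed by
"mtspr 256,12" -- VRSAVE is SPR 256 -- so the live-vector mask is
back in place before the stack pop exposes its save slot.  */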
24504
24505 insn = NULL_RTX;
24506 /* If we have a large stack frame, restore the old stack pointer
24507 using the backchain. */
24508 if (use_backchain_to_restore_sp)
24509 {
24510 if (frame_reg_rtx == sp_reg_rtx)
24511 {
24512 /* Under V.4, don't reset the stack pointer until after we're done
24513 loading the saved registers. */
24514 if (DEFAULT_ABI == ABI_V4)
24515 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24516
24517 insn = emit_move_insn (frame_reg_rtx,
24518 gen_rtx_MEM (Pmode, sp_reg_rtx));
24519 frame_off = 0;
24520 }
24521 else if (ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24522 && DEFAULT_ABI == ABI_V4)
24523 /* frame_reg_rtx has been set up by the altivec restore. */
24524 ;
24525 else
24526 {
24527 insn = emit_move_insn (sp_reg_rtx, frame_reg_rtx);
24528 frame_reg_rtx = sp_reg_rtx;
24529 }
24530 }
24531 /* If we have a frame pointer, we can restore the old stack pointer
24532 from it. */
24533 else if (frame_pointer_needed)
24534 {
24535 frame_reg_rtx = sp_reg_rtx;
24536 if (DEFAULT_ABI == ABI_V4)
24537 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24538 /* Prevent reordering memory accesses against stack pointer restore. */
24539 else if (cfun->calls_alloca
24540 || offset_below_red_zone_p (-info->total_size))
24541 rs6000_emit_stack_tie (frame_reg_rtx, true);
24542
24543 insn = emit_insn (gen_add3_insn (frame_reg_rtx, hard_frame_pointer_rtx,
24544 GEN_INT (info->total_size)));
24545 frame_off = 0;
24546 }
24547 else if (info->push_p
24548 && DEFAULT_ABI != ABI_V4
24549 && !crtl->calls_eh_return)
24550 {
24551 /* Prevent reordering memory accesses against stack pointer restore. */
24552 if (cfun->calls_alloca
24553 || offset_below_red_zone_p (-info->total_size))
24554 rs6000_emit_stack_tie (frame_reg_rtx, false);
24555 insn = emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx,
24556 GEN_INT (info->total_size)));
24557 frame_off = 0;
24558 }
24559 if (insn && frame_reg_rtx == sp_reg_rtx)
24560 {
24561 if (cfa_restores)
24562 {
24563 REG_NOTES (insn) = cfa_restores;
24564 cfa_restores = NULL_RTX;
24565 }
24566 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
24567 RTX_FRAME_RELATED_P (insn) = 1;
24568 }
24569
24570 /* Restore AltiVec registers if we have not done so already. */
24571 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24572 && TARGET_ALTIVEC_ABI
24573 && info->altivec_size != 0
24574 && (DEFAULT_ABI == ABI_V4
24575 || !offset_below_red_zone_p (info->altivec_save_offset)))
24576 {
24577 int i;
24578
24579 if ((strategy & REST_INLINE_VRS) == 0)
24580 {
24581 int end_save = info->altivec_save_offset + info->altivec_size;
24582 int ptr_off;
24583 rtx ptr_reg = gen_rtx_REG (Pmode, 0);
24584 int scratch_regno = ptr_regno_for_savres (SAVRES_VR);
24585 rtx scratch_reg = gen_rtx_REG (Pmode, scratch_regno);
24586
24587 if (end_save + frame_off != 0)
24588 {
24589 rtx offset = GEN_INT (end_save + frame_off);
24590
24591 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx, offset));
24592 }
24593 else
24594 emit_move_insn (ptr_reg, frame_reg_rtx);
24595
24596 ptr_off = -end_save;
24597 insn = rs6000_emit_savres_rtx (info, scratch_reg,
24598 info->altivec_save_offset + ptr_off,
24599 0, V4SImode, SAVRES_VR);
24600 if (REGNO (frame_reg_rtx) == REGNO (scratch_reg))
24601 {
24602 /* Frame reg was clobbered by out-of-line save. Restore it
24603 from ptr_reg, and if we are calling out-of-line gpr or
24604 fpr restore set up the correct pointer and offset. */
24605 unsigned newptr_regno = 1;
24606 if (!restoring_GPRs_inline)
24607 {
24608 bool lr = info->gp_save_offset + info->gp_size == 0;
24609 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
24610 newptr_regno = ptr_regno_for_savres (sel);
24611 end_save = info->gp_save_offset + info->gp_size;
24612 }
24613 else if (!restoring_FPRs_inline)
24614 {
24615 bool lr = !(strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR);
24616 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
24617 newptr_regno = ptr_regno_for_savres (sel);
24618 end_save = info->fp_save_offset + info->fp_size;
24619 }
24620
24621 if (newptr_regno != 1 && REGNO (frame_reg_rtx) != newptr_regno)
24622 frame_reg_rtx = gen_rtx_REG (Pmode, newptr_regno);
24623
24624 if (end_save + ptr_off != 0)
24625 {
24626 rtx offset = GEN_INT (end_save + ptr_off);
24627
24628 frame_off = -end_save;
24629 emit_insn (gen_add3_insn (frame_reg_rtx, ptr_reg, offset));
24630 }
24631 else
24632 {
24633 frame_off = ptr_off;
24634 emit_move_insn (frame_reg_rtx, ptr_reg);
24635 }
24636 }
24637 }
24638 else
24639 {
24640 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24641 if (info->vrsave_mask & ALTIVEC_REG_BIT (i))
24642 {
24643 rtx addr, areg, mem, reg;
24644
24645 areg = gen_rtx_REG (Pmode, 0);
24646 emit_move_insn
24647 (areg, GEN_INT (info->altivec_save_offset
24648 + frame_off
24649 + 16 * (i - info->first_altivec_reg_save)));
24650
24651 /* AltiVec addressing mode is [reg+reg]. */
24652 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, areg);
24653 mem = gen_frame_mem (V4SImode, addr);
24654
24655 reg = gen_rtx_REG (V4SImode, i);
24656 emit_move_insn (reg, mem);
24657 }
24658 }
24659
24660 for (i = info->first_altivec_reg_save; i <= LAST_ALTIVEC_REGNO; ++i)
24661 if (((strategy & REST_INLINE_VRS) == 0
24662 || (info->vrsave_mask & ALTIVEC_REG_BIT (i)) != 0)
24663 && (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap))
24664 {
24665 rtx reg = gen_rtx_REG (V4SImode, i);
24666 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24667 }
24668 }
24669
24670 /* Restore VRSAVE if we have not done so already. */
24671 if (!ALWAYS_RESTORE_ALTIVEC_BEFORE_POP
24672 && TARGET_ALTIVEC
24673 && TARGET_ALTIVEC_VRSAVE
24674 && info->vrsave_mask != 0
24675 && (DEFAULT_ABI == ABI_V4
24676 || !offset_below_red_zone_p (info->vrsave_save_offset)))
24677 {
24678 rtx reg;
24679
24680 reg = gen_rtx_REG (SImode, 12);
24681 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24682 info->vrsave_save_offset + frame_off));
24683
24684 emit_insn (generate_set_vrsave (reg, info, 1));
24685 }
24686
24687 /* If we exit by an out-of-line restore function on ABI_V4 then that
24688 function will deallocate the stack, so we don't need to worry
24689 about the unwinder restoring cr from an invalid stack frame
24690 location. */
24691 exit_func = (!restoring_FPRs_inline
24692 || (!restoring_GPRs_inline
24693 && info->first_fp_reg_save == 64));
24694
24695 /* In the ELFv2 ABI we need to restore all call-saved CR fields from
24696 *separate* slots if the routine calls __builtin_eh_return, so
24697 that they can be independently restored by the unwinder. */
24698 if (DEFAULT_ABI == ABI_ELFv2 && crtl->calls_eh_return)
24699 {
24700 int i, cr_off = info->ehcr_offset;
24701
24702 for (i = 0; i < 8; i++)
24703 if (!call_used_regs[CR0_REGNO + i])
24704 {
24705 rtx reg = gen_rtx_REG (SImode, 0);
24706 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24707 cr_off + frame_off));
24708
24709 insn = emit_insn (gen_movsi_to_cr_one
24710 (gen_rtx_REG (CCmode, CR0_REGNO + i), reg));
24711
24712 if (!exit_func && flag_shrink_wrap)
24713 {
24714 add_reg_note (insn, REG_CFA_RESTORE,
24715 gen_rtx_REG (SImode, CR0_REGNO + i));
24716
24717 RTX_FRAME_RELATED_P (insn) = 1;
24718 }
24719
24720 cr_off += reg_size;
24721 }
24722 }
24723
24724 /* Get the old lr if we saved it. If we are restoring registers
24725 out-of-line, then the out-of-line routines can do this for us. */
24726 if (restore_lr && restoring_GPRs_inline)
24727 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
24728
24729 /* Get the old cr if we saved it. */
24730 if (info->cr_save_p)
24731 {
24732 unsigned cr_save_regno = 12;
24733
24734 if (!restoring_GPRs_inline)
24735 {
24736 /* Ensure we don't use the register used by the out-of-line
24737 gpr register restore below. */
24738 bool lr = info->gp_save_offset + info->gp_size == 0;
24739 int sel = SAVRES_GPR | (lr ? SAVRES_LR : 0);
24740 int gpr_ptr_regno = ptr_regno_for_savres (sel);
24741
24742 if (gpr_ptr_regno == 12)
24743 cr_save_regno = 11;
24744 gcc_checking_assert (REGNO (frame_reg_rtx) != cr_save_regno);
24745 }
24746 else if (REGNO (frame_reg_rtx) == 12)
24747 cr_save_regno = 11;
24748
24749 cr_save_reg = load_cr_save (cr_save_regno, frame_reg_rtx,
24750 info->cr_save_offset + frame_off,
24751 exit_func);
24752 }
24753
24754 /* Set LR here to try to overlap restores below. */
24755 if (restore_lr && restoring_GPRs_inline)
24756 restore_saved_lr (0, exit_func);
24757
24758 /* Load exception handler data registers, if needed. */
24759 if (crtl->calls_eh_return)
24760 {
24761 unsigned int i, regno;
24762
24763 if (TARGET_AIX)
24764 {
24765 rtx reg = gen_rtx_REG (reg_mode, 2);
24766 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24767 frame_off + RS6000_TOC_SAVE_SLOT));
24768 }
24769
24770 for (i = 0; ; ++i)
24771 {
24772 rtx mem;
24773
24774 regno = EH_RETURN_DATA_REGNO (i);
24775 if (regno == INVALID_REGNUM)
24776 break;
24777
24778 /* Note: possible use of r0 here to address SPE regs. */
24779 mem = gen_frame_mem_offset (reg_mode, frame_reg_rtx,
24780 info->ehrd_offset + frame_off
24781 + reg_size * (int) i);
24782
24783 emit_move_insn (gen_rtx_REG (reg_mode, regno), mem);
24784 }
24785 }
24786
24787 /* Restore GPRs. This is done as a PARALLEL if we are using
24788 the load-multiple instructions. */
24789 if (TARGET_SPE_ABI
24790 && info->spe_64bit_regs_used
24791 && info->first_gp_reg_save != 32)
24792 {
24793 /* Determine whether we can address all of the registers that need
24794 to be restored with an offset from frame_reg_rtx that fits in
24795 the small const field for SPE memory instructions. */
24796 int spe_regs_addressable
24797 = (SPE_CONST_OFFSET_OK (info->spe_gp_save_offset + frame_off
24798 + reg_size * (32 - info->first_gp_reg_save - 1))
24799 && restoring_GPRs_inline);
24800
24801 if (!spe_regs_addressable)
24802 {
24803 int ool_adjust = 0;
24804 rtx old_frame_reg_rtx = frame_reg_rtx;
24805 /* Make r11 point to the start of the SPE save area. We worried about
24806 not clobbering it when we were saving registers in the prologue.
24807 There's no need to worry here because the static chain is passed
24808 anew to every function. */
24809
24810 if (!restoring_GPRs_inline)
24811 ool_adjust = 8 * (info->first_gp_reg_save - FIRST_SAVED_GP_REGNO);
24812 frame_reg_rtx = gen_rtx_REG (Pmode, 11);
24813 emit_insn (gen_addsi3 (frame_reg_rtx, old_frame_reg_rtx,
24814 GEN_INT (info->spe_gp_save_offset
24815 + frame_off
24816 - ool_adjust)));
24817 /* Keep the invariant that frame_reg_rtx + frame_off points
24818 at the top of the stack frame. */
24819 frame_off = -info->spe_gp_save_offset + ool_adjust;
24820 }
24821
24822 if (restoring_GPRs_inline)
24823 {
24824 HOST_WIDE_INT spe_offset = info->spe_gp_save_offset + frame_off;
24825
24826 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
24827 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
24828 {
24829 rtx offset, addr, mem, reg;
24830
24831 /* We're doing all this to ensure that the immediate offset
24832 fits into the immediate field of 'evldd'. */
24833 gcc_assert (SPE_CONST_OFFSET_OK (spe_offset + reg_size * i));
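/* (Assuming evldd's encoding here: a 5-bit displacement scaled
by 8, so only offsets 0,8,...,248 are directly addressable,
which is what SPE_CONST_OFFSET_OK verifies.)  */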
24834
24835 offset = GEN_INT (spe_offset + reg_size * i);
24836 addr = gen_rtx_PLUS (Pmode, frame_reg_rtx, offset);
24837 mem = gen_rtx_MEM (V2SImode, addr);
24838 reg = gen_rtx_REG (reg_mode, info->first_gp_reg_save + i);
24839
24840 emit_move_insn (reg, mem);
24841 }
24842 }
24843 else
24844 rs6000_emit_savres_rtx (info, frame_reg_rtx,
24845 info->spe_gp_save_offset + frame_off,
24846 info->lr_save_offset + frame_off,
24847 reg_mode,
24848 SAVRES_GPR | SAVRES_LR);
24849 }
24850 else if (!restoring_GPRs_inline)
24851 {
24852 /* We are jumping to an out-of-line function. */
24853 rtx ptr_reg;
24854 int end_save = info->gp_save_offset + info->gp_size;
24855 bool can_use_exit = end_save == 0;
24856 int sel = SAVRES_GPR | (can_use_exit ? SAVRES_LR : 0);
24857 int ptr_off;
24858
24859 /* Emit stack reset code if we need it. */
24860 ptr_regno = ptr_regno_for_savres (sel);
24861 ptr_reg = gen_rtx_REG (Pmode, ptr_regno);
24862 if (can_use_exit)
24863 rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
24864 else if (end_save + frame_off != 0)
24865 emit_insn (gen_add3_insn (ptr_reg, frame_reg_rtx,
24866 GEN_INT (end_save + frame_off)));
24867 else if (REGNO (frame_reg_rtx) != ptr_regno)
24868 emit_move_insn (ptr_reg, frame_reg_rtx);
24869 if (REGNO (frame_reg_rtx) == ptr_regno)
24870 frame_off = -end_save;
24871
24872 if (can_use_exit && info->cr_save_p)
24873 restore_saved_cr (cr_save_reg, using_mtcr_multiple, true);
24874
24875 ptr_off = -end_save;
24876 rs6000_emit_savres_rtx (info, ptr_reg,
24877 info->gp_save_offset + ptr_off,
24878 info->lr_save_offset + ptr_off,
24879 reg_mode, sel);
24880 }
24881 else if (using_load_multiple)
24882 {
24883 rtvec p;
24884 p = rtvec_alloc (32 - info->first_gp_reg_save);
24885 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
24886 RTVEC_ELT (p, i)
24887 = gen_frame_load (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
24888 frame_reg_rtx,
24889 info->gp_save_offset + frame_off + reg_size * i);
24890 emit_insn (gen_rtx_PARALLEL (VOIDmode, p));
24891 }
24892 else
24893 {
24894 for (i = 0; i < 32 - info->first_gp_reg_save; i++)
24895 if (rs6000_reg_live_or_pic_offset_p (info->first_gp_reg_save + i))
24896 emit_insn (gen_frame_load
24897 (gen_rtx_REG (reg_mode, info->first_gp_reg_save + i),
24898 frame_reg_rtx,
24899 info->gp_save_offset + frame_off + reg_size * i));
24900 }
24901
24902 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
24903 {
24904 /* If the frame pointer was used then we can't delay emitting
24905 a REG_CFA_DEF_CFA note. This must happen on the insn that
24906 restores the frame pointer, r31. We may have already emitted
24907 a REG_CFA_DEF_CFA note, but that's OK; a duplicate is
24908 discarded by dwarf2cfi.c/dwarf2out.c, and in any case would
24909 be harmless if emitted. */
24910 if (frame_pointer_needed)
24911 {
24912 insn = get_last_insn ();
24913 add_reg_note (insn, REG_CFA_DEF_CFA,
24914 plus_constant (Pmode, frame_reg_rtx, frame_off));
24915 RTX_FRAME_RELATED_P (insn) = 1;
24916 }
24917
24918 /* Set up cfa_restores. We always need these when
24919 shrink-wrapping. If not shrink-wrapping then we only need
24920 the cfa_restore when the stack location is no longer valid.
24921 The cfa_restores must be emitted on or before the insn that
24922 invalidates the stack, and of course must not be emitted
24923 before the insn that actually does the restore. The latter
24924 is why it is a bad idea to emit the cfa_restores as a group
24925 on the last instruction here that actually does a restore:
24926 That insn may be reordered with respect to others doing
24927 restores. */
24928 if (flag_shrink_wrap
24929 && !restoring_GPRs_inline
24930 && info->first_fp_reg_save == 64)
24931 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
24932
24933 for (i = info->first_gp_reg_save; i < 32; i++)
24934 if (!restoring_GPRs_inline
24935 || using_load_multiple
24936 || rs6000_reg_live_or_pic_offset_p (i))
24937 {
24938 rtx reg = gen_rtx_REG (reg_mode, i);
24939
24940 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24941 }
24942 }
24943
24944 if (!restoring_GPRs_inline
24945 && info->first_fp_reg_save == 64)
24946 {
24947 /* We are jumping to an out-of-line function. */
24948 if (cfa_restores)
24949 emit_cfa_restores (cfa_restores);
24950 return;
24951 }
24952
24953 if (restore_lr && !restoring_GPRs_inline)
24954 {
24955 load_lr_save (0, frame_reg_rtx, info->lr_save_offset + frame_off);
24956 restore_saved_lr (0, exit_func);
24957 }
24958
24959 /* Restore fpr's if we need to do it without calling a function. */
24960 if (restoring_FPRs_inline)
24961 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
24962 if (save_reg_p (info->first_fp_reg_save + i))
24963 {
24964 rtx reg = gen_rtx_REG ((TARGET_HARD_FLOAT && TARGET_DOUBLE_FLOAT
24965 ? DFmode : SFmode),
24966 info->first_fp_reg_save + i);
24967 emit_insn (gen_frame_load (reg, frame_reg_rtx,
24968 info->fp_save_offset + frame_off + 8 * i));
24969 if (DEFAULT_ABI == ABI_V4 || flag_shrink_wrap)
24970 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg, cfa_restores);
24971 }
24972
24973 /* If we saved cr, restore it here. Just those that were used. */
24974 if (info->cr_save_p)
24975 restore_saved_cr (cr_save_reg, using_mtcr_multiple, exit_func);
24976
24977 /* If this is V.4, unwind the stack pointer after all of the loads
24978 have been done, or set up r11 if we are restoring fp out of line. */
24979 ptr_regno = 1;
24980 if (!restoring_FPRs_inline)
24981 {
24982 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
24983 int sel = SAVRES_FPR | (lr ? SAVRES_LR : 0);
24984 ptr_regno = ptr_regno_for_savres (sel);
24985 }
24986
24987 insn = rs6000_emit_stack_reset (info, frame_reg_rtx, frame_off, ptr_regno);
24988 if (REGNO (frame_reg_rtx) == ptr_regno)
24989 frame_off = 0;
24990
24991 if (insn && restoring_FPRs_inline)
24992 {
24993 if (cfa_restores)
24994 {
24995 REG_NOTES (insn) = cfa_restores;
24996 cfa_restores = NULL_RTX;
24997 }
24998 add_reg_note (insn, REG_CFA_DEF_CFA, sp_reg_rtx);
24999 RTX_FRAME_RELATED_P (insn) = 1;
25000 }
25001
25002 if (crtl->calls_eh_return)
25003 {
25004 rtx sa = EH_RETURN_STACKADJ_RTX;
25005 emit_insn (gen_add3_insn (sp_reg_rtx, sp_reg_rtx, sa));
25006 }
25007
25008 if (!sibcall)
25009 {
25010 rtvec p;
25011 bool lr = (strategy & REST_NOINLINE_FPRS_DOESNT_RESTORE_LR) == 0;
25012 if (! restoring_FPRs_inline)
25013 {
25014 p = rtvec_alloc (4 + 64 - info->first_fp_reg_save);
25015 RTVEC_ELT (p, 0) = ret_rtx;
25016 }
25017 else
25018 {
25019 if (cfa_restores)
25020 {
25021 /* We can't hang the cfa_restores off a simple return,
25022 since the shrink-wrap code sometimes uses an existing
25023 return. This means there might be a path from
25024 pre-prologue code to this return, and dwarf2cfi code
25025 wants the eh_frame unwinder state to be the same on
25026 all paths to any point. So we need to emit the
25027 cfa_restores before the return. For -m64 we really
25028 don't need epilogue cfa_restores at all, except for
25029 this irritating dwarf2cfi with shrink-wrap
25030 requirement; the stack red-zone means eh_frame info
25031 from the prologue telling the unwinder to restore
25032 from the stack is perfectly good right to the end of
25033 the function. */
25034 emit_insn (gen_blockage ());
25035 emit_cfa_restores (cfa_restores);
25036 cfa_restores = NULL_RTX;
25037 }
25038 p = rtvec_alloc (2);
25039 RTVEC_ELT (p, 0) = simple_return_rtx;
25040 }
25041
25042 RTVEC_ELT (p, 1) = ((restoring_FPRs_inline || !lr)
25043 ? gen_rtx_USE (VOIDmode,
25044 gen_rtx_REG (Pmode, LR_REGNO))
25045 : gen_rtx_CLOBBER (VOIDmode,
25046 gen_rtx_REG (Pmode, LR_REGNO)));
25047
25048 /* If we have to restore more than two FP registers, branch to the
25049 restore function. It will return to our caller. */
25050 if (! restoring_FPRs_inline)
25051 {
25052 int i;
25053 int reg;
25054 rtx sym;
25055
25056 if (flag_shrink_wrap)
25057 cfa_restores = add_crlr_cfa_restore (info, cfa_restores);
25058
25059 sym = rs6000_savres_routine_sym (info,
25060 SAVRES_FPR | (lr ? SAVRES_LR : 0));
25061 RTVEC_ELT (p, 2) = gen_rtx_USE (VOIDmode, sym);
25062 reg = (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2) ? 1 : 11;
25063 RTVEC_ELT (p, 3) = gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, reg));
25064
25065 for (i = 0; i < 64 - info->first_fp_reg_save; i++)
25066 {
25067 rtx reg = gen_rtx_REG (DFmode, info->first_fp_reg_save + i);
25068
25069 RTVEC_ELT (p, i + 4)
25070 = gen_frame_load (reg, sp_reg_rtx, info->fp_save_offset + 8 * i);
25071 if (flag_shrink_wrap)
25072 cfa_restores = alloc_reg_note (REG_CFA_RESTORE, reg,
25073 cfa_restores);
25074 }
25075 }
25076
25077 emit_jump_insn (gen_rtx_PARALLEL (VOIDmode, p));
25078 }
25079
25080 if (cfa_restores)
25081 {
25082 if (sibcall)
25083 /* Ensure the cfa_restores are hung off an insn that won't
25084 be reordered above other restores. */
25085 emit_insn (gen_blockage ());
25086
25087 emit_cfa_restores (cfa_restores);
25088 }
25089 }
25090
25091 /* Write function epilogue. */
25092
25093 static void
25094 rs6000_output_function_epilogue (FILE *file,
25095 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
25096 {
25097 #if TARGET_MACHO
25098 macho_branch_islands ();
25099 /* Mach-O doesn't support labels at the end of objects, so if
25100 it looks like we might want one, insert a NOP. */
25101 {
25102 rtx_insn *insn = get_last_insn ();
25103 rtx_insn *deleted_debug_label = NULL;
25104 while (insn
25105 && NOTE_P (insn)
25106 && NOTE_KIND (insn) != NOTE_INSN_DELETED_LABEL)
25107 {
25108 /* Don't insert a nop for NOTE_INSN_DELETED_DEBUG_LABEL
25109 notes; instead set their CODE_LABEL_NUMBER to -1.
25110 Otherwise there would be code generation differences
25111 between -g and -g0. */
25112 if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
25113 deleted_debug_label = insn;
25114 insn = PREV_INSN (insn);
25115 }
25116 if (insn
25117 && (LABEL_P (insn)
25118 || (NOTE_P (insn)
25119 && NOTE_KIND (insn) == NOTE_INSN_DELETED_LABEL)))
25120 fputs ("\tnop\n", file);
25121 else if (deleted_debug_label)
25122 for (insn = deleted_debug_label; insn; insn = NEXT_INSN (insn))
25123 if (NOTE_KIND (insn) == NOTE_INSN_DELETED_DEBUG_LABEL)
25124 CODE_LABEL_NUMBER (insn) = -1;
25125 }
25126 #endif
25127
25128 /* Output a traceback table here. See /usr/include/sys/debug.h for info
25129 on its format.
25130
25131 We don't output a traceback table if -finhibit-size-directive was
25132 used. The documentation for -finhibit-size-directive reads
25133 ``don't output a @code{.size} assembler directive, or anything
25134 else that would cause trouble if the function is split in the
25135 middle, and the two halves are placed at locations far apart in
25136 memory.'' The traceback table has this property, since it
25137 includes the offset from the start of the function to the
25138 traceback table itself.
25139
25140 System V.4 PowerPC (and the embedded ABI derived from it) uses a
25141 different traceback table. */
25142 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
25143 && ! flag_inhibit_size_directive
25144 && rs6000_traceback != traceback_none && !cfun->is_thunk)
25145 {
25146 const char *fname = NULL;
25147 const char *language_string = lang_hooks.name;
25148 int fixed_parms = 0, float_parms = 0, parm_info = 0;
25149 int i;
25150 int optional_tbtab;
25151 rs6000_stack_t *info = rs6000_stack_info ();
25152
25153 if (rs6000_traceback == traceback_full)
25154 optional_tbtab = 1;
25155 else if (rs6000_traceback == traceback_part)
25156 optional_tbtab = 0;
25157 else
25158 optional_tbtab = !optimize_size && !TARGET_ELF;
25159
25160 if (optional_tbtab)
25161 {
25162 fname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
25163 while (*fname == '.') /* V.4 encodes . in the name */
25164 fname++;
25165
25166 /* Need label immediately before tbtab, so we can compute
25167 its offset from the function start. */
25168 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
25169 ASM_OUTPUT_LABEL (file, fname);
25170 }
25171
25172 /* The .tbtab pseudo-op can only be used for the first eight
25173 expressions, since it can't handle the possibly variable
25174 length fields that follow. However, if you omit the optional
25175 fields, the assembler outputs zeros for all optional fields
25176 anyway, giving each variable-length field its minimum length
25177 (as defined in sys/debug.h). Thus we cannot use the .tbtab
25178 pseudo-op at all. */
25179
25180 /* An all-zero word flags the start of the tbtab, for debuggers
25181 that have to find it by searching forward from the entry
25182 point or from the current pc. */
25183 fputs ("\t.long 0\n", file);
25184
25185 /* Tbtab format type. Use format type 0. */
25186 fputs ("\t.byte 0,", file);
25187
25188 /* Language type. Unfortunately, there does not seem to be any
25189 official way to discover the language being compiled, so we
25190 use language_string.
25191 C is 0. Fortran is 1. Pascal is 2. Ada is 3. C++ is 9.
25192 Java is 13. Objective-C is 14. Objective-C++ isn't assigned
25193 a number, so for now use 9. LTO and Go aren't assigned numbers
25194 either, so for now use 0. */
25195 if (! strcmp (language_string, "GNU C")
25196 || ! strcmp (language_string, "GNU GIMPLE")
25197 || ! strcmp (language_string, "GNU Go"))
25198 i = 0;
25199 else if (! strcmp (language_string, "GNU F77")
25200 || ! strcmp (language_string, "GNU Fortran"))
25201 i = 1;
25202 else if (! strcmp (language_string, "GNU Pascal"))
25203 i = 2;
25204 else if (! strcmp (language_string, "GNU Ada"))
25205 i = 3;
25206 else if (! strcmp (language_string, "GNU C++")
25207 || ! strcmp (language_string, "GNU Objective-C++"))
25208 i = 9;
25209 else if (! strcmp (language_string, "GNU Java"))
25210 i = 13;
25211 else if (! strcmp (language_string, "GNU Objective-C"))
25212 i = 14;
25213 else
25214 gcc_unreachable ();
25215 fprintf (file, "%d,", i);
25216
25217 /* 8 single bit fields: global linkage (not set for C extern linkage,
25218 apparently a PL/I convention?), out-of-line epilogue/prologue, offset
25219 from start of procedure stored in tbtab, internal function, function
25220 has controlled storage, function has no toc, function uses fp,
25221 function logs/aborts fp operations. */
25222 /* Assume that fp operations are used if any fp reg must be saved. */
25223 fprintf (file, "%d,",
25224 (optional_tbtab << 5) | ((info->first_fp_reg_save != 64) << 1));
25225
25226 /* 6 bitfields: function is interrupt handler, name present in
25227 proc table, function calls alloca, on condition directives
25228 (controls stack walks, 3 bits), saves condition reg, saves
25229 link reg. */
25230 /* The `function calls alloca' bit seems to be set whenever reg 31 is
25231 set up as a frame pointer, even when there is no alloca call. */
25232 fprintf (file, "%d,",
25233 ((optional_tbtab << 6)
25234 | ((optional_tbtab & frame_pointer_needed) << 5)
25235 | (info->cr_save_p << 1)
25236 | (info->lr_save_p)));
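/* Worked example (hypothetical function): with optional_tbtab,
frame_pointer_needed, cr_save_p and lr_save_p all 1, this prints
(1<<6) | (1<<5) | (1<<1) | 1 = 99.  */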
25237
25238 /* 3 bitfields: saves backchain, fixup code, number of fpr saved
25239 (6 bits). */
25240 fprintf (file, "%d,",
25241 (info->push_p << 7) | (64 - info->first_fp_reg_save));
25242
25243 /* 2 bitfields: spare bits (2 bits), number of gpr saved (6 bits). */
25244 fprintf (file, "%d,", (32 - first_reg_to_save ()));
25245
25246 if (optional_tbtab)
25247 {
25248 /* Compute the parameter info from the function decl argument
25249 list. */
25250 tree decl;
25251 int next_parm_info_bit = 31;
25252
25253 for (decl = DECL_ARGUMENTS (current_function_decl);
25254 decl; decl = DECL_CHAIN (decl))
25255 {
25256 rtx parameter = DECL_INCOMING_RTL (decl);
25257 enum machine_mode mode = GET_MODE (parameter);
25258
25259 if (GET_CODE (parameter) == REG)
25260 {
25261 if (SCALAR_FLOAT_MODE_P (mode))
25262 {
25263 int bits;
25264
25265 float_parms++;
25266
25267 switch (mode)
25268 {
25269 case SFmode:
25270 case SDmode:
25271 bits = 0x2;
25272 break;
25273
25274 case DFmode:
25275 case DDmode:
25276 case TFmode:
25277 case TDmode:
25278 bits = 0x3;
25279 break;
25280
25281 default:
25282 gcc_unreachable ();
25283 }
25284
25285 /* If only one bit will fit, don't or in this entry. */
25286 if (next_parm_info_bit > 0)
25287 parm_info |= (bits << (next_parm_info_bit - 1));
25288 next_parm_info_bit -= 2;
25289 }
25290 else
25291 {
25292 fixed_parms += ((GET_MODE_SIZE (mode)
25293 + (UNITS_PER_WORD - 1))
25294 / UNITS_PER_WORD);
25295 next_parm_info_bit -= 1;
25296 }
25297 }
25298 }
25299 }
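/* Worked example (hypothetical signature "double f (int, double)"):
the int leaves a 0 at bit 31 and bumps fixed_parms to 1; the double
stores binary 11 at bits 30-29 and bumps float_parms to 1, giving
parm_info = 0x60000000.  */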
25300
25301 /* Number of fixed point parameters. */
25302 /* This is actually the number of words of fixed point parameters; thus
25303 an 8-byte struct counts as 2, and the maximum value is 8. */
25304 fprintf (file, "%d,", fixed_parms);
25305
25306 /* 2 bitfields: number of floating point parameters (7 bits), parameters
25307 all on stack. */
25308 /* This is actually the number of fp registers that hold parameters;
25309 and thus the maximum value is 13. */
25310 /* Set parameters on stack bit if parameters are not in their original
25311 registers, regardless of whether they are on the stack? Xlc
25312 seems to set the bit when not optimizing. */
25313 fprintf (file, "%d\n", ((float_parms << 1) | (! optimize)));
25314
25315 if (! optional_tbtab)
25316 return;
25317
25318 /* Optional fields follow. Some are variable length. */
25319
25320 /* Parameter types, left adjusted bit fields: 0 fixed, 10 single float,
25321 11 double float. */
25322 /* There is an entry for each parameter in a register, in the order that
25323 they occur in the parameter list. Any intervening arguments on the
25324 stack are ignored. If the list overflows a long (max possible length
25325 34 bits) then completely leave off all elements that don't fit. */
25326 /* Only emit this long if there was at least one parameter. */
25327 if (fixed_parms || float_parms)
25328 fprintf (file, "\t.long %d\n", parm_info);
25329
25330 /* Offset from start of code to tb table. */
25331 fputs ("\t.long ", file);
25332 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LT");
25333 RS6000_OUTPUT_BASENAME (file, fname);
25334 putc ('-', file);
25335 rs6000_output_function_entry (file, fname);
25336 putc ('\n', file);
25337
25338 /* Interrupt handler mask. */
25339 /* Omit this long, since we never set the interrupt handler bit
25340 above. */
25341
25342 /* Number of CTL (controlled storage) anchors. */
25343 /* Omit this long, since the has_ctl bit is never set above. */
25344
25345 /* Displacement into stack of each CTL anchor. */
25346 /* Omit this list of longs, because there are no CTL anchors. */
25347
25348 /* Length of function name. */
25349 if (*fname == '*')
25350 ++fname;
25351 fprintf (file, "\t.short %d\n", (int) strlen (fname));
25352
25353 /* Function name. */
25354 assemble_string (fname, strlen (fname));
25355
25356 /* Register for alloca automatic storage; this is always reg 31.
25357 Only emit this if the alloca bit was set above. */
25358 if (frame_pointer_needed)
25359 fputs ("\t.byte 31\n", file);
25360
25361 fputs ("\t.align 2\n", file);
25362 }
25363 }
25364 \f
25365 /* A C compound statement that outputs the assembler code for a thunk
25366 function, used to implement C++ virtual function calls with
25367 multiple inheritance. The thunk acts as a wrapper around a virtual
25368 function, adjusting the implicit object parameter before handing
25369 control off to the real function.
25370
25371 First, emit code to add the integer DELTA to the location that
25372 contains the incoming first argument. Assume that this argument
25373 contains a pointer, and is the one used to pass the `this' pointer
25374 in C++. This is the incoming argument *before* the function
25375 prologue, e.g. `%o0' on a sparc. The addition must preserve the
25376 values of all other incoming arguments.
25377
25378 After the addition, emit code to jump to FUNCTION, which is a
25379 `FUNCTION_DECL'. This is a direct pure jump, not a call, and does
25380 not touch the return address. Hence returning from FUNCTION will
25381 return to whoever called the current `thunk'.
25382
25383 The effect must be as if FUNCTION had been called directly with the
25384 adjusted first argument. This macro is responsible for emitting
25385 all of the code for a thunk function; output_function_prologue()
25386 and output_function_epilogue() are not invoked.
25387
25388 The THUNK_FNDECL is redundant. (DELTA and FUNCTION have already
25389 been extracted from it.) It might possibly be useful on some
25390 targets, but probably not.
25391
25392 If you do not define this macro, the target-independent code in the
25393 C++ frontend will generate a less efficient heavyweight thunk that
25394 calls FUNCTION instead of jumping to it. The generic approach does
25395 not support varargs. */
25396
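/* Illustrative output (assuming DELTA fits in 16 bits and there is
no VCALL_OFFSET): the emitted thunk reduces to

	addi 3,3,DELTA
	b FUNCTION

i.e. adjust the incoming "this" pointer in r3, then tail-jump.  */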
25397 static void
25398 rs6000_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
25399 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
25400 tree function)
25401 {
25402 rtx this_rtx, funexp;
25403 rtx_insn *insn;
25404
25405 reload_completed = 1;
25406 epilogue_completed = 1;
25407
25408 /* Mark the end of the (empty) prologue. */
25409 emit_note (NOTE_INSN_PROLOGUE_END);
25410
25411 /* Find the "this" pointer. If the function returns a structure,
25412 the structure return pointer is in r3. */
25413 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
25414 this_rtx = gen_rtx_REG (Pmode, 4);
25415 else
25416 this_rtx = gen_rtx_REG (Pmode, 3);
25417
25418 /* Apply the constant offset, if required. */
25419 if (delta)
25420 emit_insn (gen_add3_insn (this_rtx, this_rtx, GEN_INT (delta)));
25421
25422 /* Apply the offset from the vtable, if required. */
25423 if (vcall_offset)
25424 {
25425 rtx vcall_offset_rtx = GEN_INT (vcall_offset);
25426 rtx tmp = gen_rtx_REG (Pmode, 12);
25427
25428 emit_move_insn (tmp, gen_rtx_MEM (Pmode, this_rtx));
25429 if (((unsigned HOST_WIDE_INT) vcall_offset) + 0x8000 >= 0x10000)
25430 {
25431 emit_insn (gen_add3_insn (tmp, tmp, vcall_offset_rtx));
25432 emit_move_insn (tmp, gen_rtx_MEM (Pmode, tmp));
25433 }
25434 else
25435 {
25436 rtx loc = gen_rtx_PLUS (Pmode, tmp, vcall_offset_rtx);
25437
25438 emit_move_insn (tmp, gen_rtx_MEM (Pmode, loc));
25439 }
25440 emit_insn (gen_add3_insn (this_rtx, this_rtx, tmp));
25441 }
25442
25443 /* Generate a tail call to the target function. */
25444 if (!TREE_USED (function))
25445 {
25446 assemble_external (function);
25447 TREE_USED (function) = 1;
25448 }
25449 funexp = XEXP (DECL_RTL (function), 0);
25450 funexp = gen_rtx_MEM (FUNCTION_MODE, funexp);
25451
25452 #if TARGET_MACHO
25453 if (MACHOPIC_INDIRECT)
25454 funexp = machopic_indirect_call_target (funexp);
25455 #endif
25456
25457 /* gen_sibcall expects reload to convert scratch pseudo to LR so we must
25458 generate sibcall RTL explicitly. */
25459 insn = emit_call_insn (
25460 gen_rtx_PARALLEL (VOIDmode,
25461 gen_rtvec (4,
25462 gen_rtx_CALL (VOIDmode,
25463 funexp, const0_rtx),
25464 gen_rtx_USE (VOIDmode, const0_rtx),
25465 gen_rtx_USE (VOIDmode,
25466 gen_rtx_REG (SImode,
25467 LR_REGNO)),
25468 simple_return_rtx)));
25469 SIBLING_CALL_P (insn) = 1;
25470 emit_barrier ();
25471
25472 /* Ensure we have a global entry point for the thunk. ??? We could
25473 avoid that if the target routine doesn't need a global entry point,
25474 but we do not know whether this is the case at this point. */
25475 if (DEFAULT_ABI == ABI_ELFv2)
25476 cfun->machine->r2_setup_needed = true;
25477
25478 /* Run just enough of rest_of_compilation to get the insns emitted.
25479 There's not really enough bulk here to make other passes such as
25480 instruction scheduling worth while. Note that use_thunk calls
25481 assemble_start_function and assemble_end_function. */
25482 insn = get_insns ();
25483 shorten_branches (insn);
25484 final_start_function (insn, file, 1);
25485 final (insn, file, 1);
25486 final_end_function ();
25487
25488 reload_completed = 0;
25489 epilogue_completed = 0;
25490 }
25491 \f
25492 /* A quick summary of the various types of 'constant-pool tables'
25493 under PowerPC:
25494
25495 Target Flags Name One table per
25496 AIX (none) AIX TOC object file
25497 AIX -mfull-toc AIX TOC object file
25498 AIX -mminimal-toc AIX minimal TOC translation unit
25499 SVR4/EABI (none) SVR4 SDATA object file
25500 SVR4/EABI -fpic SVR4 pic object file
25501 SVR4/EABI -fPIC SVR4 PIC translation unit
25502 SVR4/EABI -mrelocatable EABI TOC function
25503 SVR4/EABI -maix AIX TOC object file
25504 SVR4/EABI -maix -mminimal-toc
25505 AIX minimal TOC translation unit
25506
25507 Name Reg. Set by entries contains:
25508 made by addrs? fp? sum?
25509
25510 AIX TOC 2 crt0 as Y option option
25511 AIX minimal TOC 30 prolog gcc Y Y option
25512 SVR4 SDATA 13 crt0 gcc N Y N
25513 SVR4 pic 30 prolog ld Y not yet N
25514 SVR4 PIC 30 prolog gcc Y option option
25515 EABI TOC 30 prolog gcc Y option option
25516
25517 */
25518
25519 /* Hash functions for the hash table. */
25520
25521 static unsigned
25522 rs6000_hash_constant (rtx k)
25523 {
25524 enum rtx_code code = GET_CODE (k);
25525 enum machine_mode mode = GET_MODE (k);
25526 unsigned result = (code << 3) ^ mode;
25527 const char *format;
25528 int flen, fidx;
25529
25530 format = GET_RTX_FORMAT (code);
25531 flen = strlen (format);
25532 fidx = 0;
25533
25534 switch (code)
25535 {
25536 case LABEL_REF:
25537 return result * 1231 + (unsigned) INSN_UID (XEXP (k, 0));
25538
25539 case CONST_WIDE_INT:
25540 {
25541 int i;
25542 flen = CONST_WIDE_INT_NUNITS (k);
25543 for (i = 0; i < flen; i++)
25544 result = result * 613 + CONST_WIDE_INT_ELT (k, i);
25545 return result;
25546 }
25547
25548 case CONST_DOUBLE:
25549 if (mode != VOIDmode)
25550 return real_hash (CONST_DOUBLE_REAL_VALUE (k)) * result;
25551 flen = 2;
25552 break;
25553
25554 case CODE_LABEL:
25555 fidx = 3;
25556 break;
25557
25558 default:
25559 break;
25560 }
25561
25562 for (; fidx < flen; fidx++)
25563 switch (format[fidx])
25564 {
25565 case 's':
25566 {
25567 unsigned i, len;
25568 const char *str = XSTR (k, fidx);
25569 len = strlen (str);
25570 result = result * 613 + len;
25571 for (i = 0; i < len; i++)
25572 result = result * 613 + (unsigned) str[i];
25573 break;
25574 }
25575 case 'u':
25576 case 'e':
25577 result = result * 1231 + rs6000_hash_constant (XEXP (k, fidx));
25578 break;
25579 case 'i':
25580 case 'n':
25581 result = result * 613 + (unsigned) XINT (k, fidx);
25582 break;
25583 case 'w':
25584 if (sizeof (unsigned) >= sizeof (HOST_WIDE_INT))
25585 result = result * 613 + (unsigned) XWINT (k, fidx);
25586 else
25587 {
25588 size_t i;
25589 for (i = 0; i < sizeof (HOST_WIDE_INT) / sizeof (unsigned); i++)
25590 result = result * 613 + (unsigned) (XWINT (k, fidx)
25591 >> CHAR_BIT * i);
25592 }
25593 break;
25594 case '0':
25595 break;
25596 default:
25597 gcc_unreachable ();
25598 }
25599
25600 return result;
25601 }
25602
25603 static unsigned
25604 toc_hash_function (const void *hash_entry)
25605 {
25606 const struct toc_hash_struct *thc =
25607 (const struct toc_hash_struct *) hash_entry;
25608 return rs6000_hash_constant (thc->key) ^ thc->key_mode;
25609 }
25610
25611 /* Compare H1 and H2 for equivalence. */
25612
25613 static int
25614 toc_hash_eq (const void *h1, const void *h2)
25615 {
25616 rtx r1 = ((const struct toc_hash_struct *) h1)->key;
25617 rtx r2 = ((const struct toc_hash_struct *) h2)->key;
25618
25619 if (((const struct toc_hash_struct *) h1)->key_mode
25620 != ((const struct toc_hash_struct *) h2)->key_mode)
25621 return 0;
25622
25623 return rtx_equal_p (r1, r2);
25624 }
25625
25626 /* These are the names given by the C++ front-end to vtables, and
25627 vtable-like objects. Ideally, this logic should not be here;
25628 instead, there should be some programmatic way of inquiring as
25629 to whether or not an object is a vtable. */
25630
25631 #define VTABLE_NAME_P(NAME) \
25632 (strncmp ("_vt.", (NAME), strlen ("_vt.")) == 0 \
25633 || strncmp ("_ZTV", (NAME), strlen ("_ZTV")) == 0 \
25634 || strncmp ("_ZTT", (NAME), strlen ("_ZTT")) == 0 \
25635 || strncmp ("_ZTI", (NAME), strlen ("_ZTI")) == 0 \
25636 || strncmp ("_ZTC", (NAME), strlen ("_ZTC")) == 0)
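/* E.g. "_ZTV4Base" (vtable for Base) and "_ZTI4Base" (typeinfo for
Base, Itanium C++ mangling) match; a plain function name such as
"_Z3foov" does not.  */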
25637
25638 #ifdef NO_DOLLAR_IN_LABEL
25639 /* Return a GGC-allocated character string translating dollar signs in
25640 input NAME to underscores. Used by XCOFF ASM_OUTPUT_LABELREF. */
25641
25642 const char *
25643 rs6000_xcoff_strip_dollar (const char *name)
25644 {
25645 char *strip, *p;
25646 const char *q;
25647 size_t len;
25648
25649 q = (const char *) strchr (name, '$');
25650
25651 if (q == 0 || q == name)
25652 return name;
25653
25654 len = strlen (name);
25655 strip = XALLOCAVEC (char, len + 1);
25656 strcpy (strip, name);
25657 p = strip + (q - name);
25658 while (p)
25659 {
25660 *p = '_';
25661 p = strchr (p + 1, '$');
25662 }
25663
25664 return ggc_alloc_string (strip, len);
25665 }
25666 #endif
25667
25668 void
25669 rs6000_output_symbol_ref (FILE *file, rtx x)
25670 {
25671 /* Currently C++ toc references to vtables can be emitted before it
25672 is decided whether the vtable is public or private. If this is
25673 the case, then the linker will eventually complain that there is
25674 a reference to an unknown section. Thus, for vtables only,
25675 we emit the TOC reference to reference the symbol and not the
25676 section. */
25677 const char *name = XSTR (x, 0);
25678
25679 if (VTABLE_NAME_P (name))
25680 {
25681 RS6000_OUTPUT_BASENAME (file, name);
25682 }
25683 else
25684 assemble_name (file, name);
25685 }
25686
25687 /* Output a TOC entry. We derive the entry name from what is being
25688 written. */
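/* For example (an illustrative sketch, not verified output): on
   64-bit AIX with a full TOC, the DFmode constant 1.0 (bit pattern
   0x3ff0000000000000) produces roughly

	.tc FD_3ff00000_0[TC],0x3ff0000000000000

   where the FD_ label encodes the constant's two 32-bit words. */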
25689
25690 void
25691 output_toc (FILE *file, rtx x, int labelno, enum machine_mode mode)
25692 {
25693 char buf[256];
25694 const char *name = buf;
25695 rtx base = x;
25696 HOST_WIDE_INT offset = 0;
25697
25698 gcc_assert (!TARGET_NO_TOC);
25699
25700 /* When the linker won't eliminate them, don't output duplicate
25701 TOC entries (this happens on AIX if there is any kind of TOC,
25702 and on SVR4 under -fPIC or -mrelocatable). Don't do this for
25703 CODE_LABELs. */
25704 if (TARGET_TOC && GET_CODE (x) != LABEL_REF)
25705 {
25706 struct toc_hash_struct *h;
25707 void * * found;
25708
25709 /* Create toc_hash_table. This can't be done at TARGET_OPTION_OVERRIDE
25710 time because GGC is not initialized at that point. */
25711 if (toc_hash_table == NULL)
25712 toc_hash_table = htab_create_ggc (1021, toc_hash_function,
25713 toc_hash_eq, NULL);
25714
25715 h = ggc_alloc<toc_hash_struct> ();
25716 h->key = x;
25717 h->key_mode = mode;
25718 h->labelno = labelno;
25719
25720 found = htab_find_slot (toc_hash_table, h, INSERT);
25721 if (*found == NULL)
25722 *found = h;
25723 else /* This is indeed a duplicate.
25724 Set this label equal to that label. */
25725 {
25726 fputs ("\t.set ", file);
25727 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
25728 fprintf (file, "%d,", labelno);
25729 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LC");
25730 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
25731 found)->labelno));
25732
25733 #ifdef HAVE_AS_TLS
25734 if (TARGET_XCOFF && GET_CODE (x) == SYMBOL_REF
25735 && (SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_GLOBAL_DYNAMIC
25736 || SYMBOL_REF_TLS_MODEL (x) == TLS_MODEL_LOCAL_DYNAMIC))
25737 {
25738 fputs ("\t.set ", file);
25739 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
25740 fprintf (file, "%d,", labelno);
25741 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (file, "LCM");
25742 fprintf (file, "%d\n", ((*(const struct toc_hash_struct **)
25743 found)->labelno));
25744 }
25745 #endif
25746 return;
25747 }
25748 }
25749
25750 /* If we're going to put a double constant in the TOC, make sure it's
25751 aligned properly when strict alignment is on. */
25752 if ((CONST_DOUBLE_P (x) || CONST_WIDE_INT_P (x))
25753 && STRICT_ALIGNMENT
25754 && GET_MODE_BITSIZE (mode) >= 64
25755 && ! (TARGET_NO_FP_IN_TOC && ! TARGET_MINIMAL_TOC))
25756 ASM_OUTPUT_ALIGN (file, 3);
25758
25759 (*targetm.asm_out.internal_label) (file, "LC", labelno);
25760
25761 /* Handle FP constants specially. Note that if we have a minimal
25762 TOC, things we put here aren't actually in the TOC, so we can allow
25763 FP constants. */
25764 if (GET_CODE (x) == CONST_DOUBLE
25765 && (GET_MODE (x) == TFmode || GET_MODE (x) == TDmode))
25766 {
25767 REAL_VALUE_TYPE rv;
25768 long k[4];
25769
25770 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
25771 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
25772 REAL_VALUE_TO_TARGET_DECIMAL128 (rv, k);
25773 else
25774 REAL_VALUE_TO_TARGET_LONG_DOUBLE (rv, k);
25775
25776 if (TARGET_64BIT)
25777 {
25778 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25779 fputs (DOUBLE_INT_ASM_OP, file);
25780 else
25781 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
25782 k[0] & 0xffffffff, k[1] & 0xffffffff,
25783 k[2] & 0xffffffff, k[3] & 0xffffffff);
25784 fprintf (file, "0x%lx%08lx,0x%lx%08lx\n",
25785 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
25786 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff,
25787 k[WORDS_BIG_ENDIAN ? 2 : 3] & 0xffffffff,
25788 k[WORDS_BIG_ENDIAN ? 3 : 2] & 0xffffffff);
25789 return;
25790 }
25791 else
25792 {
25793 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25794 fputs ("\t.long ", file);
25795 else
25796 fprintf (file, "\t.tc FT_%lx_%lx_%lx_%lx[TC],",
25797 k[0] & 0xffffffff, k[1] & 0xffffffff,
25798 k[2] & 0xffffffff, k[3] & 0xffffffff);
25799 fprintf (file, "0x%lx,0x%lx,0x%lx,0x%lx\n",
25800 k[0] & 0xffffffff, k[1] & 0xffffffff,
25801 k[2] & 0xffffffff, k[3] & 0xffffffff);
25802 return;
25803 }
25804 }
25805 else if (GET_CODE (x) == CONST_DOUBLE
25806 && (GET_MODE (x) == DFmode || GET_MODE (x) == DDmode))
25807 {
25808 REAL_VALUE_TYPE rv;
25809 long k[2];
25810
25811 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
25812
25813 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
25814 REAL_VALUE_TO_TARGET_DECIMAL64 (rv, k);
25815 else
25816 REAL_VALUE_TO_TARGET_DOUBLE (rv, k);
25817
25818 if (TARGET_64BIT)
25819 {
25820 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25821 fputs (DOUBLE_INT_ASM_OP, file);
25822 else
25823 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
25824 k[0] & 0xffffffff, k[1] & 0xffffffff);
25825 fprintf (file, "0x%lx%08lx\n",
25826 k[WORDS_BIG_ENDIAN ? 0 : 1] & 0xffffffff,
25827 k[WORDS_BIG_ENDIAN ? 1 : 0] & 0xffffffff);
25828 return;
25829 }
25830 else
25831 {
25832 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25833 fputs ("\t.long ", file);
25834 else
25835 fprintf (file, "\t.tc FD_%lx_%lx[TC],",
25836 k[0] & 0xffffffff, k[1] & 0xffffffff);
25837 fprintf (file, "0x%lx,0x%lx\n",
25838 k[0] & 0xffffffff, k[1] & 0xffffffff);
25839 return;
25840 }
25841 }
25842 else if (GET_CODE (x) == CONST_DOUBLE
25843 && (GET_MODE (x) == SFmode || GET_MODE (x) == SDmode))
25844 {
25845 REAL_VALUE_TYPE rv;
25846 long l;
25847
25848 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
25849 if (DECIMAL_FLOAT_MODE_P (GET_MODE (x)))
25850 REAL_VALUE_TO_TARGET_DECIMAL32 (rv, l);
25851 else
25852 REAL_VALUE_TO_TARGET_SINGLE (rv, l);
25853
25854 if (TARGET_64BIT)
25855 {
25856 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25857 fputs (DOUBLE_INT_ASM_OP, file);
25858 else
25859 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
25860 if (WORDS_BIG_ENDIAN)
25861 fprintf (file, "0x%lx00000000\n", l & 0xffffffff);
25862 else
25863 fprintf (file, "0x%lx\n", l & 0xffffffff);
25864 return;
25865 }
25866 else
25867 {
25868 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25869 fputs ("\t.long ", file);
25870 else
25871 fprintf (file, "\t.tc FS_%lx[TC],", l & 0xffffffff);
25872 fprintf (file, "0x%lx\n", l & 0xffffffff);
25873 return;
25874 }
25875 }
25876 else if (GET_MODE (x) == VOIDmode && GET_CODE (x) == CONST_INT)
25877 {
25878 unsigned HOST_WIDE_INT low;
25879 HOST_WIDE_INT high;
25880
25881 low = INTVAL (x) & 0xffffffff;
25882 high = (HOST_WIDE_INT) INTVAL (x) >> 32;
25883
25884 /* TOC entries are always Pmode-sized, so when big-endian
25885 smaller integer constants in the TOC need to be padded.
25886 (This is still a win over putting the constants in
25887 a separate constant pool, because then we'd have
25888 to have both a TOC entry _and_ the actual constant.)
25889
25890 For a 32-bit target, CONST_INT values are loaded and shifted
25891 entirely within `low' and can be stored in one TOC entry. */
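/* Illustrative example: on a 64-bit big-endian target, the SImode
   constant 5 is shifted into the high word here, so the TOC entry
   holds 0x0000000500000000. */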
25892
25893 /* It would be easy to make this work, but it doesn't now. */
25894 gcc_assert (!TARGET_64BIT || POINTER_SIZE >= GET_MODE_BITSIZE (mode));
25895
25896 if (WORDS_BIG_ENDIAN && POINTER_SIZE > GET_MODE_BITSIZE (mode))
25897 {
25898 low |= high << 32;
25899 low <<= POINTER_SIZE - GET_MODE_BITSIZE (mode);
25900 high = (HOST_WIDE_INT) low >> 32;
25901 low &= 0xffffffff;
25902 }
25903
25904 if (TARGET_64BIT)
25905 {
25906 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25907 fputs (DOUBLE_INT_ASM_OP, file);
25908 else
25909 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
25910 (long) high & 0xffffffff, (long) low & 0xffffffff);
25911 fprintf (file, "0x%lx%08lx\n",
25912 (long) high & 0xffffffff, (long) low & 0xffffffff);
25913 return;
25914 }
25915 else
25916 {
25917 if (POINTER_SIZE < GET_MODE_BITSIZE (mode))
25918 {
25919 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25920 fputs ("\t.long ", file);
25921 else
25922 fprintf (file, "\t.tc ID_%lx_%lx[TC],",
25923 (long) high & 0xffffffff, (long) low & 0xffffffff);
25924 fprintf (file, "0x%lx,0x%lx\n",
25925 (long) high & 0xffffffff, (long) low & 0xffffffff);
25926 }
25927 else
25928 {
25929 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25930 fputs ("\t.long ", file);
25931 else
25932 fprintf (file, "\t.tc IS_%lx[TC],", (long) low & 0xffffffff);
25933 fprintf (file, "0x%lx\n", (long) low & 0xffffffff);
25934 }
25935 return;
25936 }
25937 }
25938
25939 if (GET_CODE (x) == CONST)
25940 {
25941 gcc_assert (GET_CODE (XEXP (x, 0)) == PLUS
25942 && GET_CODE (XEXP (XEXP (x, 0), 1)) == CONST_INT);
25943
25944 base = XEXP (XEXP (x, 0), 0);
25945 offset = INTVAL (XEXP (XEXP (x, 0), 1));
25946 }
25947
25948 switch (GET_CODE (base))
25949 {
25950 case SYMBOL_REF:
25951 name = XSTR (base, 0);
25952 break;
25953
25954 case LABEL_REF:
25955 ASM_GENERATE_INTERNAL_LABEL (buf, "L",
25956 CODE_LABEL_NUMBER (XEXP (base, 0)));
25957 break;
25958
25959 case CODE_LABEL:
25960 ASM_GENERATE_INTERNAL_LABEL (buf, "L", CODE_LABEL_NUMBER (base));
25961 break;
25962
25963 default:
25964 gcc_unreachable ();
25965 }
25966
25967 if (TARGET_ELF || TARGET_MINIMAL_TOC)
25968 fputs (TARGET_32BIT ? "\t.long " : DOUBLE_INT_ASM_OP, file);
25969 else
25970 {
25971 fputs ("\t.tc ", file);
25972 RS6000_OUTPUT_BASENAME (file, name);
25973
25974 if (offset < 0)
25975 fprintf (file, ".N" HOST_WIDE_INT_PRINT_UNSIGNED, - offset);
25976 else if (offset)
25977 fprintf (file, ".P" HOST_WIDE_INT_PRINT_UNSIGNED, offset);
25978
25979 /* Mark large TOC symbols on AIX with [TE] so they are mapped
25980 after other TOC symbols, reducing overflow of small TOC access
25981 to [TC] symbols. */
25982 fputs (TARGET_XCOFF && TARGET_CMODEL != CMODEL_SMALL
25983 ? "[TE]," : "[TC],", file);
25984 }
25985
25986 /* Currently C++ toc references to vtables can be emitted before it
25987 is decided whether the vtable is public or private. If this is
25988 the case, then the linker will eventually complain that there is
25989 a TOC reference to an unknown section. Thus, for vtables only,
25990 we emit the TOC reference to reference the symbol and not the
25991 section. */
25992 if (VTABLE_NAME_P (name))
25993 {
25994 RS6000_OUTPUT_BASENAME (file, name);
25995 if (offset < 0)
25996 fprintf (file, HOST_WIDE_INT_PRINT_DEC, offset);
25997 else if (offset > 0)
25998 fprintf (file, "+" HOST_WIDE_INT_PRINT_DEC, offset);
25999 }
26000 else
26001 output_addr_const (file, x);
26002
26003 #if HAVE_AS_TLS
26004 if (TARGET_XCOFF && GET_CODE (base) == SYMBOL_REF
26005 && SYMBOL_REF_TLS_MODEL (base) != 0)
26006 {
26007 if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_EXEC)
26008 fputs ("@le", file);
26009 else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_INITIAL_EXEC)
26010 fputs ("@ie", file);
26011 /* Use global-dynamic for local-dynamic. */
26012 else if (SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_GLOBAL_DYNAMIC
26013 || SYMBOL_REF_TLS_MODEL (base) == TLS_MODEL_LOCAL_DYNAMIC)
26014 {
26015 putc ('\n', file);
26016 (*targetm.asm_out.internal_label) (file, "LCM", labelno);
26017 fputs ("\t.tc .", file);
26018 RS6000_OUTPUT_BASENAME (file, name);
26019 fputs ("[TC],", file);
26020 output_addr_const (file, x);
26021 fputs ("@m", file);
26022 }
26023 }
26024 #endif
26025
26026 putc ('\n', file);
26027 }
26028 \f
26029 /* Output an assembler pseudo-op to write an ASCII string of N characters
26030 starting at P to FILE.
26031
26032 On the RS/6000, we have to do this using the .byte operation and
26033 write out special characters outside the quoted string.
26034 Also, the assembler is broken; very long strings are truncated,
26035 so we must artificially break them up early. */
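/* For example (hand-traced, illustrative): the four input bytes
   'A', '"', 'B', '\n' are emitted as

	.byte "A""B"
	.byte 10

   with the quote doubled inside the string and the unprintable
   newline emitted as a decimal .byte value. */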
26036
26037 void
26038 output_ascii (FILE *file, const char *p, int n)
26039 {
26040 char c;
26041 int i, count_string;
26042 const char *for_string = "\t.byte \"";
26043 const char *for_decimal = "\t.byte ";
26044 const char *to_close = NULL;
26045
26046 count_string = 0;
26047 for (i = 0; i < n; i++)
26048 {
26049 c = *p++;
26050 if (c >= ' ' && c < 0177)
26051 {
26052 if (for_string)
26053 fputs (for_string, file);
26054 putc (c, file);
26055
26056 /* Write two quotes to get one. */
26057 if (c == '"')
26058 {
26059 putc (c, file);
26060 ++count_string;
26061 }
26062
26063 for_string = NULL;
26064 for_decimal = "\"\n\t.byte ";
26065 to_close = "\"\n";
26066 ++count_string;
26067
26068 if (count_string >= 512)
26069 {
26070 fputs (to_close, file);
26071
26072 for_string = "\t.byte \"";
26073 for_decimal = "\t.byte ";
26074 to_close = NULL;
26075 count_string = 0;
26076 }
26077 }
26078 else
26079 {
26080 if (for_decimal)
26081 fputs (for_decimal, file);
26082 fprintf (file, "%d", c);
26083
26084 for_string = "\n\t.byte \"";
26085 for_decimal = ", ";
26086 to_close = "\n";
26087 count_string = 0;
26088 }
26089 }
26090
26091 /* Now close the string if we have written one. Then end the line. */
26092 if (to_close)
26093 fputs (to_close, file);
26094 }
26095 \f
26096 /* Generate a unique section name for FILENAME for a section type
26097 represented by SECTION_DESC. Output goes into BUF.
26098
26099 SECTION_DESC can be any string, as long as it is different for each
26100 possible section type.
26101
26102 We name the section in the same manner as xlc. The name begins with an
26103 underscore followed by the filename (after stripping any leading directory
26104 names) with the last period replaced by the string SECTION_DESC. If
26105 FILENAME does not contain a period, SECTION_DESC is appended to the end of
26106 the name. */
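/* For example (with a hypothetical descriptor), FILENAME "../src/foo.c"
   and SECTION_DESC "bss_" yield "_foobss_": leading directories are
   stripped, non-alphanumeric characters are dropped, and SECTION_DESC
   replaces the last period and anything after it. */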
26107
26108 void
26109 rs6000_gen_section_name (char **buf, const char *filename,
26110 const char *section_desc)
26111 {
26112 const char *q, *after_last_slash, *last_period = 0;
26113 char *p;
26114 int len;
26115
26116 after_last_slash = filename;
26117 for (q = filename; *q; q++)
26118 {
26119 if (*q == '/')
26120 after_last_slash = q + 1;
26121 else if (*q == '.')
26122 last_period = q;
26123 }
26124
26125 len = strlen (after_last_slash) + strlen (section_desc) + 2;
26126 *buf = (char *) xmalloc (len);
26127
26128 p = *buf;
26129 *p++ = '_';
26130
26131 for (q = after_last_slash; *q; q++)
26132 {
26133 if (q == last_period)
26134 {
26135 strcpy (p, section_desc);
26136 p += strlen (section_desc);
26137 break;
26138 }
26139
26140 else if (ISALNUM (*q))
26141 *p++ = *q;
26142 }
26143
26144 if (last_period == 0)
26145 strcpy (p, section_desc);
26146 else
26147 *p = '\0';
26148 }
26149 \f
26150 /* Emit profile function. */
26151
26152 void
26153 output_profile_hook (int labelno ATTRIBUTE_UNUSED)
26154 {
26155 /* Non-standard profiling for kernels, which just saves LR then calls
26156 _mcount without worrying about arg saves. The idea is to change
26157 the function prologue as little as possible as it isn't easy to
26158 account for arg save/restore code added just for _mcount. */
26159 if (TARGET_PROFILE_KERNEL)
26160 return;
26161
26162 if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
26163 {
26164 #ifndef NO_PROFILE_COUNTERS
26165 # define NO_PROFILE_COUNTERS 0
26166 #endif
26167 if (NO_PROFILE_COUNTERS)
26168 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
26169 LCT_NORMAL, VOIDmode, 0);
26170 else
26171 {
26172 char buf[30];
26173 const char *label_name;
26174 rtx fun;
26175
26176 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
26177 label_name = ggc_strdup ((*targetm.strip_name_encoding) (buf));
26178 fun = gen_rtx_SYMBOL_REF (Pmode, label_name);
26179
26180 emit_library_call (init_one_libfunc (RS6000_MCOUNT),
26181 LCT_NORMAL, VOIDmode, 1, fun, Pmode);
26182 }
26183 }
26184 else if (DEFAULT_ABI == ABI_DARWIN)
26185 {
26186 const char *mcount_name = RS6000_MCOUNT;
26187 int caller_addr_regno = LR_REGNO;
26188
26189 /* Be conservative and always set this, at least for now. */
26190 crtl->uses_pic_offset_table = 1;
26191
26192 #if TARGET_MACHO
26193 /* For PIC code, set up a stub and collect the caller's address
26194 from r0, which is where the prologue puts it. */
26195 if (MACHOPIC_INDIRECT
26196 && crtl->uses_pic_offset_table)
26197 caller_addr_regno = 0;
26198 #endif
26199 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, mcount_name),
26200 LCT_NORMAL, VOIDmode, 1,
26201 gen_rtx_REG (Pmode, caller_addr_regno), Pmode);
26202 }
26203 }
26204
26205 /* Write function profiler code. */
26206
26207 void
26208 output_function_profiler (FILE *file, int labelno)
26209 {
26210 char buf[100];
26211
26212 switch (DEFAULT_ABI)
26213 {
26214 default:
26215 gcc_unreachable ();
26216
26217 case ABI_V4:
26218 if (!TARGET_32BIT)
26219 {
26220 warning (0, "no profiling of 64-bit code for this ABI");
26221 return;
26222 }
26223 ASM_GENERATE_INTERNAL_LABEL (buf, "LP", labelno);
26224 fprintf (file, "\tmflr %s\n", reg_names[0]);
26225 if (NO_PROFILE_COUNTERS)
26226 {
26227 asm_fprintf (file, "\tstw %s,4(%s)\n",
26228 reg_names[0], reg_names[1]);
26229 }
26230 else if (TARGET_SECURE_PLT && flag_pic)
26231 {
26232 if (TARGET_LINK_STACK)
26233 {
26234 char name[32];
26235 get_ppc476_thunk_name (name);
26236 asm_fprintf (file, "\tbl %s\n", name);
26237 }
26238 else
26239 asm_fprintf (file, "\tbcl 20,31,1f\n1:\n");
26240 asm_fprintf (file, "\tstw %s,4(%s)\n",
26241 reg_names[0], reg_names[1]);
26242 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
26243 asm_fprintf (file, "\taddis %s,%s,",
26244 reg_names[12], reg_names[12]);
26245 assemble_name (file, buf);
26246 asm_fprintf (file, "-1b@ha\n\tla %s,", reg_names[0]);
26247 assemble_name (file, buf);
26248 asm_fprintf (file, "-1b@l(%s)\n", reg_names[12]);
26249 }
26250 else if (flag_pic == 1)
26251 {
26252 fputs ("\tbl _GLOBAL_OFFSET_TABLE_@local-4\n", file);
26253 asm_fprintf (file, "\tstw %s,4(%s)\n",
26254 reg_names[0], reg_names[1]);
26255 asm_fprintf (file, "\tmflr %s\n", reg_names[12]);
26256 asm_fprintf (file, "\tlwz %s,", reg_names[0]);
26257 assemble_name (file, buf);
26258 asm_fprintf (file, "@got(%s)\n", reg_names[12]);
26259 }
26260 else if (flag_pic > 1)
26261 {
26262 asm_fprintf (file, "\tstw %s,4(%s)\n",
26263 reg_names[0], reg_names[1]);
26264 /* Now, we need to get the address of the label. */
26265 if (TARGET_LINK_STACK)
26266 {
26267 char name[32];
26268 get_ppc476_thunk_name (name);
26269 asm_fprintf (file, "\tbl %s\n\tb 1f\n\t.long ", name);
26270 assemble_name (file, buf);
26271 fputs ("-.\n1:", file);
26272 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
26273 asm_fprintf (file, "\taddi %s,%s,4\n",
26274 reg_names[11], reg_names[11]);
26275 }
26276 else
26277 {
26278 fputs ("\tbcl 20,31,1f\n\t.long ", file);
26279 assemble_name (file, buf);
26280 fputs ("-.\n1:", file);
26281 asm_fprintf (file, "\tmflr %s\n", reg_names[11]);
26282 }
26283 asm_fprintf (file, "\tlwz %s,0(%s)\n",
26284 reg_names[0], reg_names[11]);
26285 asm_fprintf (file, "\tadd %s,%s,%s\n",
26286 reg_names[0], reg_names[0], reg_names[11]);
26287 }
26288 else
26289 {
26290 asm_fprintf (file, "\tlis %s,", reg_names[12]);
26291 assemble_name (file, buf);
26292 fputs ("@ha\n", file);
26293 asm_fprintf (file, "\tstw %s,4(%s)\n",
26294 reg_names[0], reg_names[1]);
26295 asm_fprintf (file, "\tla %s,", reg_names[0]);
26296 assemble_name (file, buf);
26297 asm_fprintf (file, "@l(%s)\n", reg_names[12]);
26298 }
26299
26300 /* ABI_V4 saves the static chain reg with ASM_OUTPUT_REG_PUSH. */
26301 fprintf (file, "\tbl %s%s\n",
26302 RS6000_MCOUNT, flag_pic ? "@plt" : "");
26303 break;
26304
26305 case ABI_AIX:
26306 case ABI_ELFv2:
26307 case ABI_DARWIN:
26308 /* Don't do anything, done in output_profile_hook (). */
26309 break;
26310 }
26311 }
26312
26313 \f
26314
26315 /* The following variable holds the last issued insn. */
26316
26317 static rtx last_scheduled_insn;
26318
26319 /* The following variable helps to balance the issuing of load and
26320 store instructions. */
26321
26322 static int load_store_pendulum;
26323
26324 /* Power4 load update and store update instructions are cracked into a
26325 load or store and an integer insn which are executed in the same cycle.
26326 Branches have their own dispatch slot which does not count against the
26327 GCC issue rate, but it changes the program flow so there are no other
26328 instructions to issue in this cycle. */
26329
26330 static int
26331 rs6000_variable_issue_1 (rtx_insn *insn, int more)
26332 {
26333 last_scheduled_insn = insn;
26334 if (GET_CODE (PATTERN (insn)) == USE
26335 || GET_CODE (PATTERN (insn)) == CLOBBER)
26336 {
26337 cached_can_issue_more = more;
26338 return cached_can_issue_more;
26339 }
26340
26341 if (insn_terminates_group_p (insn, current_group))
26342 {
26343 cached_can_issue_more = 0;
26344 return cached_can_issue_more;
26345 }
26346
26347 /* If the insn has no reservation but we still reach here, leave the issue count unchanged. */
26348 if (recog_memoized (insn) < 0)
26349 return more;
26350
26351 if (rs6000_sched_groups)
26352 {
26353 if (is_microcoded_insn (insn))
26354 cached_can_issue_more = 0;
26355 else if (is_cracked_insn (insn))
26356 cached_can_issue_more = more > 2 ? more - 2 : 0;
26357 else
26358 cached_can_issue_more = more - 1;
26359
26360 return cached_can_issue_more;
26361 }
26362
26363 if (rs6000_cpu_attr == CPU_CELL && is_nonpipeline_insn (insn))
26364 return 0;
26365
26366 cached_can_issue_more = more - 1;
26367 return cached_can_issue_more;
26368 }
26369
26370 static int
26371 rs6000_variable_issue (FILE *stream, int verbose, rtx_insn *insn, int more)
26372 {
26373 int r = rs6000_variable_issue_1 (insn, more);
26374 if (verbose)
26375 fprintf (stream, "// rs6000_variable_issue (more = %d) = %d\n", more, r);
26376 return r;
26377 }
26378
26379 /* Adjust the cost of a scheduling dependency. Return the new cost of
26380 a dependency LINK or INSN on DEP_INSN. COST is the current cost. */
26381
26382 static int
26383 rs6000_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
26384 {
26385 enum attr_type attr_type;
26386
26387 if (! recog_memoized (insn))
26388 return 0;
26389
26390 switch (REG_NOTE_KIND (link))
26391 {
26392 case REG_DEP_TRUE:
26393 {
26394 /* Data dependency; DEP_INSN writes a register that INSN reads
26395 some cycles later. */
26396
26397 /* Separate a load from a narrower, dependent store. */
26398 if (rs6000_sched_groups
26399 && GET_CODE (PATTERN (insn)) == SET
26400 && GET_CODE (PATTERN (dep_insn)) == SET
26401 && GET_CODE (XEXP (PATTERN (insn), 1)) == MEM
26402 && GET_CODE (XEXP (PATTERN (dep_insn), 0)) == MEM
26403 && (GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (insn), 1)))
26404 > GET_MODE_SIZE (GET_MODE (XEXP (PATTERN (dep_insn), 0)))))
26405 return cost + 14;
26406
26407 attr_type = get_attr_type (insn);
26408
26409 switch (attr_type)
26410 {
26411 case TYPE_JMPREG:
26412 /* Tell the first scheduling pass about the latency between
26413 a mtctr and bctr (and mtlr and br/blr). The first
26414 scheduling pass will not know about this latency since
26415 the mtctr instruction, which has the latency associated
26416 to it, will be generated by reload. */
26417 return 4;
26418 case TYPE_BRANCH:
26419 /* Leave some extra cycles between a compare and its
26420 dependent branch, to inhibit expensive mispredicts. */
26421 if ((rs6000_cpu_attr == CPU_PPC603
26422 || rs6000_cpu_attr == CPU_PPC604
26423 || rs6000_cpu_attr == CPU_PPC604E
26424 || rs6000_cpu_attr == CPU_PPC620
26425 || rs6000_cpu_attr == CPU_PPC630
26426 || rs6000_cpu_attr == CPU_PPC750
26427 || rs6000_cpu_attr == CPU_PPC7400
26428 || rs6000_cpu_attr == CPU_PPC7450
26429 || rs6000_cpu_attr == CPU_PPCE5500
26430 || rs6000_cpu_attr == CPU_PPCE6500
26431 || rs6000_cpu_attr == CPU_POWER4
26432 || rs6000_cpu_attr == CPU_POWER5
26433 || rs6000_cpu_attr == CPU_POWER7
26434 || rs6000_cpu_attr == CPU_POWER8
26435 || rs6000_cpu_attr == CPU_CELL)
26436 && recog_memoized (dep_insn)
26437 && (INSN_CODE (dep_insn) >= 0))
26438
26439 switch (get_attr_type (dep_insn))
26440 {
26441 case TYPE_CMP:
26442 case TYPE_COMPARE:
26443 case TYPE_FPCOMPARE:
26444 case TYPE_CR_LOGICAL:
26445 case TYPE_DELAYED_CR:
26446 return cost + 2;
26447 case TYPE_MUL:
26448 if (get_attr_dot (dep_insn) == DOT_YES)
26449 return cost + 2;
26450 else
26451 break;
26452 case TYPE_SHIFT:
26453 if (get_attr_dot (dep_insn) == DOT_YES
26454 && get_attr_var_shift (dep_insn) == VAR_SHIFT_NO)
26455 return cost + 2;
26456 else
26457 break;
26458 default:
26459 break;
26460 }
26461 break;
26462
26463 case TYPE_STORE:
26464 case TYPE_FPSTORE:
26465 if ((rs6000_cpu == PROCESSOR_POWER6)
26466 && recog_memoized (dep_insn)
26467 && (INSN_CODE (dep_insn) >= 0))
26468 {
26469
26470 if (GET_CODE (PATTERN (insn)) != SET)
26471 /* If this happens, we have to extend this to schedule
26472 optimally. Return default for now. */
26473 return cost;
26474
26475 /* Adjust the cost for the case where the value written
26476 by a fixed point operation is used as the address
26477 gen value on a store. */
26478 switch (get_attr_type (dep_insn))
26479 {
26480 case TYPE_LOAD:
26481 case TYPE_CNTLZ:
26482 {
26483 if (! store_data_bypass_p (dep_insn, insn))
26484 return get_attr_sign_extend (dep_insn)
26485 == SIGN_EXTEND_YES ? 6 : 4;
26486 break;
26487 }
26488 case TYPE_SHIFT:
26489 {
26490 if (! store_data_bypass_p (dep_insn, insn))
26491 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
26492 6 : 3;
26493 break;
26494 }
26495 case TYPE_INTEGER:
26496 case TYPE_ADD:
26497 case TYPE_LOGICAL:
26498 case TYPE_COMPARE:
26499 case TYPE_EXTS:
26500 case TYPE_INSERT:
26501 {
26502 if (! store_data_bypass_p (dep_insn, insn))
26503 return 3;
26504 break;
26505 }
26506 case TYPE_STORE:
26507 case TYPE_FPLOAD:
26508 case TYPE_FPSTORE:
26509 {
26510 if (get_attr_update (dep_insn) == UPDATE_YES
26511 && ! store_data_bypass_p (dep_insn, insn))
26512 return 3;
26513 break;
26514 }
26515 case TYPE_MUL:
26516 {
26517 if (! store_data_bypass_p (dep_insn, insn))
26518 return 17;
26519 break;
26520 }
26521 case TYPE_DIV:
26522 {
26523 if (! store_data_bypass_p (dep_insn, insn))
26524 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
26525 break;
26526 }
26527 default:
26528 break;
26529 }
26530 }
26531 break;
26532
26533 case TYPE_LOAD:
26534 if ((rs6000_cpu == PROCESSOR_POWER6)
26535 && recog_memoized (dep_insn)
26536 && (INSN_CODE (dep_insn) >= 0))
26537 {
26538
26539 /* Adjust the cost for the case where the value written
26540 by a fixed point instruction is used within the address
26541 gen portion of a subsequent load(u)(x) */
26542 switch (get_attr_type (dep_insn))
26543 {
26544 case TYPE_LOAD:
26545 case TYPE_CNTLZ:
26546 {
26547 if (set_to_load_agen (dep_insn, insn))
26548 return get_attr_sign_extend (dep_insn)
26549 == SIGN_EXTEND_YES ? 6 : 4;
26550 break;
26551 }
26552 case TYPE_SHIFT:
26553 {
26554 if (set_to_load_agen (dep_insn, insn))
26555 return get_attr_var_shift (dep_insn) == VAR_SHIFT_YES ?
26556 6 : 3;
26557 break;
26558 }
26559 case TYPE_INTEGER:
26560 case TYPE_ADD:
26561 case TYPE_LOGICAL:
26562 case TYPE_COMPARE:
26563 case TYPE_EXTS:
26564 case TYPE_INSERT:
26565 {
26566 if (set_to_load_agen (dep_insn, insn))
26567 return 3;
26568 break;
26569 }
26570 case TYPE_STORE:
26571 case TYPE_FPLOAD:
26572 case TYPE_FPSTORE:
26573 {
26574 if (get_attr_update (dep_insn) == UPDATE_YES
26575 && set_to_load_agen (dep_insn, insn))
26576 return 3;
26577 break;
26578 }
26579 case TYPE_MUL:
26580 {
26581 if (set_to_load_agen (dep_insn, insn))
26582 return 17;
26583 break;
26584 }
26585 case TYPE_DIV:
26586 {
26587 if (set_to_load_agen (dep_insn, insn))
26588 return get_attr_size (dep_insn) == SIZE_32 ? 45 : 57;
26589 break;
26590 }
26591 default:
26592 break;
26593 }
26594 }
26595 break;
26596
26597 case TYPE_FPLOAD:
26598 if ((rs6000_cpu == PROCESSOR_POWER6)
26599 && get_attr_update (insn) == UPDATE_NO
26600 && recog_memoized (dep_insn)
26601 && (INSN_CODE (dep_insn) >= 0)
26602 && (get_attr_type (dep_insn) == TYPE_MFFGPR))
26603 return 2;
26604
26605 default:
26606 break;
26607 }
26608
26609 /* Fall out to return default cost. */
26610 }
26611 break;
26612
26613 case REG_DEP_OUTPUT:
26614 /* Output dependency; DEP_INSN writes a register that INSN writes some
26615 cycles later. */
26616 if ((rs6000_cpu == PROCESSOR_POWER6)
26617 && recog_memoized (dep_insn)
26618 && (INSN_CODE (dep_insn) >= 0))
26619 {
26620 attr_type = get_attr_type (insn);
26621
26622 switch (attr_type)
26623 {
26624 case TYPE_FP:
26625 if (get_attr_type (dep_insn) == TYPE_FP)
26626 return 1;
26627 break;
26628 case TYPE_FPLOAD:
26629 if (get_attr_update (insn) == UPDATE_NO
26630 && get_attr_type (dep_insn) == TYPE_MFFGPR)
26631 return 2;
26632 break;
26633 default:
26634 break;
26635 }
26636 }
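/* Fall through; any other output dependency costs nothing. */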
26637 case REG_DEP_ANTI:
26638 /* Anti dependency; DEP_INSN reads a register that INSN writes some
26639 cycles later. */
26640 return 0;
26641
26642 default:
26643 gcc_unreachable ();
26644 }
26645
26646 return cost;
26647 }
26648
26649 /* Debug version of rs6000_adjust_cost. */
26650
26651 static int
26652 rs6000_debug_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn,
26653 int cost)
26654 {
26655 int ret = rs6000_adjust_cost (insn, link, dep_insn, cost);
26656
26657 if (ret != cost)
26658 {
26659 const char *dep;
26660
26661 switch (REG_NOTE_KIND (link))
26662 {
26663 default: dep = "unknown dependency"; break;
26664 case REG_DEP_TRUE: dep = "data dependency"; break;
26665 case REG_DEP_OUTPUT: dep = "output dependency"; break;
26666 case REG_DEP_ANTI: dep = "anti dependency"; break;
26667 }
26668
26669 fprintf (stderr,
26670 "\nrs6000_adjust_cost, final cost = %d, orig cost = %d, "
26671 "%s, insn:\n", ret, cost, dep);
26672
26673 debug_rtx (insn);
26674 }
26675
26676 return ret;
26677 }
26678
26679 /* Return true if INSN is microcoded, and false otherwise. */
26681
26682 static bool
26683 is_microcoded_insn (rtx insn)
26684 {
26685 if (!insn || !NONDEBUG_INSN_P (insn)
26686 || GET_CODE (PATTERN (insn)) == USE
26687 || GET_CODE (PATTERN (insn)) == CLOBBER)
26688 return false;
26689
26690 if (rs6000_cpu_attr == CPU_CELL)
26691 return get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS;
26692
26693 if (rs6000_sched_groups
26694 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
26695 {
26696 enum attr_type type = get_attr_type (insn);
26697 if ((type == TYPE_LOAD
26698 && get_attr_update (insn) == UPDATE_YES
26699 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES)
26700 || ((type == TYPE_LOAD || type == TYPE_STORE)
26701 && get_attr_update (insn) == UPDATE_YES
26702 && get_attr_indexed (insn) == INDEXED_YES)
26703 || type == TYPE_MFCR)
26704 return true;
26705 }
26706
26707 return false;
26708 }
26709
26710 /* Return true if INSN is cracked into 2 instructions by the
26711 processor (and therefore occupies 2 issue slots). */
26712
26713 static bool
26714 is_cracked_insn (rtx insn)
26715 {
26716 if (!insn || !NONDEBUG_INSN_P (insn)
26717 || GET_CODE (PATTERN (insn)) == USE
26718 || GET_CODE (PATTERN (insn)) == CLOBBER)
26719 return false;
26720
26721 if (rs6000_sched_groups
26722 && (rs6000_cpu == PROCESSOR_POWER4 || rs6000_cpu == PROCESSOR_POWER5))
26723 {
26724 enum attr_type type = get_attr_type (insn);
26725 if ((type == TYPE_LOAD
26726 && get_attr_sign_extend (insn) == SIGN_EXTEND_YES
26727 && get_attr_update (insn) == UPDATE_NO)
26728 || (type == TYPE_LOAD
26729 && get_attr_sign_extend (insn) == SIGN_EXTEND_NO
26730 && get_attr_update (insn) == UPDATE_YES
26731 && get_attr_indexed (insn) == INDEXED_NO)
26732 || (type == TYPE_STORE
26733 && get_attr_update (insn) == UPDATE_YES
26734 && get_attr_indexed (insn) == INDEXED_NO)
26735 || ((type == TYPE_FPLOAD || type == TYPE_FPSTORE)
26736 && get_attr_update (insn) == UPDATE_YES)
26737 || type == TYPE_DELAYED_CR
26738 || type == TYPE_COMPARE
26739 || (type == TYPE_SHIFT
26740 && get_attr_dot (insn) == DOT_YES
26741 && get_attr_var_shift (insn) == VAR_SHIFT_NO)
26742 || (type == TYPE_MUL
26743 && get_attr_dot (insn) == DOT_YES)
26744 || type == TYPE_DIV
26745 || (type == TYPE_INSERT
26746 && get_attr_size (insn) == SIZE_32))
26747 return true;
26748 }
26749
26750 return false;
26751 }
26752
26753 /* The function returns true if INSN can be issued only from
26754 the branch slot. */
26755
26756 static bool
26757 is_branch_slot_insn (rtx insn)
26758 {
26759 if (!insn || !NONDEBUG_INSN_P (insn)
26760 || GET_CODE (PATTERN (insn)) == USE
26761 || GET_CODE (PATTERN (insn)) == CLOBBER)
26762 return false;
26763
26764 if (rs6000_sched_groups)
26765 {
26766 enum attr_type type = get_attr_type (insn);
26767 if (type == TYPE_BRANCH || type == TYPE_JMPREG)
26768 return true;
26769 return false;
26770 }
26771
26772 return false;
26773 }
26774
26775 /* Return true if OUT_INSN sets a value that is used in the address
26776 generation computation of IN_INSN. */
26777 static bool
26778 set_to_load_agen (rtx out_insn, rtx in_insn)
26779 {
26780 rtx out_set, in_set;
26781
26782 /* For performance reasons, only handle the simple case where
26783 both loads are a single_set. */
26784 out_set = single_set (out_insn);
26785 if (out_set)
26786 {
26787 in_set = single_set (in_insn);
26788 if (in_set)
26789 return reg_mentioned_p (SET_DEST (out_set), SET_SRC (in_set));
26790 }
26791
26792 return false;
26793 }
26794
26795 /* Try to determine the base/offset/size parts of the given MEM.
26796 Return true if successful, false if any of the values could not
26797 be determined.
26798
26799 This function only looks for REG or REG+CONST address forms.
26800 REG+REG address form will return false. */
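/* For example (illustrative), (mem:DI (plus:DI (reg:DI 9) (const_int 16)))
   with a known 8-byte size yields *BASE = (reg 9), *OFFSET = 16 and
   *SIZE = 8; an indexed (reg+reg) address makes it return false. */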
26801
26802 static bool
26803 get_memref_parts (rtx mem, rtx *base, HOST_WIDE_INT *offset,
26804 HOST_WIDE_INT *size)
26805 {
26806 rtx addr_rtx;
26807 if (MEM_SIZE_KNOWN_P (mem))
26808 *size = MEM_SIZE (mem);
26809 else
26810 return false;
26811
26812 if (GET_CODE (XEXP (mem, 0)) == PRE_MODIFY)
26813 addr_rtx = XEXP (XEXP (mem, 0), 1);
26814 else
26815 addr_rtx = XEXP (mem, 0);
26816
26817 if (GET_CODE (addr_rtx) == REG)
26818 {
26819 *base = addr_rtx;
26820 *offset = 0;
26821 }
26822 else if (GET_CODE (addr_rtx) == PLUS
26823 && CONST_INT_P (XEXP (addr_rtx, 1)))
26824 {
26825 *base = XEXP (addr_rtx, 0);
26826 *offset = INTVAL (XEXP (addr_rtx, 1));
26827 }
26828 else
26829 return false;
26830
26831 return true;
26832 }
26833
26834 /* Return true if the target storage location of MEM1 is adjacent
26835 to the target storage location of MEM2. */
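/* For example (illustrative), 8-byte accesses at 16(r9) and 24(r9)
   are adjacent; the same offsets with 4-byte accesses are not. */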
26837
26838 static bool
26839 adjacent_mem_locations (rtx mem1, rtx mem2)
26840 {
26841 rtx reg1, reg2;
26842 HOST_WIDE_INT off1, size1, off2, size2;
26843
26844 if (get_memref_parts (mem1, &reg1, &off1, &size1)
26845 && get_memref_parts (mem2, &reg2, &off2, &size2))
26846 return ((REGNO (reg1) == REGNO (reg2))
26847 && ((off1 + size1 == off2)
26848 || (off2 + size2 == off1)));
26849
26850 return false;
26851 }
26852
26853 /* This function returns true if it can be determined that the two MEM
26854 locations overlap by at least 1 byte based on base reg/offset/size. */
26855
26856 static bool
26857 mem_locations_overlap (rtx mem1, rtx mem2)
26858 {
26859 rtx reg1, reg2;
26860 HOST_WIDE_INT off1, size1, off2, size2;
26861
26862 if (get_memref_parts (mem1, &reg1, &off1, &size1)
26863 && get_memref_parts (mem2, &reg2, &off2, &size2))
26864 return ((REGNO (reg1) == REGNO (reg2))
26865 && (((off1 <= off2) && (off1 + size1 > off2))
26866 || ((off2 <= off1) && (off2 + size2 > off1))));
26867
26868 return false;
26869 }
26870
26871 /* A C statement (sans semicolon) to update the integer scheduling
26872 priority INSN_PRIORITY (INSN). Increase the priority to execute the
26873 INSN earlier, reduce the priority to execute INSN later. Do not
26874 define this macro if you do not need to adjust the scheduling
26875 priorities of insns. */
26876
26877 static int
26878 rs6000_adjust_priority (rtx_insn *insn ATTRIBUTE_UNUSED, int priority)
26879 {
26880 rtx load_mem, str_mem;
26881 /* On machines (like the 750) which have asymmetric integer units,
26882 where one integer unit can do multiply and divides and the other
26883 can't, reduce the priority of multiply/divide so it is scheduled
26884 before other integer operations. */
26885
26886 #if 0
26887 if (! INSN_P (insn))
26888 return priority;
26889
26890 if (GET_CODE (PATTERN (insn)) == USE)
26891 return priority;
26892
26893 switch (rs6000_cpu_attr) {
26894 case CPU_PPC750:
26895 switch (get_attr_type (insn))
26896 {
26897 default:
26898 break;
26899
26900 case TYPE_MUL:
26901 case TYPE_DIV:
26902 fprintf (stderr, "priority was %#x (%d) before adjustment\n",
26903 priority, priority);
26904 if (priority >= 0 && priority < 0x01000000)
26905 priority >>= 3;
26906 break;
26907 }
26908 }
26909 #endif
26910
26911 if (insn_must_be_first_in_group (insn)
26912 && reload_completed
26913 && current_sched_info->sched_max_insns_priority
26914 && rs6000_sched_restricted_insns_priority)
26915 {
26916
26917 /* Prioritize insns that can be dispatched only in the first
26918 dispatch slot. */
26919 if (rs6000_sched_restricted_insns_priority == 1)
26920 /* Attach highest priority to insn. This means that in
26921 haifa-sched.c:ready_sort(), dispatch-slot restriction considerations
26922 precede 'priority' (critical path) considerations. */
26923 return current_sched_info->sched_max_insns_priority;
26924 else if (rs6000_sched_restricted_insns_priority == 2)
26925 /* Increase priority of insn by a minimal amount. This means that in
26926 haifa-sched.c:ready_sort(), only 'priority' (critical path)
26927 considerations precede dispatch-slot restriction considerations. */
26928 return (priority + 1);
26929 }
26930
26931 if (rs6000_cpu == PROCESSOR_POWER6
26932 && ((load_store_pendulum == -2 && is_load_insn (insn, &load_mem))
26933 || (load_store_pendulum == 2 && is_store_insn (insn, &str_mem))))
26934 /* Attach highest priority to insn if the scheduler has just issued two
26935 stores and this instruction is a load, or two loads and this instruction
26936 is a store. Power6 wants loads and stores scheduled alternately
26937 when possible */
26938 return current_sched_info->sched_max_insns_priority;
26939
26940 return priority;
26941 }
26942
26943 /* Return true if the instruction is nonpipelined on the Cell. */
26944 static bool
26945 is_nonpipeline_insn (rtx insn)
26946 {
26947 enum attr_type type;
26948 if (!insn || !NONDEBUG_INSN_P (insn)
26949 || GET_CODE (PATTERN (insn)) == USE
26950 || GET_CODE (PATTERN (insn)) == CLOBBER)
26951 return false;
26952
26953 type = get_attr_type (insn);
26954 if (type == TYPE_MUL
26955 || type == TYPE_DIV
26956 || type == TYPE_SDIV
26957 || type == TYPE_DDIV
26958 || type == TYPE_SSQRT
26959 || type == TYPE_DSQRT
26960 || type == TYPE_MFCR
26961 || type == TYPE_MFCRF
26962 || type == TYPE_MFJMPR)
26963 {
26964 return true;
26965 }
26966 return false;
26967 }
26968
26969
26970 /* Return how many instructions the machine can issue per cycle. */
26971
26972 static int
26973 rs6000_issue_rate (void)
26974 {
26975 /* Unless scheduling for register pressure, use issue rate of 1 for
26976 first scheduling pass to decrease degradation. */
26977 if (!reload_completed && !flag_sched_pressure)
26978 return 1;
26979
26980 switch (rs6000_cpu_attr) {
26981 case CPU_RS64A:
26982 case CPU_PPC601: /* ? */
26983 case CPU_PPC7450:
26984 return 3;
26985 case CPU_PPC440:
26986 case CPU_PPC603:
26987 case CPU_PPC750:
26988 case CPU_PPC7400:
26989 case CPU_PPC8540:
26990 case CPU_PPC8548:
26991 case CPU_CELL:
26992 case CPU_PPCE300C2:
26993 case CPU_PPCE300C3:
26994 case CPU_PPCE500MC:
26995 case CPU_PPCE500MC64:
26996 case CPU_PPCE5500:
26997 case CPU_PPCE6500:
26998 case CPU_TITAN:
26999 return 2;
27000 case CPU_PPC476:
27001 case CPU_PPC604:
27002 case CPU_PPC604E:
27003 case CPU_PPC620:
27004 case CPU_PPC630:
27005 return 4;
27006 case CPU_POWER4:
27007 case CPU_POWER5:
27008 case CPU_POWER6:
27009 case CPU_POWER7:
27010 return 5;
27011 case CPU_POWER8:
27012 return 7;
27013 default:
27014 return 1;
27015 }
27016 }
27017
27018 /* Return how many instructions to look ahead for better insn
27019 scheduling. */
27020
27021 static int
27022 rs6000_use_sched_lookahead (void)
27023 {
27024 switch (rs6000_cpu_attr)
27025 {
27026 case CPU_PPC8540:
27027 case CPU_PPC8548:
27028 return 4;
27029
27030 case CPU_CELL:
27031 return (reload_completed ? 8 : 0);
27032
27033 default:
27034 return 0;
27035 }
27036 }
27037
27038 /* We are choosing insn from the ready queue. Return zero if INSN can be
27039 chosen. */
27040 static int
27041 rs6000_use_sched_lookahead_guard (rtx_insn *insn, int ready_index)
27042 {
27043 if (ready_index == 0)
27044 return 0;
27045
27046 if (rs6000_cpu_attr != CPU_CELL)
27047 return 0;
27048
27049 gcc_assert (insn != NULL_RTX && INSN_P (insn));
27050
27051 if (!reload_completed
27052 || is_nonpipeline_insn (insn)
27053 || is_microcoded_insn (insn))
27054 return 1;
27055
27056 return 0;
27057 }
27058
27059 /* Determine if PAT refers to memory. If so, set MEM_REF to the MEM rtx
27060 and return true. */
27061
27062 static bool
27063 find_mem_ref (rtx pat, rtx *mem_ref)
27064 {
27065 const char * fmt;
27066 int i, j;
27067
27068 /* stack_tie does not produce any real memory traffic. */
27069 if (tie_operand (pat, VOIDmode))
27070 return false;
27071
27072 if (GET_CODE (pat) == MEM)
27073 {
27074 *mem_ref = pat;
27075 return true;
27076 }
27077
27078 /* Recursively process the pattern. */
27079 fmt = GET_RTX_FORMAT (GET_CODE (pat));
27080
27081 for (i = GET_RTX_LENGTH (GET_CODE (pat)) - 1; i >= 0; i--)
27082 {
27083 if (fmt[i] == 'e')
27084 {
27085 if (find_mem_ref (XEXP (pat, i), mem_ref))
27086 return true;
27087 }
27088 else if (fmt[i] == 'E')
27089 for (j = XVECLEN (pat, i) - 1; j >= 0; j--)
27090 {
27091 if (find_mem_ref (XVECEXP (pat, i, j), mem_ref))
27092 return true;
27093 }
27094 }
27095
27096 return false;
27097 }
27098
27099 /* Determine if PAT is a PATTERN of a load insn. */
27100
27101 static bool
27102 is_load_insn1 (rtx pat, rtx *load_mem)
27103 {
27104 if (!pat)
27105 return false;
27106
27107 if (GET_CODE (pat) == SET)
27108 return find_mem_ref (SET_SRC (pat), load_mem);
27109
27110 if (GET_CODE (pat) == PARALLEL)
27111 {
27112 int i;
27113
27114 for (i = 0; i < XVECLEN (pat, 0); i++)
27115 if (is_load_insn1 (XVECEXP (pat, 0, i), load_mem))
27116 return true;
27117 }
27118
27119 return false;
27120 }
27121
27122 /* Determine if INSN loads from memory. */
27123
27124 static bool
27125 is_load_insn (rtx insn, rtx *load_mem)
27126 {
27127 if (!insn || !INSN_P (insn))
27128 return false;
27129
27130 if (CALL_P (insn))
27131 return false;
27132
27133 return is_load_insn1 (PATTERN (insn), load_mem);
27134 }
27135
27136 /* Determine if PAT is a PATTERN of a store insn. */
27137
27138 static bool
27139 is_store_insn1 (rtx pat, rtx *str_mem)
27140 {
27141 if (!pat)
27142 return false;
27143
27144 if (GET_CODE (pat) == SET)
27145 return find_mem_ref (SET_DEST (pat), str_mem);
27146
27147 if (GET_CODE (pat) == PARALLEL)
27148 {
27149 int i;
27150
27151 for (i = 0; i < XVECLEN (pat, 0); i++)
27152 if (is_store_insn1 (XVECEXP (pat, 0, i), str_mem))
27153 return true;
27154 }
27155
27156 return false;
27157 }
27158
27159 /* Determine if INSN stores to memory. */
27160
27161 static bool
27162 is_store_insn (rtx insn, rtx *str_mem)
27163 {
27164 if (!insn || !INSN_P (insn))
27165 return false;
27166
27167 return is_store_insn1 (PATTERN (insn), str_mem);
27168 }
27169
27170 /* Returns whether the dependence between INSN and NEXT is considered
27171 costly by the given target. */
27172
27173 static bool
27174 rs6000_is_costly_dependence (dep_t dep, int cost, int distance)
27175 {
27176 rtx insn;
27177 rtx next;
27178 rtx load_mem, str_mem;
27179
27180 /* If the flag is not enabled - no dependence is considered costly;
27181 allow all dependent insns in the same group.
27182 This is the most aggressive option. */
27183 if (rs6000_sched_costly_dep == no_dep_costly)
27184 return false;
27185
27186 /* If the flag is set to 1 - a dependence is always considered costly;
27187 do not allow dependent instructions in the same group.
27188 This is the most conservative option. */
27189 if (rs6000_sched_costly_dep == all_deps_costly)
27190 return true;
27191
27192 insn = DEP_PRO (dep);
27193 next = DEP_CON (dep);
27194
27195 if (rs6000_sched_costly_dep == store_to_load_dep_costly
27196 && is_load_insn (next, &load_mem)
27197 && is_store_insn (insn, &str_mem))
27198 /* Prevent load after store in the same group. */
27199 return true;
27200
27201 if (rs6000_sched_costly_dep == true_store_to_load_dep_costly
27202 && is_load_insn (next, &load_mem)
27203 && is_store_insn (insn, &str_mem)
27204 && DEP_TYPE (dep) == REG_DEP_TRUE
27205 && mem_locations_overlap(str_mem, load_mem))
27206 /* Prevent load after store in the same group if it is a true
27207 dependence. */
27208 return true;
27209
27210 /* The flag is set to X; dependences with latency >= X are considered costly,
27211 and will not be scheduled in the same group. */
27212 if (rs6000_sched_costly_dep <= max_dep_latency
27213 && ((cost - distance) >= (int)rs6000_sched_costly_dep))
27214 return true;
27215
27216 return false;
27217 }
27218
27219 /* Return the next insn after INSN that is found before TAIL is reached,
27220 skipping any "non-active" insns - insns that will not actually occupy
27221 an issue slot. Return NULL_RTX if such an insn is not found. */
27222
27223 static rtx_insn *
27224 get_next_active_insn (rtx_insn *insn, rtx_insn *tail)
27225 {
27226 if (insn == NULL_RTX || insn == tail)
27227 return NULL;
27228
27229 while (1)
27230 {
27231 insn = NEXT_INSN (insn);
27232 if (insn == NULL_RTX || insn == tail)
27233 return NULL;
27234
27235 if (CALL_P (insn)
27236 || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn)
27237 || (NONJUMP_INSN_P (insn)
27238 && GET_CODE (PATTERN (insn)) != USE
27239 && GET_CODE (PATTERN (insn)) != CLOBBER
27240 && INSN_CODE (insn) != CODE_FOR_stack_tie))
27241 break;
27242 }
27243 return insn;
27244 }
27245
27246 /* We are about to begin issuing insns for this clock cycle. */
27247
27248 static int
27249 rs6000_sched_reorder (FILE *dump ATTRIBUTE_UNUSED, int sched_verbose,
27250 rtx_insn **ready ATTRIBUTE_UNUSED,
27251 int *pn_ready ATTRIBUTE_UNUSED,
27252 int clock_var ATTRIBUTE_UNUSED)
27253 {
27254 int n_ready = *pn_ready;
27255
27256 if (sched_verbose)
27257 fprintf (dump, "// rs6000_sched_reorder :\n");
27258
27259 /* Reorder the ready list, if the second to last ready insn
27260 is a non-pipelined insn. */
27261 if (rs6000_cpu_attr == CPU_CELL && n_ready > 1)
27262 {
27263 if (is_nonpipeline_insn (ready[n_ready - 1])
27264 && (recog_memoized (ready[n_ready - 2]) > 0))
27265 /* Simply swap first two insns. */
27266 {
27267 rtx_insn *tmp = ready[n_ready - 1];
27268 ready[n_ready - 1] = ready[n_ready - 2];
27269 ready[n_ready - 2] = tmp;
27270 }
27271 }
27272
27273 if (rs6000_cpu == PROCESSOR_POWER6)
27274 load_store_pendulum = 0;
27275
27276 return rs6000_issue_rate ();
27277 }
27278
27279 /* Like rs6000_sched_reorder, but called after issuing each insn. */
27280
27281 static int
27282 rs6000_sched_reorder2 (FILE *dump, int sched_verbose, rtx_insn **ready,
27283 int *pn_ready, int clock_var ATTRIBUTE_UNUSED)
27284 {
27285 if (sched_verbose)
27286 fprintf (dump, "// rs6000_sched_reorder2 :\n");
27287
27288 /* For Power6, we need to handle some special cases to try and keep the
27289 store queue from overflowing and triggering expensive flushes.
27290
27291 This code monitors how load and store instructions are being issued
27292 and skews the ready list one way or the other to increase the likelihood
27293 that a desired instruction is issued at the proper time.
27294
27295 A couple of things are done. First, we maintain a "load_store_pendulum"
27296 to track the current state of load/store issue.
27297
27298 - If the pendulum is at zero, then no loads or stores have been
27299 issued in the current cycle so we do nothing.
27300
27301 - If the pendulum is 1, then a single load has been issued in this
27302 cycle and we attempt to locate another load in the ready list to
27303 issue with it.
27304
27305 - If the pendulum is -2, then two stores have already been
27306 issued in this cycle, so we increase the priority of the first load
27307 in the ready list to increase its likelihood of being chosen first
27308 in the next cycle.
27309
27310 - If the pendulum is -1, then a single store has been issued in this
27311 cycle and we attempt to locate another store in the ready list to
27312 issue with it, preferring a store to an adjacent memory location to
27313 facilitate store pairing in the store queue.
27314
27315 - If the pendulum is 2, then two loads have already been
27316 issued in this cycle, so we increase the priority of the first store
27317 in the ready list to increase its likelihood of being chosen first
27318 in the next cycle.
27319
27320 - If the pendulum < -2 or > 2, then do nothing.
27321
27322 Note: This code covers the most common scenarios. There exist
27323 non-load/store instructions which make use of the LSU and which
27324 would need to be accounted for to strictly model the behavior
27325 of the machine. Those instructions are currently unaccounted
27326 for to help minimize compile time overhead of this code.
27327 */
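/* Illustrative trace (hypothetical schedule): issuing one store moves
   the pendulum to -1; a second store in the same cycle moves it to -2;
   the -2 case below then boosts the priority of the first load on the
   ready list and steps the pendulum to -3 so only one load is boosted. */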
27328 if (rs6000_cpu == PROCESSOR_POWER6 && last_scheduled_insn)
27329 {
27330 int pos;
27331 int i;
27332 rtx_insn *tmp;
27333 rtx load_mem, str_mem;
27334
27335 if (is_store_insn (last_scheduled_insn, &str_mem))
27336 /* Issuing a store, swing the load_store_pendulum to the left */
27337 load_store_pendulum--;
27338 else if (is_load_insn (last_scheduled_insn, &load_mem))
27339 /* Issuing a load, swing the load_store_pendulum to the right */
27340 load_store_pendulum++;
27341 else
27342 return cached_can_issue_more;
27343
27344 /* If the pendulum is balanced, or there is only one instruction on
27345 the ready list, then all is well, so return. */
27346 if ((load_store_pendulum == 0) || (*pn_ready <= 1))
27347 return cached_can_issue_more;
27348
27349 if (load_store_pendulum == 1)
27350 {
27351 /* A load has been issued in this cycle. Scan the ready list
27352 for another load to issue with it */
27353 pos = *pn_ready-1;
27354
27355 while (pos >= 0)
27356 {
27357 if (is_load_insn (ready[pos], &load_mem))
27358 {
27359 /* Found a load. Move it to the head of the ready list,
27360 and adjust its priority so that it is more likely to
27361 stay there */
27362 tmp = ready[pos];
27363 for (i=pos; i<*pn_ready-1; i++)
27364 ready[i] = ready[i + 1];
27365 ready[*pn_ready-1] = tmp;
27366
27367 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
27368 INSN_PRIORITY (tmp)++;
27369 break;
27370 }
27371 pos--;
27372 }
27373 }
27374 else if (load_store_pendulum == -2)
27375 {
27376 /* Two stores have been issued in this cycle. Increase the
27377 priority of the first load in the ready list to favor it for
27378 issuing in the next cycle. */
27379 pos = *pn_ready-1;
27380
27381 while (pos >= 0)
27382 {
27383 if (is_load_insn (ready[pos], &load_mem)
27384 && !sel_sched_p ()
27385 && INSN_PRIORITY_KNOWN (ready[pos]))
27386 {
27387 INSN_PRIORITY (ready[pos])++;
27388
27389 /* Adjust the pendulum to account for the fact that a load
27390 was found and increased in priority. This is to prevent
27391 increasing the priority of multiple loads */
27392 load_store_pendulum--;
27393
27394 break;
27395 }
27396 pos--;
27397 }
27398 }
27399 else if (load_store_pendulum == -1)
27400 {
27401 /* A store has been issued in this cycle. Scan the ready list for
27402 another store to issue with it, preferring a store to an adjacent
27403 memory location */
27404 int first_store_pos = -1;
27405
27406 pos = *pn_ready-1;
27407
27408 while (pos >= 0)
27409 {
27410 if (is_store_insn (ready[pos], &str_mem))
27411 {
27412 rtx str_mem2;
27413 /* Maintain the index of the first store found on the
27414 list */
27415 if (first_store_pos == -1)
27416 first_store_pos = pos;
27417
27418 if (is_store_insn (last_scheduled_insn, &str_mem2)
27419 && adjacent_mem_locations (str_mem, str_mem2))
27420 {
27421 /* Found an adjacent store. Move it to the head of the
27422 ready list, and adjust its priority so that it is
27423 more likely to stay there */
27424 tmp = ready[pos];
27425 for (i=pos; i<*pn_ready-1; i++)
27426 ready[i] = ready[i + 1];
27427 ready[*pn_ready-1] = tmp;
27428
27429 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
27430 INSN_PRIORITY (tmp)++;
27431
27432 first_store_pos = -1;
27433
27434 break;
27435 }
27436 }
27437 pos--;
27438 }
27439
27440 if (first_store_pos >= 0)
27441 {
27442 /* An adjacent store wasn't found, but a non-adjacent store was,
27443 so move the non-adjacent store to the front of the ready
27444 list, and adjust its priority so that it is more likely to
27445 stay there. */
27446 tmp = ready[first_store_pos];
27447 for (i=first_store_pos; i<*pn_ready-1; i++)
27448 ready[i] = ready[i + 1];
27449 ready[*pn_ready-1] = tmp;
27450 if (!sel_sched_p () && INSN_PRIORITY_KNOWN (tmp))
27451 INSN_PRIORITY (tmp)++;
27452 }
27453 }
27454 else if (load_store_pendulum == 2)
27455 {
27456 /* Two loads have been issued in this cycle. Increase the priority
27457 of the first store in the ready list to favor it for issuing in
27458 the next cycle. */
27459 pos = *pn_ready-1;
27460
27461 while (pos >= 0)
27462 {
27463 if (is_store_insn (ready[pos], &str_mem)
27464 && !sel_sched_p ()
27465 && INSN_PRIORITY_KNOWN (ready[pos]))
27466 {
27467 INSN_PRIORITY (ready[pos])++;
27468
27469 /* Adjust the pendulum to account for the fact that a store
27470 was found and increased in priority. This is to prevent
27471 increasing the priority of multiple stores */
27472 load_store_pendulum++;
27473
27474 break;
27475 }
27476 pos--;
27477 }
27478 }
27479 }
27480
27481 return cached_can_issue_more;
27482 }
27483
27484 /* Return whether the presence of INSN causes a dispatch group termination
27485 of group WHICH_GROUP.
27486
27487 If WHICH_GROUP == current_group, this function will return true if INSN
27488 causes the termination of the current group (i.e., the dispatch group to
27489 which INSN belongs). This means that INSN will be the last insn in the
27490 group it belongs to.
27491
27492 If WHICH_GROUP == previous_group, this function will return true if INSN
27493 causes the termination of the previous group (i.e., the dispatch group that
27494 precedes the group to which INSN belongs). This means that INSN will be
27495 the first insn in the group it belongs to. */
27496
27497 static bool
27498 insn_terminates_group_p (rtx insn, enum group_termination which_group)
27499 {
27500 bool first, last;
27501
27502 if (! insn)
27503 return false;
27504
27505 first = insn_must_be_first_in_group (insn);
27506 last = insn_must_be_last_in_group (insn);
27507
27508 if (first && last)
27509 return true;
27510
27511 if (which_group == current_group)
27512 return last;
27513 else if (which_group == previous_group)
27514 return first;
27515
27516 return false;
27517 }
27518
27519
27520 static bool
27521 insn_must_be_first_in_group (rtx insn)
27522 {
27523 enum attr_type type;
27524
27525 if (!insn
27526 || NOTE_P (insn)
27527 || DEBUG_INSN_P (insn)
27528 || GET_CODE (PATTERN (insn)) == USE
27529 || GET_CODE (PATTERN (insn)) == CLOBBER)
27530 return false;
27531
27532 switch (rs6000_cpu)
27533 {
27534 case PROCESSOR_POWER5:
27535 if (is_cracked_insn (insn))
27536 return true;
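/* Fall through. */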
27537 case PROCESSOR_POWER4:
27538 if (is_microcoded_insn (insn))
27539 return true;
27540
27541 if (!rs6000_sched_groups)
27542 return false;
27543
27544 type = get_attr_type (insn);
27545
27546 switch (type)
27547 {
27548 case TYPE_MFCR:
27549 case TYPE_MFCRF:
27550 case TYPE_MTCR:
27551 case TYPE_DELAYED_CR:
27552 case TYPE_CR_LOGICAL:
27553 case TYPE_MTJMPR:
27554 case TYPE_MFJMPR:
27555 case TYPE_DIV:
27556 case TYPE_LOAD_L:
27557 case TYPE_STORE_C:
27558 case TYPE_ISYNC:
27559 case TYPE_SYNC:
27560 return true;
27561 default:
27562 break;
27563 }
27564 break;
27565 case PROCESSOR_POWER6:
27566 type = get_attr_type (insn);
27567
27568 switch (type)
27569 {
27570 case TYPE_EXTS:
27571 case TYPE_CNTLZ:
27572 case TYPE_TRAP:
27573 case TYPE_MUL:
27574 case TYPE_INSERT:
27575 case TYPE_FPCOMPARE:
27576 case TYPE_MFCR:
27577 case TYPE_MTCR:
27578 case TYPE_MFJMPR:
27579 case TYPE_MTJMPR:
27580 case TYPE_ISYNC:
27581 case TYPE_SYNC:
27582 case TYPE_LOAD_L:
27583 case TYPE_STORE_C:
27584 return true;
27585 case TYPE_SHIFT:
27586 if (get_attr_dot (insn) == DOT_NO
27587 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
27588 return true;
27589 else
27590 break;
27591 case TYPE_DIV:
27592 if (get_attr_size (insn) == SIZE_32)
27593 return true;
27594 else
27595 break;
27596 case TYPE_LOAD:
27597 case TYPE_STORE:
27598 case TYPE_FPLOAD:
27599 case TYPE_FPSTORE:
27600 if (get_attr_update (insn) == UPDATE_YES)
27601 return true;
27602 else
27603 break;
27604 default:
27605 break;
27606 }
27607 break;
27608 case PROCESSOR_POWER7:
27609 type = get_attr_type (insn);
27610
27611 switch (type)
27612 {
27613 case TYPE_CR_LOGICAL:
27614 case TYPE_MFCR:
27615 case TYPE_MFCRF:
27616 case TYPE_MTCR:
27617 case TYPE_DIV:
27618 case TYPE_COMPARE:
27619 case TYPE_ISYNC:
27620 case TYPE_LOAD_L:
27621 case TYPE_STORE_C:
27622 case TYPE_MFJMPR:
27623 case TYPE_MTJMPR:
27624 return true;
27625 case TYPE_MUL:
27626 case TYPE_SHIFT:
27627 if (get_attr_dot (insn) == DOT_YES)
27628 return true;
27629 else
27630 break;
27631 case TYPE_LOAD:
27632 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
27633 || get_attr_update (insn) == UPDATE_YES)
27634 return true;
27635 else
27636 break;
27637 case TYPE_STORE:
27638 case TYPE_FPLOAD:
27639 case TYPE_FPSTORE:
27640 if (get_attr_update (insn) == UPDATE_YES)
27641 return true;
27642 else
27643 break;
27644 default:
27645 break;
27646 }
27647 break;
27648 case PROCESSOR_POWER8:
27649 type = get_attr_type (insn);
27650
27651 switch (type)
27652 {
27653 case TYPE_CR_LOGICAL:
27654 case TYPE_DELAYED_CR:
27655 case TYPE_MFCR:
27656 case TYPE_MFCRF:
27657 case TYPE_MTCR:
27658 case TYPE_COMPARE:
27659 case TYPE_SYNC:
27660 case TYPE_ISYNC:
27661 case TYPE_LOAD_L:
27662 case TYPE_STORE_C:
27663 case TYPE_VECSTORE:
27664 case TYPE_MFJMPR:
27665 case TYPE_MTJMPR:
27666 return true;
27667 case TYPE_SHIFT:
27668 case TYPE_MUL:
27669 if (get_attr_dot (insn) == DOT_YES)
27670 return true;
27671 else
27672 break;
27673 case TYPE_LOAD:
27674 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
27675 || get_attr_update (insn) == UPDATE_YES)
27676 return true;
27677 else
27678 break;
27679 case TYPE_STORE:
27680 if (get_attr_update (insn) == UPDATE_YES
27681 && get_attr_indexed (insn) == INDEXED_YES)
27682 return true;
27683 else
27684 break;
27685 default:
27686 break;
27687 }
27688 break;
27689 default:
27690 break;
27691 }
27692
27693 return false;
27694 }
27695
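/* Return true if INSN must be the last insn in the dispatch group to
   which it belongs.  */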
27696 static bool
27697 insn_must_be_last_in_group (rtx insn)
27698 {
27699 enum attr_type type;
27700
27701 if (!insn
27702 || NOTE_P (insn)
27703 || DEBUG_INSN_P (insn)
27704 || GET_CODE (PATTERN (insn)) == USE
27705 || GET_CODE (PATTERN (insn)) == CLOBBER)
27706 return false;
27707
27708 switch (rs6000_cpu)
{
27709 case PROCESSOR_POWER4:
27710 case PROCESSOR_POWER5:
27711 if (is_microcoded_insn (insn))
27712 return true;
27713
27714 if (is_branch_slot_insn (insn))
27715 return true;
27716
27717 break;
27718 case PROCESSOR_POWER6:
27719 type = get_attr_type (insn);
27720
27721 switch (type)
27722 {
27723 case TYPE_EXTS:
27724 case TYPE_CNTLZ:
27725 case TYPE_TRAP:
27726 case TYPE_MUL:
27727 case TYPE_FPCOMPARE:
27728 case TYPE_MFCR:
27729 case TYPE_MTCR:
27730 case TYPE_MFJMPR:
27731 case TYPE_MTJMPR:
27732 case TYPE_ISYNC:
27733 case TYPE_SYNC:
27734 case TYPE_LOAD_L:
27735 case TYPE_STORE_C:
27736 return true;
27737 case TYPE_SHIFT:
27738 if (get_attr_dot (insn) == DOT_NO
27739 || get_attr_var_shift (insn) == VAR_SHIFT_NO)
27740 return true;
27741 else
27742 break;
27743 case TYPE_DIV:
27744 if (get_attr_size (insn) == SIZE_32)
27745 return true;
27746 else
27747 break;
27748 default:
27749 break;
27750 }
27751 break;
27752 case PROCESSOR_POWER7:
27753 type = get_attr_type (insn);
27754
27755 switch (type)
27756 {
27757 case TYPE_ISYNC:
27758 case TYPE_SYNC:
27759 case TYPE_LOAD_L:
27760 case TYPE_STORE_C:
27761 return true;
27762 case TYPE_LOAD:
27763 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
27764 && get_attr_update (insn) == UPDATE_YES)
27765 return true;
27766 else
27767 break;
27768 case TYPE_STORE:
27769 if (get_attr_update (insn) == UPDATE_YES
27770 && get_attr_indexed (insn) == INDEXED_YES)
27771 return true;
27772 else
27773 break;
27774 default:
27775 break;
27776 }
27777 break;
27778 case PROCESSOR_POWER8:
27779 type = get_attr_type (insn);
27780
27781 switch (type)
27782 {
27783 case TYPE_MFCR:
27784 case TYPE_MTCR:
27785 case TYPE_ISYNC:
27786 case TYPE_SYNC:
27787 case TYPE_LOAD_L:
27788 case TYPE_STORE_C:
27789 return true;
27790 case TYPE_LOAD:
27791 if (get_attr_sign_extend (insn) == SIGN_EXTEND_YES
27792 && get_attr_update (insn) == UPDATE_YES)
27793 return true;
27794 else
27795 break;
27796 case TYPE_STORE:
27797 if (get_attr_update (insn) == UPDATE_YES
27798 && get_attr_indexed (insn) == INDEXED_YES)
27799 return true;
27800 else
27801 break;
27802 default:
27803 break;
27804 }
27805 break;
27806 default:
27807 break;
27808 }
27809
27810 return false;
27811 }
27812
27813 /* Return true if it is recommended to keep NEXT_INSN "far" (in a separate
27814 dispatch group) from the insns in GROUP_INSNS. Return false otherwise. */
27815
27816 static bool
27817 is_costly_group (rtx *group_insns, rtx next_insn)
27818 {
27819 int i;
27820 int issue_rate = rs6000_issue_rate ();
27821
27822 for (i = 0; i < issue_rate; i++)
27823 {
27824 sd_iterator_def sd_it;
27825 dep_t dep;
27826 rtx insn = group_insns[i];
27827
27828 if (!insn)
27829 continue;
27830
27831 FOR_EACH_DEP (insn, SD_LIST_RES_FORW, sd_it, dep)
27832 {
27833 rtx next = DEP_CON (dep);
27834
27835 if (next == next_insn
27836 && rs6000_is_costly_dependence (dep, dep_cost (dep), 0))
27837 return true;
27838 }
27839 }
27840
27841 return false;
27842 }
27843
27844 /* Helper for the function redefine_groups.
27845 Check if it is too costly to schedule NEXT_INSN together with GROUP_INSNS
27846 in the same dispatch group. If so, insert nops before NEXT_INSN, in order
27847 to keep it "far" (in a separate group) from GROUP_INSNS, according to
27848 one of the following schemes, depending on the value of the flag
27849 -minsert-sched-nops=X:
27850 (1) X == sched_finish_regroup_exact: insert exactly as many nops as needed
27851 in order to force NEXT_INSN into a separate group.
27852 (2) X < sched_finish_regroup_exact: insert exactly X nops.
27853 GROUP_END, CAN_ISSUE_MORE and GROUP_COUNT record the state after nop
27854 insertion (has a group just ended, how many vacant issue slots remain in the
27855 last group, and how many dispatch groups were encountered so far). */
27856
27857 static int
27858 force_new_group (int sched_verbose, FILE *dump, rtx *group_insns,
27859 rtx next_insn, bool *group_end, int can_issue_more,
27860 int *group_count)
27861 {
27862 rtx nop;
27863 bool force;
27864 int issue_rate = rs6000_issue_rate ();
27865 bool end = *group_end;
27866 int i;
27867
27868 if (next_insn == NULL_RTX || DEBUG_INSN_P (next_insn))
27869 return can_issue_more;
27870
27871 if (rs6000_sched_insert_nops > sched_finish_regroup_exact)
27872 return can_issue_more;
27873
27874 force = is_costly_group (group_insns, next_insn);
27875 if (!force)
27876 return can_issue_more;
27877
27878 if (sched_verbose > 6)
27879 fprintf (dump,"force: group count = %d, can_issue_more = %d\n",
27880 *group_count ,can_issue_more);
27881
27882 if (rs6000_sched_insert_nops == sched_finish_regroup_exact)
27883 {
27884 if (*group_end)
27885 can_issue_more = 0;
27886
27887 /* Since only a branch can be issued in the last issue_slot, it is
27888 sufficient to insert 'can_issue_more - 1' nops if next_insn is not
27889 a branch. If next_insn is a branch, we insert 'can_issue_more' nops;
27890 in this case the last nop will start a new group and the branch
27891 will be forced to the new group. */
27892 if (can_issue_more && !is_branch_slot_insn (next_insn))
27893 can_issue_more--;
27894
27895 /* Do we have a special group ending nop? */
27896 if (rs6000_cpu_attr == CPU_POWER6 || rs6000_cpu_attr == CPU_POWER7
27897 || rs6000_cpu_attr == CPU_POWER8)
27898 {
27899 nop = gen_group_ending_nop ();
27900 emit_insn_before (nop, next_insn);
27901 can_issue_more = 0;
27902 }
27903 else
27904 while (can_issue_more > 0)
27905 {
27906 nop = gen_nop ();
27907 emit_insn_before (nop, next_insn);
27908 can_issue_more--;
27909 }
27910
27911 *group_end = true;
27912 return 0;
27913 }
27914
27915 if (rs6000_sched_insert_nops < sched_finish_regroup_exact)
27916 {
27917 int n_nops = rs6000_sched_insert_nops;
27918
27919 /* Nops can't be issued from the branch slot, so the effective
27920 issue_rate for nops is 'issue_rate - 1'. */
27921 if (can_issue_more == 0)
27922 can_issue_more = issue_rate;
27923 can_issue_more--;
27924 if (can_issue_more == 0)
27925 {
27926 can_issue_more = issue_rate - 1;
27927 (*group_count)++;
27928 end = true;
27929 for (i = 0; i < issue_rate; i++)
27930 {
27931 group_insns[i] = 0;
27932 }
27933 }
27934
27935 while (n_nops > 0)
27936 {
27937 nop = gen_nop ();
27938 emit_insn_before (nop, next_insn);
27939 if (can_issue_more == issue_rate - 1) /* new group begins */
27940 end = false;
27941 can_issue_more--;
27942 if (can_issue_more == 0)
27943 {
27944 can_issue_more = issue_rate - 1;
27945 (*group_count)++;
27946 end = true;
27947 for (i = 0; i < issue_rate; i++)
27948 {
27949 group_insns[i] = 0;
27950 }
27951 }
27952 n_nops--;
27953 }
27954
27955 /* Scale back relative to 'issue_rate' (instead of 'issue_rate - 1'). */
27956 can_issue_more++;
27957
27958 /* Is next_insn going to start a new group? */
27959 *group_end
27960 = (end
27961 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
27962 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
27963 || (can_issue_more < issue_rate &&
27964 insn_terminates_group_p (next_insn, previous_group)));
27965 if (*group_end && end)
27966 (*group_count)--;
27967
27968 if (sched_verbose > 6)
27969 fprintf (dump, "done force: group count = %d, can_issue_more = %d\n",
27970 *group_count, can_issue_more);
27971 return can_issue_more;
27972 }
27973
27974 return can_issue_more;
27975 }
27976
27977 /* This function tries to synch the dispatch groups that the compiler "sees"
27978 with the dispatch groups that the processor dispatcher is expected to
27979 form in practice. It tries to achieve this synchronization by forcing the
27980 estimated processor grouping on the compiler (as opposed to the function
27981 'pad_groups' which tries to force the scheduler's grouping on the processor).
27982
27983 The function scans the insn sequence between PREV_HEAD_INSN and TAIL and
27984 examines the (estimated) dispatch groups that will be formed by the processor
27985 dispatcher. It marks these group boundaries to reflect the estimated
27986 processor grouping, overriding the grouping that the scheduler had marked.
27987 Depending on the value of the flag '-minsert-sched-nops' this function can
27988 force certain insns into separate groups or force a certain distance between
27989 them by inserting nops, for example, if there exists a "costly dependence"
27990 between the insns.
27991
27992 The function estimates the group boundaries that the processor will form as
27993 follows: It keeps track of how many vacant issue slots are available after
27994 each insn. A subsequent insn will start a new group if one of the following
27995 4 cases applies:
27996 - no more vacant issue slots remain in the current dispatch group.
27997 - only the last issue slot, which is the branch slot, is vacant, but the next
27998 insn is not a branch.
27999 - only the last 2 or fewer issue slots, including the branch slot, are vacant,
28000 which means that a cracked insn (which occupies two issue slots) can't be
28001 issued in this group.
28002 - fewer than 'issue_rate' slots are vacant, and the next insn always needs to
28003 start a new group. */
28004
28005 static int
28006 redefine_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
28007 rtx_insn *tail)
28008 {
28009 rtx_insn *insn, *next_insn;
28010 int issue_rate;
28011 int can_issue_more;
28012 int slot, i;
28013 bool group_end;
28014 int group_count = 0;
28015 rtx *group_insns;
28016
28017 /* Initialize. */
28018 issue_rate = rs6000_issue_rate ();
28019 group_insns = XALLOCAVEC (rtx, issue_rate);
28020 for (i = 0; i < issue_rate; i++)
28021 {
28022 group_insns[i] = 0;
28023 }
28024 can_issue_more = issue_rate;
28025 slot = 0;
28026 insn = get_next_active_insn (prev_head_insn, tail);
28027 group_end = false;
28028
28029 while (insn != NULL_RTX)
28030 {
28031 slot = (issue_rate - can_issue_more);
28032 group_insns[slot] = insn;
28033 can_issue_more =
28034 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
28035 if (insn_terminates_group_p (insn, current_group))
28036 can_issue_more = 0;
28037
28038 next_insn = get_next_active_insn (insn, tail);
28039 if (next_insn == NULL_RTX)
28040 return group_count + 1;
28041
28042 /* Is next_insn going to start a new group? */
28043 group_end
28044 = (can_issue_more == 0
28045 || (can_issue_more == 1 && !is_branch_slot_insn (next_insn))
28046 || (can_issue_more <= 2 && is_cracked_insn (next_insn))
28047 || (can_issue_more < issue_rate &&
28048 insn_terminates_group_p (next_insn, previous_group)));
28049
28050 can_issue_more = force_new_group (sched_verbose, dump, group_insns,
28051 next_insn, &group_end, can_issue_more,
28052 &group_count);
28053
28054 if (group_end)
28055 {
28056 group_count++;
28057 can_issue_more = 0;
28058 for (i = 0; i < issue_rate; i++)
28059 {
28060 group_insns[i] = 0;
28061 }
28062 }
28063
28064 if (GET_MODE (next_insn) == TImode && can_issue_more)
28065 PUT_MODE (next_insn, VOIDmode);
28066 else if (!can_issue_more && GET_MODE (next_insn) != TImode)
28067 PUT_MODE (next_insn, TImode);
28068
28069 insn = next_insn;
28070 if (can_issue_more == 0)
28071 can_issue_more = issue_rate;
28072 } /* while */
28073
28074 return group_count;
28075 }
28076
28077 /* Scan the insn sequence between PREV_HEAD_INSN and TAIL and examine the
28078 dispatch group boundaries that the scheduler had marked. Pad with nops
28079 any dispatch groups which have vacant issue slots, in order to force the
28080 scheduler's grouping on the processor dispatcher. The function
28081 returns the number of dispatch groups found. */
28082
28083 static int
28084 pad_groups (FILE *dump, int sched_verbose, rtx_insn *prev_head_insn,
28085 rtx_insn *tail)
28086 {
28087 rtx_insn *insn, *next_insn;
28088 rtx nop;
28089 int issue_rate;
28090 int can_issue_more;
28091 int group_end;
28092 int group_count = 0;
28093
28094 /* Initialize issue_rate. */
28095 issue_rate = rs6000_issue_rate ();
28096 can_issue_more = issue_rate;
28097
28098 insn = get_next_active_insn (prev_head_insn, tail);
28099 next_insn = get_next_active_insn (insn, tail);
28100
28101 while (insn != NULL_RTX)
28102 {
28103 can_issue_more =
28104 rs6000_variable_issue (dump, sched_verbose, insn, can_issue_more);
28105
28106 group_end = (next_insn == NULL_RTX || GET_MODE (next_insn) == TImode);
28107
28108 if (next_insn == NULL_RTX)
28109 break;
28110
28111 if (group_end)
28112 {
28113 /* If the scheduler had marked group termination at this location
28114 (between insn and next_insn), and neither insn nor next_insn will
28115 force group termination, pad the group with nops to force group
28116 termination. */
28117 if (can_issue_more
28118 && (rs6000_sched_insert_nops == sched_finish_pad_groups)
28119 && !insn_terminates_group_p (insn, current_group)
28120 && !insn_terminates_group_p (next_insn, previous_group))
28121 {
28122 if (!is_branch_slot_insn (next_insn))
28123 can_issue_more--;
28124
28125 while (can_issue_more)
28126 {
28127 nop = gen_nop ();
28128 emit_insn_before (nop, next_insn);
28129 can_issue_more--;
28130 }
28131 }
28132
28133 can_issue_more = issue_rate;
28134 group_count++;
28135 }
28136
28137 insn = next_insn;
28138 next_insn = get_next_active_insn (insn, tail);
28139 }
28140
28141 return group_count;
28142 }
28143
28144 /* We're beginning a new block. Initialize data structures as necessary. */
28145
28146 static void
28147 rs6000_sched_init (FILE *dump ATTRIBUTE_UNUSED,
28148 int sched_verbose ATTRIBUTE_UNUSED,
28149 int max_ready ATTRIBUTE_UNUSED)
28150 {
28151 last_scheduled_insn = NULL_RTX;
28152 load_store_pendulum = 0;
28153 }
28154
28155 /* The following function is called at the end of scheduling BB.
28156 After reload, it inserts nops to enforce insn group bundling.  */
28157
28158 static void
28159 rs6000_sched_finish (FILE *dump, int sched_verbose)
28160 {
28161 int n_groups;
28162
28163 if (sched_verbose)
28164 fprintf (dump, "=== Finishing schedule.\n");
28165
28166 if (reload_completed && rs6000_sched_groups)
28167 {
28168 /* Do not run the sched_finish hook when selective scheduling is enabled.  */
28169 if (sel_sched_p ())
28170 return;
28171
28172 if (rs6000_sched_insert_nops == sched_finish_none)
28173 return;
28174
28175 if (rs6000_sched_insert_nops == sched_finish_pad_groups)
28176 n_groups = pad_groups (dump, sched_verbose,
28177 current_sched_info->prev_head,
28178 current_sched_info->next_tail);
28179 else
28180 n_groups = redefine_groups (dump, sched_verbose,
28181 current_sched_info->prev_head,
28182 current_sched_info->next_tail);
28183
28184 if (sched_verbose >= 6)
28185 {
28186 fprintf (dump, "ngroups = %d\n", n_groups);
28187 print_rtl (dump, current_sched_info->prev_head);
28188 fprintf (dump, "Done finish_sched\n");
28189 }
28190 }
28191 }
28192
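/* Scheduling state saved and restored through the context hooks below;
   it mirrors the corresponding global variables.  */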
28193 struct _rs6000_sched_context
28194 {
28195 short cached_can_issue_more;
28196 rtx last_scheduled_insn;
28197 int load_store_pendulum;
28198 };
28199
28200 typedef struct _rs6000_sched_context rs6000_sched_context_def;
28201 typedef rs6000_sched_context_def *rs6000_sched_context_t;
28202
28203 /* Allocate storage for a new scheduling context.  */
28204 static void *
28205 rs6000_alloc_sched_context (void)
28206 {
28207 return xmalloc (sizeof (rs6000_sched_context_def));
28208 }
28209
28210 /* If CLEAN_P is true, initialize _SC with clean data;
28211 otherwise, initialize it from the global context.  */
28212 static void
28213 rs6000_init_sched_context (void *_sc, bool clean_p)
28214 {
28215 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
28216
28217 if (clean_p)
28218 {
28219 sc->cached_can_issue_more = 0;
28220 sc->last_scheduled_insn = NULL_RTX;
28221 sc->load_store_pendulum = 0;
28222 }
28223 else
28224 {
28225 sc->cached_can_issue_more = cached_can_issue_more;
28226 sc->last_scheduled_insn = last_scheduled_insn;
28227 sc->load_store_pendulum = load_store_pendulum;
28228 }
28229 }
28230
28231 /* Set the global scheduling context to the one pointed to by _SC.  */
28232 static void
28233 rs6000_set_sched_context (void *_sc)
28234 {
28235 rs6000_sched_context_t sc = (rs6000_sched_context_t) _sc;
28236
28237 gcc_assert (sc != NULL);
28238
28239 cached_can_issue_more = sc->cached_can_issue_more;
28240 last_scheduled_insn = sc->last_scheduled_insn;
28241 load_store_pendulum = sc->load_store_pendulum;
28242 }
28243
28244 /* Free _SC. */
28245 static void
28246 rs6000_free_sched_context (void *_sc)
28247 {
28248 gcc_assert (_sc != NULL);
28249
28250 free (_sc);
28251 }
28252
28253 \f
28254 /* Length in units of the trampoline for entering a nested function. */
28255
28256 int
28257 rs6000_trampoline_size (void)
28258 {
28259 int ret = 0;
28260
28261 switch (DEFAULT_ABI)
28262 {
28263 default:
28264 gcc_unreachable ();
28265
28266 case ABI_AIX:
28267 ret = (TARGET_32BIT) ? 12 : 24;
28268 break;
28269
28270 case ABI_ELFv2:
28271 gcc_assert (!TARGET_32BIT);
28272 ret = 32;
28273 break;
28274
28275 case ABI_DARWIN:
28276 case ABI_V4:
28277 ret = (TARGET_32BIT) ? 40 : 48;
28278 break;
28279 }
28280
28281 return ret;
28282 }
28283
28284 /* Emit RTL insns to initialize the variable parts of a trampoline.
28285 FNADDR is an RTX for the address of the function's pure code.
28286 CXT is an RTX for the static chain value for the function. */
28287
28288 static void
28289 rs6000_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
28290 {
28291 int regsize = (TARGET_32BIT) ? 4 : 8;
28292 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
28293 rtx ctx_reg = force_reg (Pmode, cxt);
28294 rtx addr = force_reg (Pmode, XEXP (m_tramp, 0));
28295
28296 switch (DEFAULT_ABI)
28297 {
28298 default:
28299 gcc_unreachable ();
28300
28301 /* Under AIX, just build the 3-word function descriptor.  */
28302 case ABI_AIX:
28303 {
28304 rtx fnmem, fn_reg, toc_reg;
28305
28306 if (!TARGET_POINTERS_TO_NESTED_FUNCTIONS)
28307 error ("You cannot take the address of a nested function if you use "
28308 "the -mno-pointers-to-nested-functions option.");
28309
28310 fnmem = gen_const_mem (Pmode, force_reg (Pmode, fnaddr));
28311 fn_reg = gen_reg_rtx (Pmode);
28312 toc_reg = gen_reg_rtx (Pmode);
28313
28314 /* Macro to shorten the code expansions below. */
28315 # define MEM_PLUS(MEM, OFFSET) adjust_address (MEM, Pmode, OFFSET)
28316
28317 m_tramp = replace_equiv_address (m_tramp, addr);
28318
28319 emit_move_insn (fn_reg, MEM_PLUS (fnmem, 0));
28320 emit_move_insn (toc_reg, MEM_PLUS (fnmem, regsize));
28321 emit_move_insn (MEM_PLUS (m_tramp, 0), fn_reg);
28322 emit_move_insn (MEM_PLUS (m_tramp, regsize), toc_reg);
28323 emit_move_insn (MEM_PLUS (m_tramp, 2*regsize), ctx_reg);
28324
28325 # undef MEM_PLUS
28326 }
28327 break;
28328
28329 /* Under V.4/eabi/darwin, __trampoline_setup does the real work. */
28330 case ABI_ELFv2:
28331 case ABI_DARWIN:
28332 case ABI_V4:
28333 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__trampoline_setup"),
28334 LCT_NORMAL, VOIDmode, 4,
28335 addr, Pmode,
28336 GEN_INT (rs6000_trampoline_size ()), SImode,
28337 fnaddr, Pmode,
28338 ctx_reg, Pmode);
28339 break;
28340 }
28341 }
28342
28343 \f
28344 /* Returns TRUE iff the target attribute indicated by ATTR_ID takes a plain
28345 identifier as an argument, so the front end shouldn't look it up. */
28346
28347 static bool
28348 rs6000_attribute_takes_identifier_p (const_tree attr_id)
28349 {
28350 return is_attribute_p ("altivec", attr_id);
28351 }
28352
28353 /* Handle the "altivec" attribute. The attribute may have
28354 arguments as follows:
28355
28356 __attribute__((altivec(vector__)))
28357 __attribute__((altivec(pixel__))) (always followed by 'unsigned short')
28358 __attribute__((altivec(bool__))) (always followed by 'unsigned')
28359
28360 and may appear more than once (e.g., 'vector bool char') in a
28361 given declaration. */
28362
28363 static tree
28364 rs6000_handle_altivec_attribute (tree *node,
28365 tree name ATTRIBUTE_UNUSED,
28366 tree args,
28367 int flags ATTRIBUTE_UNUSED,
28368 bool *no_add_attrs)
28369 {
28370 tree type = *node, result = NULL_TREE;
28371 enum machine_mode mode;
28372 int unsigned_p;
28373 char altivec_type
28374 = ((args && TREE_CODE (args) == TREE_LIST && TREE_VALUE (args)
28375 && TREE_CODE (TREE_VALUE (args)) == IDENTIFIER_NODE)
28376 ? *IDENTIFIER_POINTER (TREE_VALUE (args))
28377 : '?');
28378
28379 while (POINTER_TYPE_P (type)
28380 || TREE_CODE (type) == FUNCTION_TYPE
28381 || TREE_CODE (type) == METHOD_TYPE
28382 || TREE_CODE (type) == ARRAY_TYPE)
28383 type = TREE_TYPE (type);
28384
28385 mode = TYPE_MODE (type);
28386
28387 /* Check for invalid AltiVec type qualifiers. */
28388 if (type == long_double_type_node)
28389 error ("use of %<long double%> in AltiVec types is invalid");
28390 else if (type == boolean_type_node)
28391 error ("use of boolean types in AltiVec types is invalid");
28392 else if (TREE_CODE (type) == COMPLEX_TYPE)
28393 error ("use of %<complex%> in AltiVec types is invalid");
28394 else if (DECIMAL_FLOAT_MODE_P (mode))
28395 error ("use of decimal floating point types in AltiVec types is invalid");
28396 else if (!TARGET_VSX)
28397 {
28398 if (type == long_unsigned_type_node || type == long_integer_type_node)
28399 {
28400 if (TARGET_64BIT)
28401 error ("use of %<long%> in AltiVec types is invalid for "
28402 "64-bit code without -mvsx");
28403 else if (rs6000_warn_altivec_long)
28404 warning (0, "use of %<long%> in AltiVec types is deprecated; "
28405 "use %<int%>");
28406 }
28407 else if (type == long_long_unsigned_type_node
28408 || type == long_long_integer_type_node)
28409 error ("use of %<long long%> in AltiVec types is invalid without "
28410 "-mvsx");
28411 else if (type == double_type_node)
28412 error ("use of %<double%> in AltiVec types is invalid without -mvsx");
28413 }
28414
28415 switch (altivec_type)
28416 {
28417 case 'v':
28418 unsigned_p = TYPE_UNSIGNED (type);
28419 switch (mode)
28420 {
28421 case TImode:
28422 result = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
28423 break;
28424 case DImode:
28425 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
28426 break;
28427 case SImode:
28428 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
28429 break;
28430 case HImode:
28431 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
28432 break;
28433 case QImode:
28434 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
28435 break;
28436 case SFmode: result = V4SF_type_node; break;
28437 case DFmode: result = V2DF_type_node; break;
28438 /* If the user says 'vector int bool', we may be handed the 'bool'
28439 attribute _before_ the 'vector' attribute, and so select the
28440 proper type in the 'b' case below. */
28441 case V4SImode: case V8HImode: case V16QImode: case V4SFmode:
28442 case V2DImode: case V2DFmode:
28443 result = type;
28444 default: break;
28445 }
28446 break;
28447 case 'b':
28448 switch (mode)
28449 {
28450 case DImode: case V2DImode: result = bool_V2DI_type_node; break;
28451 case SImode: case V4SImode: result = bool_V4SI_type_node; break;
28452 case HImode: case V8HImode: result = bool_V8HI_type_node; break;
28453 case QImode: case V16QImode: result = bool_V16QI_type_node;
28454 default: break;
28455 }
28456 break;
28457 case 'p':
28458 switch (mode)
28459 {
28460 case V8HImode: result = pixel_V8HI_type_node;
28461 default: break;
28462 }
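/* Fall through to the outer default.  */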
28463 default: break;
28464 }
28465
28466 /* Propagate qualifiers attached to the element type
28467 onto the vector type. */
28468 if (result && result != type && TYPE_QUALS (type))
28469 result = build_qualified_type (result, TYPE_QUALS (type));
28470
28471 *no_add_attrs = true; /* No need to hang on to the attribute. */
28472
28473 if (result)
28474 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
28475
28476 return NULL_TREE;
28477 }
28478
28479 /* AltiVec defines four built-in scalar types that serve as vector
28480 elements; we must teach the compiler how to mangle them. */
28481
28482 static const char *
28483 rs6000_mangle_type (const_tree type)
28484 {
28485 type = TYPE_MAIN_VARIANT (type);
28486
28487 if (TREE_CODE (type) != VOID_TYPE && TREE_CODE (type) != BOOLEAN_TYPE
28488 && TREE_CODE (type) != INTEGER_TYPE && TREE_CODE (type) != REAL_TYPE)
28489 return NULL;
28490
28491 if (type == bool_char_type_node) return "U6__boolc";
28492 if (type == bool_short_type_node) return "U6__bools";
28493 if (type == pixel_type_node) return "u7__pixel";
28494 if (type == bool_int_type_node) return "U6__booli";
28495 if (type == bool_long_type_node) return "U6__booll";
28496
28497 /* Mangle IBM extended float long double as `g' (__float128) on
28498 powerpc*-linux where long-double-64 previously was the default. */
28499 if (TYPE_MAIN_VARIANT (type) == long_double_type_node
28500 && TARGET_ELF
28501 && TARGET_LONG_DOUBLE_128
28502 && !TARGET_IEEEQUAD)
28503 return "g";
28504
28505 /* For all other types, use normal C++ mangling. */
28506 return NULL;
28507 }
28508
28509 /* Handle a "longcall" or "shortcall" attribute; arguments as in
28510 struct attribute_spec.handler. */
28511
28512 static tree
28513 rs6000_handle_longcall_attribute (tree *node, tree name,
28514 tree args ATTRIBUTE_UNUSED,
28515 int flags ATTRIBUTE_UNUSED,
28516 bool *no_add_attrs)
28517 {
28518 if (TREE_CODE (*node) != FUNCTION_TYPE
28519 && TREE_CODE (*node) != FIELD_DECL
28520 && TREE_CODE (*node) != TYPE_DECL)
28521 {
28522 warning (OPT_Wattributes, "%qE attribute only applies to functions",
28523 name);
28524 *no_add_attrs = true;
28525 }
28526
28527 return NULL_TREE;
28528 }
28529
28530 /* Set longcall attributes on all functions declared when
28531 rs6000_default_long_calls is true. */
28532 static void
28533 rs6000_set_default_type_attributes (tree type)
28534 {
28535 if (rs6000_default_long_calls
28536 && (TREE_CODE (type) == FUNCTION_TYPE
28537 || TREE_CODE (type) == METHOD_TYPE))
28538 TYPE_ATTRIBUTES (type) = tree_cons (get_identifier ("longcall"),
28539 NULL_TREE,
28540 TYPE_ATTRIBUTES (type));
28541
28542 #if TARGET_MACHO
28543 darwin_set_default_type_attributes (type);
28544 #endif
28545 }
28546
28547 /* Return a reference suitable for calling a function with the
28548 longcall attribute. */
28549
28550 rtx
28551 rs6000_longcall_ref (rtx call_ref)
28552 {
28553 const char *call_name;
28554 tree node;
28555
28556 if (GET_CODE (call_ref) != SYMBOL_REF)
28557 return call_ref;
28558
28559 /* System V adds '.' to the internal name, so skip over any leading '.'s.  */
28560 call_name = XSTR (call_ref, 0);
28561 if (*call_name == '.')
28562 {
28563 while (*call_name == '.')
28564 call_name++;
28565
28566 node = get_identifier (call_name);
28567 call_ref = gen_rtx_SYMBOL_REF (VOIDmode, IDENTIFIER_POINTER (node));
28568 }
28569
28570 return force_reg (Pmode, call_ref);
28571 }
28572 \f
28573 #ifndef TARGET_USE_MS_BITFIELD_LAYOUT
28574 #define TARGET_USE_MS_BITFIELD_LAYOUT 0
28575 #endif
28576
28577 /* Handle a "ms_struct" or "gcc_struct" attribute; arguments as in
28578 struct attribute_spec.handler. */
28579 static tree
28580 rs6000_handle_struct_attribute (tree *node, tree name,
28581 tree args ATTRIBUTE_UNUSED,
28582 int flags ATTRIBUTE_UNUSED, bool *no_add_attrs)
28583 {
28584 tree *type = NULL;
28585 if (DECL_P (*node))
28586 {
28587 if (TREE_CODE (*node) == TYPE_DECL)
28588 type = &TREE_TYPE (*node);
28589 }
28590 else
28591 type = node;
28592
28593 if (!(type && (TREE_CODE (*type) == RECORD_TYPE
28594 || TREE_CODE (*type) == UNION_TYPE)))
28595 {
28596 warning (OPT_Wattributes, "%qE attribute ignored", name);
28597 *no_add_attrs = true;
28598 }
28599
28600 else if ((is_attribute_p ("ms_struct", name)
28601 && lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (*type)))
28602 || ((is_attribute_p ("gcc_struct", name)
28603 && lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (*type)))))
28604 {
28605 warning (OPT_Wattributes, "%qE incompatible attribute ignored",
28606 name);
28607 *no_add_attrs = true;
28608 }
28609
28610 return NULL_TREE;
28611 }
28612
28613 static bool
28614 rs6000_ms_bitfield_layout_p (const_tree record_type)
28615 {
28616 return (TARGET_USE_MS_BITFIELD_LAYOUT &&
28617 !lookup_attribute ("gcc_struct", TYPE_ATTRIBUTES (record_type)))
28618 || lookup_attribute ("ms_struct", TYPE_ATTRIBUTES (record_type));
28619 }
28620 \f
28621 #ifdef USING_ELFOS_H
28622
28623 /* A get_unnamed_section callback, used for switching to toc_section. */
28624
28625 static void
28626 rs6000_elf_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
28627 {
28628 if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28629 && TARGET_MINIMAL_TOC
28630 && !TARGET_RELOCATABLE)
28631 {
28632 if (!toc_initialized)
28633 {
28634 toc_initialized = 1;
28635 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
28636 (*targetm.asm_out.internal_label) (asm_out_file, "LCTOC", 0);
28637 fprintf (asm_out_file, "\t.tc ");
28638 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1[TC],");
28639 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
28640 fprintf (asm_out_file, "\n");
28641
28642 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
28643 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
28644 fprintf (asm_out_file, " = .+32768\n");
28645 }
28646 else
28647 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
28648 }
28649 else if ((DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
28650 && !TARGET_RELOCATABLE)
28651 fprintf (asm_out_file, "%s\n", TOC_SECTION_ASM_OP);
28652 else
28653 {
28654 fprintf (asm_out_file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
28655 if (!toc_initialized)
28656 {
28657 ASM_OUTPUT_INTERNAL_LABEL_PREFIX (asm_out_file, "LCTOC1");
28658 fprintf (asm_out_file, " = .+32768\n");
28659 toc_initialized = 1;
28660 }
28661 }
28662 }
28663
28664 /* Implement TARGET_ASM_INIT_SECTIONS. */
28665
28666 static void
28667 rs6000_elf_asm_init_sections (void)
28668 {
28669 toc_section
28670 = get_unnamed_section (0, rs6000_elf_output_toc_section_asm_op, NULL);
28671
28672 sdata2_section
28673 = get_unnamed_section (SECTION_WRITE, output_section_asm_op,
28674 SDATA2_SECTION_ASM_OP);
28675 }
28676
28677 /* Implement TARGET_SELECT_RTX_SECTION. */
28678
28679 static section *
28680 rs6000_elf_select_rtx_section (enum machine_mode mode, rtx x,
28681 unsigned HOST_WIDE_INT align)
28682 {
28683 if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
28684 return toc_section;
28685 else
28686 return default_elf_select_rtx_section (mode, x, align);
28687 }
28688 \f
28689 /* For a SYMBOL_REF, set generic flags and then perform some
28690 target-specific processing.
28691
28692 When the AIX ABI is requested on a non-AIX system, replace the
28693 function name with the real name (with a leading .) rather than the
28694 function descriptor name. This saves a lot of overriding code to
28695 read the prefixes. */
28696
28697 static void rs6000_elf_encode_section_info (tree, rtx, int) ATTRIBUTE_UNUSED;
28698 static void
28699 rs6000_elf_encode_section_info (tree decl, rtx rtl, int first)
28700 {
28701 default_encode_section_info (decl, rtl, first);
28702
28703 if (first
28704 && TREE_CODE (decl) == FUNCTION_DECL
28705 && !TARGET_AIX
28706 && DEFAULT_ABI == ABI_AIX)
28707 {
28708 rtx sym_ref = XEXP (rtl, 0);
28709 size_t len = strlen (XSTR (sym_ref, 0));
28710 char *str = XALLOCAVEC (char, len + 2);
28711 str[0] = '.';
28712 memcpy (str + 1, XSTR (sym_ref, 0), len + 1);
28713 XSTR (sym_ref, 0) = ggc_alloc_string (str, len + 1);
28714 }
28715 }
28716
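/* Return true if SECTION equals TEMPL, or starts with TEMPL followed
   by a '.'.  */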
28717 static inline bool
28718 compare_section_name (const char *section, const char *templ)
28719 {
28720 int len;
28721
28722 len = strlen (templ);
28723 return (strncmp (section, templ, len) == 0
28724 && (section[len] == 0 || section[len] == '.'));
28725 }
28726
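/* Return true if DECL should be placed in the small data area.  */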
28727 bool
28728 rs6000_elf_in_small_data_p (const_tree decl)
28729 {
28730 if (rs6000_sdata == SDATA_NONE)
28731 return false;
28732
28733 /* We want to merge strings, so we never consider them small data. */
28734 if (TREE_CODE (decl) == STRING_CST)
28735 return false;
28736
28737 /* Functions are never in the small data area. */
28738 if (TREE_CODE (decl) == FUNCTION_DECL)
28739 return false;
28740
28741 if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl))
28742 {
28743 const char *section = DECL_SECTION_NAME (decl);
28744 if (compare_section_name (section, ".sdata")
28745 || compare_section_name (section, ".sdata2")
28746 || compare_section_name (section, ".gnu.linkonce.s")
28747 || compare_section_name (section, ".sbss")
28748 || compare_section_name (section, ".sbss2")
28749 || compare_section_name (section, ".gnu.linkonce.sb")
28750 || strcmp (section, ".PPC.EMB.sdata0") == 0
28751 || strcmp (section, ".PPC.EMB.sbss0") == 0)
28752 return true;
28753 }
28754 else
28755 {
28756 HOST_WIDE_INT size = int_size_in_bytes (TREE_TYPE (decl));
28757
28758 if (size > 0
28759 && size <= g_switch_value
28760 /* If it's not public, and we're not going to reference it there,
28761 there's no need to put it in the small data section. */
28762 && (rs6000_sdata != SDATA_DATA || TREE_PUBLIC (decl)))
28763 return true;
28764 }
28765
28766 return false;
28767 }
28768
28769 #endif /* USING_ELFOS_H */
28770 \f
28771 /* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P. */
28772
28773 static bool
28774 rs6000_use_blocks_for_constant_p (enum machine_mode mode, const_rtx x)
28775 {
28776 return !ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode);
28777 }
28778
28779 /* Do not place thread-local symbols refs in the object blocks. */
28780
28781 static bool
28782 rs6000_use_blocks_for_decl_p (const_tree decl)
28783 {
28784 return !DECL_THREAD_LOCAL_P (decl);
28785 }
28786 \f
28787 /* Return a REG that occurs in ADDR with coefficient 1.
28788 ADDR can be effectively incremented by incrementing REG.
28789
28790 r0 is special and we must not select it as an address
28791 register by this routine since our caller will try to
28792 increment the returned register via an "la" instruction. */
28793
28794 rtx
28795 find_addr_reg (rtx addr)
28796 {
28797 while (GET_CODE (addr) == PLUS)
28798 {
28799 if (GET_CODE (XEXP (addr, 0)) == REG
28800 && REGNO (XEXP (addr, 0)) != 0)
28801 addr = XEXP (addr, 0);
28802 else if (GET_CODE (XEXP (addr, 1)) == REG
28803 && REGNO (XEXP (addr, 1)) != 0)
28804 addr = XEXP (addr, 1);
28805 else if (CONSTANT_P (XEXP (addr, 0)))
28806 addr = XEXP (addr, 1);
28807 else if (CONSTANT_P (XEXP (addr, 1)))
28808 addr = XEXP (addr, 0);
28809 else
28810 gcc_unreachable ();
28811 }
28812 gcc_assert (GET_CODE (addr) == REG && REGNO (addr) != 0);
28813 return addr;
28814 }
28815
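/* Report a fatal "bad address" insn error for OP.  */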
28816 void
28817 rs6000_fatal_bad_address (rtx op)
28818 {
28819 fatal_insn ("bad address", op);
28820 }
28821
28822 #if TARGET_MACHO
28823
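/* A branch island for one far call: the function being called, the
   label of the island, and the source line number for debug stabs.  */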
28824 typedef struct branch_island_d {
28825 tree function_name;
28826 tree label_name;
28827 int line_number;
28828 } branch_island;
28829
28830
28831 static vec<branch_island, va_gc> *branch_islands;
28832
28833 /* Remember to generate a branch island for far calls to the given
28834 function. */
28835
28836 static void
28837 add_compiler_branch_island (tree label_name, tree function_name,
28838 int line_number)
28839 {
28840 branch_island bi = {function_name, label_name, line_number};
28841 vec_safe_push (branch_islands, bi);
28842 }
28843
28844 /* Generate far-jump branch islands for everything recorded in
28845 branch_islands. Invoked immediately after the last instruction of
28846 the epilogue has been emitted; the branch islands must be appended
28847 to, and contiguous with, the function body. Mach-O stubs are
28848 generated in machopic_output_stub(). */
28849
28850 static void
28851 macho_branch_islands (void)
28852 {
28853 char tmp_buf[512];
28854
28855 while (!vec_safe_is_empty (branch_islands))
28856 {
28857 branch_island *bi = &branch_islands->last ();
28858 const char *label = IDENTIFIER_POINTER (bi->label_name);
28859 const char *name = IDENTIFIER_POINTER (bi->function_name);
28860 char name_buf[512];
28861 /* Cheap copy of the details from the Darwin ASM_OUTPUT_LABELREF(). */
28862 if (name[0] == '*' || name[0] == '&')
28863 strcpy (name_buf, name+1);
28864 else
28865 {
28866 name_buf[0] = '_';
28867 strcpy (name_buf+1, name);
28868 }
28869 strcpy (tmp_buf, "\n");
28870 strcat (tmp_buf, label);
28871 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
28872 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
28873 dbxout_stabd (N_SLINE, bi->line_number);
28874 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
28875 if (flag_pic)
28876 {
28877 if (TARGET_LINK_STACK)
28878 {
28879 char name[32];
28880 get_ppc476_thunk_name (name);
28881 strcat (tmp_buf, ":\n\tmflr r0\n\tbl ");
28882 strcat (tmp_buf, name);
28883 strcat (tmp_buf, "\n");
28884 strcat (tmp_buf, label);
28885 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
28886 }
28887 else
28888 {
28889 strcat (tmp_buf, ":\n\tmflr r0\n\tbcl 20,31,");
28890 strcat (tmp_buf, label);
28891 strcat (tmp_buf, "_pic\n");
28892 strcat (tmp_buf, label);
28893 strcat (tmp_buf, "_pic:\n\tmflr r11\n");
28894 }
28895
28896 strcat (tmp_buf, "\taddis r11,r11,ha16(");
28897 strcat (tmp_buf, name_buf);
28898 strcat (tmp_buf, " - ");
28899 strcat (tmp_buf, label);
28900 strcat (tmp_buf, "_pic)\n");
28901
28902 strcat (tmp_buf, "\tmtlr r0\n");
28903
28904 strcat (tmp_buf, "\taddi r12,r11,lo16(");
28905 strcat (tmp_buf, name_buf);
28906 strcat (tmp_buf, " - ");
28907 strcat (tmp_buf, label);
28908 strcat (tmp_buf, "_pic)\n");
28909
28910 strcat (tmp_buf, "\tmtctr r12\n\tbctr\n");
28911 }
28912 else
28913 {
28914 strcat (tmp_buf, ":\nlis r12,hi16(");
28915 strcat (tmp_buf, name_buf);
28916 strcat (tmp_buf, ")\n\tori r12,r12,lo16(");
28917 strcat (tmp_buf, name_buf);
28918 strcat (tmp_buf, ")\n\tmtctr r12\n\tbctr");
28919 }
28920 output_asm_insn (tmp_buf, 0);
28921 #if defined (DBX_DEBUGGING_INFO) || defined (XCOFF_DEBUGGING_INFO)
28922 if (write_symbols == DBX_DEBUG || write_symbols == XCOFF_DEBUG)
28923 dbxout_stabd (N_SLINE, bi->line_number);
28924 #endif /* DBX_DEBUGGING_INFO || XCOFF_DEBUGGING_INFO */
28925 branch_islands->pop ();
28926 }
28927 }
28928
28929 /* NO_PREVIOUS_DEF checks whether FUNCTION_NAME already has an entry in the
28930 branch island list; it returns 0 if so and 1 otherwise.  */
28931
28932 static int
28933 no_previous_def (tree function_name)
28934 {
28935 branch_island *bi;
28936 unsigned ix;
28937
28938 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
28939 if (function_name == bi->function_name)
28940 return 0;
28941 return 1;
28942 }
28943
28944 /* GET_PREV_LABEL gets the label name from the previous definition of
28945 the function. */
28946
28947 static tree
28948 get_prev_label (tree function_name)
28949 {
28950 branch_island *bi;
28951 unsigned ix;
28952
28953 FOR_EACH_VEC_SAFE_ELT (branch_islands, ix, bi)
28954 if (function_name == bi->function_name)
28955 return bi->label_name;
28956 return NULL_TREE;
28957 }
28958
28959 /* INSN is either a function call or a millicode call. It may have an
28960 unconditional jump in its delay slot.
28961
28962 CALL_DEST is the routine we are calling. */
28963
28964 char *
28965 output_call (rtx insn, rtx *operands, int dest_operand_number,
28966 int cookie_operand_number)
28967 {
28968 static char buf[256];
28969 if (darwin_emit_branch_islands
28970 && GET_CODE (operands[dest_operand_number]) == SYMBOL_REF
28971 && (INTVAL (operands[cookie_operand_number]) & CALL_LONG))
28972 {
28973 tree labelname;
28974 tree funname = get_identifier (XSTR (operands[dest_operand_number], 0));
28975
28976 if (no_previous_def (funname))
28977 {
28978 rtx label_rtx = gen_label_rtx ();
28979 char *label_buf, temp_buf[256];
28980 ASM_GENERATE_INTERNAL_LABEL (temp_buf, "L",
28981 CODE_LABEL_NUMBER (label_rtx));
28982 label_buf = temp_buf[0] == '*' ? temp_buf + 1 : temp_buf;
28983 labelname = get_identifier (label_buf);
28984 add_compiler_branch_island (labelname, funname, insn_line (insn));
28985 }
28986 else
28987 labelname = get_prev_label (funname);
28988
28989 /* "jbsr foo, L42" is Mach-O for "Link as 'bl foo' if a 'bl'
28990 instruction will reach 'foo', otherwise link as 'bl L42'".
28991 "L42" should be a 'branch island', that will do a far jump to
28992 'foo'. Branch islands are generated in
28993 macho_branch_islands(). */
28994 sprintf (buf, "jbsr %%z%d,%.246s",
28995 dest_operand_number, IDENTIFIER_POINTER (labelname));
28996 }
28997 else
28998 sprintf (buf, "bl %%z%d", dest_operand_number);
28999 return buf;
29000 }
29001
29002 /* Generate PIC and indirect symbol stubs. */
29003
29004 void
29005 machopic_output_stub (FILE *file, const char *symb, const char *stub)
29006 {
29007 unsigned int length;
29008 char *symbol_name, *lazy_ptr_name;
29009 char *local_label_0;
29010 static int label = 0;
29011
29012 /* Lose our funky encoding stuff so it doesn't contaminate the stub. */
29013 symb = (*targetm.strip_name_encoding) (symb);
29014
29015
29016 length = strlen (symb);
29017 symbol_name = XALLOCAVEC (char, length + 32);
29018 GEN_SYMBOL_NAME_FOR_SYMBOL (symbol_name, symb, length);
29019
29020 lazy_ptr_name = XALLOCAVEC (char, length + 32);
29021 GEN_LAZY_PTR_NAME_FOR_SYMBOL (lazy_ptr_name, symb, length);
29022
29023 if (flag_pic == 2)
29024 switch_to_section (darwin_sections[machopic_picsymbol_stub1_section]);
29025 else
29026 switch_to_section (darwin_sections[machopic_symbol_stub1_section]);
29027
29028 if (flag_pic == 2)
29029 {
29030 fprintf (file, "\t.align 5\n");
29031
29032 fprintf (file, "%s:\n", stub);
29033 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
29034
29035 label++;
29036 local_label_0 = XALLOCAVEC (char, sizeof ("\"L00000000000$spb\""));
29037 sprintf (local_label_0, "\"L%011d$spb\"", label);
29038
29039 fprintf (file, "\tmflr r0\n");
29040 if (TARGET_LINK_STACK)
29041 {
29042 char name[32];
29043 get_ppc476_thunk_name (name);
29044 fprintf (file, "\tbl %s\n", name);
29045 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
29046 }
29047 else
29048 {
29049 fprintf (file, "\tbcl 20,31,%s\n", local_label_0);
29050 fprintf (file, "%s:\n\tmflr r11\n", local_label_0);
29051 }
29052 fprintf (file, "\taddis r11,r11,ha16(%s-%s)\n",
29053 lazy_ptr_name, local_label_0);
29054 fprintf (file, "\tmtlr r0\n");
29055 fprintf (file, "\t%s r12,lo16(%s-%s)(r11)\n",
29056 (TARGET_64BIT ? "ldu" : "lwzu"),
29057 lazy_ptr_name, local_label_0);
29058 fprintf (file, "\tmtctr r12\n");
29059 fprintf (file, "\tbctr\n");
29060 }
29061 else
29062 {
29063 fprintf (file, "\t.align 4\n");
29064
29065 fprintf (file, "%s:\n", stub);
29066 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
29067
29068 fprintf (file, "\tlis r11,ha16(%s)\n", lazy_ptr_name);
29069 fprintf (file, "\t%s r12,lo16(%s)(r11)\n",
29070 (TARGET_64BIT ? "ldu" : "lwzu"),
29071 lazy_ptr_name);
29072 fprintf (file, "\tmtctr r12\n");
29073 fprintf (file, "\tbctr\n");
29074 }
29075
29076 switch_to_section (darwin_sections[machopic_lazy_symbol_ptr_section]);
29077 fprintf (file, "%s:\n", lazy_ptr_name);
29078 fprintf (file, "\t.indirect_symbol %s\n", symbol_name);
29079 fprintf (file, "%sdyld_stub_binding_helper\n",
29080 (TARGET_64BIT ? DOUBLE_INT_ASM_OP : "\t.long\t"));
29081 }
29082
29083 /* Legitimize PIC addresses. If the address is already
29084 position-independent, we return ORIG. Newly generated
29085 position-independent addresses go into a reg.  This is REG if
29086 nonzero, otherwise we allocate register(s) as necessary.  */
29087
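/* True iff X is a CONST_INT that fits in a signed 16-bit immediate field.  */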
29088 #define SMALL_INT(X) ((UINTVAL (X) + 0x8000) < 0x10000)
29089
29090 rtx
29091 rs6000_machopic_legitimize_pic_address (rtx orig, enum machine_mode mode,
29092 rtx reg)
29093 {
29094 rtx base, offset;
29095
29096 if (reg == NULL && ! reload_in_progress && ! reload_completed)
29097 reg = gen_reg_rtx (Pmode);
29098
29099 if (GET_CODE (orig) == CONST)
29100 {
29101 rtx reg_temp;
29102
29103 if (GET_CODE (XEXP (orig, 0)) == PLUS
29104 && XEXP (XEXP (orig, 0), 0) == pic_offset_table_rtx)
29105 return orig;
29106
29107 gcc_assert (GET_CODE (XEXP (orig, 0)) == PLUS);
29108
29109 /* Use a different reg for the intermediate value, as
29110 it will be marked UNCHANGING. */
29111 reg_temp = !can_create_pseudo_p () ? reg : gen_reg_rtx (Pmode);
29112 base = rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 0),
29113 Pmode, reg_temp);
29114 offset =
29115 rs6000_machopic_legitimize_pic_address (XEXP (XEXP (orig, 0), 1),
29116 Pmode, reg);
29117
29118 if (GET_CODE (offset) == CONST_INT)
29119 {
29120 if (SMALL_INT (offset))
29121 return plus_constant (Pmode, base, INTVAL (offset));
29122 else if (! reload_in_progress && ! reload_completed)
29123 offset = force_reg (Pmode, offset);
29124 else
29125 {
29126 rtx mem = force_const_mem (Pmode, orig);
29127 return machopic_legitimize_pic_address (mem, Pmode, reg);
29128 }
29129 }
29130 return gen_rtx_PLUS (Pmode, base, offset);
29131 }
29132
29133 /* Fall back on generic machopic code. */
29134 return machopic_legitimize_pic_address (orig, mode, reg);
29135 }
29136
29137 /* Output a .machine directive for the Darwin assembler, and call
29138 the generic start_file routine. */
29139
29140 static void
29141 rs6000_darwin_file_start (void)
29142 {
29143 static const struct
29144 {
29145 const char *arg;
29146 const char *name;
29147 HOST_WIDE_INT if_set;
29148 } mapping[] = {
29149 { "ppc64", "ppc64", MASK_64BIT },
29150 { "970", "ppc970", MASK_PPC_GPOPT | MASK_MFCRF | MASK_POWERPC64 },
29151 { "power4", "ppc970", 0 },
29152 { "G5", "ppc970", 0 },
29153 { "7450", "ppc7450", 0 },
29154 { "7400", "ppc7400", MASK_ALTIVEC },
29155 { "G4", "ppc7400", 0 },
29156 { "750", "ppc750", 0 },
29157 { "740", "ppc750", 0 },
29158 { "G3", "ppc750", 0 },
29159 { "604e", "ppc604e", 0 },
29160 { "604", "ppc604", 0 },
29161 { "603e", "ppc603", 0 },
29162 { "603", "ppc603", 0 },
29163 { "601", "ppc601", 0 },
29164 { NULL, "ppc", 0 } };
29165 const char *cpu_id = "";
29166 size_t i;
29167
29168 rs6000_file_start ();
29169 darwin_file_start ();
29170
29171 /* Determine the argument to -mcpu=. Default to G3 if not specified. */
29172
29173 if (rs6000_default_cpu != 0 && rs6000_default_cpu[0] != '\0')
29174 cpu_id = rs6000_default_cpu;
29175
29176 if (global_options_set.x_rs6000_cpu_index)
29177 cpu_id = processor_target_table[rs6000_cpu_index].name;
29178
29179 /* Look through the mapping array. Pick the first name that either
29180 matches the argument, has a bit set in IF_SET that is also set
29181 in the target flags, or has a NULL name. */
29182
29183 i = 0;
29184 while (mapping[i].arg != NULL
29185 && strcmp (mapping[i].arg, cpu_id) != 0
29186 && (mapping[i].if_set & rs6000_isa_flags) == 0)
29187 i++;
29188
29189 fprintf (asm_out_file, "\t.machine %s\n", mapping[i].name);
29190 }
29191
29192 #endif /* TARGET_MACHO */
29193
29194 #if TARGET_ELF
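/* Implement TARGET_ASM_RELOC_RW_MASK.  Return a mask of the kinds of
   relocations that force data into a writable section: bit 0 covers
   relative relocations, bit 1 absolute ones.  */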
29195 static int
29196 rs6000_elf_reloc_rw_mask (void)
29197 {
29198 if (flag_pic)
29199 return 3;
29200 else if (DEFAULT_ABI == ABI_AIX || DEFAULT_ABI == ABI_ELFv2)
29201 return 2;
29202 else
29203 return 0;
29204 }
29205
29206 /* Record an element in the table of global constructors. SYMBOL is
29207 a SYMBOL_REF of the function to be called; PRIORITY is a number
29208 between 0 and MAX_INIT_PRIORITY.
29209
29210 This differs from default_named_section_asm_out_constructor in
29211 that we have special handling for -mrelocatable. */
29212
29213 static void rs6000_elf_asm_out_constructor (rtx, int) ATTRIBUTE_UNUSED;
29214 static void
29215 rs6000_elf_asm_out_constructor (rtx symbol, int priority)
29216 {
29217 const char *section = ".ctors";
29218 char buf[16];
29219
29220 if (priority != DEFAULT_INIT_PRIORITY)
29221 {
29222 sprintf (buf, ".ctors.%.5u",
29223 /* Invert the numbering so the linker puts us in the proper
29224 order; constructors are run from right to left, and the
29225 linker sorts in increasing order. */
29226 MAX_INIT_PRIORITY - priority);
29227 section = buf;
29228 }
29229
29230 switch_to_section (get_section (section, SECTION_WRITE, NULL));
29231 assemble_align (POINTER_SIZE);
29232
29233 if (TARGET_RELOCATABLE)
29234 {
29235 fputs ("\t.long (", asm_out_file);
29236 output_addr_const (asm_out_file, symbol);
29237 fputs (")@fixup\n", asm_out_file);
29238 }
29239 else
29240 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
29241 }
29242
29243 static void rs6000_elf_asm_out_destructor (rtx, int) ATTRIBUTE_UNUSED;
29244 static void
29245 rs6000_elf_asm_out_destructor (rtx symbol, int priority)
29246 {
29247 const char *section = ".dtors";
29248 char buf[16];
29249
29250 if (priority != DEFAULT_INIT_PRIORITY)
29251 {
29252 sprintf (buf, ".dtors.%.5u",
29253 /* Invert the numbering so the linker puts us in the proper
29254 order; constructors are run from right to left, and the
29255 linker sorts in increasing order. */
29256 MAX_INIT_PRIORITY - priority);
29257 section = buf;
29258 }
29259
29260 switch_to_section (get_section (section, SECTION_WRITE, NULL));
29261 assemble_align (POINTER_SIZE);
29262
29263 if (TARGET_RELOCATABLE)
29264 {
29265 fputs ("\t.long (", asm_out_file);
29266 output_addr_const (asm_out_file, symbol);
29267 fputs (")@fixup\n", asm_out_file);
29268 }
29269 else
29270 assemble_integer (symbol, POINTER_SIZE / BITS_PER_UNIT, POINTER_SIZE, 1);
29271 }
29272
29273 void
29274 rs6000_elf_declare_function_name (FILE *file, const char *name, tree decl)
29275 {
29276 if (TARGET_64BIT && DEFAULT_ABI != ABI_ELFv2)
29277 {
29278 fputs ("\t.section\t\".opd\",\"aw\"\n\t.align 3\n", file);
29279 ASM_OUTPUT_LABEL (file, name);
29280 fputs (DOUBLE_INT_ASM_OP, file);
29281 rs6000_output_function_entry (file, name);
29282 fputs (",.TOC.@tocbase,0\n\t.previous\n", file);
29283 if (DOT_SYMBOLS)
29284 {
29285 fputs ("\t.size\t", file);
29286 assemble_name (file, name);
29287 fputs (",24\n\t.type\t.", file);
29288 assemble_name (file, name);
29289 fputs (",@function\n", file);
29290 if (TREE_PUBLIC (decl) && ! DECL_WEAK (decl))
29291 {
29292 fputs ("\t.globl\t.", file);
29293 assemble_name (file, name);
29294 putc ('\n', file);
29295 }
29296 }
29297 else
29298 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
29299 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
29300 rs6000_output_function_entry (file, name);
29301 fputs (":\n", file);
29302 return;
29303 }
29304
29305 if (TARGET_RELOCATABLE
29306 && !TARGET_SECURE_PLT
29307 && (get_pool_size () != 0 || crtl->profile)
29308 && uses_TOC ())
29309 {
29310 char buf[256];
29311
29312 (*targetm.asm_out.internal_label) (file, "LCL", rs6000_pic_labelno);
29313
29314 ASM_GENERATE_INTERNAL_LABEL (buf, "LCTOC", 1);
29315 fprintf (file, "\t.long ");
29316 assemble_name (file, buf);
29317 putc ('-', file);
29318 ASM_GENERATE_INTERNAL_LABEL (buf, "LCF", rs6000_pic_labelno);
29319 assemble_name (file, buf);
29320 putc ('\n', file);
29321 }
29322
29323 ASM_OUTPUT_TYPE_DIRECTIVE (file, name, "function");
29324 ASM_DECLARE_RESULT (file, DECL_RESULT (decl));
29325
29326 if (DEFAULT_ABI == ABI_AIX)
29327 {
29328 const char *desc_name, *orig_name;
29329
29330 orig_name = (*targetm.strip_name_encoding) (name);
29331 desc_name = orig_name;
29332 while (*desc_name == '.')
29333 desc_name++;
29334
29335 if (TREE_PUBLIC (decl))
29336 fprintf (file, "\t.globl %s\n", desc_name);
29337
29338 fprintf (file, "%s\n", MINIMAL_TOC_SECTION_ASM_OP);
29339 fprintf (file, "%s:\n", desc_name);
29340 fprintf (file, "\t.long %s\n", orig_name);
29341 fputs ("\t.long _GLOBAL_OFFSET_TABLE_\n", file);
29342 fputs ("\t.long 0\n", file);
29343 fprintf (file, "\t.previous\n");
29344 }
29345 ASM_OUTPUT_LABEL (file, name);
29346 }
29347
29348 static void rs6000_elf_file_end (void) ATTRIBUTE_UNUSED;
29349 static void
29350 rs6000_elf_file_end (void)
29351 {
29352 #ifdef HAVE_AS_GNU_ATTRIBUTE
29353 if (TARGET_32BIT && DEFAULT_ABI == ABI_V4)
29354 {
29355 if (rs6000_passes_float)
29356 fprintf (asm_out_file, "\t.gnu_attribute 4, %d\n",
29357 ((TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_DOUBLE_FLOAT) ? 1
29358 : (TARGET_HARD_FLOAT && TARGET_FPRS && TARGET_SINGLE_FLOAT) ? 3
29359 : 2));
29360 if (rs6000_passes_vector)
29361 fprintf (asm_out_file, "\t.gnu_attribute 8, %d\n",
29362 (TARGET_ALTIVEC_ABI ? 2
29363 : TARGET_SPE_ABI ? 3
29364 : 1));
29365 if (rs6000_returns_struct)
29366 fprintf (asm_out_file, "\t.gnu_attribute 12, %d\n",
29367 aix_struct_return ? 2 : 1);
29368 }
29369 #endif
29370 #if defined (POWERPC_LINUX) || defined (POWERPC_FREEBSD)
29371 if (TARGET_32BIT || DEFAULT_ABI == ABI_ELFv2)
29372 file_end_indicate_exec_stack ();
29373 #endif
29374 }
29375 #endif
29376
29377 #if TARGET_XCOFF
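/* Implement TARGET_ASM_OUTPUT_ANCHOR.  Define the section anchor SYMBOL
   at its recorded block offset from the current location ('$' in XCOFF
   assembly).  */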
29378 static void
29379 rs6000_xcoff_asm_output_anchor (rtx symbol)
29380 {
29381 char buffer[100];
29382
29383 sprintf (buffer, "$ + " HOST_WIDE_INT_PRINT_DEC,
29384 SYMBOL_REF_BLOCK_OFFSET (symbol));
29385 fprintf (asm_out_file, "%s", SET_ASM_OP);
29386 RS6000_OUTPUT_BASENAME (asm_out_file, XSTR (symbol, 0));
29387 fprintf (asm_out_file, ",");
29388 RS6000_OUTPUT_BASENAME (asm_out_file, buffer);
29389 fprintf (asm_out_file, "\n");
29390 }
29391
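/* Implement TARGET_ASM_GLOBALIZE_LABEL.  Emit the directive that makes
   NAME global.  */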
29392 static void
29393 rs6000_xcoff_asm_globalize_label (FILE *stream, const char *name)
29394 {
29395 fputs (GLOBAL_ASM_OP, stream);
29396 RS6000_OUTPUT_BASENAME (stream, name);
29397 putc ('\n', stream);
29398 }
29399
29400 /* A get_unnamed_decl callback, used for read-only sections. PTR
29401 points to the section string variable. */

static void
rs6000_xcoff_output_readonly_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RO],%s\n",
           *(const char *const *) directive,
           XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

/* Likewise for read-write sections.  */

static void
rs6000_xcoff_output_readwrite_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[RW],%s\n",
           *(const char *const *) directive,
           XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

static void
rs6000_xcoff_output_tls_section_asm_op (const void *directive)
{
  fprintf (asm_out_file, "\t.csect %s[TL],%s\n",
           *(const char *const *) directive,
           XCOFF_CSECT_DEFAULT_ALIGNMENT_STR);
}

/* A get_unnamed_section callback, used for switching to toc_section.  */

static void
rs6000_xcoff_output_toc_section_asm_op (const void *data ATTRIBUTE_UNUSED)
{
  if (TARGET_MINIMAL_TOC)
    {
      /* toc_section is always selected at least once from
         rs6000_xcoff_file_start, so this is guaranteed to
         always be defined once and only once in each file.  */
      if (!toc_initialized)
        {
          fputs ("\t.toc\nLCTOC..1:\n", asm_out_file);
          fputs ("\t.tc toc_table[TC],toc_table[RW]\n", asm_out_file);
          toc_initialized = 1;
        }
      fprintf (asm_out_file, "\t.csect toc_table[RW]%s\n",
               (TARGET_32BIT ? "" : ",3"));
    }
  else
    fputs ("\t.toc\n", asm_out_file);
}

/* Implement TARGET_ASM_INIT_SECTIONS.  */

static void
rs6000_xcoff_asm_init_sections (void)
{
  read_only_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
                           &xcoff_read_only_section_name);

  private_data_section
    = get_unnamed_section (SECTION_WRITE,
                           rs6000_xcoff_output_readwrite_section_asm_op,
                           &xcoff_private_data_section_name);

  tls_data_section
    = get_unnamed_section (SECTION_TLS,
                           rs6000_xcoff_output_tls_section_asm_op,
                           &xcoff_tls_data_section_name);

  tls_private_data_section
    = get_unnamed_section (SECTION_TLS,
                           rs6000_xcoff_output_tls_section_asm_op,
                           &xcoff_private_data_section_name);

  read_only_private_data_section
    = get_unnamed_section (0, rs6000_xcoff_output_readonly_section_asm_op,
                           &xcoff_private_data_section_name);

  toc_section
    = get_unnamed_section (0, rs6000_xcoff_output_toc_section_asm_op, NULL);

  readonly_data_section = read_only_data_section;
  exception_section = data_section;
}

static int
rs6000_xcoff_reloc_rw_mask (void)
{
  return 3;
}

static void
rs6000_xcoff_asm_named_section (const char *name, unsigned int flags,
                                tree decl ATTRIBUTE_UNUSED)
{
  int smclass;
  static const char * const suffix[4] = { "PR", "RO", "RW", "TL" };

  if (flags & SECTION_CODE)
    smclass = 0;
  else if (flags & SECTION_TLS)
    smclass = 3;
  else if (flags & SECTION_WRITE)
    smclass = 2;
  else
    smclass = 1;

  fprintf (asm_out_file, "\t.csect %s%s[%s],%u\n",
           (flags & SECTION_CODE) ? "." : "",
           name, suffix[smclass], flags & SECTION_ENTSIZE);
}

#define IN_NAMED_SECTION(DECL) \
  ((TREE_CODE (DECL) == FUNCTION_DECL || TREE_CODE (DECL) == VAR_DECL) \
   && DECL_SECTION_NAME (DECL) != NULL)

static section *
rs6000_xcoff_select_section (tree decl, int reloc,
                             unsigned HOST_WIDE_INT align)
{
  /* Place variables with alignment stricter than BIGGEST_ALIGNMENT into
     named section.  */
  if (align > BIGGEST_ALIGNMENT)
    {
      resolve_unique_section (decl, reloc, true);
      if (IN_NAMED_SECTION (decl))
        return get_named_section (decl, NULL, reloc);
    }

  if (decl_readonly_section (decl, reloc))
    {
      if (TREE_PUBLIC (decl))
        return read_only_data_section;
      else
        return read_only_private_data_section;
    }
  else
    {
#if HAVE_AS_TLS
      if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
        {
          if (TREE_PUBLIC (decl))
            return tls_data_section;
          else if (bss_initializer_p (decl))
            {
              /* Convert to COMMON to emit in BSS.  */
              DECL_COMMON (decl) = 1;
              return tls_comm_section;
            }
          else
            return tls_private_data_section;
        }
      else
#endif
        if (TREE_PUBLIC (decl))
          return data_section;
        else
          return private_data_section;
    }
}

static void
rs6000_xcoff_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  const char *name;

  /* Use select_section for private data and uninitialized data with
     alignment <= BIGGEST_ALIGNMENT.  */
  if (!TREE_PUBLIC (decl)
      || DECL_COMMON (decl)
      || (DECL_INITIAL (decl) == NULL_TREE
          && DECL_ALIGN (decl) <= BIGGEST_ALIGNMENT)
      || DECL_INITIAL (decl) == error_mark_node
      || (flag_zero_initialized_in_bss
          && initializer_zerop (DECL_INITIAL (decl))))
    return;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));
  name = (*targetm.strip_name_encoding) (name);
  set_decl_section_name (decl, name);
}

/* Select section for constant in constant pool.

   On RS/6000, all constants are in the private read-only data area.
   However, if this is being placed in the TOC it must be output as a
   toc entry.  */

static section *
rs6000_xcoff_select_rtx_section (enum machine_mode mode, rtx x,
                                 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
{
  if (ASM_OUTPUT_SPECIAL_POOL_ENTRY_P (x, mode))
    return toc_section;
  else
    return read_only_private_data_section;
}

/* Remove any trailing [DS] or the like from the symbol name.  */
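/* For example, "foo[DS]" becomes "foo".  Note that the code below
   assumes the bracketed suffix is exactly four characters, i.e. a
   two-letter storage-mapping class.  */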

static const char *
rs6000_xcoff_strip_name_encoding (const char *name)
{
  size_t len;
  if (*name == '*')
    name++;
  len = strlen (name);
  if (name[len - 1] == ']')
    return ggc_alloc_string (name, len - 4);
  else
    return name;
}

/* Section attributes.  AIX is always PIC.  */

static unsigned int
rs6000_xcoff_section_type_flags (tree decl, const char *name, int reloc)
{
  unsigned int align;
  unsigned int flags = default_section_type_flags (decl, name, reloc);

  /* Align to at least UNIT size.  */
  if ((flags & SECTION_CODE) != 0 || !decl || !DECL_P (decl))
    align = MIN_UNITS_PER_WORD;
  else
    /* Increase alignment of large objects if not already stricter.  */
    align = MAX ((DECL_ALIGN (decl) / BITS_PER_UNIT),
                 int_size_in_bytes (TREE_TYPE (decl)) > MIN_UNITS_PER_WORD
                 ? UNITS_PER_FP_WORD : MIN_UNITS_PER_WORD);

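  /* The log2 of the alignment travels in the SECTION_ENTSIZE bits of
     the flags; rs6000_xcoff_asm_named_section above prints it back out
     as the alignment operand of the .csect directive.  */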
  return flags | (exact_log2 (align) & SECTION_ENTSIZE);
}

/* Output at beginning of assembler file.

   Initialize the section names for the RS/6000 at this point.

   Specify filename, including full path, to assembler.

   We want to go into the TOC section so at least one .toc will be emitted.
   Also, in order to output proper .bs/.es pairs, we need at least one static
   [RW] section emitted.

   Finally, declare mcount when profiling to make the assembler happy.  */

static void
rs6000_xcoff_file_start (void)
{
  rs6000_gen_section_name (&xcoff_bss_section_name,
                           main_input_filename, ".bss_");
  rs6000_gen_section_name (&xcoff_private_data_section_name,
                           main_input_filename, ".rw_");
  rs6000_gen_section_name (&xcoff_read_only_section_name,
                           main_input_filename, ".ro_");
  rs6000_gen_section_name (&xcoff_tls_data_section_name,
                           main_input_filename, ".tls_");
  rs6000_gen_section_name (&xcoff_tbss_section_name,
                           main_input_filename, ".tbss_[UL]");

  fputs ("\t.file\t", asm_out_file);
  output_quoted_string (asm_out_file, main_input_filename);
  fputc ('\n', asm_out_file);
  if (write_symbols != NO_DEBUG)
    switch_to_section (private_data_section);
  switch_to_section (text_section);
  if (profile_flag)
    fprintf (asm_out_file, "\t.extern %s\n", RS6000_MCOUNT);
  rs6000_file_start ();
}

/* Output at end of assembler file.
   On the RS/6000, referencing data should automatically pull in text.  */

static void
rs6000_xcoff_file_end (void)
{
  switch_to_section (text_section);
  fputs ("_section_.text:\n", asm_out_file);
  switch_to_section (data_section);
  fputs (TARGET_32BIT
         ? "\t.long _section_.text\n" : "\t.llong _section_.text\n",
         asm_out_file);
}

struct declare_alias_data
{
  FILE *file;
  bool function_descriptor;
};

/* Declare alias N.  A helper function for call_for_symbol_and_aliases.  */

static bool
rs6000_declare_alias (struct symtab_node *n, void *d)
{
  struct declare_alias_data *data = (struct declare_alias_data *)d;
  /* Main symbol is output specially, because varasm machinery does part of
     the job for us - we do not need to declare .globl/lglobs and such.  */
  if (!n->alias || n->weakref)
    return false;

  if (lookup_attribute ("ifunc", DECL_ATTRIBUTES (n->decl)))
    return false;

  /* Prevent assemble_alias from trying to use .set pseudo operation
     that does not behave as expected by the middle-end.  */
  TREE_ASM_WRITTEN (n->decl) = true;

  const char *name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (n->decl));
  char *buffer = (char *) alloca (strlen (name) + 2);
  char *p;
  int dollar_inside = 0;

  strcpy (buffer, name);
  p = strchr (buffer, '$');
  while (p)
    {
      *p = '_';
      dollar_inside++;
      p = strchr (p + 1, '$');
    }
  if (TREE_PUBLIC (n->decl))
    {
      if (!RS6000_WEAK || !DECL_WEAK (n->decl))
        {
          if (dollar_inside)
            {
              if (data->function_descriptor)
                fprintf (data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
              else
                fprintf (data->file, "\t.rename %s,\"%s\"\n", buffer, name);
            }
          if (data->function_descriptor)
            fputs ("\t.globl .", data->file);
          else
            fputs ("\t.globl ", data->file);
          RS6000_OUTPUT_BASENAME (data->file, buffer);
          putc ('\n', data->file);
        }
      else if (DECL_WEAK (n->decl) && !data->function_descriptor)
        ASM_WEAKEN_DECL (data->file, n->decl, name, NULL);
    }
  else
    {
      if (dollar_inside)
        {
          if (data->function_descriptor)
            fprintf (data->file, "\t.rename %s,\"%s\"\n", buffer, name);
          else
            fprintf (data->file, "\t.rename .%s,\".%s\"\n", buffer, name);
        }
      if (data->function_descriptor)
        fputs ("\t.lglobl .", data->file);
      else
        fputs ("\t.lglobl ", data->file);
      RS6000_OUTPUT_BASENAME (data->file, buffer);
      putc ('\n', data->file);
    }
  if (data->function_descriptor)
    fputs (".", data->file);
  RS6000_OUTPUT_BASENAME (data->file, buffer);
  fputs (":\n", data->file);
  return false;
}

/* This macro produces the initial definition of a function name.
   On the RS/6000, we need to place an extra '.' in the function name and
   output the function descriptor.
   Dollar signs are converted to underscores.

   The csect for the function will have already been created when
   text_section was selected.  We do have to go back to that csect, however.

   The third and fourth parameters to the .function pseudo-op (16 and 044)
   are placeholders which no longer have any use.

   Because AIX assembler's .set command has unexpected semantics, we output
   all aliases as alternative labels in front of the definition.  */

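/* As an illustration (a sketch, not verbatim assembler output), a
   public 32-bit function "foo" comes out roughly as

        .globl .foo
        .csect foo[DS]
   foo:
        .long .foo, TOC[tc0], 0
        .csect .text[PR]
   .foo:

   so that "foo" names the function descriptor and ".foo" the actual
   code entry point.  */
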
void
rs6000_xcoff_declare_function_name (FILE *file, const char *name, tree decl)
{
  char *buffer = (char *) alloca (strlen (name) + 1);
  char *p;
  int dollar_inside = 0;
  struct declare_alias_data data = {file, false};

  strcpy (buffer, name);
  p = strchr (buffer, '$');
  while (p)
    {
      *p = '_';
      dollar_inside++;
      p = strchr (p + 1, '$');
    }
  if (TREE_PUBLIC (decl))
    {
      if (!RS6000_WEAK || !DECL_WEAK (decl))
        {
          if (dollar_inside)
            {
              fprintf (file, "\t.rename .%s,\".%s\"\n", buffer, name);
              fprintf (file, "\t.rename %s,\"%s\"\n", buffer, name);
            }
          fputs ("\t.globl .", file);
          RS6000_OUTPUT_BASENAME (file, buffer);
          putc ('\n', file);
        }
    }
  else
    {
      if (dollar_inside)
        {
          fprintf (file, "\t.rename .%s,\".%s\"\n", buffer, name);
          fprintf (file, "\t.rename %s,\"%s\"\n", buffer, name);
        }
      fputs ("\t.lglobl .", file);
      RS6000_OUTPUT_BASENAME (file, buffer);
      putc ('\n', file);
    }
  fputs ("\t.csect ", file);
  RS6000_OUTPUT_BASENAME (file, buffer);
  fputs (TARGET_32BIT ? "[DS]\n" : "[DS],3\n", file);
  RS6000_OUTPUT_BASENAME (file, buffer);
  fputs (":\n", file);
  symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
                                                        &data, true);
  fputs (TARGET_32BIT ? "\t.long ." : "\t.llong .", file);
  RS6000_OUTPUT_BASENAME (file, buffer);
  fputs (", TOC[tc0], 0\n", file);
  in_section = NULL;
  switch_to_section (function_section (decl));
  putc ('.', file);
  RS6000_OUTPUT_BASENAME (file, buffer);
  fputs (":\n", file);
  data.function_descriptor = true;
  symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
                                                        &data, true);
  if (write_symbols != NO_DEBUG && !DECL_IGNORED_P (decl))
    xcoffout_declare_function (file, decl, buffer);
  return;
}

/* This macro produces the initial definition of an object (variable) name.
   Because AIX assembler's .set command has unexpected semantics, we output
   all aliases as alternative labels in front of the definition.  */

void
rs6000_xcoff_declare_object_name (FILE *file, const char *name, tree decl)
{
  struct declare_alias_data data = {file, false};
  RS6000_OUTPUT_BASENAME (file, name);
  fputs (":\n", file);
  symtab_node::get (decl)->call_for_symbol_and_aliases (rs6000_declare_alias,
                                                        &data, true);
}

#ifdef HAVE_AS_TLS
static void
rs6000_xcoff_encode_section_info (tree decl, rtx rtl, int first)
{
  rtx symbol;
  int flags;

  default_encode_section_info (decl, rtl, first);

  /* Careful not to prod global register variables.  */
  if (!MEM_P (rtl))
    return;
  symbol = XEXP (rtl, 0);
  if (GET_CODE (symbol) != SYMBOL_REF)
    return;

  flags = SYMBOL_REF_FLAGS (symbol);

  if (TREE_CODE (decl) == VAR_DECL && DECL_THREAD_LOCAL_P (decl))
    flags &= ~SYMBOL_FLAG_HAS_BLOCK_INFO;

  SYMBOL_REF_FLAGS (symbol) = flags;
}
#endif /* HAVE_AS_TLS */
#endif /* TARGET_XCOFF */

/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
rs6000_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                  int *total, bool speed)
{
  enum machine_mode mode = GET_MODE (x);

  switch (code)
    {
      /* On the RS/6000, if it is valid in the insn, it is free.  */
    case CONST_INT:
      if (((outer_code == SET
            || outer_code == PLUS
            || outer_code == MINUS)
           && (satisfies_constraint_I (x)
               || satisfies_constraint_L (x)))
          || (outer_code == AND
              && (satisfies_constraint_K (x)
                  || (mode == SImode
                      ? satisfies_constraint_L (x)
                      : satisfies_constraint_J (x))
                  || mask_operand (x, mode)
                  || (mode == DImode
                      && mask64_operand (x, DImode))))
          || ((outer_code == IOR || outer_code == XOR)
              && (satisfies_constraint_K (x)
                  || (mode == SImode
                      ? satisfies_constraint_L (x)
                      : satisfies_constraint_J (x))))
          || outer_code == ASHIFT
          || outer_code == ASHIFTRT
          || outer_code == LSHIFTRT
          || outer_code == ROTATE
          || outer_code == ROTATERT
          || outer_code == ZERO_EXTRACT
          || (outer_code == MULT
              && satisfies_constraint_I (x))
          || ((outer_code == DIV || outer_code == UDIV
               || outer_code == MOD || outer_code == UMOD)
              && exact_log2 (INTVAL (x)) >= 0)
          || (outer_code == COMPARE
              && (satisfies_constraint_I (x)
                  || satisfies_constraint_K (x)))
          || ((outer_code == EQ || outer_code == NE)
              && (satisfies_constraint_I (x)
                  || satisfies_constraint_K (x)
                  || (mode == SImode
                      ? satisfies_constraint_L (x)
                      : satisfies_constraint_J (x))))
          || (outer_code == GTU
              && satisfies_constraint_I (x))
          || (outer_code == LTU
              && satisfies_constraint_P (x)))
        {
          *total = 0;
          return true;
        }
      else if ((outer_code == PLUS
                && reg_or_add_cint_operand (x, VOIDmode))
               || (outer_code == MINUS
                   && reg_or_sub_cint_operand (x, VOIDmode))
               || ((outer_code == SET
                    || outer_code == IOR
                    || outer_code == XOR)
                   && (INTVAL (x)
                       & ~ (unsigned HOST_WIDE_INT) 0xffffffff) == 0))
        {
          *total = COSTS_N_INSNS (1);
          return true;
        }
      /* FALLTHRU */

    case CONST_DOUBLE:
    case CONST_WIDE_INT:
    case CONST:
    case HIGH:
    case SYMBOL_REF:
    case MEM:
      /* When optimizing for size, MEM should be slightly more expensive
         than generating address, e.g., (plus (reg) (const)).
         L1 cache latency is about two instructions.  */
      *total = !speed ? COSTS_N_INSNS (1) + 1 : COSTS_N_INSNS (2);
      return true;

    case LABEL_REF:
      *total = 0;
      return true;

    case PLUS:
    case MINUS:
      if (FLOAT_MODE_P (mode))
        *total = rs6000_cost->fp;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case MULT:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && satisfies_constraint_I (XEXP (x, 1)))
        {
          if (INTVAL (XEXP (x, 1)) >= -256
              && INTVAL (XEXP (x, 1)) <= 255)
            *total = rs6000_cost->mulsi_const9;
          else
            *total = rs6000_cost->mulsi_const;
        }
      else if (mode == SFmode)
        *total = rs6000_cost->fp;
      else if (FLOAT_MODE_P (mode))
        *total = rs6000_cost->dmul;
      else if (mode == DImode)
        *total = rs6000_cost->muldi;
      else
        *total = rs6000_cost->mulsi;
      return false;

    case FMA:
      if (mode == SFmode)
        *total = rs6000_cost->fp;
      else
        *total = rs6000_cost->dmul;
      break;

    case DIV:
    case MOD:
      if (FLOAT_MODE_P (mode))
        {
          *total = mode == DFmode ? rs6000_cost->ddiv
                                  : rs6000_cost->sdiv;
          return false;
        }
      /* FALLTHRU */

    case UDIV:
    case UMOD:
      if (GET_CODE (XEXP (x, 1)) == CONST_INT
          && exact_log2 (INTVAL (XEXP (x, 1))) >= 0)
        {
          if (code == DIV || code == MOD)
            /* Shift, addze */
            *total = COSTS_N_INSNS (2);
          else
            /* Shift */
            *total = COSTS_N_INSNS (1);
        }
      else
        {
          if (GET_MODE (XEXP (x, 1)) == DImode)
            *total = rs6000_cost->divdi;
          else
            *total = rs6000_cost->divsi;
        }
      /* Add in shift and subtract for MOD.  */
      if (code == MOD || code == UMOD)
        *total += COSTS_N_INSNS (2);
      return false;

    case CTZ:
    case FFS:
      *total = COSTS_N_INSNS (4);
      return false;

    case POPCOUNT:
      *total = COSTS_N_INSNS (TARGET_POPCNTD ? 1 : 6);
      return false;

    case PARITY:
      *total = COSTS_N_INSNS (TARGET_CMPB ? 2 : 6);
      return false;

    case NOT:
      if (outer_code == AND || outer_code == IOR || outer_code == XOR)
        {
          *total = 0;
          return false;
        }
      /* FALLTHRU */

    case AND:
    case CLZ:
    case IOR:
    case XOR:
    case ZERO_EXTRACT:
      *total = COSTS_N_INSNS (1);
      return false;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
    case ROTATE:
    case ROTATERT:
      /* Handle mul_highpart.  */
      if (outer_code == TRUNCATE
          && GET_CODE (XEXP (x, 0)) == MULT)
        {
          if (mode == DImode)
            *total = rs6000_cost->muldi;
          else
            *total = rs6000_cost->mulsi;
          return true;
        }
      else if (outer_code == AND)
        *total = 0;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case SIGN_EXTEND:
    case ZERO_EXTEND:
      if (GET_CODE (XEXP (x, 0)) == MEM)
        *total = 0;
      else
        *total = COSTS_N_INSNS (1);
      return false;

    case COMPARE:
    case NEG:
    case ABS:
      if (!FLOAT_MODE_P (mode))
        {
          *total = COSTS_N_INSNS (1);
          return false;
        }
      /* FALLTHRU */

    case FLOAT:
    case UNSIGNED_FLOAT:
    case FIX:
    case UNSIGNED_FIX:
    case FLOAT_TRUNCATE:
      *total = rs6000_cost->fp;
      return false;

    case FLOAT_EXTEND:
      if (mode == DFmode)
        *total = 0;
      else
        *total = rs6000_cost->fp;
      return false;

    case UNSPEC:
      switch (XINT (x, 1))
        {
        case UNSPEC_FRSP:
          *total = rs6000_cost->fp;
          return true;

        default:
          break;
        }
      break;

    case CALL:
    case IF_THEN_ELSE:
      if (!speed)
        {
          *total = COSTS_N_INSNS (1);
          return true;
        }
      else if (FLOAT_MODE_P (mode)
               && TARGET_PPC_GFXOPT && TARGET_HARD_FLOAT && TARGET_FPRS)
        {
          *total = rs6000_cost->fp;
          return false;
        }
      break;

    case EQ:
    case GTU:
    case LTU:
      /* Carry bit requires mode == Pmode.
         NEG or PLUS already counted so only add one.  */
      if (mode == Pmode
          && (outer_code == NEG || outer_code == PLUS))
        {
          *total = COSTS_N_INSNS (1);
          return true;
        }
      if (outer_code == SET)
        {
          if (XEXP (x, 1) == const0_rtx)
            {
              if (TARGET_ISEL && !TARGET_MFCRF)
                *total = COSTS_N_INSNS (8);
              else
                *total = COSTS_N_INSNS (2);
              return true;
            }
          else if (mode == Pmode)
            {
              *total = COSTS_N_INSNS (3);
              return false;
            }
        }
      /* FALLTHRU */

    case GT:
    case LT:
    case UNORDERED:
      if (outer_code == SET && (XEXP (x, 1) == const0_rtx))
        {
          if (TARGET_ISEL && !TARGET_MFCRF)
            *total = COSTS_N_INSNS (8);
          else
            *total = COSTS_N_INSNS (2);
          return true;
        }
      /* CC COMPARE.  */
      if (outer_code == COMPARE)
        {
          *total = 0;
          return true;
        }
      break;

    default:
      break;
    }

  return false;
}

/* Debug form of rs6000_rtx_costs that is selected if -mdebug=cost.  */

static bool
rs6000_debug_rtx_costs (rtx x, int code, int outer_code, int opno, int *total,
                        bool speed)
{
  bool ret = rs6000_rtx_costs (x, code, outer_code, opno, total, speed);

  fprintf (stderr,
           "\nrs6000_rtx_costs, return = %s, code = %s, outer_code = %s, "
           "opno = %d, total = %d, speed = %s, x:\n",
           ret ? "complete" : "scan inner",
           GET_RTX_NAME (code),
           GET_RTX_NAME (outer_code),
           opno,
           *total,
           speed ? "true" : "false");

  debug_rtx (x);

  return ret;
}

/* Debug form of ADDRESS_COST that is selected if -mdebug=cost.  */

static int
rs6000_debug_address_cost (rtx x, enum machine_mode mode,
                           addr_space_t as, bool speed)
{
  int ret = TARGET_ADDRESS_COST (x, mode, as, speed);

  fprintf (stderr, "\nrs6000_address_cost, return = %d, speed = %s, x:\n",
           ret, speed ? "true" : "false");
  debug_rtx (x);

  return ret;
}


/* A C expression returning the cost of moving data from a register of class
   CLASS1 to one of CLASS2.  */

static int
rs6000_register_move_cost (enum machine_mode mode,
                           reg_class_t from, reg_class_t to)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  /* Moves from/to GENERAL_REGS.  */
  if (reg_classes_intersect_p (to, GENERAL_REGS)
      || reg_classes_intersect_p (from, GENERAL_REGS))
    {
      reg_class_t rclass = from;

      if (! reg_classes_intersect_p (to, GENERAL_REGS))
        rclass = to;

      if (rclass == FLOAT_REGS || rclass == ALTIVEC_REGS || rclass == VSX_REGS)
        ret = (rs6000_memory_move_cost (mode, rclass, false)
               + rs6000_memory_move_cost (mode, GENERAL_REGS, false));

      /* It's more expensive to move CR_REGS than CR0_REGS because of the
         shift.  */
      else if (rclass == CR_REGS)
        ret = 4;

      /* For those processors that have slow LR/CTR moves, make them more
         expensive than memory in order to bias spills to memory.  */
      else if ((rs6000_cpu == PROCESSOR_POWER6
                || rs6000_cpu == PROCESSOR_POWER7
                || rs6000_cpu == PROCESSOR_POWER8)
               && reg_classes_intersect_p (rclass, LINK_OR_CTR_REGS))
        ret = 6 * hard_regno_nregs[0][mode];

      else
        /* A move will cost one instruction per GPR moved.  */
        ret = 2 * hard_regno_nregs[0][mode];
    }

  /* If we have VSX, we can easily move between FPR or Altivec registers.  */
  else if (VECTOR_MEM_VSX_P (mode)
           && reg_classes_intersect_p (to, VSX_REGS)
           && reg_classes_intersect_p (from, VSX_REGS))
    ret = 2 * hard_regno_nregs[32][mode];

  /* Moving between two similar registers is just one instruction.  */
  else if (reg_classes_intersect_p (to, from))
    ret = (mode == TFmode || mode == TDmode) ? 4 : 2;

  /* Everything else has to go through GENERAL_REGS.  */
  else
    ret = (rs6000_register_move_cost (mode, GENERAL_REGS, to)
           + rs6000_register_move_cost (mode, from, GENERAL_REGS));

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
        fprintf (stderr,
                 "rs6000_register_move_cost:, ret=%d, mode=%s, from=%s, to=%s\n",
                 ret, GET_MODE_NAME (mode), reg_class_names[from],
                 reg_class_names[to]);
      dbg_cost_ctrl--;
    }

  return ret;
}

/* A C expression returning the cost of moving data of MODE from a register to
   or from memory.  */

static int
rs6000_memory_move_cost (enum machine_mode mode, reg_class_t rclass,
                         bool in ATTRIBUTE_UNUSED)
{
  int ret;

  if (TARGET_DEBUG_COST)
    dbg_cost_ctrl++;

  if (reg_classes_intersect_p (rclass, GENERAL_REGS))
    ret = 4 * hard_regno_nregs[0][mode];
  else if ((reg_classes_intersect_p (rclass, FLOAT_REGS)
            || reg_classes_intersect_p (rclass, VSX_REGS)))
    ret = 4 * hard_regno_nregs[32][mode];
  else if (reg_classes_intersect_p (rclass, ALTIVEC_REGS))
    ret = 4 * hard_regno_nregs[FIRST_ALTIVEC_REGNO][mode];
  else
    ret = 4 + rs6000_register_move_cost (mode, rclass, GENERAL_REGS);

  if (TARGET_DEBUG_COST)
    {
      if (dbg_cost_ctrl == 1)
        fprintf (stderr,
                 "rs6000_memory_move_cost: ret=%d, mode=%s, rclass=%s, in=%d\n",
                 ret, GET_MODE_NAME (mode), reg_class_names[rclass], in);
      dbg_cost_ctrl--;
    }

  return ret;
}

/* Returns a code for a target-specific builtin that implements
   reciprocal of the function, or NULL_TREE if not available.  */

static tree
rs6000_builtin_reciprocal (unsigned int fn, bool md_fn,
                           bool sqrt ATTRIBUTE_UNUSED)
{
  if (optimize_insn_for_size_p ())
    return NULL_TREE;

  if (md_fn)
    switch (fn)
      {
      case VSX_BUILTIN_XVSQRTDP:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (V2DFmode))
          return NULL_TREE;

        return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_2DF];

      case VSX_BUILTIN_XVSQRTSP:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (V4SFmode))
          return NULL_TREE;

        return rs6000_builtin_decls[VSX_BUILTIN_RSQRT_4SF];

      default:
        return NULL_TREE;
      }

  else
    switch (fn)
      {
      case BUILT_IN_SQRT:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (DFmode))
          return NULL_TREE;

        return rs6000_builtin_decls[RS6000_BUILTIN_RSQRT];

      case BUILT_IN_SQRTF:
        if (!RS6000_RECIP_AUTO_RSQRTE_P (SFmode))
          return NULL_TREE;

        return rs6000_builtin_decls[RS6000_BUILTIN_RSQRTF];

      default:
        return NULL_TREE;
      }
}

/* Load up a constant.  If the mode is a vector mode, splat the value across
   all of the vector elements.  */

static rtx
rs6000_load_constant_and_splat (enum machine_mode mode, REAL_VALUE_TYPE dconst)
{
  rtx reg;

  if (mode == SFmode || mode == DFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, mode);
      reg = force_reg (mode, d);
    }
  else if (mode == V4SFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, SFmode);
      rtvec v = gen_rtvec (4, d, d, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else if (mode == V2DFmode)
    {
      rtx d = CONST_DOUBLE_FROM_REAL_VALUE (dconst, DFmode);
      rtvec v = gen_rtvec (2, d, d);
      reg = gen_reg_rtx (mode);
      rs6000_expand_vector_init (reg, gen_rtx_PARALLEL (mode, v));
    }
  else
    gcc_unreachable ();

  return reg;
}

/* Generate an FMA instruction.  */

static void
rs6000_emit_madd (rtx target, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (target);
  rtx dst;

  dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}

/* Generate a FMSUB instruction: dst = fma(m1, m2, -a).  */

static void
rs6000_emit_msub (rtx target, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (target);
  rtx dst;

  /* Altivec does not support fms directly;
     generate in terms of fma in that case.  */
  if (optab_handler (fms_optab, mode) != CODE_FOR_nothing)
    dst = expand_ternary_op (mode, fms_optab, m1, m2, a, target, 0);
  else
    {
      a = expand_unop (mode, neg_optab, a, NULL_RTX, 0);
      dst = expand_ternary_op (mode, fma_optab, m1, m2, a, target, 0);
    }
  gcc_assert (dst != NULL);

  if (dst != target)
    emit_move_insn (target, dst);
}

/* Generate a FNMSUB instruction: dst = -fma(m1, m2, -a).  */

static void
rs6000_emit_nmsub (rtx dst, rtx m1, rtx m2, rtx a)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx r;

  /* This is a tad more complicated, since the fnma_optab is for
     a different expression: fma(-m1, m2, a), which is the same
     thing except in the case of signed zeros.

     Fortunately we know that if FMA is supported that FNMSUB is
     also supported in the ISA.  Just expand it directly.  */

  gcc_assert (optab_handler (fma_optab, mode) != CODE_FOR_nothing);

  r = gen_rtx_NEG (mode, a);
  r = gen_rtx_FMA (mode, m1, m2, r);
  r = gen_rtx_NEG (mode, r);
  emit_insn (gen_rtx_SET (VOIDmode, dst, r));
}

/* Newton-Raphson approximation of floating point divide DST = N/D.  If NOTE_P,
   add a reg_note saying that this was a division.  Support both scalar and
   vector divide.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swdiv (rtx dst, rtx n, rtx d, bool note_p)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx one, x0, e0, x1, xprev, eprev, xnext, enext, u, v;
  int i;

  /* Low precision estimates guarantee 5 bits of accuracy.  High
     precision estimates guarantee 14 bits of accuracy.  SFmode
     requires 23 bits of accuracy.  DFmode requires 52 bits of
     accuracy.  Each pass at least doubles the accuracy, leading
     to the following.  */
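  /* Worked out: with low-precision (5-bit) estimates the accuracy
     doubles as 5 -> 10 -> 20 -> 40 bits, so SFmode (23 bits) takes
     3 passes and DFmode (52 bits) one more.  With high-precision
     (14-bit) estimates, 14 -> 28 covers SFmode in one pass and
     14 -> 28 -> 56 covers DFmode in two.  */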
  int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
  if (mode == DFmode || mode == V2DFmode)
    passes++;

  enum insn_code code = optab_handler (smul_optab, mode);
  insn_gen_fn gen_mul = GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  one = rs6000_load_constant_and_splat (mode, dconst1);

  /* x0 = 1./d estimate */
  x0 = gen_reg_rtx (mode);
  emit_insn (gen_rtx_SET (VOIDmode, x0,
                          gen_rtx_UNSPEC (mode, gen_rtvec (1, d),
                                          UNSPEC_FRES)));

  /* Each iteration but the last calculates x_(i+1) = x_i * (2 - d * x_i).  */
  if (passes > 1)
    {
      /* e0 = 1. - d * x0 */
      e0 = gen_reg_rtx (mode);
      rs6000_emit_nmsub (e0, d, x0, one);

      /* x1 = x0 + e0 * x0 */
      x1 = gen_reg_rtx (mode);
      rs6000_emit_madd (x1, e0, x0, x0);

      for (i = 0, xprev = x1, eprev = e0; i < passes - 2;
           ++i, xprev = xnext, eprev = enext)
        {
          /* enext = eprev * eprev */
          enext = gen_reg_rtx (mode);
          emit_insn (gen_mul (enext, eprev, eprev));

          /* xnext = xprev + enext * xprev */
          xnext = gen_reg_rtx (mode);
          rs6000_emit_madd (xnext, enext, xprev, xprev);
        }
    }
  else
    xprev = x0;

  /* The last iteration calculates x_(i+1) = n * x_i * (2 - d * x_i).  */

  /* u = n * xprev */
  u = gen_reg_rtx (mode);
  emit_insn (gen_mul (u, n, xprev));

  /* v = n - (d * u) */
  v = gen_reg_rtx (mode);
  rs6000_emit_nmsub (v, d, u, n);

  /* dst = (v * xprev) + u */
  rs6000_emit_madd (dst, v, xprev, u);

  if (note_p)
    add_reg_note (get_last_insn (), REG_EQUAL, gen_rtx_DIV (mode, n, d));
}

/* Newton-Raphson approximation of single/double-precision floating point
   rsqrt.  Assumes no trapping math and finite arguments.  */

void
rs6000_emit_swrsqrt (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (src);
  rtx x0 = gen_reg_rtx (mode);
  rtx y = gen_reg_rtx (mode);

  /* Low precision estimates guarantee 5 bits of accuracy.  High
     precision estimates guarantee 14 bits of accuracy.  SFmode
     requires 23 bits of accuracy.  DFmode requires 52 bits of
     accuracy.  Each pass at least doubles the accuracy, leading
     to the following.  */
  int passes = (TARGET_RECIP_PRECISION) ? 1 : 3;
  if (mode == DFmode || mode == V2DFmode)
    passes++;

  REAL_VALUE_TYPE dconst3_2;
  int i;
  rtx halfthree;
  enum insn_code code = optab_handler (smul_optab, mode);
  insn_gen_fn gen_mul = GEN_FCN (code);

  gcc_assert (code != CODE_FOR_nothing);

  /* Load up the constant 1.5 either as a scalar, or as a vector.  */
  real_from_integer (&dconst3_2, VOIDmode, 3, SIGNED);
  SET_REAL_EXP (&dconst3_2, REAL_EXP (&dconst3_2) - 1);

  halfthree = rs6000_load_constant_and_splat (mode, dconst3_2);

  /* x0 = rsqrt estimate */
  emit_insn (gen_rtx_SET (VOIDmode, x0,
                          gen_rtx_UNSPEC (mode, gen_rtvec (1, src),
                                          UNSPEC_RSQRT)));

  /* y = 0.5 * src = 1.5 * src - src -> fewer constants */
  rs6000_emit_msub (y, src, halfthree, src);

  for (i = 0; i < passes; i++)
    {
      rtx x1 = gen_reg_rtx (mode);
      rtx u = gen_reg_rtx (mode);
      rtx v = gen_reg_rtx (mode);

      /* x1 = x0 * (1.5 - y * (x0 * x0)) */
      emit_insn (gen_mul (u, x0, x0));
      rs6000_emit_nmsub (v, y, u, halfthree);
      emit_insn (gen_mul (x1, x0, v));
      x0 = x1;
    }

  emit_move_insn (dst, x0);
  return;
}

/* Emit popcount intrinsic on TARGET_POPCNTB (Power5) and TARGET_POPCNTD
   (Power7) targets.  DST is the target, and SRC is the argument operand.  */

void
rs6000_emit_popcount (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx tmp1, tmp2;

  /* Use the PPC ISA 2.06 popcnt{w,d} instruction if we can.  */
  if (TARGET_POPCNTD)
    {
      if (mode == SImode)
        emit_insn (gen_popcntdsi2 (dst, src));
      else
        emit_insn (gen_popcntddi2 (dst, src));
      return;
    }

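  /* Otherwise fall back on popcntb, which counts bits within each
     byte.  Multiplying that result by 0x01...01 sums all the byte
     counts into the most significant byte, which the final shift
     (24 for SImode, 56 for DImode) then extracts.  */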
  tmp1 = gen_reg_rtx (mode);

  if (mode == SImode)
    {
      emit_insn (gen_popcntbsi2 (tmp1, src));
      tmp2 = expand_mult (SImode, tmp1, GEN_INT (0x01010101),
                          NULL_RTX, 0);
      tmp2 = force_reg (SImode, tmp2);
      emit_insn (gen_lshrsi3 (dst, tmp2, GEN_INT (24)));
    }
  else
    {
      emit_insn (gen_popcntbdi2 (tmp1, src));
      tmp2 = expand_mult (DImode, tmp1,
                          GEN_INT ((HOST_WIDE_INT)
                                   0x01010101 << 32 | 0x01010101),
                          NULL_RTX, 0);
      tmp2 = force_reg (DImode, tmp2);
      emit_insn (gen_lshrdi3 (dst, tmp2, GEN_INT (56)));
    }
}


/* Emit parity intrinsic on TARGET_POPCNTB targets.  DST is the
   target, and SRC is the argument operand.  */

void
rs6000_emit_parity (rtx dst, rtx src)
{
  enum machine_mode mode = GET_MODE (dst);
  rtx tmp;

  tmp = gen_reg_rtx (mode);

  /* Use the PPC ISA 2.05 prtyw/prtyd instruction if we can.  */
  if (TARGET_CMPB)
    {
      if (mode == SImode)
        {
          emit_insn (gen_popcntbsi2 (tmp, src));
          emit_insn (gen_paritysi2_cmpb (dst, tmp));
        }
      else
        {
          emit_insn (gen_popcntbdi2 (tmp, src));
          emit_insn (gen_paritydi2_cmpb (dst, tmp));
        }
      return;
    }

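  /* Without prtyw/prtyd, reduce the byte-wise popcount either with the
     multiply trick used in rs6000_emit_popcount, or by xor-folding the
     upper half into the lower half log2(bytes) times; either way, bit 0
     of the reduction is the parity, which the final AND extracts.  */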
  if (mode == SImode)
    {
      /* Is mult+shift >= shift+xor+shift+xor?  */
      if (rs6000_cost->mulsi_const >= COSTS_N_INSNS (3))
        {
          rtx tmp1, tmp2, tmp3, tmp4;

          tmp1 = gen_reg_rtx (SImode);
          emit_insn (gen_popcntbsi2 (tmp1, src));

          tmp2 = gen_reg_rtx (SImode);
          emit_insn (gen_lshrsi3 (tmp2, tmp1, GEN_INT (16)));
          tmp3 = gen_reg_rtx (SImode);
          emit_insn (gen_xorsi3 (tmp3, tmp1, tmp2));

          tmp4 = gen_reg_rtx (SImode);
          emit_insn (gen_lshrsi3 (tmp4, tmp3, GEN_INT (8)));
          emit_insn (gen_xorsi3 (tmp, tmp3, tmp4));
        }
      else
        rs6000_emit_popcount (tmp, src);
      emit_insn (gen_andsi3 (dst, tmp, const1_rtx));
    }
  else
    {
      /* Is mult+shift >= shift+xor+shift+xor+shift+xor?  */
      if (rs6000_cost->muldi >= COSTS_N_INSNS (5))
        {
          rtx tmp1, tmp2, tmp3, tmp4, tmp5, tmp6;

          tmp1 = gen_reg_rtx (DImode);
          emit_insn (gen_popcntbdi2 (tmp1, src));

          tmp2 = gen_reg_rtx (DImode);
          emit_insn (gen_lshrdi3 (tmp2, tmp1, GEN_INT (32)));
          tmp3 = gen_reg_rtx (DImode);
          emit_insn (gen_xordi3 (tmp3, tmp1, tmp2));

          tmp4 = gen_reg_rtx (DImode);
          emit_insn (gen_lshrdi3 (tmp4, tmp3, GEN_INT (16)));
          tmp5 = gen_reg_rtx (DImode);
          emit_insn (gen_xordi3 (tmp5, tmp3, tmp4));

          tmp6 = gen_reg_rtx (DImode);
          emit_insn (gen_lshrdi3 (tmp6, tmp5, GEN_INT (8)));
          emit_insn (gen_xordi3 (tmp, tmp5, tmp6));
        }
      else
        rs6000_emit_popcount (tmp, src);
      emit_insn (gen_anddi3 (dst, tmp, const1_rtx));
    }
}

/* Expand an Altivec constant permutation for little endian mode.
   There are two issues: First, the two input operands must be
   swapped so that together they form a double-wide array in LE
   order.  Second, the vperm instruction has surprising behavior
   in LE mode: it interprets the elements of the source vectors
   in BE mode ("left to right") and interprets the elements of
   the destination vector in LE mode ("right to left").  To
   correct for this, we must subtract each element of the permute
   control vector from 31.

   For example, suppose we want to concatenate vr10 = {0, 1, 2, 3}
   with vr11 = {4, 5, 6, 7} and extract {0, 2, 4, 6} using a vperm.
   We place {0,1,2,3,8,9,10,11,16,17,18,19,24,25,26,27} in vr12 to
   serve as the permute control vector.  Then, in BE mode,

     vperm 9,10,11,12

   places the desired result in vr9.  However, in LE mode the
   vector contents will be

     vr10 = 00000003 00000002 00000001 00000000
     vr11 = 00000007 00000006 00000005 00000004

   The result of the vperm using the same permute control vector is

     vr9 = 05000000 07000000 01000000 03000000

   That is, the leftmost 4 bytes of vr10 are interpreted as the
   source for the rightmost 4 bytes of vr9, and so on.

   If we change the permute control vector to
     vr12 = {31,30,29,28,23,22,21,20,15,14,13,12,7,6,5,4}

   and issue

     vperm 9,11,10,12

   we get the desired

     vr9 = 00000006 00000004 00000002 00000000.  */

void
altivec_expand_vec_perm_const_le (rtx operands[4])
{
  unsigned int i;
  rtx perm[16];
  rtx constv, unspec;
  rtx target = operands[0];
  rtx op0 = operands[1];
  rtx op1 = operands[2];
  rtx sel = operands[3];

  /* Unpack and adjust the constant selector.  */
  for (i = 0; i < 16; ++i)
    {
      rtx e = XVECEXP (sel, 0, i);
      unsigned int elt = 31 - (INTVAL (e) & 31);
      perm[i] = GEN_INT (elt);
    }

  /* Expand to a permute, swapping the inputs and using the
     adjusted selector.  */
  if (!REG_P (op0))
    op0 = force_reg (V16QImode, op0);
  if (!REG_P (op1))
    op1 = force_reg (V16QImode, op1);

  constv = gen_rtx_CONST_VECTOR (V16QImode, gen_rtvec_v (16, perm));
  constv = force_reg (V16QImode, constv);
  unspec = gen_rtx_UNSPEC (V16QImode, gen_rtvec (3, op1, op0, constv),
                           UNSPEC_VPERM);
  if (!REG_P (target))
    {
      rtx tmp = gen_reg_rtx (V16QImode);
      emit_move_insn (tmp, unspec);
      unspec = tmp;
    }

  emit_move_insn (target, unspec);
}

/* Similarly to altivec_expand_vec_perm_const_le, we must adjust the
   permute control vector.  But here it's not a constant, so we must
   generate a vector NAND or NOR to do the adjustment.  */

void
altivec_expand_vec_perm_le (rtx operands[4])
{
  rtx notx, iorx, unspec;
  rtx target = operands[0];
  rtx op0 = operands[1];
  rtx op1 = operands[2];
  rtx sel = operands[3];
  rtx tmp = target;
  rtx norreg = gen_reg_rtx (V16QImode);
  enum machine_mode mode = GET_MODE (target);

  /* Get everything in regs so the pattern matches.  */
  if (!REG_P (op0))
    op0 = force_reg (mode, op0);
  if (!REG_P (op1))
    op1 = force_reg (mode, op1);
  if (!REG_P (sel))
    sel = force_reg (V16QImode, sel);
  if (!REG_P (target))
    tmp = gen_reg_rtx (mode);

  /* Invert the selector with a VNAND if available, else a VNOR.
     The VNAND is preferred for future fusion opportunities.  */
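  /* NAND or NOR of SEL with itself is simply ~SEL, and since vperm
     only looks at the low five bits of each selector byte, ~e is
     equivalent to the 31 - e adjustment used in the constant case.  */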
  notx = gen_rtx_NOT (V16QImode, sel);
  iorx = (TARGET_P8_VECTOR
          ? gen_rtx_IOR (V16QImode, notx, notx)
          : gen_rtx_AND (V16QImode, notx, notx));
  emit_insn (gen_rtx_SET (VOIDmode, norreg, iorx));

  /* Permute with operands reversed and adjusted selector.  */
  unspec = gen_rtx_UNSPEC (mode, gen_rtvec (3, op1, op0, norreg),
                           UNSPEC_VPERM);

  /* Copy into target, possibly by way of a register.  */
  if (!REG_P (target))
    {
      emit_move_insn (tmp, unspec);
      unspec = tmp;
    }

  emit_move_insn (target, unspec);
}

/* Expand an Altivec constant permutation.  Return true if we match
   an efficient implementation; false to fall back to VPERM.  */

bool
altivec_expand_vec_perm_const (rtx operands[4])
{
  struct altivec_perm_insn {
    HOST_WIDE_INT mask;
    enum insn_code impl;
    unsigned char perm[16];
  };
  static const struct altivec_perm_insn patterns[] = {
    { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuhum_direct,
      { 1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 21, 23, 25, 27, 29, 31 } },
    { OPTION_MASK_ALTIVEC, CODE_FOR_altivec_vpkuwum_direct,
      { 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghb_direct
       : CODE_FOR_altivec_vmrglb_direct),
      { 0, 16, 1, 17, 2, 18, 3, 19, 4, 20, 5, 21, 6, 22, 7, 23 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghh_direct
       : CODE_FOR_altivec_vmrglh_direct),
      { 0, 1, 16, 17, 2, 3, 18, 19, 4, 5, 20, 21, 6, 7, 22, 23 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrghw_direct
       : CODE_FOR_altivec_vmrglw_direct),
      { 0, 1, 2, 3, 16, 17, 18, 19, 4, 5, 6, 7, 20, 21, 22, 23 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglb_direct
       : CODE_FOR_altivec_vmrghb_direct),
      { 8, 24, 9, 25, 10, 26, 11, 27, 12, 28, 13, 29, 14, 30, 15, 31 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglh_direct
       : CODE_FOR_altivec_vmrghh_direct),
      { 8, 9, 24, 25, 10, 11, 26, 27, 12, 13, 28, 29, 14, 15, 30, 31 } },
    { OPTION_MASK_ALTIVEC,
      (BYTES_BIG_ENDIAN ? CODE_FOR_altivec_vmrglw_direct
       : CODE_FOR_altivec_vmrghw_direct),
      { 8, 9, 10, 11, 24, 25, 26, 27, 12, 13, 14, 15, 28, 29, 30, 31 } },
    { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgew,
      { 0, 1, 2, 3, 16, 17, 18, 19, 8, 9, 10, 11, 24, 25, 26, 27 } },
    { OPTION_MASK_P8_VECTOR, CODE_FOR_p8_vmrgow,
      { 4, 5, 6, 7, 20, 21, 22, 23, 12, 13, 14, 15, 28, 29, 30, 31 } }
  };

  unsigned int i, j, elt, which;
  unsigned char perm[16];
  rtx target, op0, op1, sel, x;
  bool one_vec;

  target = operands[0];
  op0 = operands[1];
  op1 = operands[2];
  sel = operands[3];

  /* Unpack the constant selector.  */
  for (i = which = 0; i < 16; ++i)
    {
      rtx e = XVECEXP (sel, 0, i);
      elt = INTVAL (e) & 31;
      which |= (elt < 16 ? 1 : 2);
      perm[i] = elt;
    }

  /* Simplify the constant selector based on operands.  */
  switch (which)
    {
    default:
      gcc_unreachable ();

    case 3:
      one_vec = false;
      if (!rtx_equal_p (op0, op1))
        break;
      /* FALLTHRU */

    case 2:
      for (i = 0; i < 16; ++i)
        perm[i] &= 15;
      op0 = op1;
      one_vec = true;
      break;

    case 1:
      op1 = op0;
      one_vec = true;
      break;
    }

  /* Look for splat patterns.  */
  if (one_vec)
    {
      elt = perm[0];

      for (i = 0; i < 16; ++i)
        if (perm[i] != elt)
          break;
      if (i == 16)
        {
          if (!BYTES_BIG_ENDIAN)
            elt = 15 - elt;
          emit_insn (gen_altivec_vspltb_direct (target, op0, GEN_INT (elt)));
          return true;
        }

      if (elt % 2 == 0)
        {
          for (i = 0; i < 16; i += 2)
            if (perm[i] != elt || perm[i + 1] != elt + 1)
              break;
          if (i == 16)
            {
              int field = BYTES_BIG_ENDIAN ? elt / 2 : 7 - elt / 2;
              x = gen_reg_rtx (V8HImode);
              emit_insn (gen_altivec_vsplth_direct (x,
                                                    gen_lowpart (V8HImode, op0),
                                                    GEN_INT (field)));
              emit_move_insn (target, gen_lowpart (V16QImode, x));
              return true;
            }
        }

      if (elt % 4 == 0)
        {
          for (i = 0; i < 16; i += 4)
            if (perm[i] != elt
                || perm[i + 1] != elt + 1
                || perm[i + 2] != elt + 2
                || perm[i + 3] != elt + 3)
              break;
          if (i == 16)
            {
              int field = BYTES_BIG_ENDIAN ? elt / 4 : 3 - elt / 4;
              x = gen_reg_rtx (V4SImode);
              emit_insn (gen_altivec_vspltw_direct (x,
                                                    gen_lowpart (V4SImode, op0),
                                                    GEN_INT (field)));
              emit_move_insn (target, gen_lowpart (V16QImode, x));
              return true;
            }
        }
    }

  /* Look for merge and pack patterns.  */
  for (j = 0; j < ARRAY_SIZE (patterns); ++j)
    {
      bool swapped;

      if ((patterns[j].mask & rs6000_isa_flags) == 0)
        continue;

      elt = patterns[j].perm[0];
      if (perm[0] == elt)
        swapped = false;
      else if (perm[0] == elt + 16)
        swapped = true;
      else
        continue;
      for (i = 1; i < 16; ++i)
        {
          elt = patterns[j].perm[i];
          if (swapped)
            elt = (elt >= 16 ? elt - 16 : elt + 16);
          else if (one_vec && elt >= 16)
            elt -= 16;
          if (perm[i] != elt)
            break;
        }
      if (i == 16)
        {
          enum insn_code icode = patterns[j].impl;
          enum machine_mode omode = insn_data[icode].operand[0].mode;
          enum machine_mode imode = insn_data[icode].operand[1].mode;

          /* For little-endian, don't use vpkuwum and vpkuhum if the
             underlying vector type is not V4SI and V8HI, respectively.
             For example, using vpkuwum with a V8HI picks up the even
             halfwords (BE numbering) when the even halfwords (LE
             numbering) are what we need.  */
          if (!BYTES_BIG_ENDIAN
              && icode == CODE_FOR_altivec_vpkuwum_direct
              && ((GET_CODE (op0) == REG
                   && GET_MODE (op0) != V4SImode)
                  || (GET_CODE (op0) == SUBREG
                      && GET_MODE (XEXP (op0, 0)) != V4SImode)))
            continue;
          if (!BYTES_BIG_ENDIAN
              && icode == CODE_FOR_altivec_vpkuhum_direct
              && ((GET_CODE (op0) == REG
                   && GET_MODE (op0) != V8HImode)
                  || (GET_CODE (op0) == SUBREG
                      && GET_MODE (XEXP (op0, 0)) != V8HImode)))
            continue;

          /* For little-endian, the two input operands must be swapped
             (or swapped back) to ensure proper right-to-left numbering
             from 0 to 2N-1.  */
          if (swapped ^ !BYTES_BIG_ENDIAN)
            x = op0, op0 = op1, op1 = x;
          if (imode != V16QImode)
            {
              op0 = gen_lowpart (imode, op0);
              op1 = gen_lowpart (imode, op1);
            }
          if (omode == V16QImode)
            x = target;
          else
            x = gen_reg_rtx (omode);
          emit_insn (GEN_FCN (icode) (x, op0, op1));
          if (omode != V16QImode)
            emit_move_insn (target, gen_lowpart (V16QImode, x));
          return true;
        }
    }

  if (!BYTES_BIG_ENDIAN)
    {
      altivec_expand_vec_perm_const_le (operands);
      return true;
    }

  return false;
}

/* Expand a Paired Single, VSX Permute Doubleword, or SPE constant permutation.
   Return true if we match an efficient implementation.  */

static bool
rs6000_expand_vec_perm_const_1 (rtx target, rtx op0, rtx op1,
                                unsigned char perm0, unsigned char perm1)
{
  rtx x;

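  /* PERM0 and PERM1 index the four elements of op0 concatenated with
     op1, so bit 1 of each selector says which operand an element comes
     from and bit 0 which half of that operand.  */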
  /* If both selectors come from the same operand, fold to single op.  */
  if ((perm0 & 2) == (perm1 & 2))
    {
      if (perm0 & 2)
        op0 = op1;
      else
        op1 = op0;
    }
  /* If both operands are equal, fold to simpler permutation.  */
  if (rtx_equal_p (op0, op1))
    {
      perm0 = perm0 & 1;
      perm1 = (perm1 & 1) + 2;
    }
  /* If the first selector comes from the second operand, swap.  */
  else if (perm0 & 2)
    {
      if (perm1 & 2)
        return false;
      perm0 -= 2;
      perm1 += 2;
      x = op0, op0 = op1, op1 = x;
    }
  /* If the second selector does not come from the second operand, fail.  */
  else if ((perm1 & 2) == 0)
    return false;

  /* Success! */
  if (target != NULL)
    {
      enum machine_mode vmode, dmode;
      rtvec v;

      vmode = GET_MODE (target);
      gcc_assert (GET_MODE_NUNITS (vmode) == 2);
      dmode = mode_for_vector (GET_MODE_INNER (vmode), 4);
      x = gen_rtx_VEC_CONCAT (dmode, op0, op1);
      v = gen_rtvec (2, GEN_INT (perm0), GEN_INT (perm1));
      x = gen_rtx_VEC_SELECT (vmode, x, gen_rtx_PARALLEL (VOIDmode, v));
      emit_insn (gen_rtx_SET (VOIDmode, target, x));
    }
  return true;
}

bool
rs6000_expand_vec_perm_const (rtx operands[4])
{
  rtx target, op0, op1, sel;
  unsigned char perm0, perm1;

  target = operands[0];
  op0 = operands[1];
  op1 = operands[2];
  sel = operands[3];

  /* Unpack the constant selector.  */
  perm0 = INTVAL (XVECEXP (sel, 0, 0)) & 3;
  perm1 = INTVAL (XVECEXP (sel, 0, 1)) & 3;

  return rs6000_expand_vec_perm_const_1 (target, op0, op1, perm0, perm1);
}

/* Test whether a constant permutation is supported.  */

static bool
rs6000_vectorize_vec_perm_const_ok (enum machine_mode vmode,
                                    const unsigned char *sel)
{
  /* AltiVec (and thus VSX) can handle arbitrary permutations.  */
  if (TARGET_ALTIVEC)
    return true;

  /* Check for ps_merge* or evmerge* insns.  */
  if ((TARGET_PAIRED_FLOAT && vmode == V2SFmode)
      || (TARGET_SPE && vmode == V2SImode))
    {
      rtx op0 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 1);
      rtx op1 = gen_raw_REG (vmode, LAST_VIRTUAL_REGISTER + 2);
      return rs6000_expand_vec_perm_const_1 (NULL, op0, op1, sel[0], sel[1]);
    }

  return false;
}

/* A subroutine for rs6000_expand_extract_even & rs6000_expand_interleave.  */

static void
rs6000_do_expand_vec_perm (rtx target, rtx op0, rtx op1,
                           enum machine_mode vmode, unsigned nelt, rtx perm[])
{
  enum machine_mode imode;
  rtx x;

  imode = vmode;
  if (GET_MODE_CLASS (vmode) != MODE_VECTOR_INT)
    {
      imode = GET_MODE_INNER (vmode);
      imode = mode_for_size (GET_MODE_BITSIZE (imode), MODE_INT, 0);
      imode = mode_for_vector (imode, nelt);
    }

  x = gen_rtx_CONST_VECTOR (imode, gen_rtvec_v (nelt, perm));
  x = expand_vec_perm (vmode, op0, op1, x, target);
  if (x != target)
    emit_move_insn (target, x);
}

/* Expand an extract even operation.  */

void
rs6000_expand_extract_even (rtx target, rtx op0, rtx op1)
{
  enum machine_mode vmode = GET_MODE (target);
  unsigned i, nelt = GET_MODE_NUNITS (vmode);
  rtx perm[16];

  for (i = 0; i < nelt; i++)
    perm[i] = GEN_INT (i * 2);

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
}

/* Expand a vector interleave operation.  */

void
rs6000_expand_interleave (rtx target, rtx op0, rtx op1, bool highp)
{
  enum machine_mode vmode = GET_MODE (target);
  unsigned i, high, nelt = GET_MODE_NUNITS (vmode);
  rtx perm[16];

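  /* For example, with nelt == 4, highp selects {0, 4, 1, 5} and
     !highp selects {2, 6, 3, 7}.  */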
  high = (highp ? 0 : nelt / 2);
  for (i = 0; i < nelt / 2; i++)
    {
      perm[i * 2] = GEN_INT (i + high);
      perm[i * 2 + 1] = GEN_INT (i + nelt + high);
    }

  rs6000_do_expand_vec_perm (target, op0, op1, vmode, nelt, perm);
}

/* Return an RTX representing where to find the function value of a
   function returning MODE.  */
static rtx
rs6000_complex_function_value (enum machine_mode mode)
{
  unsigned int regno;
  rtx r1, r2;
  enum machine_mode inner = GET_MODE_INNER (mode);
  unsigned int inner_bytes = GET_MODE_SIZE (inner);

  if (FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
    regno = FP_ARG_RETURN;
  else
    {
      regno = GP_ARG_RETURN;

      /* 32-bit is OK since it'll go in r3/r4.  */
      if (TARGET_32BIT && inner_bytes >= 4)
        return gen_rtx_REG (mode, regno);
    }

  if (inner_bytes >= 8)
    return gen_rtx_REG (mode, regno);

  r1 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno),
                          const0_rtx);
  r2 = gen_rtx_EXPR_LIST (inner, gen_rtx_REG (inner, regno + 1),
                          GEN_INT (inner_bytes));
  return gen_rtx_PARALLEL (mode, gen_rtvec (2, r1, r2));
}

/* Target hook for TARGET_FUNCTION_VALUE.

   On the SPE, both FPs and vectors are returned in r3.

   On RS/6000 an integer value is in r3 and a floating-point value is in
   fp1, unless -msoft-float.  */

static rtx
rs6000_function_value (const_tree valtype,
                       const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
                       bool outgoing ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  unsigned int regno;
  enum machine_mode elt_mode;
  int n_elts;

  /* Special handling for structs in darwin64.  */
  if (TARGET_MACHO
      && rs6000_darwin64_struct_check_p (TYPE_MODE (valtype), valtype))
    {
      CUMULATIVE_ARGS valcum;
      rtx valret;

      valcum.words = 0;
      valcum.fregno = FP_ARG_MIN_REG;
      valcum.vregno = ALTIVEC_ARG_MIN_REG;
      /* Do a trial code generation as if this were going to be passed as
         an argument; if any part goes in memory, we return NULL.  */
      valret = rs6000_darwin64_record_arg (&valcum, valtype, true,
                                           /* retval= */ true);
      if (valret)
        return valret;
      /* Otherwise fall through to standard ABI rules.  */
    }

  /* The ELFv2 ABI returns homogeneous VFP aggregates in registers.  */
  if (rs6000_discover_homogeneous_aggregate (TYPE_MODE (valtype), valtype,
                                             &elt_mode, &n_elts))
    {
      int first_reg, n_regs, i;
      rtx par;

      if (SCALAR_FLOAT_MODE_P (elt_mode))
        {
          /* _Decimal128 must use even/odd register pairs.  */
          first_reg = (elt_mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
          n_regs = (GET_MODE_SIZE (elt_mode) + 7) >> 3;
        }
      else
        {
          first_reg = ALTIVEC_ARG_RETURN;
          n_regs = 1;
        }

      par = gen_rtx_PARALLEL (TYPE_MODE (valtype), rtvec_alloc (n_elts));
      for (i = 0; i < n_elts; i++)
        {
          rtx r = gen_rtx_REG (elt_mode, first_reg + i * n_regs);
          rtx off = GEN_INT (i * GET_MODE_SIZE (elt_mode));
          XVECEXP (par, 0, i) = gen_rtx_EXPR_LIST (VOIDmode, r, off);
        }

      return par;
    }

  if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DImode)
    {
31346 /* A long long return value needs to be split in the 32-bit -mpowerpc64 ABI. */
31347 return gen_rtx_PARALLEL (DImode,
31348 gen_rtvec (2,
31349 gen_rtx_EXPR_LIST (VOIDmode,
31350 gen_rtx_REG (SImode, GP_ARG_RETURN),
31351 const0_rtx),
31352 gen_rtx_EXPR_LIST (VOIDmode,
31353 gen_rtx_REG (SImode,
31354 GP_ARG_RETURN + 1),
31355 GEN_INT (4))));
31356 }
31357 if (TARGET_32BIT && TARGET_POWERPC64 && TYPE_MODE (valtype) == DCmode)
31358 {
31359 return gen_rtx_PARALLEL (DCmode,
31360 gen_rtvec (4,
31361 gen_rtx_EXPR_LIST (VOIDmode,
31362 gen_rtx_REG (SImode, GP_ARG_RETURN),
31363 const0_rtx),
31364 gen_rtx_EXPR_LIST (VOIDmode,
31365 gen_rtx_REG (SImode,
31366 GP_ARG_RETURN + 1),
31367 GEN_INT (4)),
31368 gen_rtx_EXPR_LIST (VOIDmode,
31369 gen_rtx_REG (SImode,
31370 GP_ARG_RETURN + 2),
31371 GEN_INT (8)),
31372 gen_rtx_EXPR_LIST (VOIDmode,
31373 gen_rtx_REG (SImode,
31374 GP_ARG_RETURN + 3),
31375 GEN_INT (12))));
31376 }
31377
31378 mode = TYPE_MODE (valtype);
31379 if ((INTEGRAL_TYPE_P (valtype) && GET_MODE_BITSIZE (mode) < BITS_PER_WORD)
31380 || POINTER_TYPE_P (valtype))
31381 mode = TARGET_32BIT ? SImode : DImode;
31382
31383 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
31384 /* _Decimal128 must use an even/odd register pair. */
31385 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
31386 else if (SCALAR_FLOAT_TYPE_P (valtype) && TARGET_HARD_FLOAT && TARGET_FPRS
31387 && ((TARGET_SINGLE_FLOAT && (mode == SFmode)) || TARGET_DOUBLE_FLOAT))
31388 regno = FP_ARG_RETURN;
31389 else if (TREE_CODE (valtype) == COMPLEX_TYPE
31390 && targetm.calls.split_complex_arg)
31391 return rs6000_complex_function_value (mode);
31392 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
31393 return register is used in both cases, and we won't see V2DImode/V2DFmode
31394 for pure altivec, combine the two cases. */
31395 else if (TREE_CODE (valtype) == VECTOR_TYPE
31396 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI
31397 && ALTIVEC_OR_VSX_VECTOR_MODE (mode))
31398 regno = ALTIVEC_ARG_RETURN;
31399 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
31400 && (mode == DFmode || mode == DCmode
31401 || mode == TFmode || mode == TCmode))
31402 return spe_build_register_parallel (mode, GP_ARG_RETURN);
31403 else
31404 regno = GP_ARG_RETURN;
31405
31406 return gen_rtx_REG (mode, regno);
31407 }
31408
31409 /* Define how to find the value returned by a library function
31410 assuming the value has mode MODE. */
31411 rtx
31412 rs6000_libcall_value (enum machine_mode mode)
31413 {
31414 unsigned int regno;
31415
31416 if (TARGET_32BIT && TARGET_POWERPC64 && mode == DImode)
31417 {
31418 /* A long long return value needs to be split in the 32-bit -mpowerpc64 ABI. */
31419 return gen_rtx_PARALLEL (DImode,
31420 gen_rtvec (2,
31421 gen_rtx_EXPR_LIST (VOIDmode,
31422 gen_rtx_REG (SImode, GP_ARG_RETURN),
31423 const0_rtx),
31424 gen_rtx_EXPR_LIST (VOIDmode,
31425 gen_rtx_REG (SImode,
31426 GP_ARG_RETURN + 1),
31427 GEN_INT (4))));
31428 }
31429
31430 if (DECIMAL_FLOAT_MODE_P (mode) && TARGET_HARD_FLOAT && TARGET_FPRS)
31431 /* _Decimal128 must use an even/odd register pair. */
31432 regno = (mode == TDmode) ? FP_ARG_RETURN + 1 : FP_ARG_RETURN;
31433 else if (SCALAR_FLOAT_MODE_P (mode)
31434 && TARGET_HARD_FLOAT && TARGET_FPRS
31435 && ((TARGET_SINGLE_FLOAT && mode == SFmode) || TARGET_DOUBLE_FLOAT))
31436 regno = FP_ARG_RETURN;
31437 /* VSX is a superset of Altivec and adds V2DImode/V2DFmode. Since the same
31438 return register is used in both cases, and we won't see V2DImode/V2DFmode
31439 for pure altivec, combine the two cases. */
31440 else if (ALTIVEC_OR_VSX_VECTOR_MODE (mode)
31441 && TARGET_ALTIVEC && TARGET_ALTIVEC_ABI)
31442 regno = ALTIVEC_ARG_RETURN;
31443 else if (COMPLEX_MODE_P (mode) && targetm.calls.split_complex_arg)
31444 return rs6000_complex_function_value (mode);
31445 else if (TARGET_E500_DOUBLE && TARGET_HARD_FLOAT
31446 && (mode == DFmode || mode == DCmode
31447 || mode == TFmode || mode == TCmode))
31448 return spe_build_register_parallel (mode, GP_ARG_RETURN);
31449 else
31450 regno = GP_ARG_RETURN;
31451
31452 return gen_rtx_REG (mode, regno);
31453 }
31454
31455
31456 /* Return true if we use LRA instead of the reload pass. */
31457 static bool
31458 rs6000_lra_p (void)
31459 {
31460 return rs6000_lra_flag;
31461 }
31462
31463 /* Given FROM and TO register numbers, say whether this elimination is allowed.
31464 Frame pointer elimination is automatically handled.
31465
31466 For the RS/6000, if frame pointer elimination is being done, we would like
31467 to convert ap into fp, not sp.
31468
31469 We need r30 if -mminimal-toc was specified, and there are constant pool
31470 references. */
31471
31472 static bool
31473 rs6000_can_eliminate (const int from, const int to)
31474 {
31475 return (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM
31476 ? ! frame_pointer_needed
31477 : from == RS6000_PIC_OFFSET_TABLE_REGNUM
31478 ? ! TARGET_MINIMAL_TOC || TARGET_NO_TOC || get_pool_size () == 0
31479 : true);
31480 }
31481
31482 /* Define the offset between two registers, FROM to be eliminated and its
31483 replacement TO, at the start of a routine. */
31484 HOST_WIDE_INT
31485 rs6000_initial_elimination_offset (int from, int to)
31486 {
31487 rs6000_stack_t *info = rs6000_stack_info ();
31488 HOST_WIDE_INT offset;
31489
31490 if (from == HARD_FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
31491 offset = info->push_p ? 0 : -info->total_size;
31492 else if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
31493 {
31494 offset = info->push_p ? 0 : -info->total_size;
31495 if (FRAME_GROWS_DOWNWARD)
31496 offset += info->fixed_size + info->vars_size + info->parm_size;
31497 }
31498 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
31499 offset = FRAME_GROWS_DOWNWARD
31500 ? info->fixed_size + info->vars_size + info->parm_size
31501 : 0;
31502 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
31503 offset = info->total_size;
31504 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
31505 offset = info->push_p ? info->total_size : 0;
31506 else if (from == RS6000_PIC_OFFSET_TABLE_REGNUM)
31507 offset = 0;
31508 else
31509 gcc_unreachable ();
31510
31511 return offset;
31512 }
31513
31514 static rtx
31515 rs6000_dwarf_register_span (rtx reg)
31516 {
31517 rtx parts[8];
31518 int i, words;
31519 unsigned regno = REGNO (reg);
31520 enum machine_mode mode = GET_MODE (reg);
31521
31522 if (TARGET_SPE
31523 && regno < 32
31524 && (SPE_VECTOR_MODE (GET_MODE (reg))
31525 || (TARGET_E500_DOUBLE && FLOAT_MODE_P (mode)
31526 && mode != SFmode && mode != SDmode && mode != SCmode)))
31527 ;
31528 else
31529 return NULL_RTX;
31530
31531 regno = REGNO (reg);
31532
31533 /* The duality of the SPE register size wreaks all kinds of havoc.
31534 This is a way of distinguishing r0 in 32-bits from r0 in
31535 64-bits. */
31536 words = (GET_MODE_SIZE (mode) + UNITS_PER_FP_WORD - 1) / UNITS_PER_FP_WORD;
31537 gcc_assert (words <= 4);
31538 for (i = 0; i < words; i++, regno++)
31539 {
31540 if (BYTES_BIG_ENDIAN)
31541 {
31542 parts[2 * i] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
31543 parts[2 * i + 1] = gen_rtx_REG (SImode, regno);
31544 }
31545 else
31546 {
31547 parts[2 * i] = gen_rtx_REG (SImode, regno);
31548 parts[2 * i + 1] = gen_rtx_REG (SImode, regno + FIRST_SPE_HIGH_REGNO);
31549 }
31550 }
31551
31552 return gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (words * 2, parts));
31553 }
31554
31555 /* Fill in sizes for SPE register high parts in the table used by the unwinder. */
31556
31557 static void
31558 rs6000_init_dwarf_reg_sizes_extra (tree address)
31559 {
31560 if (TARGET_SPE)
31561 {
31562 int i;
31563 enum machine_mode mode = TYPE_MODE (char_type_node);
31564 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
31565 rtx mem = gen_rtx_MEM (BLKmode, addr);
31566 rtx value = gen_int_mode (4, mode);
31567
31568 for (i = FIRST_SPE_HIGH_REGNO; i < LAST_SPE_HIGH_REGNO+1; i++)
31569 {
31570 int column = DWARF_REG_TO_UNWIND_COLUMN
31571 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
31572 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
31573
31574 emit_move_insn (adjust_address (mem, mode, offset), value);
31575 }
31576 }
31577
31578 if (TARGET_MACHO && ! TARGET_ALTIVEC)
31579 {
31580 int i;
31581 enum machine_mode mode = TYPE_MODE (char_type_node);
31582 rtx addr = expand_expr (address, NULL_RTX, VOIDmode, EXPAND_NORMAL);
31583 rtx mem = gen_rtx_MEM (BLKmode, addr);
31584 rtx value = gen_int_mode (16, mode);
31585
31586 /* On Darwin, libgcc may be built to run on both G3 and G4/5.
31587 The unwinder still needs to know the size of Altivec registers. */
31588
31589 for (i = FIRST_ALTIVEC_REGNO; i < LAST_ALTIVEC_REGNO+1; i++)
31590 {
31591 int column = DWARF_REG_TO_UNWIND_COLUMN
31592 (DWARF2_FRAME_REG_OUT (DWARF_FRAME_REGNUM (i), true));
31593 HOST_WIDE_INT offset = column * GET_MODE_SIZE (mode);
31594
31595 emit_move_insn (adjust_address (mem, mode, offset), value);
31596 }
31597 }
31598 }
31599
31600 /* Map internal gcc register numbers to DWARF2 register numbers. */
31601
31602 unsigned int
31603 rs6000_dbx_register_number (unsigned int regno)
31604 {
31605 if (regno <= 63 || write_symbols != DWARF2_DEBUG)
31606 return regno;
31607 if (regno == LR_REGNO)
31608 return 108;
31609 if (regno == CTR_REGNO)
31610 return 109;
31611 if (CR_REGNO_P (regno))
31612 return regno - CR0_REGNO + 86;
31613 if (regno == CA_REGNO)
31614 return 101; /* XER */
31615 if (ALTIVEC_REGNO_P (regno))
31616 return regno - FIRST_ALTIVEC_REGNO + 1124;
31617 if (regno == VRSAVE_REGNO)
31618 return 356;
31619 if (regno == VSCR_REGNO)
31620 return 67;
31621 if (regno == SPE_ACC_REGNO)
31622 return 99;
31623 if (regno == SPEFSCR_REGNO)
31624 return 612;
31625 if (SPE_HIGH_REGNO_P (regno))
31626 return regno - FIRST_SPE_HIGH_REGNO + 1200;
31627 return regno;
31628 }
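/* Editor's note: sample mappings implied by the code above
   (illustrative only): LR -> 108, CTR -> 109, CR2 -> 86 + 2 = 88,
   the carry bit CA -> 101 (XER), AltiVec v0 -> 1124 and v31 -> 1155;
   GPRs and FPRs (0..63) map to themselves.  A sketch of these
   identities, assuming DWARF2 debug info is selected; the helper name
   is hypothetical:  */
#if 0
static void
example_check_dwarf_numbers (void)
{
  gcc_assert (rs6000_dbx_register_number (LR_REGNO) == 108);
  gcc_assert (rs6000_dbx_register_number (CTR_REGNO) == 109);
  gcc_assert (rs6000_dbx_register_number (CR0_REGNO + 2) == 88);
  gcc_assert (rs6000_dbx_register_number (FIRST_ALTIVEC_REGNO) == 1124);
}
#endif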
31629
31630 /* Target hook for eh_return_filter_mode. */
31631 static enum machine_mode
31632 rs6000_eh_return_filter_mode (void)
31633 {
31634 return TARGET_32BIT ? SImode : word_mode;
31635 }
31636
31637 /* Target hook for scalar_mode_supported_p. */
31638 static bool
31639 rs6000_scalar_mode_supported_p (enum machine_mode mode)
31640 {
31641 if (DECIMAL_FLOAT_MODE_P (mode))
31642 return default_decimal_float_supported_p ();
31643 else
31644 return default_scalar_mode_supported_p (mode);
31645 }
31646
31647 /* Target hook for vector_mode_supported_p. */
31648 static bool
31649 rs6000_vector_mode_supported_p (enum machine_mode mode)
31650 {
31651
31652 if (TARGET_PAIRED_FLOAT && PAIRED_VECTOR_MODE (mode))
31653 return true;
31654
31655 if (TARGET_SPE && SPE_VECTOR_MODE (mode))
31656 return true;
31657
31658 else if (VECTOR_MEM_ALTIVEC_OR_VSX_P (mode))
31659 return true;
31660
31661 else
31662 return false;
31663 }
31664
31665 /* Target hook for invalid_arg_for_unprototyped_fn. */
31666 static const char *
31667 invalid_arg_for_unprototyped_fn (const_tree typelist, const_tree funcdecl, const_tree val)
31668 {
31669 return (!rs6000_darwin64_abi
31670 && typelist == 0
31671 && TREE_CODE (TREE_TYPE (val)) == VECTOR_TYPE
31672 && (funcdecl == NULL_TREE
31673 || (TREE_CODE (funcdecl) == FUNCTION_DECL
31674 && DECL_BUILT_IN_CLASS (funcdecl) != BUILT_IN_MD)))
31675 ? N_("AltiVec argument passed to unprototyped function")
31676 : NULL;
31677 }
31678
31679 /* For TARGET_SECURE_PLT 32-bit PIC code we can save PIC register
31680 setup by using __stack_chk_fail_local hidden function instead of
31681 calling __stack_chk_fail directly. Otherwise it is better to call
31682 __stack_chk_fail directly. */
31683
31684 static tree ATTRIBUTE_UNUSED
31685 rs6000_stack_protect_fail (void)
31686 {
31687 return (DEFAULT_ABI == ABI_V4 && TARGET_SECURE_PLT && flag_pic)
31688 ? default_hidden_stack_protect_fail ()
31689 : default_external_stack_protect_fail ();
31690 }
31691
31692 void
31693 rs6000_final_prescan_insn (rtx_insn *insn, rtx *operand ATTRIBUTE_UNUSED,
31694 int num_operands ATTRIBUTE_UNUSED)
31695 {
31696 if (rs6000_warn_cell_microcode)
31697 {
31698 const char *temp;
31699 int insn_code_number = recog_memoized (insn);
31700 location_t location = INSN_LOCATION (insn);
31701
31702 /* Punt on insns we cannot recognize. */
31703 if (insn_code_number < 0)
31704 return;
31705
31706 temp = get_insn_template (insn_code_number, insn);
31707
31708 if (get_attr_cell_micro (insn) == CELL_MICRO_ALWAYS)
31709 warning_at (location, OPT_mwarn_cell_microcode,
31710 "emitting microcode insn %s\t[%s] #%d",
31711 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
31712 else if (get_attr_cell_micro (insn) == CELL_MICRO_CONDITIONAL)
31713 warning_at (location, OPT_mwarn_cell_microcode,
31714 "emitting conditional microcode insn %s\t[%s] #%d",
31715 temp, insn_data[INSN_CODE (insn)].name, INSN_UID (insn));
31716 }
31717 }
31718
31719 /* Implement the TARGET_ASAN_SHADOW_OFFSET hook. */
31720
31721 #if TARGET_ELF
31722 static unsigned HOST_WIDE_INT
31723 rs6000_asan_shadow_offset (void)
31724 {
31725 return (unsigned HOST_WIDE_INT) 1 << (TARGET_64BIT ? 41 : 29);
31726 }
31727 #endif
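/* Editor's note: a minimal sketch of how the offset is used.  ASan
   maps each 8 bytes of application memory to one shadow byte, so a
   shadow address is (addr >> 3) plus the offset above; the real
   computation lives in the target-independent asan code.  The helper
   name below is hypothetical and for illustration only:  */
#if 0
static unsigned HOST_WIDE_INT
example_shadow_addr (unsigned HOST_WIDE_INT app_addr)
{
  return (app_addr >> 3) + rs6000_asan_shadow_offset ();
}
#endif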
31728 \f
31729 /* Mask options that we want to support inside attribute((target)) and
31730 #pragma GCC target operations. Note that we do not include things like
31731 64/32-bit, endianness, hard/soft floating point, etc. that would have
31732 different calling sequences. */
31733
31734 struct rs6000_opt_mask {
31735 const char *name; /* option name */
31736 HOST_WIDE_INT mask; /* mask to set */
31737 bool invert; /* invert sense of mask */
31738 bool valid_target; /* option is a target option */
31739 };
31740
31741 static struct rs6000_opt_mask const rs6000_opt_masks[] =
31742 {
31743 { "altivec", OPTION_MASK_ALTIVEC, false, true },
31744 { "cmpb", OPTION_MASK_CMPB, false, true },
31745 { "crypto", OPTION_MASK_CRYPTO, false, true },
31746 { "direct-move", OPTION_MASK_DIRECT_MOVE, false, true },
31747 { "dlmzb", OPTION_MASK_DLMZB, false, true },
31748 { "fprnd", OPTION_MASK_FPRND, false, true },
31749 { "hard-dfp", OPTION_MASK_DFP, false, true },
31750 { "htm", OPTION_MASK_HTM, false, true },
31751 { "isel", OPTION_MASK_ISEL, false, true },
31752 { "mfcrf", OPTION_MASK_MFCRF, false, true },
31753 { "mfpgpr", OPTION_MASK_MFPGPR, false, true },
31754 { "mulhw", OPTION_MASK_MULHW, false, true },
31755 { "multiple", OPTION_MASK_MULTIPLE, false, true },
31756 { "popcntb", OPTION_MASK_POPCNTB, false, true },
31757 { "popcntd", OPTION_MASK_POPCNTD, false, true },
31758 { "power8-fusion", OPTION_MASK_P8_FUSION, false, true },
31759 { "power8-fusion-sign", OPTION_MASK_P8_FUSION_SIGN, false, true },
31760 { "power8-vector", OPTION_MASK_P8_VECTOR, false, true },
31761 { "powerpc-gfxopt", OPTION_MASK_PPC_GFXOPT, false, true },
31762 { "powerpc-gpopt", OPTION_MASK_PPC_GPOPT, false, true },
31763 { "quad-memory", OPTION_MASK_QUAD_MEMORY, false, true },
31764 { "quad-memory-atomic", OPTION_MASK_QUAD_MEMORY_ATOMIC, false, true },
31765 { "recip-precision", OPTION_MASK_RECIP_PRECISION, false, true },
31766 { "string", OPTION_MASK_STRING, false, true },
31767 { "update", OPTION_MASK_NO_UPDATE, true , true },
31768 { "upper-regs-df", OPTION_MASK_UPPER_REGS_DF, false, false },
31769 { "upper-regs-sf", OPTION_MASK_UPPER_REGS_SF, false, false },
31770 { "vsx", OPTION_MASK_VSX, false, true },
31771 { "vsx-timode", OPTION_MASK_VSX_TIMODE, false, true },
31772 #ifdef OPTION_MASK_64BIT
31773 #if TARGET_AIX_OS
31774 { "aix64", OPTION_MASK_64BIT, false, false },
31775 { "aix32", OPTION_MASK_64BIT, true, false },
31776 #else
31777 { "64", OPTION_MASK_64BIT, false, false },
31778 { "32", OPTION_MASK_64BIT, true, false },
31779 #endif
31780 #endif
31781 #ifdef OPTION_MASK_EABI
31782 { "eabi", OPTION_MASK_EABI, false, false },
31783 #endif
31784 #ifdef OPTION_MASK_LITTLE_ENDIAN
31785 { "little", OPTION_MASK_LITTLE_ENDIAN, false, false },
31786 { "big", OPTION_MASK_LITTLE_ENDIAN, true, false },
31787 #endif
31788 #ifdef OPTION_MASK_RELOCATABLE
31789 { "relocatable", OPTION_MASK_RELOCATABLE, false, false },
31790 #endif
31791 #ifdef OPTION_MASK_STRICT_ALIGN
31792 { "strict-align", OPTION_MASK_STRICT_ALIGN, false, false },
31793 #endif
31794 { "soft-float", OPTION_MASK_SOFT_FLOAT, false, false },
31795 { "string", OPTION_MASK_STRING, false, false },
31796 };
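/* Editor's note: the names in the table above are the strings accepted
   by attribute((target)) and #pragma GCC target.  A hypothetical usage
   sketch (illustrative only):  */
#if 0
__attribute__((__target__("vsx,no-multiple")))
extern void foo (void);   /* enables VSX (and hence AltiVec), disables
                             load/store multiple for this function.  */
#endif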
31797
31798 /* Builtin mask mapping for printing the flags. */
31799 static struct rs6000_opt_mask const rs6000_builtin_mask_names[] =
31800 {
31801 { "altivec", RS6000_BTM_ALTIVEC, false, false },
31802 { "vsx", RS6000_BTM_VSX, false, false },
31803 { "spe", RS6000_BTM_SPE, false, false },
31804 { "paired", RS6000_BTM_PAIRED, false, false },
31805 { "fre", RS6000_BTM_FRE, false, false },
31806 { "fres", RS6000_BTM_FRES, false, false },
31807 { "frsqrte", RS6000_BTM_FRSQRTE, false, false },
31808 { "frsqrtes", RS6000_BTM_FRSQRTES, false, false },
31809 { "popcntd", RS6000_BTM_POPCNTD, false, false },
31810 { "cell", RS6000_BTM_CELL, false, false },
31811 { "power8-vector", RS6000_BTM_P8_VECTOR, false, false },
31812 { "crypto", RS6000_BTM_CRYPTO, false, false },
31813 { "htm", RS6000_BTM_HTM, false, false },
31814 { "hard-dfp", RS6000_BTM_DFP, false, false },
31815 { "hard-float", RS6000_BTM_HARD_FLOAT, false, false },
31816 { "long-double-128", RS6000_BTM_LDBL128, false, false },
31817 };
31818
31819 /* Option variables that we want to support inside attribute((target)) and
31820 #pragma GCC target operations. */
31821
31822 struct rs6000_opt_var {
31823 const char *name; /* option name */
31824 size_t global_offset; /* offset of the option in global_options. */
31825 size_t target_offset; /* offset of the option in target options. */
31826 };
31827
31828 static struct rs6000_opt_var const rs6000_opt_vars[] =
31829 {
31830 { "friz",
31831 offsetof (struct gcc_options, x_TARGET_FRIZ),
31832 offsetof (struct cl_target_option, x_TARGET_FRIZ), },
31833 { "avoid-indexed-addresses",
31834 offsetof (struct gcc_options, x_TARGET_AVOID_XFORM),
31835 offsetof (struct cl_target_option, x_TARGET_AVOID_XFORM) },
31836 { "paired",
31837 offsetof (struct gcc_options, x_rs6000_paired_float),
31838 offsetof (struct cl_target_option, x_rs6000_paired_float), },
31839 { "longcall",
31840 offsetof (struct gcc_options, x_rs6000_default_long_calls),
31841 offsetof (struct cl_target_option, x_rs6000_default_long_calls), },
31842 };
31843
31844 /* Inner function to handle attribute((target("..."))) and #pragma GCC target
31845 parsing. Return true if there were no errors. */
31846
31847 static bool
31848 rs6000_inner_target_options (tree args, bool attr_p)
31849 {
31850 bool ret = true;
31851
31852 if (args == NULL_TREE)
31853 ;
31854
31855 else if (TREE_CODE (args) == STRING_CST)
31856 {
31857 char *p = ASTRDUP (TREE_STRING_POINTER (args));
31858 char *q;
31859
31860 while ((q = strtok (p, ",")) != NULL)
31861 {
31862 bool error_p = false;
31863 bool not_valid_p = false;
31864 const char *cpu_opt = NULL;
31865
31866 p = NULL;
31867 if (strncmp (q, "cpu=", 4) == 0)
31868 {
31869 int cpu_index = rs6000_cpu_name_lookup (q+4);
31870 if (cpu_index >= 0)
31871 rs6000_cpu_index = cpu_index;
31872 else
31873 {
31874 error_p = true;
31875 cpu_opt = q+4;
31876 }
31877 }
31878 else if (strncmp (q, "tune=", 5) == 0)
31879 {
31880 int tune_index = rs6000_cpu_name_lookup (q+5);
31881 if (tune_index >= 0)
31882 rs6000_tune_index = tune_index;
31883 else
31884 {
31885 error_p = true;
31886 cpu_opt = q+5;
31887 }
31888 }
31889 else
31890 {
31891 size_t i;
31892 bool invert = false;
31893 char *r = q;
31894
31895 error_p = true;
31896 if (strncmp (r, "no-", 3) == 0)
31897 {
31898 invert = true;
31899 r += 3;
31900 }
31901
31902 for (i = 0; i < ARRAY_SIZE (rs6000_opt_masks); i++)
31903 if (strcmp (r, rs6000_opt_masks[i].name) == 0)
31904 {
31905 HOST_WIDE_INT mask = rs6000_opt_masks[i].mask;
31906
31907 if (!rs6000_opt_masks[i].valid_target)
31908 not_valid_p = true;
31909 else
31910 {
31911 error_p = false;
31912 rs6000_isa_flags_explicit |= mask;
31913
31914 /* VSX needs altivec, so -mvsx automagically sets
31915 altivec. */
31916 if (mask == OPTION_MASK_VSX && !invert)
31917 mask |= OPTION_MASK_ALTIVEC;
31918
31919 if (rs6000_opt_masks[i].invert)
31920 invert = !invert;
31921
31922 if (invert)
31923 rs6000_isa_flags &= ~mask;
31924 else
31925 rs6000_isa_flags |= mask;
31926 }
31927 break;
31928 }
31929
31930 if (error_p && !not_valid_p)
31931 {
31932 for (i = 0; i < ARRAY_SIZE (rs6000_opt_vars); i++)
31933 if (strcmp (r, rs6000_opt_vars[i].name) == 0)
31934 {
31935 size_t j = rs6000_opt_vars[i].global_offset;
31936 *((int *) ((char *)&global_options + j)) = !invert;
31937 error_p = false;
31938 break;
31939 }
31940 }
31941 }
31942
31943 if (error_p)
31944 {
31945 const char *eprefix, *esuffix;
31946
31947 ret = false;
31948 if (attr_p)
31949 {
31950 eprefix = "__attribute__((__target__(";
31951 esuffix = ")))";
31952 }
31953 else
31954 {
31955 eprefix = "#pragma GCC target ";
31956 esuffix = "";
31957 }
31958
31959 if (cpu_opt)
31960 error ("invalid cpu \"%s\" for %s\"%s\"%s", cpu_opt, eprefix,
31961 q, esuffix);
31962 else if (not_valid_p)
31963 error ("%s\"%s\"%s is not allowed", eprefix, q, esuffix);
31964 else
31965 error ("%s\"%s\"%s is invalid", eprefix, q, esuffix);
31966 }
31967 }
31968 }
31969
31970 else if (TREE_CODE (args) == TREE_LIST)
31971 {
31972 do
31973 {
31974 tree value = TREE_VALUE (args);
31975 if (value)
31976 {
31977 bool ret2 = rs6000_inner_target_options (value, attr_p);
31978 if (!ret2)
31979 ret = false;
31980 }
31981 args = TREE_CHAIN (args);
31982 }
31983 while (args != NULL_TREE);
31984 }
31985
31986 else
31987 gcc_unreachable ();
31988
31989 return ret;
31990 }
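/* Editor's note: a worked trace of the parser above (illustrative
   only).  For the string "cpu=power8,no-vsx", the first token sets
   rs6000_cpu_index via rs6000_cpu_name_lookup; the second strips the
   "no-" prefix, matches the "vsx" entry in rs6000_opt_masks, records
   OPTION_MASK_VSX in rs6000_isa_flags_explicit, and clears it from
   rs6000_isa_flags.  The automatic OPTION_MASK_ALTIVEC addition only
   applies when enabling VSX, not when disabling it.  */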
31991
31992 /* Print out the target options as a list for -mdebug=target. */
31993
31994 static void
31995 rs6000_debug_target_options (tree args, const char *prefix)
31996 {
31997 if (args == NULL_TREE)
31998 fprintf (stderr, "%s<NULL>", prefix);
31999
32000 else if (TREE_CODE (args) == STRING_CST)
32001 {
32002 char *p = ASTRDUP (TREE_STRING_POINTER (args));
32003 char *q;
32004
32005 while ((q = strtok (p, ",")) != NULL)
32006 {
32007 p = NULL;
32008 fprintf (stderr, "%s\"%s\"", prefix, q);
32009 prefix = ", ";
32010 }
32011 }
32012
32013 else if (TREE_CODE (args) == TREE_LIST)
32014 {
32015 do
32016 {
32017 tree value = TREE_VALUE (args);
32018 if (value)
32019 {
32020 rs6000_debug_target_options (value, prefix);
32021 prefix = ", ";
32022 }
32023 args = TREE_CHAIN (args);
32024 }
32025 while (args != NULL_TREE);
32026 }
32027
32028 else
32029 gcc_unreachable ();
32030
32031 return;
32032 }
32033
32034 \f
32035 /* Hook to validate attribute((target("..."))). */
32036
32037 static bool
32038 rs6000_valid_attribute_p (tree fndecl,
32039 tree ARG_UNUSED (name),
32040 tree args,
32041 int flags)
32042 {
32043 struct cl_target_option cur_target;
32044 bool ret;
32045 tree old_optimize = build_optimization_node (&global_options);
32046 tree new_target, new_optimize;
32047 tree func_optimize = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl);
32048
32049 gcc_assert ((fndecl != NULL_TREE) && (args != NULL_TREE));
32050
32051 if (TARGET_DEBUG_TARGET)
32052 {
32053 tree tname = DECL_NAME (fndecl);
32054 fprintf (stderr, "\n==================== rs6000_valid_attribute_p:\n");
32055 if (tname)
32056 fprintf (stderr, "function: %.*s\n",
32057 (int) IDENTIFIER_LENGTH (tname),
32058 IDENTIFIER_POINTER (tname));
32059 else
32060 fprintf (stderr, "function: unknown\n");
32061
32062 fprintf (stderr, "args:");
32063 rs6000_debug_target_options (args, " ");
32064 fprintf (stderr, "\n");
32065
32066 if (flags)
32067 fprintf (stderr, "flags: 0x%x\n", flags);
32068
32069 fprintf (stderr, "--------------------\n");
32070 }
32071
32075 /* If the function changed the optimization levels as well as setting target
32076 options, start with the optimizations specified. */
32077 if (func_optimize && func_optimize != old_optimize)
32078 cl_optimization_restore (&global_options,
32079 TREE_OPTIMIZATION (func_optimize));
32080
32081 /* The target attributes may also change some optimization flags, so update
32082 the optimization options if necessary. */
32083 cl_target_option_save (&cur_target, &global_options);
32084 rs6000_cpu_index = rs6000_tune_index = -1;
32085 ret = rs6000_inner_target_options (args, true);
32086
32087 /* Set up any additional state. */
32088 if (ret)
32089 {
32090 ret = rs6000_option_override_internal (false);
32091 new_target = build_target_option_node (&global_options);
32092 }
32093 else
32094 new_target = NULL;
32095
32096 new_optimize = build_optimization_node (&global_options);
32097
32098 if (!new_target)
32099 ret = false;
32100
32101 else if (fndecl)
32102 {
32103 DECL_FUNCTION_SPECIFIC_TARGET (fndecl) = new_target;
32104
32105 if (old_optimize != new_optimize)
32106 DECL_FUNCTION_SPECIFIC_OPTIMIZATION (fndecl) = new_optimize;
32107 }
32108
32109 cl_target_option_restore (&global_options, &cur_target);
32110
32111 if (old_optimize != new_optimize)
32112 cl_optimization_restore (&global_options,
32113 TREE_OPTIMIZATION (old_optimize));
32114
32115 return ret;
32116 }
32117
32118 \f
32119 /* Hook to validate the current #pragma GCC target and set the state, and
32120 update the macros based on what was changed. If ARGS is NULL, then
32121 POP_TARGET is used to reset the options. */
32122
32123 bool
32124 rs6000_pragma_target_parse (tree args, tree pop_target)
32125 {
32126 tree prev_tree = build_target_option_node (&global_options);
32127 tree cur_tree;
32128 struct cl_target_option *prev_opt, *cur_opt;
32129 HOST_WIDE_INT prev_flags, cur_flags, diff_flags;
32130 HOST_WIDE_INT prev_bumask, cur_bumask, diff_bumask;
32131
32132 if (TARGET_DEBUG_TARGET)
32133 {
32134 fprintf (stderr, "\n==================== rs6000_pragma_target_parse\n");
32135 fprintf (stderr, "args:");
32136 rs6000_debug_target_options (args, " ");
32137 fprintf (stderr, "\n");
32138
32139 if (pop_target)
32140 {
32141 fprintf (stderr, "pop_target:\n");
32142 debug_tree (pop_target);
32143 }
32144 else
32145 fprintf (stderr, "pop_target: <NULL>\n");
32146
32147 fprintf (stderr, "--------------------\n");
32148 }
32149
32150 if (! args)
32151 {
32152 cur_tree = ((pop_target)
32153 ? pop_target
32154 : target_option_default_node);
32155 cl_target_option_restore (&global_options,
32156 TREE_TARGET_OPTION (cur_tree));
32157 }
32158 else
32159 {
32160 rs6000_cpu_index = rs6000_tune_index = -1;
32161 if (!rs6000_inner_target_options (args, false)
32162 || !rs6000_option_override_internal (false)
32163 || (cur_tree = build_target_option_node (&global_options))
32164 == NULL_TREE)
32165 {
32166 if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
32167 fprintf (stderr, "invalid pragma\n");
32168
32169 return false;
32170 }
32171 }
32172
32173 target_option_current_node = cur_tree;
32174
32175 /* If we have the preprocessor linked in (i.e. C or C++ languages), possibly
32176 change the macros that are defined. */
32177 if (rs6000_target_modify_macros_ptr)
32178 {
32179 prev_opt = TREE_TARGET_OPTION (prev_tree);
32180 prev_bumask = prev_opt->x_rs6000_builtin_mask;
32181 prev_flags = prev_opt->x_rs6000_isa_flags;
32182
32183 cur_opt = TREE_TARGET_OPTION (cur_tree);
32184 cur_flags = cur_opt->x_rs6000_isa_flags;
32185 cur_bumask = cur_opt->x_rs6000_builtin_mask;
32186
32187 diff_bumask = (prev_bumask ^ cur_bumask);
32188 diff_flags = (prev_flags ^ cur_flags);
32189
32190 if ((diff_flags != 0) || (diff_bumask != 0))
32191 {
32192 /* Delete old macros. */
32193 rs6000_target_modify_macros_ptr (false,
32194 prev_flags & diff_flags,
32195 prev_bumask & diff_bumask);
32196
32197 /* Define new macros. */
32198 rs6000_target_modify_macros_ptr (true,
32199 cur_flags & diff_flags,
32200 cur_bumask & diff_bumask);
32201 }
32202 }
32203
32204 return true;
32205 }
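/* Editor's note: a hypothetical usage sketch of the pragma this hook
   implements (illustrative only); on pop, the hook is re-entered with
   ARGS of NULL and the saved node as POP_TARGET:  */
#if 0
#pragma GCC push_options
#pragma GCC target ("vsx")
void vec_routine (void);   /* compiled with VSX (and AltiVec) enabled */
#pragma GCC pop_options
#endif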
32206
32207 \f
32208 /* Remember the last target of rs6000_set_current_function. */
32209 static GTY(()) tree rs6000_previous_fndecl;
32210
32211 /* Establish appropriate back-end context for processing the function
32212 FNDECL. The argument might be NULL to indicate processing at top
32213 level, outside of any function scope. */
32214 static void
32215 rs6000_set_current_function (tree fndecl)
32216 {
32217 tree old_tree = (rs6000_previous_fndecl
32218 ? DECL_FUNCTION_SPECIFIC_TARGET (rs6000_previous_fndecl)
32219 : NULL_TREE);
32220
32221 tree new_tree = (fndecl
32222 ? DECL_FUNCTION_SPECIFIC_TARGET (fndecl)
32223 : NULL_TREE);
32224
32225 if (TARGET_DEBUG_TARGET)
32226 {
32227 bool print_final = false;
32228 fprintf (stderr, "\n==================== rs6000_set_current_function");
32229
32230 if (fndecl)
32231 fprintf (stderr, ", fndecl %s (%p)",
32232 (DECL_NAME (fndecl)
32233 ? IDENTIFIER_POINTER (DECL_NAME (fndecl))
32234 : "<unknown>"), (void *)fndecl);
32235
32236 if (rs6000_previous_fndecl)
32237 fprintf (stderr, ", prev_fndecl (%p)", (void *)rs6000_previous_fndecl);
32238
32239 fprintf (stderr, "\n");
32240 if (new_tree)
32241 {
32242 fprintf (stderr, "\nnew fndecl target specific options:\n");
32243 debug_tree (new_tree);
32244 print_final = true;
32245 }
32246
32247 if (old_tree)
32248 {
32249 fprintf (stderr, "\nold fndecl target specific options:\n");
32250 debug_tree (old_tree);
32251 print_final = true;
32252 }
32253
32254 if (print_final)
32255 fprintf (stderr, "--------------------\n");
32256 }
32257
32258 /* Only change the context if the function changes. This hook is called
32259 several times in the course of compiling a function, and we don't want to
32260 slow things down too much or call target_reinit when it isn't safe. */
32261 if (fndecl && fndecl != rs6000_previous_fndecl)
32262 {
32263 rs6000_previous_fndecl = fndecl;
32264 if (old_tree == new_tree)
32265 ;
32266
32267 else if (new_tree)
32268 {
32269 cl_target_option_restore (&global_options,
32270 TREE_TARGET_OPTION (new_tree));
32271 if (TREE_TARGET_GLOBALS (new_tree))
32272 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
32273 else
32274 TREE_TARGET_GLOBALS (new_tree)
32275 = save_target_globals_default_opts ();
32276 }
32277
32278 else if (old_tree)
32279 {
32280 new_tree = target_option_current_node;
32281 cl_target_option_restore (&global_options,
32282 TREE_TARGET_OPTION (new_tree));
32283 if (TREE_TARGET_GLOBALS (new_tree))
32284 restore_target_globals (TREE_TARGET_GLOBALS (new_tree));
32285 else if (new_tree == target_option_default_node)
32286 restore_target_globals (&default_target_globals);
32287 else
32288 TREE_TARGET_GLOBALS (new_tree)
32289 = save_target_globals_default_opts ();
32290 }
32291 }
32292 }
32293
32294 \f
32295 /* Save the current options.  */
32296
32297 static void
32298 rs6000_function_specific_save (struct cl_target_option *ptr,
32299 struct gcc_options *opts)
32300 {
32301 ptr->x_rs6000_isa_flags = opts->x_rs6000_isa_flags;
32302 ptr->x_rs6000_isa_flags_explicit = opts->x_rs6000_isa_flags_explicit;
32303 }
32304
32305 /* Restore the current options.  */
32306
32307 static void
32308 rs6000_function_specific_restore (struct gcc_options *opts,
32309 struct cl_target_option *ptr)
32311 {
32312 opts->x_rs6000_isa_flags = ptr->x_rs6000_isa_flags;
32313 opts->x_rs6000_isa_flags_explicit = ptr->x_rs6000_isa_flags_explicit;
32314 (void) rs6000_option_override_internal (false);
32315 }
32316
32317 /* Print the current options.  */
32318
32319 static void
32320 rs6000_function_specific_print (FILE *file, int indent,
32321 struct cl_target_option *ptr)
32322 {
32323 rs6000_print_isa_options (file, indent, "Isa options set",
32324 ptr->x_rs6000_isa_flags);
32325
32326 rs6000_print_isa_options (file, indent, "Isa options explicit",
32327 ptr->x_rs6000_isa_flags_explicit);
32328 }
32329
32330 /* Helper function to print the current isa or misc options on a line. */
32331
32332 static void
32333 rs6000_print_options_internal (FILE *file,
32334 int indent,
32335 const char *string,
32336 HOST_WIDE_INT flags,
32337 const char *prefix,
32338 const struct rs6000_opt_mask *opts,
32339 size_t num_elements)
32340 {
32341 size_t i;
32342 size_t start_column = 0;
32343 size_t cur_column;
32344 size_t max_column = 76;
32345 const char *comma = "";
32346
32347 if (indent)
32348 start_column += fprintf (file, "%*s", indent, "");
32349
32350 if (!flags)
32351 {
32352 fprintf (file, DEBUG_FMT_S, string, "<none>");
32353 return;
32354 }
32355
32356 start_column += fprintf (file, DEBUG_FMT_WX, string, flags);
32357
32358 /* Print the various mask options. */
32359 cur_column = start_column;
32360 for (i = 0; i < num_elements; i++)
32361 {
32362 if ((flags & opts[i].mask) != 0)
32363 {
32364 const char *no_str = opts[i].invert ? "no-" : "";
32365 size_t len = (strlen (comma)
32366 + strlen (prefix)
32367 + strlen (no_str)
32368 + strlen (opts[i].name));
32369
32370 cur_column += len;
32371 if (cur_column > max_column)
32372 {
32373 fprintf (file, ", \\\n%*s", (int)start_column, "");
32374 cur_column = start_column + len;
32375 comma = "";
32376 }
32377
32378 fprintf (file, "%s%s%s%s", comma, prefix, no_str,
32379 opts[i].name);
32380 flags &= ~ opts[i].mask;
32381 comma = ", ";
32382 }
32383 }
32384
32385 fputs ("\n", file);
32386 }
32387
32388 /* Helper function to print the current isa options on a line. */
32389
32390 static void
32391 rs6000_print_isa_options (FILE *file, int indent, const char *string,
32392 HOST_WIDE_INT flags)
32393 {
32394 rs6000_print_options_internal (file, indent, string, flags, "-m",
32395 &rs6000_opt_masks[0],
32396 ARRAY_SIZE (rs6000_opt_masks));
32397 }
32398
32399 static void
32400 rs6000_print_builtin_options (FILE *file, int indent, const char *string,
32401 HOST_WIDE_INT flags)
32402 {
32403 rs6000_print_options_internal (file, indent, string, flags, "",
32404 &rs6000_builtin_mask_names[0],
32405 ARRAY_SIZE (rs6000_builtin_mask_names));
32406 }
32407
32408 \f
32409 /* Hook to determine if one function can safely inline another. */
32410
32411 static bool
32412 rs6000_can_inline_p (tree caller, tree callee)
32413 {
32414 bool ret = false;
32415 tree caller_tree = DECL_FUNCTION_SPECIFIC_TARGET (caller);
32416 tree callee_tree = DECL_FUNCTION_SPECIFIC_TARGET (callee);
32417
32418 /* If callee has no option attributes, then it is ok to inline. */
32419 if (!callee_tree)
32420 ret = true;
32421
32422 /* If caller has no option attributes, but callee does then it is not ok to
32423 inline. */
32424 else if (!caller_tree)
32425 ret = false;
32426
32427 else
32428 {
32429 struct cl_target_option *caller_opts = TREE_TARGET_OPTION (caller_tree);
32430 struct cl_target_option *callee_opts = TREE_TARGET_OPTION (callee_tree);
32431
32432 /* Callee's options should be a subset of the caller's, i.e. a vsx function
32433 can inline an altivec function but a non-vsx function can't inline a
32434 vsx function. */
32435 if ((caller_opts->x_rs6000_isa_flags & callee_opts->x_rs6000_isa_flags)
32436 == callee_opts->x_rs6000_isa_flags)
32437 ret = true;
32438 }
32439
32440 if (TARGET_DEBUG_TARGET)
32441 fprintf (stderr, "rs6000_can_inline_p:, caller %s, callee %s, %s inline\n",
32442 (DECL_NAME (caller)
32443 ? IDENTIFIER_POINTER (DECL_NAME (caller))
32444 : "<unknown>"),
32445 (DECL_NAME (callee)
32446 ? IDENTIFIER_POINTER (DECL_NAME (callee))
32447 : "<unknown>"),
32448 (ret ? "can" : "cannot"));
32449
32450 return ret;
32451 }
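/* Editor's note: the subset rule above in user terms (hypothetical
   example, illustrative only).  Given

     __attribute__((target("altivec"))) static int callee (void);
     __attribute__((target("vsx")))     int caller (void);

   CALLER may inline CALLEE, because enabling VSX also enables AltiVec
   and hence the callee's ISA flags are a subset of the caller's; a
   plain function could not inline CALLER.  */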
32452 \f
32453 /* Allocate a stack temp and fix up the address so it meets the particular
32454 memory requirements (either offsettable or REG+REG addressing). */
32455
32456 rtx
32457 rs6000_allocate_stack_temp (enum machine_mode mode,
32458 bool offsettable_p,
32459 bool reg_reg_p)
32460 {
32461 rtx stack = assign_stack_temp (mode, GET_MODE_SIZE (mode));
32462 rtx addr = XEXP (stack, 0);
32463 int strict_p = (reload_in_progress || reload_completed);
32464
32465 if (!legitimate_indirect_address_p (addr, strict_p))
32466 {
32467 if (offsettable_p
32468 && !rs6000_legitimate_offset_address_p (mode, addr, strict_p, true))
32469 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
32470
32471 else if (reg_reg_p && !legitimate_indexed_address_p (addr, strict_p))
32472 stack = replace_equiv_address (stack, copy_addr_to_reg (addr));
32473 }
32474
32475 return stack;
32476 }
32477
32478 /* Given a memory reference, if it does not use reg or reg+reg addressing,
32479 convert it to such a form to deal with instructions like STFIWX that
32480 only take reg+reg addressing. */
32481
32482 rtx
32483 rs6000_address_for_fpconvert (rtx x)
32484 {
32485 int strict_p = (reload_in_progress || reload_completed);
32486 rtx addr;
32487
32488 gcc_assert (MEM_P (x));
32489 addr = XEXP (x, 0);
32490 if (! legitimate_indirect_address_p (addr, strict_p)
32491 && ! legitimate_indexed_address_p (addr, strict_p))
32492 {
32493 if (GET_CODE (addr) == PRE_INC || GET_CODE (addr) == PRE_DEC)
32494 {
32495 rtx reg = XEXP (addr, 0);
32496 HOST_WIDE_INT size = GET_MODE_SIZE (GET_MODE (x));
32497 rtx size_rtx = GEN_INT ((GET_CODE (addr) == PRE_DEC) ? -size : size);
32498 gcc_assert (REG_P (reg));
32499 emit_insn (gen_add3_insn (reg, reg, size_rtx));
32500 addr = reg;
32501 }
32502 else if (GET_CODE (addr) == PRE_MODIFY)
32503 {
32504 rtx reg = XEXP (addr, 0);
32505 rtx expr = XEXP (addr, 1);
32506 gcc_assert (REG_P (reg));
32507 gcc_assert (GET_CODE (expr) == PLUS);
32508 emit_insn (gen_add3_insn (reg, XEXP (expr, 0), XEXP (expr, 1)));
32509 addr = reg;
32510 }
32511
32512 x = replace_equiv_address (x, copy_addr_to_reg (addr));
32513 }
32514
32515 return x;
32516 }
32517
32518 /* Given a memory reference, if it is not in the form for altivec memory
32519 reference instructions (i.e. reg or reg+reg addressing with AND of -16),
32520 convert to the altivec format. */
32521
32522 rtx
32523 rs6000_address_for_altivec (rtx x)
32524 {
32525 gcc_assert (MEM_P (x));
32526 if (!altivec_indexed_or_indirect_operand (x, GET_MODE (x)))
32527 {
32528 rtx addr = XEXP (x, 0);
32529 int strict_p = (reload_in_progress || reload_completed);
32530
32531 if (!legitimate_indexed_address_p (addr, strict_p)
32532 && !legitimate_indirect_address_p (addr, strict_p))
32533 addr = copy_to_mode_reg (Pmode, addr);
32534
32535 addr = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
32536 x = change_address (x, GET_MODE (x), addr);
32537 }
32538
32539 return x;
32540 }
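/* Editor's note: the AND with -16 above mirrors the behavior of the
   lvx/stvx hardware instructions, which ignore the low four address
   bits; e.g. an address of 0x1003 accesses the 16-byte block starting
   at 0x1000.  */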
32541
32542 /* Implement TARGET_LEGITIMATE_CONSTANT_P.
32543
32544 On the RS/6000, all integer constants are acceptable, most won't be valid
32545 for particular insns, though. Only easy FP constants are acceptable. */
32546
32547 static bool
32548 rs6000_legitimate_constant_p (enum machine_mode mode, rtx x)
32549 {
32550 if (TARGET_ELF && tls_referenced_p (x))
32551 return false;
32552
32553 return ((GET_CODE (x) != CONST_DOUBLE && GET_CODE (x) != CONST_VECTOR)
32554 || GET_MODE (x) == VOIDmode
32555 || (TARGET_POWERPC64 && mode == DImode)
32556 || easy_fp_constant (x, mode)
32557 || easy_vector_constant (x, mode));
32558 }
32559
32560 \f
32561
32562 /* Expand code to perform a call under the AIX or ELFv2 ABI. */
32563
32564 void
32565 rs6000_call_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
32566 {
32567 rtx toc_reg = gen_rtx_REG (Pmode, TOC_REGNUM);
32568 rtx toc_load = NULL_RTX;
32569 rtx toc_restore = NULL_RTX;
32570 rtx func_addr;
32571 rtx abi_reg = NULL_RTX;
32572 rtx call[4];
32573 int n_call;
32574 rtx insn;
32575
32576 /* Handle longcall attributes. */
32577 if (INTVAL (cookie) & CALL_LONG)
32578 func_desc = rs6000_longcall_ref (func_desc);
32579
32580 /* Handle indirect calls. */
32581 if (GET_CODE (func_desc) != SYMBOL_REF
32582 || (DEFAULT_ABI == ABI_AIX && !SYMBOL_REF_FUNCTION_P (func_desc)))
32583 {
32584 /* Save the TOC into its reserved slot before the call,
32585 and prepare to restore it after the call. */
32586 rtx stack_ptr = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
32587 rtx stack_toc_offset = GEN_INT (RS6000_TOC_SAVE_SLOT);
32588 rtx stack_toc_mem = gen_frame_mem (Pmode,
32589 gen_rtx_PLUS (Pmode, stack_ptr,
32590 stack_toc_offset));
32591 toc_restore = gen_rtx_SET (VOIDmode, toc_reg, stack_toc_mem);
32592
32593 /* Can we optimize saving the TOC in the prologue or
32594 do we need to do it at every call? */
32595 if (TARGET_SAVE_TOC_INDIRECT && !cfun->calls_alloca)
32596 cfun->machine->save_toc_in_prologue = true;
32597 else
32598 {
32599 MEM_VOLATILE_P (stack_toc_mem) = 1;
32600 emit_move_insn (stack_toc_mem, toc_reg);
32601 }
32602
32603 if (DEFAULT_ABI == ABI_ELFv2)
32604 {
32605 /* A function pointer in the ELFv2 ABI is just a plain address, but
32606 the ABI requires it to be loaded into r12 before the call. */
32607 func_addr = gen_rtx_REG (Pmode, 12);
32608 emit_move_insn (func_addr, func_desc);
32609 abi_reg = func_addr;
32610 }
32611 else
32612 {
32613 /* A function pointer under AIX is a pointer to a data area whose
32614 first word contains the actual address of the function, whose
32615 second word contains a pointer to its TOC, and whose third word
32616 contains a value to place in the static chain register (r11).
32617 Note that if we load the static chain, our "trampoline" need
32618 not have any executable code. */
32619
32620 /* Load up address of the actual function. */
32621 func_desc = force_reg (Pmode, func_desc);
32622 func_addr = gen_reg_rtx (Pmode);
32623 emit_move_insn (func_addr, gen_rtx_MEM (Pmode, func_desc));
32624
32625 /* Prepare to load the TOC of the called function. Note that the
32626 TOC load must happen immediately before the actual call so
32627 that unwinding the TOC registers works correctly. See the
32628 comment in frob_update_context. */
32629 rtx func_toc_offset = GEN_INT (GET_MODE_SIZE (Pmode));
32630 rtx func_toc_mem = gen_rtx_MEM (Pmode,
32631 gen_rtx_PLUS (Pmode, func_desc,
32632 func_toc_offset));
32633 toc_load = gen_rtx_USE (VOIDmode, func_toc_mem);
32634
32635 /* If we have a static chain, load it up. */
32636 if (TARGET_POINTERS_TO_NESTED_FUNCTIONS)
32637 {
32638 rtx sc_reg = gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
32639 rtx func_sc_offset = GEN_INT (2 * GET_MODE_SIZE (Pmode));
32640 rtx func_sc_mem = gen_rtx_MEM (Pmode,
32641 gen_rtx_PLUS (Pmode, func_desc,
32642 func_sc_offset));
32643 emit_move_insn (sc_reg, func_sc_mem);
32644 abi_reg = sc_reg;
32645 }
32646 }
32647 }
32648 else
32649 {
32650 /* Direct calls use the TOC: for local calls, the callee will
32651 assume the TOC register is set; for non-local calls, the
32652 PLT stub needs the TOC register. */
32653 abi_reg = toc_reg;
32654 func_addr = func_desc;
32655 }
32656
32657 /* Create the call. */
32658 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_addr), flag);
32659 if (value != NULL_RTX)
32660 call[0] = gen_rtx_SET (VOIDmode, value, call[0]);
32661 n_call = 1;
32662
32663 if (toc_load)
32664 call[n_call++] = toc_load;
32665 if (toc_restore)
32666 call[n_call++] = toc_restore;
32667
32668 call[n_call++] = gen_rtx_CLOBBER (VOIDmode, gen_rtx_REG (Pmode, LR_REGNO));
32669
32670 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (n_call, call));
32671 insn = emit_call_insn (insn);
32672
32673 /* Mention all registers defined by the ABI to hold information
32674 as uses in CALL_INSN_FUNCTION_USAGE. */
32675 if (abi_reg)
32676 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), abi_reg);
32677 }
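/* Editor's note: the AIX function descriptor described in the code
   above, written out as a C struct for clarity (a sketch; the struct
   name is hypothetical and not used by GCC):  */
#if 0
struct aix_func_desc
{
  void *entry;          /* code address used for the indirect branch  */
  void *toc;            /* callee's TOC value, restored into r2       */
  void *static_chain;   /* environment pointer, loaded into r11       */
};
#endif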
32678
32679 /* Expand code to perform a sibling call under the AIX or ELFv2 ABI. */
32680
32681 void
32682 rs6000_sibcall_aix (rtx value, rtx func_desc, rtx flag, rtx cookie)
32683 {
32684 rtx call[2];
32685 rtx insn;
32686
32687 gcc_assert (INTVAL (cookie) == 0);
32688
32689 /* Create the call. */
32690 call[0] = gen_rtx_CALL (VOIDmode, gen_rtx_MEM (SImode, func_desc), flag);
32691 if (value != NULL_RTX)
32692 call[0] = gen_rtx_SET (VOIDmode, value, call[0]);
32693
32694 call[1] = simple_return_rtx;
32695
32696 insn = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (2, call));
32697 insn = emit_call_insn (insn);
32698
32699 /* Note use of the TOC register. */
32700 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, TOC_REGNUM));
32701 /* We also need to mark a use of the link register, since the function we
32702 sibling-call to will use it to return to our caller. */
32703 use_reg (&CALL_INSN_FUNCTION_USAGE (insn), gen_rtx_REG (Pmode, LR_REGNO));
32704 }
32705
32706 /* Return whether we need to always update the saved TOC pointer when we update
32707 the stack pointer. */
32708
32709 static bool
32710 rs6000_save_toc_in_prologue_p (void)
32711 {
32712 return (cfun && cfun->machine && cfun->machine->save_toc_in_prologue);
32713 }
32714
32715 #ifdef HAVE_GAS_HIDDEN
32716 # define USE_HIDDEN_LINKONCE 1
32717 #else
32718 # define USE_HIDDEN_LINKONCE 0
32719 #endif
32720
32721 /* Fills in the label name that should be used for a 476 link stack thunk. */
32722
32723 void
32724 get_ppc476_thunk_name (char name[32])
32725 {
32726 gcc_assert (TARGET_LINK_STACK);
32727
32728 if (USE_HIDDEN_LINKONCE)
32729 sprintf (name, "__ppc476.get_thunk");
32730 else
32731 ASM_GENERATE_INTERNAL_LABEL (name, "LPPC476_", 0);
32732 }
32733
32734 /* This function emits the simple thunk routine that is used to preserve
32735 the link stack on the 476 cpu. */
32736
32737 static void rs6000_code_end (void) ATTRIBUTE_UNUSED;
32738 static void
32739 rs6000_code_end (void)
32740 {
32741 char name[32];
32742 tree decl;
32743
32744 if (!TARGET_LINK_STACK)
32745 return;
32746
32747 get_ppc476_thunk_name (name);
32748
32749 decl = build_decl (BUILTINS_LOCATION, FUNCTION_DECL, get_identifier (name),
32750 build_function_type_list (void_type_node, NULL_TREE));
32751 DECL_RESULT (decl) = build_decl (BUILTINS_LOCATION, RESULT_DECL,
32752 NULL_TREE, void_type_node);
32753 TREE_PUBLIC (decl) = 1;
32754 TREE_STATIC (decl) = 1;
32755
32756 #if RS6000_WEAK
32757 if (USE_HIDDEN_LINKONCE)
32758 {
32759 cgraph_node::create (decl)->set_comdat_group (DECL_ASSEMBLER_NAME (decl));
32760 targetm.asm_out.unique_section (decl, 0);
32761 switch_to_section (get_named_section (decl, NULL, 0));
32762 DECL_WEAK (decl) = 1;
32763 ASM_WEAKEN_DECL (asm_out_file, decl, name, 0);
32764 targetm.asm_out.globalize_label (asm_out_file, name);
32765 targetm.asm_out.assemble_visibility (decl, VISIBILITY_HIDDEN);
32766 ASM_DECLARE_FUNCTION_NAME (asm_out_file, name, decl);
32767 }
32768 else
32769 #endif
32770 {
32771 switch_to_section (text_section);
32772 ASM_OUTPUT_LABEL (asm_out_file, name);
32773 }
32774
32775 DECL_INITIAL (decl) = make_node (BLOCK);
32776 current_function_decl = decl;
32777 init_function_start (decl);
32778 first_function_block_is_cold = false;
32779 /* Make sure unwind info is emitted for the thunk if needed. */
32780 final_start_function (emit_barrier (), asm_out_file, 1);
32781
32782 fputs ("\tblr\n", asm_out_file);
32783
32784 final_end_function ();
32785 init_insn_lengths ();
32786 free_after_compilation (cfun);
32787 set_cfun (NULL);
32788 current_function_decl = NULL;
32789 }
32790
32791 /* Add r30 to hard reg set if the prologue sets it up and it is not
32792 pic_offset_table_rtx. */
32793
32794 static void
32795 rs6000_set_up_by_prologue (struct hard_reg_set_container *set)
32796 {
32797 if (!TARGET_SINGLE_PIC_BASE
32798 && TARGET_TOC
32799 && TARGET_MINIMAL_TOC
32800 && get_pool_size () != 0)
32801 add_to_hard_reg_set (&set->set, Pmode, RS6000_PIC_OFFSET_TABLE_REGNUM);
32802 }
32803
32804 \f
32805 /* Helper function for rs6000_split_logical to emit a logical instruction after
32806 splitting the operation into individual GPR registers.
32807
32808 DEST is the destination register.
32809 OP1 and OP2 are the input source registers.
32810 CODE is the base operation (AND, IOR, XOR, NOT).
32811 MODE is the machine mode.
32812 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32813 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32814 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
32815
32816 static void
32817 rs6000_split_logical_inner (rtx dest,
32818 rtx op1,
32819 rtx op2,
32820 enum rtx_code code,
32821 enum machine_mode mode,
32822 bool complement_final_p,
32823 bool complement_op1_p,
32824 bool complement_op2_p)
32825 {
32826 rtx bool_rtx;
32827
32828 /* Optimize AND of 0/0xffffffff and IOR/XOR of 0. */
32829 if (op2 && GET_CODE (op2) == CONST_INT
32830 && (mode == SImode || (mode == DImode && TARGET_POWERPC64))
32831 && !complement_final_p && !complement_op1_p && !complement_op2_p)
32832 {
32833 HOST_WIDE_INT mask = GET_MODE_MASK (mode);
32834 HOST_WIDE_INT value = INTVAL (op2) & mask;
32835
32836 /* Optimize AND of 0 to just set 0. Optimize AND of -1 to be a move. */
32837 if (code == AND)
32838 {
32839 if (value == 0)
32840 {
32841 emit_insn (gen_rtx_SET (VOIDmode, dest, const0_rtx));
32842 return;
32843 }
32844
32845 else if (value == mask)
32846 {
32847 if (!rtx_equal_p (dest, op1))
32848 emit_insn (gen_rtx_SET (VOIDmode, dest, op1));
32849 return;
32850 }
32851 }
32852
32853 /* Optimize IOR/XOR of 0 to be a simple move. Split large operations
32854 into separate ORI/ORIS or XORI/XORIS instructions. */
32855 else if (code == IOR || code == XOR)
32856 {
32857 if (value == 0)
32858 {
32859 if (!rtx_equal_p (dest, op1))
32860 emit_insn (gen_rtx_SET (VOIDmode, dest, op1));
32861 return;
32862 }
32863 }
32864 }
32865
32866 if (code == AND && mode == SImode
32867 && !complement_final_p && !complement_op1_p && !complement_op2_p)
32868 {
32869 emit_insn (gen_andsi3 (dest, op1, op2));
32870 return;
32871 }
32872
32873 if (complement_op1_p)
32874 op1 = gen_rtx_NOT (mode, op1);
32875
32876 if (complement_op2_p)
32877 op2 = gen_rtx_NOT (mode, op2);
32878
32879 bool_rtx = ((code == NOT)
32880 ? gen_rtx_NOT (mode, op1)
32881 : gen_rtx_fmt_ee (code, mode, op1, op2));
32882
32883 if (complement_final_p)
32884 bool_rtx = gen_rtx_NOT (mode, bool_rtx);
32885
32886 emit_insn (gen_rtx_SET (VOIDmode, dest, bool_rtx));
32887 }
32888
32889 /* Split a DImode AND/IOR/XOR with a constant on a 32-bit system. These
32890 operations are split immediately during RTL generation to allow for more
32891 optimizations of the AND/IOR/XOR.
32892
32893 OPERANDS is an array containing the destination and two input operands.
32894 CODE is the base operation (AND, IOR, XOR, NOT).
32895 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
32896 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
32897 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
32901
32902 static void
32903 rs6000_split_logical_di (rtx operands[3],
32904 enum rtx_code code,
32905 bool complement_final_p,
32906 bool complement_op1_p,
32907 bool complement_op2_p)
32908 {
32909 const HOST_WIDE_INT lower_32bits = HOST_WIDE_INT_C(0xffffffff);
32910 const HOST_WIDE_INT upper_32bits = ~ lower_32bits;
32911 const HOST_WIDE_INT sign_bit = HOST_WIDE_INT_C(0x80000000);
32912 enum hi_lo { hi = 0, lo = 1 };
32913 rtx op0_hi_lo[2], op1_hi_lo[2], op2_hi_lo[2];
32914 size_t i;
32915
32916 op0_hi_lo[hi] = gen_highpart (SImode, operands[0]);
32917 op1_hi_lo[hi] = gen_highpart (SImode, operands[1]);
32918 op0_hi_lo[lo] = gen_lowpart (SImode, operands[0]);
32919 op1_hi_lo[lo] = gen_lowpart (SImode, operands[1]);
32920
32921 if (code == NOT)
32922 op2_hi_lo[hi] = op2_hi_lo[lo] = NULL_RTX;
32923 else
32924 {
32925 if (GET_CODE (operands[2]) != CONST_INT)
32926 {
32927 op2_hi_lo[hi] = gen_highpart_mode (SImode, DImode, operands[2]);
32928 op2_hi_lo[lo] = gen_lowpart (SImode, operands[2]);
32929 }
32930 else
32931 {
32932 HOST_WIDE_INT value = INTVAL (operands[2]);
32933 HOST_WIDE_INT value_hi_lo[2];
32934
32935 gcc_assert (!complement_final_p);
32936 gcc_assert (!complement_op1_p);
32937 gcc_assert (!complement_op2_p);
32938
32939 value_hi_lo[hi] = value >> 32;
32940 value_hi_lo[lo] = value & lower_32bits;
32941
32942 for (i = 0; i < 2; i++)
32943 {
32944 HOST_WIDE_INT sub_value = value_hi_lo[i];
32945
32946 if (sub_value & sign_bit)
32947 sub_value |= upper_32bits;
32948
32949 op2_hi_lo[i] = GEN_INT (sub_value);
32950
32951 /* If this is an AND instruction, check to see if we need to load
32952 the value in a register. */
32953 if (code == AND && sub_value != -1 && sub_value != 0
32954 && !and_operand (op2_hi_lo[i], SImode))
32955 op2_hi_lo[i] = force_reg (SImode, op2_hi_lo[i]);
32956 }
32957 }
32958 }
32959
32960 for (i = 0; i < 2; i++)
32961 {
32962 /* Split large IOR/XOR operations. */
32963 if ((code == IOR || code == XOR)
32964 && GET_CODE (op2_hi_lo[i]) == CONST_INT
32965 && !complement_final_p
32966 && !complement_op1_p
32967 && !complement_op2_p
32968 && !logical_const_operand (op2_hi_lo[i], SImode))
32969 {
32970 HOST_WIDE_INT value = INTVAL (op2_hi_lo[i]);
32971 HOST_WIDE_INT hi_16bits = value & HOST_WIDE_INT_C(0xffff0000);
32972 HOST_WIDE_INT lo_16bits = value & HOST_WIDE_INT_C(0x0000ffff);
32973 rtx tmp = gen_reg_rtx (SImode);
32974
32975 /* Make sure the constant is sign extended. */
32976 if ((hi_16bits & sign_bit) != 0)
32977 hi_16bits |= upper_32bits;
32978
32979 rs6000_split_logical_inner (tmp, op1_hi_lo[i], GEN_INT (hi_16bits),
32980 code, SImode, false, false, false);
32981
32982 rs6000_split_logical_inner (op0_hi_lo[i], tmp, GEN_INT (lo_16bits),
32983 code, SImode, false, false, false);
32984 }
32985 else
32986 rs6000_split_logical_inner (op0_hi_lo[i], op1_hi_lo[i], op2_hi_lo[i],
32987 code, SImode, complement_final_p,
32988 complement_op1_p, complement_op2_p);
32989 }
32990
32991 return;
32992 }
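/* Editor's note: a worked example of the IOR/XOR splitting above
   (illustrative only).  For xor rD,rA,0x12345678 the constant has
   nonzero bits in both 16-bit halves, so it is not a
   logical_const_operand and gets split into two instructions:

     xoris tmp,rA,0x1234    -- value & 0xffff0000
     xori  rD,tmp,0x5678    -- value & 0x0000ffff  */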
32993
32994 /* Split the insns that make up boolean operations operating on multiple GPR
32995 registers. The boolean MD patterns ensure that the inputs either are
32996 exactly the same as the output registers, or there is no overlap.
32997
32998 OPERANDS is an array containing the destination and two input operands.
32999 CODE is the base operation (AND, IOR, XOR, NOT).
33000 If COMPLEMENT_FINAL_P is true, wrap the whole operation with NOT.
33001 If COMPLEMENT_OP1_P is true, wrap operand1 with NOT.
33002 If COMPLEMENT_OP2_P is true, wrap operand2 with NOT. */
33003
33004 void
33005 rs6000_split_logical (rtx operands[3],
33006 enum rtx_code code,
33007 bool complement_final_p,
33008 bool complement_op1_p,
33009 bool complement_op2_p)
33010 {
33011 enum machine_mode mode = GET_MODE (operands[0]);
33012 enum machine_mode sub_mode;
33013 rtx op0, op1, op2;
33014 int sub_size, regno0, regno1, nregs, i;
33015
33016 /* If this is DImode, use the specialized version that can run before
33017 register allocation. */
33018 if (mode == DImode && !TARGET_POWERPC64)
33019 {
33020 rs6000_split_logical_di (operands, code, complement_final_p,
33021 complement_op1_p, complement_op2_p);
33022 return;
33023 }
33024
33025 op0 = operands[0];
33026 op1 = operands[1];
33027 op2 = (code == NOT) ? NULL_RTX : operands[2];
33028 sub_mode = (TARGET_POWERPC64) ? DImode : SImode;
33029 sub_size = GET_MODE_SIZE (sub_mode);
33030 regno0 = REGNO (op0);
33031 regno1 = REGNO (op1);
33032
33033 gcc_assert (reload_completed);
33034 gcc_assert (IN_RANGE (regno0, FIRST_GPR_REGNO, LAST_GPR_REGNO));
33035 gcc_assert (IN_RANGE (regno1, FIRST_GPR_REGNO, LAST_GPR_REGNO));
33036
33037 nregs = rs6000_hard_regno_nregs[(int)mode][regno0];
33038 gcc_assert (nregs > 1);
33039
33040 if (op2 && REG_P (op2))
33041 gcc_assert (IN_RANGE (REGNO (op2), FIRST_GPR_REGNO, LAST_GPR_REGNO));
33042
33043 for (i = 0; i < nregs; i++)
33044 {
33045 int offset = i * sub_size;
33046 rtx sub_op0 = simplify_subreg (sub_mode, op0, mode, offset);
33047 rtx sub_op1 = simplify_subreg (sub_mode, op1, mode, offset);
33048 rtx sub_op2 = ((code == NOT)
33049 ? NULL_RTX
33050 : simplify_subreg (sub_mode, op2, mode, offset));
33051
33052 rs6000_split_logical_inner (sub_op0, sub_op1, sub_op2, code, sub_mode,
33053 complement_final_p, complement_op1_p,
33054 complement_op2_p);
33055 }
33056
33057 return;
33058 }
33059
33060 \f
33061 /* Return true if the peephole2 can combine an addis instruction with a
33062 load that uses an offset, so that the pair can be fused together on a
33063 power8.
33064
33065 The operands are:
33066 operands[0] register set with addis
33067 operands[1] value set via addis
33068 operands[2] target register being loaded
33069 operands[3] D-form memory reference using operands[0].
33070
33071 In addition, we are passed a boolean that is true if this is a peephole2,
33072	   and we can check whether the addis_reg is dead after the insn and can
33073	   be replaced by the target register.  */
33074
33075 bool
33076 fusion_gpr_load_p (rtx *operands, bool peep2_p)
33077 {
33078 rtx addis_reg = operands[0];
33079 rtx addis_value = operands[1];
33080 rtx target = operands[2];
33081 rtx mem = operands[3];
33082 rtx addr;
33083 rtx base_reg;
33084
33085 /* Validate arguments. */
33086 if (!base_reg_operand (addis_reg, GET_MODE (addis_reg)))
33087 return false;
33088
33089 if (!base_reg_operand (target, GET_MODE (target)))
33090 return false;
33091
33092 if (!fusion_gpr_addis (addis_value, GET_MODE (addis_value)))
33093 return false;
33094
33095 if (!fusion_gpr_mem_load (mem, GET_MODE (mem)))
33096 return false;
33097
33098 /* Allow sign/zero extension. */
33099 if (GET_CODE (mem) == ZERO_EXTEND
33100 || (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN))
33101 mem = XEXP (mem, 0);
33102
33103 if (!MEM_P (mem))
33104 return false;
33105
33106 addr = XEXP (mem, 0); /* either PLUS or LO_SUM. */
33107 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
33108 return false;
33109
33110 /* Validate that the register used to load the high value is either the
33111 register being loaded, or we can safely replace its use in a peephole2.
33112
33113 If this is a peephole2, we assume that there are 2 instructions in the
33114	     peephole (addis and load), so we check that the target register is not
33115	     used in the memory address and that the register holding the addis result
33116	     is dead after the peephole.  */
33117 if (REGNO (addis_reg) != REGNO (target))
33118 {
33119 if (!peep2_p)
33120 return false;
33121
33122 if (reg_mentioned_p (target, mem))
33123 return false;
33124
33125 if (!peep2_reg_dead_p (2, addis_reg))
33126 return false;
33127
33128 /* If the target register being loaded is the stack pointer, we must
33129 avoid loading any other value into it, even temporarily. */
33130 if (REG_P (target) && REGNO (target) == STACK_POINTER_REGNUM)
33131 return false;
33132 }
33133
33134 base_reg = XEXP (addr, 0);
33135 return REGNO (addis_reg) == REGNO (base_reg);
33136 }
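/* A typical candidate accepted by this predicate is the schematic pair

     (set (reg:DI 9) (high:DI (unspec [(symbol_ref "x") (reg:DI 2)]
				      UNSPEC_TOCREL)))
     (set (reg:SI 10) (mem:SI (lo_sum:DI (reg:DI 9) (unspec ...))))

   where reg 9 is dead after the load, so the peephole2 may rewrite
   both insns to use reg 10; see expand_fusion_gpr_load below.  */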
33137
33138 /* During the peephole2 pass, adjust and expand the insns for a load fusion
33139 sequence. We adjust the addis register to use the target register. If the
33140 load sign extends, we adjust the code to do the zero extending load, and an
33141 explicit sign extension later since the fusion only covers zero extending
33142 loads.
33143
33144 The operands are:
33145 operands[0] register set with addis (to be replaced with target)
33146 operands[1] value set via addis
33147 operands[2] target register being loaded
33148 operands[3] D-form memory reference using operands[0]. */
33149
33150 void
33151 expand_fusion_gpr_load (rtx *operands)
33152 {
33153 rtx addis_value = operands[1];
33154 rtx target = operands[2];
33155 rtx orig_mem = operands[3];
33156 rtx new_addr, new_mem, orig_addr, offset;
33157 enum rtx_code plus_or_lo_sum;
33158 enum machine_mode target_mode = GET_MODE (target);
33159 enum machine_mode extend_mode = target_mode;
33160 enum machine_mode ptr_mode = Pmode;
33161 enum rtx_code extend = UNKNOWN;
33162 rtx addis_reg = ((ptr_mode == target_mode)
33163 ? target
33164 : simplify_subreg (ptr_mode, target, target_mode, 0));
33165
33166 if (GET_CODE (orig_mem) == ZERO_EXTEND
33167 || (TARGET_P8_FUSION_SIGN && GET_CODE (orig_mem) == SIGN_EXTEND))
33168 {
33169 extend = GET_CODE (orig_mem);
33170 orig_mem = XEXP (orig_mem, 0);
33171 target_mode = GET_MODE (orig_mem);
33172 }
33173
33174 gcc_assert (MEM_P (orig_mem));
33175
33176 orig_addr = XEXP (orig_mem, 0);
33177 plus_or_lo_sum = GET_CODE (orig_addr);
33178 gcc_assert (plus_or_lo_sum == PLUS || plus_or_lo_sum == LO_SUM);
33179
33180 offset = XEXP (orig_addr, 1);
33181 new_addr = gen_rtx_fmt_ee (plus_or_lo_sum, ptr_mode, addis_reg, offset);
33182 new_mem = change_address (orig_mem, target_mode, new_addr);
33183
33184 if (extend != UNKNOWN)
33185 new_mem = gen_rtx_fmt_e (ZERO_EXTEND, extend_mode, new_mem);
33186
33187 emit_insn (gen_rtx_SET (VOIDmode, addis_reg, addis_value));
33188 emit_insn (gen_rtx_SET (VOIDmode, target, new_mem));
33189
33190 if (extend == SIGN_EXTEND)
33191 {
33192 int sub_off = ((BYTES_BIG_ENDIAN)
33193 ? GET_MODE_SIZE (extend_mode) - GET_MODE_SIZE (target_mode)
33194 : 0);
33195 rtx sign_reg
33196 = simplify_subreg (target_mode, target, extend_mode, sub_off);
33197
33198 emit_insn (gen_rtx_SET (VOIDmode, target,
33199 gen_rtx_SIGN_EXTEND (extend_mode, sign_reg)));
33200 }
33201
33202 return;
33203 }
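/* Since power8 fusion covers only zero-extending loads, a
   sign-extending HImode case expands here to, schematically,

     addis 9,2,sym@toc@ha	# addis value into the target reg
     lhz   9,sym@toc@l(9)	# fused zero-extending load
     extsh 9,9			# explicit sign extension

   (register and symbol names are illustrative).  */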
33204
33205	/* Return a string to fuse an addis instruction with a gpr load into the
33206	   same register that the addis instruction set.  The code is complicated,
33207 so we call output_asm_insn directly, and just return "".
33208
33209 The operands are:
33210 operands[0] register set with addis (must be same reg as target).
33211 operands[1] value set via addis
33212 operands[2] target register being loaded
33213 operands[3] D-form memory reference using operands[0]. */
33214
33215 const char *
33216 emit_fusion_gpr_load (rtx *operands)
33217 {
33218 rtx addis_reg = operands[0];
33219 rtx addis_value = operands[1];
33220 rtx target = operands[2];
33221 rtx mem = operands[3];
33222 rtx fuse_ops[10];
33223 rtx addr;
33224 rtx load_offset;
33225 const char *addis_str = NULL;
33226 const char *load_str = NULL;
33227 const char *extend_insn = NULL;
33228 const char *mode_name = NULL;
33229 char insn_template[80];
33230 enum machine_mode mode;
33231 const char *comment_str = ASM_COMMENT_START;
33232 bool sign_p = false;
33233
33234 gcc_assert (REG_P (addis_reg) && REG_P (target));
33235 gcc_assert (REGNO (addis_reg) == REGNO (target));
33236
33237 if (*comment_str == ' ')
33238 comment_str++;
33239
33240 /* Allow sign/zero extension. */
33241 if (GET_CODE (mem) == ZERO_EXTEND)
33242 mem = XEXP (mem, 0);
33243
33244 else if (GET_CODE (mem) == SIGN_EXTEND && TARGET_P8_FUSION_SIGN)
33245 {
33246 sign_p = true;
33247 mem = XEXP (mem, 0);
33248 }
33249
33250 gcc_assert (MEM_P (mem));
33251 addr = XEXP (mem, 0);
33252 if (GET_CODE (addr) != PLUS && GET_CODE (addr) != LO_SUM)
33253 gcc_unreachable ();
33254
33255 load_offset = XEXP (addr, 1);
33256
33257 /* Now emit the load instruction to the same register. */
33258 mode = GET_MODE (mem);
33259 switch (mode)
33260 {
33261 case QImode:
33262 mode_name = "char";
33263 load_str = "lbz";
33264 extend_insn = "extsb %0,%0";
33265 break;
33266
33267 case HImode:
33268 mode_name = "short";
33269 load_str = "lhz";
33270 extend_insn = "extsh %0,%0";
33271 break;
33272
33273 case SImode:
33274 mode_name = "int";
33275 load_str = "lwz";
33276 extend_insn = "extsw %0,%0";
33277 break;
33278
33279 case DImode:
33280 if (TARGET_POWERPC64)
33281 {
33282 mode_name = "long";
33283 load_str = "ld";
33284 }
33285 else
33286 gcc_unreachable ();
33287 break;
33288
33289 default:
33290 gcc_unreachable ();
33291 }
33292
33293 /* Emit the addis instruction. */
33294 fuse_ops[0] = target;
33295 if (satisfies_constraint_L (addis_value))
33296 {
33297 fuse_ops[1] = addis_value;
33298 addis_str = "lis %0,%v1";
33299 }
33300
33301 else if (GET_CODE (addis_value) == PLUS)
33302 {
33303 rtx op0 = XEXP (addis_value, 0);
33304 rtx op1 = XEXP (addis_value, 1);
33305
33306 if (REG_P (op0) && CONST_INT_P (op1)
33307 && satisfies_constraint_L (op1))
33308 {
33309 fuse_ops[1] = op0;
33310 fuse_ops[2] = op1;
33311 addis_str = "addis %0,%1,%v2";
33312 }
33313 }
33314
33315 else if (GET_CODE (addis_value) == HIGH)
33316 {
33317 rtx value = XEXP (addis_value, 0);
33318 if (GET_CODE (value) == UNSPEC && XINT (value, 1) == UNSPEC_TOCREL)
33319 {
33320 fuse_ops[1] = XVECEXP (value, 0, 0); /* symbol ref. */
33321 fuse_ops[2] = XVECEXP (value, 0, 1); /* TOC register. */
33322 if (TARGET_ELF)
33323 addis_str = "addis %0,%2,%1@toc@ha";
33324
33325 else if (TARGET_XCOFF)
33326 addis_str = "addis %0,%1@u(%2)";
33327
33328 else
33329 gcc_unreachable ();
33330 }
33331
33332 else if (GET_CODE (value) == PLUS)
33333 {
33334 rtx op0 = XEXP (value, 0);
33335 rtx op1 = XEXP (value, 1);
33336
33337 if (GET_CODE (op0) == UNSPEC
33338 && XINT (op0, 1) == UNSPEC_TOCREL
33339 && CONST_INT_P (op1))
33340 {
33341 fuse_ops[1] = XVECEXP (op0, 0, 0); /* symbol ref. */
33342 fuse_ops[2] = XVECEXP (op0, 0, 1); /* TOC register. */
33343 fuse_ops[3] = op1;
33344 if (TARGET_ELF)
33345 addis_str = "addis %0,%2,%1+%3@toc@ha";
33346
33347 else if (TARGET_XCOFF)
33348 addis_str = "addis %0,%1+%3@u(%2)";
33349
33350 else
33351 gcc_unreachable ();
33352 }
33353 }
33354
33355 else if (satisfies_constraint_L (value))
33356 {
33357 fuse_ops[1] = value;
33358 addis_str = "lis %0,%v1";
33359 }
33360
33361 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (value))
33362 {
33363 fuse_ops[1] = value;
33364 addis_str = "lis %0,%1@ha";
33365 }
33366 }
33367
33368 if (!addis_str)
33369 fatal_insn ("Could not generate addis value for fusion", addis_value);
33370
33371 sprintf (insn_template, "%s\t\t%s gpr load fusion, type %s", addis_str,
33372 comment_str, mode_name);
33373 output_asm_insn (insn_template, fuse_ops);
33374
33375 /* Emit the D-form load instruction. */
33376 if (CONST_INT_P (load_offset) && satisfies_constraint_I (load_offset))
33377 {
33378 sprintf (insn_template, "%s %%0,%%1(%%0)", load_str);
33379 fuse_ops[1] = load_offset;
33380 output_asm_insn (insn_template, fuse_ops);
33381 }
33382
33383 else if (GET_CODE (load_offset) == UNSPEC
33384 && XINT (load_offset, 1) == UNSPEC_TOCREL)
33385 {
33386 if (TARGET_ELF)
33387 sprintf (insn_template, "%s %%0,%%1@toc@l(%%0)", load_str);
33388
33389 else if (TARGET_XCOFF)
33390 sprintf (insn_template, "%s %%0,%%1@l(%%0)", load_str);
33391
33392 else
33393 gcc_unreachable ();
33394
33395 fuse_ops[1] = XVECEXP (load_offset, 0, 0);
33396 output_asm_insn (insn_template, fuse_ops);
33397 }
33398
33399 else if (GET_CODE (load_offset) == PLUS
33400 && GET_CODE (XEXP (load_offset, 0)) == UNSPEC
33401 && XINT (XEXP (load_offset, 0), 1) == UNSPEC_TOCREL
33402 && CONST_INT_P (XEXP (load_offset, 1)))
33403 {
33404 rtx tocrel_unspec = XEXP (load_offset, 0);
33405 if (TARGET_ELF)
33406 sprintf (insn_template, "%s %%0,%%1+%%2@toc@l(%%0)", load_str);
33407
33408 else if (TARGET_XCOFF)
33409 sprintf (insn_template, "%s %%0,%%1+%%2@l(%%0)", load_str);
33410
33411 else
33412 gcc_unreachable ();
33413
33414 fuse_ops[1] = XVECEXP (tocrel_unspec, 0, 0);
33415 fuse_ops[2] = XEXP (load_offset, 1);
33416 output_asm_insn (insn_template, fuse_ops);
33417 }
33418
33419 else if (TARGET_ELF && !TARGET_POWERPC64 && CONSTANT_P (load_offset))
33420 {
33421 sprintf (insn_template, "%s %%0,%%1@l(%%0)", load_str);
33422
33423 fuse_ops[1] = load_offset;
33424 output_asm_insn (insn_template, fuse_ops);
33425 }
33426
33427 else
33428 fatal_insn ("Unable to generate load offset for fusion", load_offset);
33429
33430 /* Handle sign extension. The peephole2 pass generates this as a separate
33431 insn, but we handle it just in case it got reattached. */
33432 if (sign_p)
33433 {
33434 gcc_assert (extend_insn != NULL);
33435 output_asm_insn (extend_insn, fuse_ops);
33436 }
33437
33438 return "";
33439 }
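/* With the templates above, a TOC-relative int load prints as, for
   example,

     addis 9,2,var@toc@ha		# gpr load fusion, type int
     lwz 9,var@toc@l(9)

   where the trailing comment comes from the sprintf above; an extend
   instruction (e.g. extsw) would follow only in the sign_p case.  */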
33440 \f
33441 /* Analyze vector computations and remove unnecessary doubleword
33442 swaps (xxswapdi instructions). This pass is performed only
33443 for little-endian VSX code generation.
33444
33445 For this specific case, loads and stores of 4x32 and 2x64 vectors
33446	   are inefficient.  These are implemented using the lxvd2x and
33447	   stxvd2x instructions, which invert the order of doublewords in
33448 a vector register. Thus the code generation inserts an xxswapdi
33449 after each such load, and prior to each such store. (For spill
33450 code after register assignment, an additional xxswapdi is inserted
33451 following each store in order to return a hard register to its
33452 unpermuted value.)
33453
33454 The extra xxswapdi instructions reduce performance. This can be
33455 particularly bad for vectorized code. The purpose of this pass
33456 is to reduce the number of xxswapdi instructions required for
33457 correctness.
33458
33459 The primary insight is that much code that operates on vectors
33460 does not care about the relative order of elements in a register,
33461 so long as the correct memory order is preserved. If we have
33462	   a computation where all input values are provided by lxvd2x/xxswapdi
33463	   sequences, all outputs are stored using xxswapdi/stxvd2x sequences,
33464 and all intermediate computations are pure SIMD (independent of
33465 element order), then all the xxswapdi's associated with the loads
33466 and stores may be removed.
33467
33468 This pass uses some of the infrastructure and logical ideas from
33469 the "web" pass in web.c. We create maximal webs of computations
33470 fitting the description above using union-find. Each such web is
33471 then optimized by removing its unnecessary xxswapdi instructions.
33472
33473 The pass is placed prior to global optimization so that we can
33474 perform the optimization in the safest and simplest way possible;
33475 that is, by replacing each xxswapdi insn with a register copy insn.
33476 Subsequent forward propagation will remove copies where possible.
33477
33478 There are some operations sensitive to element order for which we
33479 can still allow the operation, provided we modify those operations.
33480 These include CONST_VECTORs, for which we must swap the first and
33481 second halves of the constant vector; and SUBREGs, for which we
33482 must adjust the byte offset to account for the swapped doublewords.
33483 A remaining opportunity would be non-immediate-form splats, for
33484 which we should adjust the selected lane of the input. We should
33485 also make code generation adjustments for sum-across operations,
33486 since this is a common vectorizer reduction.
33487
33488 Because we run prior to the first split, we can see loads and stores
33489 here that match *vsx_le_perm_{load,store}_<mode>. These are vanilla
33490 vector loads and stores that have not yet been split into a permuting
33491 load/store and a swap. (One way this can happen is with a builtin
33492 call to vec_vsx_{ld,st}.) We can handle these as well, but rather
33493 than deleting a swap, we convert the load/store into a permuting
33494 load/store (which effectively removes the swap). */
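/* Schematically, the little-endian code this pass improves looks like
   (writing the doubleword swap with its pattern name, xxswapdi):

     lxvd2x   vs0,0,r9		; load a[], doublewords reversed
     xxswapdi vs0,vs0
     lxvd2x   vs1,0,r10		; load b[]
     xxswapdi vs1,vs1
     xvaddsp  vs2,vs0,vs1	; lane-independent SIMD add
     xxswapdi vs2,vs2
     stxvd2x  vs2,0,r11		; store c[]

   All three xxswapdi's fall into one web whose computation is pure
   SIMD, so the pass replaces them with copies that later forward
   propagation removes.  */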
33495
33496 /* This is based on the union-find logic in web.c. web_entry_base is
33497 defined in df.h. */
33498 class swap_web_entry : public web_entry_base
33499 {
33500 public:
33501 /* Pointer to the insn. */
33502 rtx_insn *insn;
33503 /* Set if insn contains a mention of a vector register. All other
33504 fields are undefined if this field is unset. */
33505 unsigned int is_relevant : 1;
33506 /* Set if insn is a load. */
33507 unsigned int is_load : 1;
33508 /* Set if insn is a store. */
33509 unsigned int is_store : 1;
33510 /* Set if insn is a doubleword swap. This can either be a register swap
33511 or a permuting load or store (test is_load and is_store for this). */
33512 unsigned int is_swap : 1;
33513 /* Set if the insn has a live-in use of a parameter register. */
33514 unsigned int is_live_in : 1;
33515 /* Set if the insn has a live-out def of a return register. */
33516 unsigned int is_live_out : 1;
33517 /* Set if the insn contains a subreg reference of a vector register. */
33518 unsigned int contains_subreg : 1;
33519 /* Set if the insn contains a 128-bit integer operand. */
33520 unsigned int is_128_int : 1;
33521 /* Set if this is a call-insn. */
33522 unsigned int is_call : 1;
33523 /* Set if this insn does not perform a vector operation for which
33524 element order matters, or if we know how to fix it up if it does.
33525 Undefined if is_swap is set. */
33526 unsigned int is_swappable : 1;
33527 /* A nonzero value indicates what kind of special handling for this
33528 insn is required if doublewords are swapped. Undefined if
33529 is_swappable is not set. */
33530 unsigned int special_handling : 3;
33531 /* Set if the web represented by this entry cannot be optimized. */
33532 unsigned int web_not_optimizable : 1;
33533 /* Set if this insn should be deleted. */
33534 unsigned int will_delete : 1;
33535 };
33536
33537 enum special_handling_values {
33538 SH_NONE = 0,
33539 SH_CONST_VECTOR,
33540 SH_SUBREG,
33541 SH_NOSWAP_LD,
33542 SH_NOSWAP_ST
33543 };
33544
33545 /* Union INSN with all insns containing definitions that reach USE.
33546 Detect whether USE is live-in to the current function. */
33547 static void
33548 union_defs (swap_web_entry *insn_entry, rtx insn, df_ref use)
33549 {
33550 struct df_link *link = DF_REF_CHAIN (use);
33551
33552 if (!link)
33553 insn_entry[INSN_UID (insn)].is_live_in = 1;
33554
33555 while (link)
33556 {
33557 if (DF_REF_IS_ARTIFICIAL (link->ref))
33558 insn_entry[INSN_UID (insn)].is_live_in = 1;
33559
33560 if (DF_REF_INSN_INFO (link->ref))
33561 {
33562 rtx def_insn = DF_REF_INSN (link->ref);
33563 (void)unionfind_union (insn_entry + INSN_UID (insn),
33564 insn_entry + INSN_UID (def_insn));
33565 }
33566
33567 link = link->next;
33568 }
33569 }
33570
33571 /* Union INSN with all insns containing uses reached from DEF.
33572 Detect whether DEF is live-out from the current function. */
33573 static void
33574 union_uses (swap_web_entry *insn_entry, rtx insn, df_ref def)
33575 {
33576 struct df_link *link = DF_REF_CHAIN (def);
33577
33578 if (!link)
33579 insn_entry[INSN_UID (insn)].is_live_out = 1;
33580
33581 while (link)
33582 {
33583 /* This could be an eh use or some other artificial use;
33584 we treat these all the same (killing the optimization). */
33585 if (DF_REF_IS_ARTIFICIAL (link->ref))
33586 insn_entry[INSN_UID (insn)].is_live_out = 1;
33587
33588 if (DF_REF_INSN_INFO (link->ref))
33589 {
33590 rtx use_insn = DF_REF_INSN (link->ref);
33591 (void)unionfind_union (insn_entry + INSN_UID (insn),
33592 insn_entry + INSN_UID (use_insn));
33593 }
33594
33595 link = link->next;
33596 }
33597 }
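/* Taken together, union_defs and union_uses build the webs: for a
   schematic chain

     insn A: (set (reg:V4SI 100) (mem:V4SI ...))		; load
     insn B: (set (reg:V4SI 101)
		  (vec_select:V4SI (reg:V4SI 100) ...))		; swap
     insn C: (set (mem:V4SI ...) (reg:V4SI 101))		; store

   insns A, B and C end up with a common unionfind root, so the
   optimizability decision below is made for the group as a whole.  */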
33598
33599 /* Return 1 iff INSN is a load insn, including permuting loads that
33600	   represent an lxvd2x instruction; else return 0.  */
33601 static unsigned int
33602 insn_is_load_p (rtx insn)
33603 {
33604 rtx body = PATTERN (insn);
33605
33606 if (GET_CODE (body) == SET)
33607 {
33608 if (GET_CODE (SET_SRC (body)) == MEM)
33609 return 1;
33610
33611 if (GET_CODE (SET_SRC (body)) == VEC_SELECT
33612 && GET_CODE (XEXP (SET_SRC (body), 0)) == MEM)
33613 return 1;
33614
33615 return 0;
33616 }
33617
33618 if (GET_CODE (body) != PARALLEL)
33619 return 0;
33620
33621 rtx set = XVECEXP (body, 0, 0);
33622
33623 if (GET_CODE (set) == SET && GET_CODE (SET_SRC (set)) == MEM)
33624 return 1;
33625
33626 return 0;
33627 }
33628
33629 /* Return 1 iff INSN is a store insn, including permuting stores that
33630	   represent an stxvd2x instruction; else return 0.  */
33631 static unsigned int
33632 insn_is_store_p (rtx insn)
33633 {
33634 rtx body = PATTERN (insn);
33635 if (GET_CODE (body) == SET && GET_CODE (SET_DEST (body)) == MEM)
33636 return 1;
33637 if (GET_CODE (body) != PARALLEL)
33638 return 0;
33639 rtx set = XVECEXP (body, 0, 0);
33640 if (GET_CODE (set) == SET && GET_CODE (SET_DEST (set)) == MEM)
33641 return 1;
33642 return 0;
33643 }
33644
33645 /* Return 1 iff INSN swaps doublewords. This may be a reg-reg swap,
33646 a permuting load, or a permuting store. */
33647 static unsigned int
33648 insn_is_swap_p (rtx insn)
33649 {
33650 rtx body = PATTERN (insn);
33651 if (GET_CODE (body) != SET)
33652 return 0;
33653 rtx rhs = SET_SRC (body);
33654 if (GET_CODE (rhs) != VEC_SELECT)
33655 return 0;
33656 rtx parallel = XEXP (rhs, 1);
33657 if (GET_CODE (parallel) != PARALLEL)
33658 return 0;
33659 unsigned int len = XVECLEN (parallel, 0);
33660 if (len != 2 && len != 4 && len != 8 && len != 16)
33661 return 0;
33662 for (unsigned int i = 0; i < len / 2; ++i)
33663 {
33664 rtx op = XVECEXP (parallel, 0, i);
33665 if (GET_CODE (op) != CONST_INT || INTVAL (op) != len / 2 + i)
33666 return 0;
33667 }
33668 for (unsigned int i = len / 2; i < len; ++i)
33669 {
33670 rtx op = XVECEXP (parallel, 0, i);
33671 if (GET_CODE (op) != CONST_INT || INTVAL (op) != i - len / 2)
33672 return 0;
33673 }
33674 return 1;
33675 }
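/* For V4SImode (LEN == 4) the selector accepted above is
   (parallel [2 3 0 1]): elements keep their order within each
   doubleword while the two doublewords are exchanged.  Likewise it is
   [1 0] for V2DImode and [8 9 ... 15 0 1 ... 7] for V16QImode.  */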
33676
33677 /* Return 1 iff OP is an operand that will not be affected by having
33678 vector doublewords swapped in memory. */
33679 static unsigned int
33680 rtx_is_swappable_p (rtx op, unsigned int *special)
33681 {
33682 enum rtx_code code = GET_CODE (op);
33683 int i, j;
33684
33685 switch (code)
33686 {
33687 case LABEL_REF:
33688 case SYMBOL_REF:
33689 case CLOBBER:
33690 case REG:
33691 return 1;
33692
33693 case VEC_CONCAT:
33694 case VEC_SELECT:
33695 case ASM_INPUT:
33696 case ASM_OPERANDS:
33697 return 0;
33698
33699 case CONST_VECTOR:
33700 {
33701 *special = SH_CONST_VECTOR;
33702 return 1;
33703 }
33704
33705 case VEC_DUPLICATE:
33706 /* Opportunity: If XEXP (op, 0) has the same mode as the result,
33707 and XEXP (op, 1) is a PARALLEL with a single QImode const int,
33708 it represents a vector splat for which we can do special
33709 handling. */
33710 if (GET_CODE (XEXP (op, 0)) == CONST_INT)
33711 return 1;
33712 else
33713 return 0;
33714
33715 case UNSPEC:
33716 {
33717 /* Various operations are unsafe for this optimization, at least
33718 without significant additional work. Permutes are obviously
33719 problematic, as both the permute control vector and the ordering
33720 of the target values are invalidated by doubleword swapping.
33721 Vector pack and unpack modify the number of vector lanes.
33722 Merge-high/low will not operate correctly on swapped operands.
33723 Vector shifts across element boundaries are clearly uncool,
33724 as are vector select and concatenate operations. Vector
33725 sum-across instructions define one operand with a specific
33726 order-dependent element, so additional fixup code would be
33727 needed to make those work. Vector set and non-immediate-form
33728 vector splat are element-order sensitive. A few of these
33729 cases might be workable with special handling if required. */
33730 int val = XINT (op, 1);
33731 if (val == UNSPEC_VMRGH_DIRECT
33732 || val == UNSPEC_VMRGL_DIRECT
33733 || val == UNSPEC_VPACK_SIGN_SIGN_SAT
33734 || val == UNSPEC_VPACK_SIGN_UNS_SAT
33735 || val == UNSPEC_VPACK_UNS_UNS_MOD
33736 || val == UNSPEC_VPACK_UNS_UNS_MOD_DIRECT
33737 || val == UNSPEC_VPACK_UNS_UNS_SAT
33738 || val == UNSPEC_VPERM
33739 || val == UNSPEC_VPERM_UNS
33740 || val == UNSPEC_VPERMHI
33741 || val == UNSPEC_VPERMSI
33742 || val == UNSPEC_VPKPX
33743 || val == UNSPEC_VSLDOI
33744 || val == UNSPEC_VSLO
33745 || val == UNSPEC_VSPLT_DIRECT
33746 || val == UNSPEC_VSRO
33747 || val == UNSPEC_VSUM2SWS
33748 || val == UNSPEC_VSUM4S
33749 || val == UNSPEC_VSUM4UBS
33750 || val == UNSPEC_VSUMSWS
33751 || val == UNSPEC_VSUMSWS_DIRECT
33752 || val == UNSPEC_VSX_CONCAT
33753 || val == UNSPEC_VSX_CVSPDP
33754 || val == UNSPEC_VSX_CVSPDPN
33755 || val == UNSPEC_VSX_SET
33756 || val == UNSPEC_VSX_SLDWI
33757 || val == UNSPEC_VSX_XXSPLTW
33758 || val == UNSPEC_VUNPACK_HI_SIGN
33759 || val == UNSPEC_VUNPACK_HI_SIGN_DIRECT
33760 || val == UNSPEC_VUNPACK_LO_SIGN
33761 || val == UNSPEC_VUNPACK_LO_SIGN_DIRECT
33762 || val == UNSPEC_VUPKHPX
33763 || val == UNSPEC_VUPKHS_V4SF
33764 || val == UNSPEC_VUPKHU_V4SF
33765 || val == UNSPEC_VUPKLPX
33766 || val == UNSPEC_VUPKLS_V4SF
33767	    || val == UNSPEC_VUPKLU_V4SF)
33768 return 0;
33769 }
33770
33771 default:
33772 break;
33773 }
33774
33775 const char *fmt = GET_RTX_FORMAT (code);
33776 int ok = 1;
33777
33778 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
33779 if (fmt[i] == 'e' || fmt[i] == 'u')
33780 {
33781 unsigned int special_op = SH_NONE;
33782 ok &= rtx_is_swappable_p (XEXP (op, i), &special_op);
33783 /* Ensure we never have two kinds of special handling
33784 for the same insn. */
33785 if (*special != SH_NONE && special_op != SH_NONE
33786 && *special != special_op)
33787 return 0;
33788	if (special_op != SH_NONE) *special = special_op;
33789 }
33790 else if (fmt[i] == 'E')
33791 for (j = 0; j < XVECLEN (op, i); ++j)
33792 {
33793 unsigned int special_op = SH_NONE;
33794 ok &= rtx_is_swappable_p (XVECEXP (op, i, j), &special_op);
33795 /* Ensure we never have two kinds of special handling
33796 for the same insn. */
33797 if (*special != SH_NONE && special_op != SH_NONE
33798 && *special != special_op)
33799 return 0;
33800	  if (special_op != SH_NONE) *special = special_op;
33801 }
33802
33803 return ok;
33804 }
33805
33806	/* Return 1 iff INSN is an insn that will not be affected by
33807 having vector doublewords swapped in memory (in which case
33808 *SPECIAL is unchanged), or that can be modified to be correct
33809 if vector doublewords are swapped in memory (in which case
33810 *SPECIAL is changed to a value indicating how). */
33811 static unsigned int
33812 insn_is_swappable_p (swap_web_entry *insn_entry, rtx insn,
33813 unsigned int *special)
33814 {
33815 /* Calls are always bad. */
33816 if (GET_CODE (insn) == CALL_INSN)
33817 return 0;
33818
33819 /* Loads and stores seen here are not permuting, but we can still
33820 fix them up by converting them to permuting ones. Exception:
33821 UNSPEC_LVX and UNSPEC_STVX, which have a PARALLEL body instead
33822 of a SET. */
33823 rtx body = PATTERN (insn);
33824 int i = INSN_UID (insn);
33825
33826 if (insn_entry[i].is_load)
33827 {
33828 if (GET_CODE (body) == SET)
33829 {
33830 *special = SH_NOSWAP_LD;
33831 return 1;
33832 }
33833 else
33834 return 0;
33835 }
33836
33837 if (insn_entry[i].is_store)
33838 {
33839 if (GET_CODE (body) == SET)
33840 {
33841 *special = SH_NOSWAP_ST;
33842 return 1;
33843 }
33844 else
33845 return 0;
33846 }
33847
33848 /* Otherwise check the operands for vector lane violations. */
33849 return rtx_is_swappable_p (body, special);
33850 }
33851
33852 enum chain_purpose { FOR_LOADS, FOR_STORES };
33853
33854 /* Return true if the UD or DU chain headed by LINK is non-empty,
33855 and every entry on the chain references an insn that is a
33856 register swap. Furthermore, if PURPOSE is FOR_LOADS, each such
33857 register swap must have only permuting loads as reaching defs.
33858 If PURPOSE is FOR_STORES, each such register swap must have only
33859 register swaps or permuting stores as reached uses. */
33860 static bool
33861 chain_contains_only_swaps (swap_web_entry *insn_entry, struct df_link *link,
33862 enum chain_purpose purpose)
33863 {
33864 if (!link)
33865 return false;
33866
33867 for (; link; link = link->next)
33868 {
33869 if (!VECTOR_MODE_P (GET_MODE (DF_REF_REG (link->ref))))
33870 continue;
33871
33872 if (DF_REF_IS_ARTIFICIAL (link->ref))
33873 return false;
33874
33875 rtx reached_insn = DF_REF_INSN (link->ref);
33876 unsigned uid = INSN_UID (reached_insn);
33877 struct df_insn_info *insn_info = DF_INSN_INFO_GET (reached_insn);
33878
33879 if (!insn_entry[uid].is_swap || insn_entry[uid].is_load
33880 || insn_entry[uid].is_store)
33881 return false;
33882
33883 if (purpose == FOR_LOADS)
33884 {
33885 df_ref use;
33886 FOR_EACH_INSN_INFO_USE (use, insn_info)
33887 {
33888 struct df_link *swap_link = DF_REF_CHAIN (use);
33889
33890 while (swap_link)
33891 {
33892	      if (DF_REF_IS_ARTIFICIAL (swap_link->ref))
33893 return false;
33894
33895 rtx swap_def_insn = DF_REF_INSN (swap_link->ref);
33896 unsigned uid2 = INSN_UID (swap_def_insn);
33897
33898 /* Only permuting loads are allowed. */
33899 if (!insn_entry[uid2].is_swap || !insn_entry[uid2].is_load)
33900 return false;
33901
33902 swap_link = swap_link->next;
33903 }
33904 }
33905 }
33906 else if (purpose == FOR_STORES)
33907 {
33908 df_ref def;
33909 FOR_EACH_INSN_INFO_DEF (def, insn_info)
33910 {
33911 struct df_link *swap_link = DF_REF_CHAIN (def);
33912
33913 while (swap_link)
33914 {
33915	      if (DF_REF_IS_ARTIFICIAL (swap_link->ref))
33916 return false;
33917
33918 rtx swap_use_insn = DF_REF_INSN (swap_link->ref);
33919 unsigned uid2 = INSN_UID (swap_use_insn);
33920
33921 /* Permuting stores or register swaps are allowed. */
33922 if (!insn_entry[uid2].is_swap || insn_entry[uid2].is_load)
33923 return false;
33924
33925 swap_link = swap_link->next;
33926 }
33927 }
33928 }
33929 }
33930
33931 return true;
33932 }
33933
33934 /* Mark the xxswapdi instructions associated with permuting loads and
33935 stores for removal. Note that we only flag them for deletion here,
33936 as there is a possibility of a swap being reached from multiple
33937 loads, etc. */
33938 static void
33939 mark_swaps_for_removal (swap_web_entry *insn_entry, unsigned int i)
33940 {
33941 rtx insn = insn_entry[i].insn;
33942 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
33943
33944 if (insn_entry[i].is_load)
33945 {
33946 df_ref def;
33947 FOR_EACH_INSN_INFO_DEF (def, insn_info)
33948 {
33949 struct df_link *link = DF_REF_CHAIN (def);
33950
33951 /* We know by now that these are swaps, so we can delete
33952 them confidently. */
33953 while (link)
33954 {
33955 rtx use_insn = DF_REF_INSN (link->ref);
33956 insn_entry[INSN_UID (use_insn)].will_delete = 1;
33957 link = link->next;
33958 }
33959 }
33960 }
33961 else if (insn_entry[i].is_store)
33962 {
33963 df_ref use;
33964 FOR_EACH_INSN_INFO_USE (use, insn_info)
33965 {
33966 /* Ignore uses for addressability. */
33967 enum machine_mode mode = GET_MODE (DF_REF_REG (use));
33968 if (!VECTOR_MODE_P (mode))
33969 continue;
33970
33971 struct df_link *link = DF_REF_CHAIN (use);
33972
33973 /* We know by now that these are swaps, so we can delete
33974 them confidently. */
33975 while (link)
33976 {
33977 rtx def_insn = DF_REF_INSN (link->ref);
33978 insn_entry[INSN_UID (def_insn)].will_delete = 1;
33979 link = link->next;
33980 }
33981 }
33982 }
33983 }
33984
33985 /* OP is either a CONST_VECTOR or an expression containing one.
33986 Swap the first half of the vector with the second in the first
33987 case. Recurse to find it in the second. */
33988 static void
33989 swap_const_vector_halves (rtx op)
33990 {
33991 int i;
33992 enum rtx_code code = GET_CODE (op);
33993 if (GET_CODE (op) == CONST_VECTOR)
33994 {
33995 int half_units = GET_MODE_NUNITS (GET_MODE (op)) / 2;
33996 for (i = 0; i < half_units; ++i)
33997 {
33998 rtx temp = CONST_VECTOR_ELT (op, i);
33999 CONST_VECTOR_ELT (op, i) = CONST_VECTOR_ELT (op, i + half_units);
34000 CONST_VECTOR_ELT (op, i + half_units) = temp;
34001 }
34002 }
34003 else
34004 {
34005 int j;
34006 const char *fmt = GET_RTX_FORMAT (code);
34007 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
34008 if (fmt[i] == 'e' || fmt[i] == 'u')
34009 swap_const_vector_halves (XEXP (op, i));
34010 else if (fmt[i] == 'E')
34011 for (j = 0; j < XVECLEN (op, i); ++j)
34012 swap_const_vector_halves (XVECEXP (op, i, j));
34013 }
34014 }
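/* For example, the V4SI constant vector { 0, 1, 2, 3 } is rewritten
   in place as { 2, 3, 0, 1 }, so that the doubleword swap performed
   by a permuting load restores the original element order.  */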
34015
34016 /* Find all subregs of a vector expression that perform a narrowing,
34017 and adjust the subreg index to account for doubleword swapping. */
34018 static void
34019 adjust_subreg_index (rtx op)
34020 {
34021 enum rtx_code code = GET_CODE (op);
34022 if (code == SUBREG
34023 && (GET_MODE_SIZE (GET_MODE (op))
34024 < GET_MODE_SIZE (GET_MODE (XEXP (op, 0)))))
34025 {
34026 unsigned int index = SUBREG_BYTE (op);
34027 if (index < 8)
34028 index += 8;
34029 else
34030 index -= 8;
34031 SUBREG_BYTE (op) = index;
34032 }
34033
34034 const char *fmt = GET_RTX_FORMAT (code);
34035	  int i, j;
34036 for (i = 0; i < GET_RTX_LENGTH (code); ++i)
34037 if (fmt[i] == 'e' || fmt[i] == 'u')
34038 adjust_subreg_index (XEXP (op, i));
34039 else if (fmt[i] == 'E')
34040 for (j = 0; j < XVECLEN (op, i); ++j)
34041 adjust_subreg_index (XVECEXP (op, i, j));
34042 }
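/* For example, (subreg:DF (reg:V2DF 100) 0) becomes
   (subreg:DF (reg:V2DF 100) 8) and vice versa, so the subreg still
   names the same value once the register's doublewords are
   swapped.  */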
34043
34044 /* Convert the non-permuting load INSN to a permuting one. */
34045 static void
34046 permute_load (rtx_insn *insn)
34047 {
34048 rtx body = PATTERN (insn);
34049 rtx mem_op = SET_SRC (body);
34050 rtx tgt_reg = SET_DEST (body);
34051 enum machine_mode mode = GET_MODE (tgt_reg);
34052 int n_elts = GET_MODE_NUNITS (mode);
34053 int half_elts = n_elts / 2;
34054 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
34055 int i, j;
34056 for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
34057 XVECEXP (par, 0, i) = GEN_INT (j);
34058 for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
34059 XVECEXP (par, 0, i) = GEN_INT (j);
34060 rtx sel = gen_rtx_VEC_SELECT (mode, mem_op, par);
34061 SET_SRC (body) = sel;
34062 INSN_CODE (insn) = -1; /* Force re-recognition. */
34063 df_insn_rescan (insn);
34064
34065 if (dump_file)
34066 fprintf (dump_file, "Replacing load %d with permuted load\n",
34067 INSN_UID (insn));
34068 }
34069
34070 /* Convert the non-permuting store INSN to a permuting one. */
34071 static void
34072 permute_store (rtx_insn *insn)
34073 {
34074 rtx body = PATTERN (insn);
34075 rtx src_reg = SET_SRC (body);
34076 enum machine_mode mode = GET_MODE (src_reg);
34077 int n_elts = GET_MODE_NUNITS (mode);
34078 int half_elts = n_elts / 2;
34079 rtx par = gen_rtx_PARALLEL (mode, rtvec_alloc (n_elts));
34080 int i, j;
34081 for (i = 0, j = half_elts; i < half_elts; ++i, ++j)
34082 XVECEXP (par, 0, i) = GEN_INT (j);
34083 for (i = half_elts, j = 0; j < half_elts; ++i, ++j)
34084 XVECEXP (par, 0, i) = GEN_INT (j);
34085 rtx sel = gen_rtx_VEC_SELECT (mode, src_reg, par);
34086 SET_SRC (body) = sel;
34087 INSN_CODE (insn) = -1; /* Force re-recognition. */
34088 df_insn_rescan (insn);
34089
34090 if (dump_file)
34091 fprintf (dump_file, "Replacing store %d with permuted store\n",
34092 INSN_UID (insn));
34093 }
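/* After permute_store, a non-permuting V4SI store

     (set (mem:V4SI ...) (reg:V4SI 100))

   has been rewritten as the permuting form

     (set (mem:V4SI ...)
	  (vec_select:V4SI (reg:V4SI 100) (parallel [2 3 0 1])))

   matching the *vsx_le_perm_store patterns, so no separate swap is
   needed; permute_load is the mirror image for loads.  */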
34094
34095 /* The insn described by INSN_ENTRY[I] can be swapped, but only
34096 with special handling. Take care of that here. */
34097 static void
34098 handle_special_swappables (swap_web_entry *insn_entry, unsigned i)
34099 {
34100 rtx_insn *insn = insn_entry[i].insn;
34101 rtx body = PATTERN (insn);
34102
34103 switch (insn_entry[i].special_handling)
34104 {
34105 case SH_CONST_VECTOR:
34106 {
34107 /* A CONST_VECTOR will only show up somewhere in the RHS of a SET. */
34108 gcc_assert (GET_CODE (body) == SET);
34109 rtx rhs = SET_SRC (body);
34110 swap_const_vector_halves (rhs);
34111 if (dump_file)
34112 fprintf (dump_file, "Swapping constant halves in insn %d\n", i);
34113 break;
34114 }
34115 case SH_SUBREG:
34116 /* A subreg of the same size is already safe. For subregs that
34117 select a smaller portion of a reg, adjust the index for
34118 swapped doublewords. */
34119 adjust_subreg_index (body);
34120 if (dump_file)
34121 fprintf (dump_file, "Adjusting subreg in insn %d\n", i);
34122 break;
34123 case SH_NOSWAP_LD:
34124 /* Convert a non-permuting load to a permuting one. */
34125 permute_load (insn);
34126 break;
34127 case SH_NOSWAP_ST:
34128 /* Convert a non-permuting store to a permuting one. */
34129 permute_store (insn);
34130 break;
34131 }
34132 }
34133
34134 /* Find the insn from the Ith table entry, which is known to be a
34135 register swap Y = SWAP(X). Replace it with a copy Y = X. */
34136 static void
34137 replace_swap_with_copy (swap_web_entry *insn_entry, unsigned i)
34138 {
34139 rtx_insn *insn = insn_entry[i].insn;
34140 rtx body = PATTERN (insn);
34141 rtx src_reg = XEXP (SET_SRC (body), 0);
34142 rtx copy = gen_rtx_SET (VOIDmode, SET_DEST (body), src_reg);
34143 rtx_insn *new_insn = emit_insn_before (copy, insn);
34144 set_block_for_insn (new_insn, BLOCK_FOR_INSN (insn));
34145 df_insn_rescan (new_insn);
34146
34147 if (dump_file)
34148 {
34149 unsigned int new_uid = INSN_UID (new_insn);
34150 fprintf (dump_file, "Replacing swap %d with copy %d\n", i, new_uid);
34151 }
34152
34153 df_insn_delete (insn);
34154 remove_insn (insn);
34155 INSN_DELETED_P (insn) = 1;
34156 }
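/* For example, the swap

     (set (reg:V4SI 101)
	  (vec_select:V4SI (reg:V4SI 100) (parallel [2 3 0 1])))

   is replaced by the plain copy

     (set (reg:V4SI 101) (reg:V4SI 100))

   which subsequent forward propagation normally removes entirely, as
   noted in the pass header comment.  */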
34157
34158 /* Dump the swap table to DUMP_FILE. */
34159 static void
34160 dump_swap_insn_table (swap_web_entry *insn_entry)
34161 {
34162 int e = get_max_uid ();
34163 fprintf (dump_file, "\nRelevant insns with their flag settings\n\n");
34164
34165 for (int i = 0; i < e; ++i)
34166 if (insn_entry[i].is_relevant)
34167 {
34168 swap_web_entry *pred_entry = (swap_web_entry *)insn_entry[i].pred ();
34169 fprintf (dump_file, "%6d %6d ", i,
34170 pred_entry && pred_entry->insn
34171 ? INSN_UID (pred_entry->insn) : 0);
34172 if (insn_entry[i].is_load)
34173 fputs ("load ", dump_file);
34174 if (insn_entry[i].is_store)
34175 fputs ("store ", dump_file);
34176 if (insn_entry[i].is_swap)
34177 fputs ("swap ", dump_file);
34178 if (insn_entry[i].is_live_in)
34179 fputs ("live-in ", dump_file);
34180 if (insn_entry[i].is_live_out)
34181 fputs ("live-out ", dump_file);
34182 if (insn_entry[i].contains_subreg)
34183 fputs ("subreg ", dump_file);
34184 if (insn_entry[i].is_128_int)
34185 fputs ("int128 ", dump_file);
34186 if (insn_entry[i].is_call)
34187 fputs ("call ", dump_file);
34188 if (insn_entry[i].is_swappable)
34189 {
34190 fputs ("swappable ", dump_file);
34191 if (insn_entry[i].special_handling == SH_CONST_VECTOR)
34192 fputs ("special:constvec ", dump_file);
34193 else if (insn_entry[i].special_handling == SH_SUBREG)
34194 fputs ("special:subreg ", dump_file);
34195 else if (insn_entry[i].special_handling == SH_NOSWAP_LD)
34196 fputs ("special:load ", dump_file);
34197 else if (insn_entry[i].special_handling == SH_NOSWAP_ST)
34198 fputs ("special:store ", dump_file);
34199 }
34200 if (insn_entry[i].web_not_optimizable)
34201 fputs ("unoptimizable ", dump_file);
34202 if (insn_entry[i].will_delete)
34203 fputs ("delete ", dump_file);
34204 fputs ("\n", dump_file);
34205 }
34206 fputs ("\n", dump_file);
34207 }
34208
34209 /* Main entry point for this pass. */
34210 unsigned int
34211 rs6000_analyze_swaps (function *fun)
34212 {
34213 swap_web_entry *insn_entry;
34214 basic_block bb;
34215 rtx_insn *insn;
34216
34217 /* Dataflow analysis for use-def chains. */
34218 df_set_flags (DF_RD_PRUNE_DEAD_DEFS);
34219 df_chain_add_problem (DF_DU_CHAIN | DF_UD_CHAIN);
34220 df_analyze ();
34221 df_set_flags (DF_DEFER_INSN_RESCAN);
34222
34223 /* Allocate structure to represent webs of insns. */
34224 insn_entry = XCNEWVEC (swap_web_entry, get_max_uid ());
34225
34226 /* Walk the insns to gather basic data. */
34227 FOR_ALL_BB_FN (bb, fun)
34228 FOR_BB_INSNS (bb, insn)
34229 {
34230 unsigned int uid = INSN_UID (insn);
34231 if (NONDEBUG_INSN_P (insn))
34232 {
34233 insn_entry[uid].insn = insn;
34234
34235 if (GET_CODE (insn) == CALL_INSN)
34236 insn_entry[uid].is_call = 1;
34237
34238 /* Walk the uses and defs to see if we mention vector regs.
34239 Record any constraints on optimization of such mentions. */
34240 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
34241 df_ref mention;
34242 FOR_EACH_INSN_INFO_USE (mention, insn_info)
34243 {
34244 /* We use DF_REF_REAL_REG here to get inside any subregs. */
34245 enum machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));
34246
34247 /* If a use gets its value from a call insn, it will be
34248 a hard register and will look like (reg:V4SI 3 3).
34249 The df analysis creates two mentions for GPR3 and GPR4,
34250 both DImode. We must recognize this and treat it as a
34251 vector mention to ensure the call is unioned with this
34252 use. */
34253 if (mode == DImode && DF_REF_INSN_INFO (mention))
34254 {
34255 rtx feeder = DF_REF_INSN (mention);
34256 /* FIXME: It is pretty hard to get from the df mention
34257 to the mode of the use in the insn. We arbitrarily
34258 pick a vector mode here, even though the use might
34259 be a real DImode. We can be too conservative
34260 (create a web larger than necessary) because of
34261 this, so consider eventually fixing this. */
34262 if (GET_CODE (feeder) == CALL_INSN)
34263 mode = V4SImode;
34264 }
34265
34266 if (VECTOR_MODE_P (mode))
34267 {
34268 insn_entry[uid].is_relevant = 1;
34269 if (mode == TImode || mode == V1TImode)
34270 insn_entry[uid].is_128_int = 1;
34271 if (DF_REF_INSN_INFO (mention))
34272 insn_entry[uid].contains_subreg
34273 = !rtx_equal_p (DF_REF_REG (mention),
34274 DF_REF_REAL_REG (mention));
34275 union_defs (insn_entry, insn, mention);
34276 }
34277 }
34278 FOR_EACH_INSN_INFO_DEF (mention, insn_info)
34279 {
34280 /* We use DF_REF_REAL_REG here to get inside any subregs. */
34281 enum machine_mode mode = GET_MODE (DF_REF_REAL_REG (mention));
34282
34283 /* If we're loading up a hard vector register for a call,
34284 it looks like (set (reg:V4SI 9 9) (...)). The df
34285 analysis creates two mentions for GPR9 and GPR10, both
34286 DImode. So relying on the mode from the mentions
34287 isn't sufficient to ensure we union the call into the
34288 web with the parameter setup code. */
34289	      if (mode == DImode && GET_CODE (PATTERN (insn)) == SET
34290	          && VECTOR_MODE_P (GET_MODE (SET_DEST (PATTERN (insn)))))
34291	        mode = GET_MODE (SET_DEST (PATTERN (insn)));
34292
34293 if (VECTOR_MODE_P (mode))
34294 {
34295 insn_entry[uid].is_relevant = 1;
34296 if (mode == TImode || mode == V1TImode)
34297 insn_entry[uid].is_128_int = 1;
34298 if (DF_REF_INSN_INFO (mention))
34299 insn_entry[uid].contains_subreg
34300 = !rtx_equal_p (DF_REF_REG (mention),
34301 DF_REF_REAL_REG (mention));
34302 /* REG_FUNCTION_VALUE_P is not valid for subregs. */
34303 else if (REG_FUNCTION_VALUE_P (DF_REF_REG (mention)))
34304 insn_entry[uid].is_live_out = 1;
34305 union_uses (insn_entry, insn, mention);
34306 }
34307 }
34308
34309 if (insn_entry[uid].is_relevant)
34310 {
34311 /* Determine if this is a load or store. */
34312 insn_entry[uid].is_load = insn_is_load_p (insn);
34313 insn_entry[uid].is_store = insn_is_store_p (insn);
34314
34315 /* Determine if this is a doubleword swap. If not,
34316 determine whether it can legally be swapped. */
34317 if (insn_is_swap_p (insn))
34318 insn_entry[uid].is_swap = 1;
34319 else
34320 {
34321 unsigned int special = SH_NONE;
34322 insn_entry[uid].is_swappable
34323 = insn_is_swappable_p (insn_entry, insn, &special);
34324 if (special != SH_NONE && insn_entry[uid].contains_subreg)
34325 insn_entry[uid].is_swappable = 0;
34326 else if (special != SH_NONE)
34327 insn_entry[uid].special_handling = special;
34328 else if (insn_entry[uid].contains_subreg)
34329 insn_entry[uid].special_handling = SH_SUBREG;
34330 }
34331 }
34332 }
34333 }
34334
34335 if (dump_file)
34336 {
34337 fprintf (dump_file, "\nSwap insn entry table when first built\n");
34338 dump_swap_insn_table (insn_entry);
34339 }
34340
34341 /* Record unoptimizable webs. */
34342 unsigned e = get_max_uid (), i;
34343 for (i = 0; i < e; ++i)
34344 {
34345 if (!insn_entry[i].is_relevant)
34346 continue;
34347
34348 swap_web_entry *root
34349 = (swap_web_entry*)(&insn_entry[i])->unionfind_root ();
34350
34351 if (insn_entry[i].is_live_in || insn_entry[i].is_live_out
34352 || (insn_entry[i].contains_subreg
34353 && insn_entry[i].special_handling != SH_SUBREG)
34354 || insn_entry[i].is_128_int || insn_entry[i].is_call
34355 || !(insn_entry[i].is_swappable || insn_entry[i].is_swap))
34356 root->web_not_optimizable = 1;
34357
34358 /* If we have loads or stores that aren't permuting then the
34359 optimization isn't appropriate. */
34360 else if ((insn_entry[i].is_load || insn_entry[i].is_store)
34361 && !insn_entry[i].is_swap && !insn_entry[i].is_swappable)
34362 root->web_not_optimizable = 1;
34363
34364 /* If we have permuting loads or stores that are not accompanied
34365 by a register swap, the optimization isn't appropriate. */
34366 else if (insn_entry[i].is_load && insn_entry[i].is_swap)
34367 {
34368 rtx insn = insn_entry[i].insn;
34369 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
34370 df_ref def;
34371
34372 FOR_EACH_INSN_INFO_DEF (def, insn_info)
34373 {
34374 struct df_link *link = DF_REF_CHAIN (def);
34375
34376 if (!chain_contains_only_swaps (insn_entry, link, FOR_LOADS))
34377 {
34378 root->web_not_optimizable = 1;
34379 break;
34380 }
34381 }
34382 }
34383 else if (insn_entry[i].is_store && insn_entry[i].is_swap)
34384 {
34385 rtx insn = insn_entry[i].insn;
34386 struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn);
34387 df_ref use;
34388
34389 FOR_EACH_INSN_INFO_USE (use, insn_info)
34390 {
34391 struct df_link *link = DF_REF_CHAIN (use);
34392
34393 if (!chain_contains_only_swaps (insn_entry, link, FOR_STORES))
34394 {
34395 root->web_not_optimizable = 1;
34396 break;
34397 }
34398 }
34399 }
34400 }
34401
34402 if (dump_file)
34403 {
34404 fprintf (dump_file, "\nSwap insn entry table after web analysis\n");
34405 dump_swap_insn_table (insn_entry);
34406 }
34407
34408 /* For each load and store in an optimizable web (which implies
34409 the loads and stores are permuting), find the associated
34410 register swaps and mark them for removal. Due to various
34411 optimizations we may mark the same swap more than once. Also
34412 perform special handling for swappable insns that require it. */
34413 for (i = 0; i < e; ++i)
34414 if ((insn_entry[i].is_load || insn_entry[i].is_store)
34415 && insn_entry[i].is_swap)
34416 {
34417 swap_web_entry* root_entry
34418 = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
34419 if (!root_entry->web_not_optimizable)
34420 mark_swaps_for_removal (insn_entry, i);
34421 }
34422 else if (insn_entry[i].is_swappable && insn_entry[i].special_handling)
34423 {
34424 swap_web_entry* root_entry
34425 = (swap_web_entry*)((&insn_entry[i])->unionfind_root ());
34426 if (!root_entry->web_not_optimizable)
34427 handle_special_swappables (insn_entry, i);
34428 }
34429
34430 /* Now delete the swaps marked for removal. */
34431 for (i = 0; i < e; ++i)
34432 if (insn_entry[i].will_delete)
34433 replace_swap_with_copy (insn_entry, i);
34434
34435 /* Clean up. */
34436 free (insn_entry);
34437 return 0;
34438 }
34439
34440 const pass_data pass_data_analyze_swaps =
34441 {
34442 RTL_PASS, /* type */
34443 "swaps", /* name */
34444 OPTGROUP_NONE, /* optinfo_flags */
34445 TV_NONE, /* tv_id */
34446 0, /* properties_required */
34447 0, /* properties_provided */
34448 0, /* properties_destroyed */
34449 0, /* todo_flags_start */
34450 TODO_df_finish, /* todo_flags_finish */
34451 };
34452
34453 class pass_analyze_swaps : public rtl_opt_pass
34454 {
34455 public:
34456	  pass_analyze_swaps (gcc::context *ctxt)
34457	    : rtl_opt_pass (pass_data_analyze_swaps, ctxt)
34458 {}
34459
34460 /* opt_pass methods: */
34461 virtual bool gate (function *)
34462 {
34463 return (optimize > 0 && !BYTES_BIG_ENDIAN && TARGET_VSX
34464 && rs6000_optimize_swaps);
34465 }
34466
34467 virtual unsigned int execute (function *fun)
34468 {
34469 return rs6000_analyze_swaps (fun);
34470 }
34471
34472 }; // class pass_analyze_swaps
34473
34474 rtl_opt_pass *
34475 make_pass_analyze_swaps (gcc::context *ctxt)
34476 {
34477 return new pass_analyze_swaps (ctxt);
34478 }
34479 \f
34480 struct gcc_target targetm = TARGET_INITIALIZER;
34481
34482 #include "gt-rs6000.h"