/* Copyright (C) 2006-2015 Free Software Foundation, Inc.

   This file is free software; you can redistribute it and/or modify it under
   the terms of the GNU General Public License as published by the Free
   Software Foundation; either version 3 of the License, or (at your option)
   any later version.

   This file is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
   FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
   for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "rtl.h"
#include "df.h"
#include "regs.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "obstack.h"
#include "alias.h"
#include "fold-const.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "calls.h"
#include "varasm.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "emit-rtl.h"
#include "stmt.h"
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "except.h"
#include "output.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "diagnostic-core.h"
#include "tm_p.h"
#include "target.h"
#include "langhooks.h"
#include "reload.h"
#include "sched-int.h"
#include "params.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimplify.h"
#include "tm-constrs.h"
#include "ddg.h"
#include "timevar.h"
#include "dumpfile.h"
#include "cfgloop.h"
#include "builtins.h"
#include "rtl-iter.h"

/* This file should be included last.  */
#include "target-def.h"

/* Builtin types, data and prototypes.  */

enum spu_builtin_type_index
{
  SPU_BTI_END_OF_PARAMS,

  /* We create new type nodes for these.  */
  SPU_BTI_V16QI,
  SPU_BTI_V8HI,
  SPU_BTI_V4SI,
  SPU_BTI_V2DI,
  SPU_BTI_V4SF,
  SPU_BTI_V2DF,
  SPU_BTI_UV16QI,
  SPU_BTI_UV8HI,
  SPU_BTI_UV4SI,
  SPU_BTI_UV2DI,

  /* A 16-byte type.  (Implemented with V16QI_type_node.)  */
  SPU_BTI_QUADWORD,

  /* These all correspond to intSI_type_node.  */
  SPU_BTI_7,
  SPU_BTI_S7,
  SPU_BTI_U7,
  SPU_BTI_S10,
  SPU_BTI_S10_4,
  SPU_BTI_U14,
  SPU_BTI_16,
  SPU_BTI_S16,
  SPU_BTI_S16_2,
  SPU_BTI_U16,
  SPU_BTI_U16_2,
  SPU_BTI_U18,

  /* These correspond to the standard types.  */
  SPU_BTI_INTQI,
  SPU_BTI_INTHI,
  SPU_BTI_INTSI,
  SPU_BTI_INTDI,

  SPU_BTI_UINTQI,
  SPU_BTI_UINTHI,
  SPU_BTI_UINTSI,
  SPU_BTI_UINTDI,

  SPU_BTI_FLOAT,
  SPU_BTI_DOUBLE,

  SPU_BTI_VOID,
  SPU_BTI_PTR,

  SPU_BTI_MAX
};

#define V16QI_type_node          (spu_builtin_types[SPU_BTI_V16QI])
#define V8HI_type_node           (spu_builtin_types[SPU_BTI_V8HI])
#define V4SI_type_node           (spu_builtin_types[SPU_BTI_V4SI])
#define V2DI_type_node           (spu_builtin_types[SPU_BTI_V2DI])
#define V4SF_type_node           (spu_builtin_types[SPU_BTI_V4SF])
#define V2DF_type_node           (spu_builtin_types[SPU_BTI_V2DF])
#define unsigned_V16QI_type_node (spu_builtin_types[SPU_BTI_UV16QI])
#define unsigned_V8HI_type_node  (spu_builtin_types[SPU_BTI_UV8HI])
#define unsigned_V4SI_type_node  (spu_builtin_types[SPU_BTI_UV4SI])
#define unsigned_V2DI_type_node  (spu_builtin_types[SPU_BTI_UV2DI])

static GTY(()) tree spu_builtin_types[SPU_BTI_MAX];

struct spu_builtin_range
{
  int low, high;
};

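/* Inclusive value ranges for the immediate-operand builtin argument
   types above, one entry per SPU_BTI_* range type, in the same order.  */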
static struct spu_builtin_range spu_builtin_range[] = {
  {-0x40ll, 0x7fll},         /* SPU_BTI_7     */
  {-0x40ll, 0x3fll},         /* SPU_BTI_S7    */
  {0ll, 0x7fll},             /* SPU_BTI_U7    */
  {-0x200ll, 0x1ffll},       /* SPU_BTI_S10   */
  {-0x2000ll, 0x1fffll},     /* SPU_BTI_S10_4 */
  {0ll, 0x3fffll},           /* SPU_BTI_U14   */
  {-0x8000ll, 0xffffll},     /* SPU_BTI_16    */
  {-0x8000ll, 0x7fffll},     /* SPU_BTI_S16   */
  {-0x20000ll, 0x1ffffll},   /* SPU_BTI_S16_2 */
  {0ll, 0xffffll},           /* SPU_BTI_U16   */
  {0ll, 0x3ffffll},          /* SPU_BTI_U16_2 */
  {0ll, 0x3ffffll},          /* SPU_BTI_U18   */
};

/* Target specific attribute specifications.  */
char regs_ever_allocated[FIRST_PSEUDO_REGISTER];

/* Prototypes and external defs.  */
static int get_pipe (rtx_insn *insn);
static int spu_naked_function_p (tree func);
static int mem_is_padded_component_ref (rtx x);
static void fix_range (const char *);
static rtx spu_expand_load (rtx, rtx, rtx, int);

/* Which instruction set architecture to use.  */
int spu_arch;
/* Which cpu are we tuning for.  */
int spu_tune;

/* The hardware requires 8 insns between a hint and the branch it
   affects.  This variable describes how many rtl instructions the
   compiler needs to see before inserting a hint, and then the compiler
   will insert enough nops to make it at least 8 insns.  The default is
   for the compiler to allow up to 2 nops to be emitted.  The nops are
   inserted in pairs, so we round down.  */
int spu_hint_dist = (8*4) - (2*4);

enum spu_immediate {
  SPU_NONE,
  SPU_IL,
  SPU_ILA,
  SPU_ILH,
  SPU_ILHU,
  SPU_ORI,
  SPU_ORHI,
  SPU_ORBI,
  SPU_IOHL
};
enum immediate_class
{
  IC_POOL,     /* constant pool */
  IC_IL1,      /* one il* instruction */
  IC_IL2,      /* both ilhu and iohl instructions */
  IC_IL1s,     /* one il* instruction */
  IC_IL2s,     /* both ilhu and iohl instructions */
  IC_FSMBI,    /* the fsmbi instruction */
  IC_CPAT,     /* one of the c*d instructions */
  IC_FSMBI2    /* fsmbi plus 1 other instruction */
};

static enum spu_immediate which_immediate_load (HOST_WIDE_INT val);
static enum spu_immediate which_logical_immediate (HOST_WIDE_INT val);
static int cpat_info (unsigned char *arr, int size, int *prun, int *pstart);
static enum immediate_class classify_immediate (rtx op,
						machine_mode mode);

/* Pointer mode for __ea references.  */
#define EAmode (spu_ea_model != 32 ? DImode : SImode)


/* Define the structure for the machine field in struct function.  */
struct GTY(()) machine_function
{
  /* Register to use for PIC accesses.  */
  rtx pic_reg;
};

/* How to allocate a 'struct machine_function'.  */
static struct machine_function *
spu_init_machine_status (void)
{
  return ggc_cleared_alloc<machine_function> ();
}

/* Implement TARGET_OPTION_OVERRIDE.  */
static void
spu_option_override (void)
{
  /* Set up function hooks.  */
  init_machine_status = spu_init_machine_status;

  /* Small loops will be unpeeled at -O3.  For SPU it is more important
     to keep code small by default.  */
  if (!flag_unroll_loops && !flag_peel_loops)
    maybe_set_param_value (PARAM_MAX_COMPLETELY_PEEL_TIMES, 4,
			   global_options.x_param_values,
			   global_options_set.x_param_values);

  flag_omit_frame_pointer = 1;

  /* Functions must be 8 byte aligned so we correctly handle dual issue.  */
  if (align_functions < 8)
    align_functions = 8;

  spu_hint_dist = 8*4 - spu_max_nops*4;
  if (spu_hint_dist < 0)
    spu_hint_dist = 0;

  if (spu_fixed_range_string)
    fix_range (spu_fixed_range_string);

  /* Determine processor architectural level.  */
  if (spu_arch_string)
    {
      if (strcmp (&spu_arch_string[0], "cell") == 0)
	spu_arch = PROCESSOR_CELL;
      else if (strcmp (&spu_arch_string[0], "celledp") == 0)
	spu_arch = PROCESSOR_CELLEDP;
      else
	error ("bad value (%s) for -march= switch", spu_arch_string);
    }

  /* Determine processor to tune for.  */
  if (spu_tune_string)
    {
      if (strcmp (&spu_tune_string[0], "cell") == 0)
	spu_tune = PROCESSOR_CELL;
      else if (strcmp (&spu_tune_string[0], "celledp") == 0)
	spu_tune = PROCESSOR_CELLEDP;
      else
	error ("bad value (%s) for -mtune= switch", spu_tune_string);
    }

  /* Change defaults according to the processor architecture.  */
  if (spu_arch == PROCESSOR_CELLEDP)
    {
      /* If no command line option has been otherwise specified, change
	 the default to -mno-safe-hints on celledp -- only the original
	 Cell/B.E. processors require this workaround.  */
      if (!(target_flags_explicit & MASK_SAFE_HINTS))
	target_flags &= ~MASK_SAFE_HINTS;
    }

  REAL_MODE_FORMAT (SFmode) = &spu_single_format;
}

/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
   struct attribute_spec.handler.  */

/* True if MODE is valid for the target.  By "valid", we mean able to
   be manipulated in non-trivial ways.  In particular, this means all
   the arithmetic is supported.  */
static bool
spu_scalar_mode_supported_p (machine_mode mode)
{
  switch (mode)
    {
    case QImode:
    case HImode:
    case SImode:
    case SFmode:
    case DImode:
    case TImode:
    case DFmode:
      return true;

    default:
      return false;
    }
}

/* Similarly for vector modes.  "Supported" here is less strict.  At
   least some operations are supported; need to check optabs or builtins
   for further details.  */
static bool
spu_vector_mode_supported_p (machine_mode mode)
{
  switch (mode)
    {
    case V16QImode:
    case V8HImode:
    case V4SImode:
    case V2DImode:
    case V4SFmode:
    case V2DFmode:
      return true;

    default:
      return false;
    }
}

/* GCC assumes that in a paradoxical SUBREG the inner mode occupies the
   least significant bytes of the outer mode.  This function returns
   TRUE for the SUBREG's where this is correct.  */
int
valid_subreg (rtx op)
{
  machine_mode om = GET_MODE (op);
  machine_mode im = GET_MODE (SUBREG_REG (op));
  return om != VOIDmode && im != VOIDmode
    && (GET_MODE_SIZE (im) == GET_MODE_SIZE (om)
	|| (GET_MODE_SIZE (im) <= 4 && GET_MODE_SIZE (om) <= 4)
	|| (GET_MODE_SIZE (im) >= 16 && GET_MODE_SIZE (om) >= 16));
}

/* When insv and ext[sz]v are passed a TI SUBREG, we want to strip it off
   and adjust the start offset.  */
static rtx
adjust_operand (rtx op, HOST_WIDE_INT * start)
{
  machine_mode mode;
  int op_size;
  /* Strip any paradoxical SUBREG.  */
  if (GET_CODE (op) == SUBREG
      && (GET_MODE_BITSIZE (GET_MODE (op))
	  > GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op)))))
    {
      if (start)
	*start -=
	  GET_MODE_BITSIZE (GET_MODE (op)) -
	  GET_MODE_BITSIZE (GET_MODE (SUBREG_REG (op)));
      op = SUBREG_REG (op);
    }
  /* If it is smaller than SI, assure a SUBREG.  */
  op_size = GET_MODE_BITSIZE (GET_MODE (op));
  if (op_size < 32)
    {
      if (start)
	*start += 32 - op_size;
      op_size = 32;
    }
  /* If it is not a MODE_INT (and/or it is smaller than SI) add a SUBREG.  */
  mode = mode_for_size (op_size, MODE_INT, 0);
  if (mode != GET_MODE (op))
    op = gen_rtx_SUBREG (mode, op, 0);
  return op;
}

void
spu_expand_extv (rtx ops[], int unsignedp)
{
  rtx dst = ops[0], src = ops[1];
  HOST_WIDE_INT width = INTVAL (ops[2]);
  HOST_WIDE_INT start = INTVAL (ops[3]);
  HOST_WIDE_INT align_mask;
  rtx s0, s1, mask, r0;

  gcc_assert (REG_P (dst) && GET_MODE (dst) == TImode);

  if (MEM_P (src))
    {
      /* First, determine if we need 1 TImode load or 2.  We need only 1
         if the bits being extracted do not cross the alignment boundary
         as determined by the MEM and its address.  */

      align_mask = -MEM_ALIGN (src);
      if ((start & align_mask) == ((start + width - 1) & align_mask))
	{
	  /* Alignment is sufficient for 1 load.  */
	  s0 = gen_reg_rtx (TImode);
	  r0 = spu_expand_load (s0, 0, src, start / 8);
	  start &= 7;
	  if (r0)
	    emit_insn (gen_rotqby_ti (s0, s0, r0));
	}
      else
	{
	  /* Need 2 loads.  */
	  s0 = gen_reg_rtx (TImode);
	  s1 = gen_reg_rtx (TImode);
	  r0 = spu_expand_load (s0, s1, src, start / 8);
	  start &= 7;

	  gcc_assert (start + width <= 128);
	  if (r0)
	    {
	      rtx r1 = gen_reg_rtx (SImode);
	      mask = gen_reg_rtx (TImode);
	      emit_move_insn (mask, GEN_INT (-1));
	      emit_insn (gen_rotqby_ti (s0, s0, r0));
	      emit_insn (gen_rotqby_ti (s1, s1, r0));
	      if (GET_CODE (r0) == CONST_INT)
		r1 = GEN_INT (INTVAL (r0) & 15);
	      else
		emit_insn (gen_andsi3 (r1, r0, GEN_INT (15)));
	      emit_insn (gen_shlqby_ti (mask, mask, r1));
	      emit_insn (gen_selb (s0, s1, s0, mask));
	    }
	}

    }
  else if (GET_CODE (src) == SUBREG)
    {
      rtx r = SUBREG_REG (src);
      gcc_assert (REG_P (r) && SCALAR_INT_MODE_P (GET_MODE (r)));
      s0 = gen_reg_rtx (TImode);
      if (GET_MODE_SIZE (GET_MODE (r)) < GET_MODE_SIZE (TImode))
	emit_insn (gen_rtx_SET (s0, gen_rtx_ZERO_EXTEND (TImode, r)));
      else
	emit_move_insn (s0, src);
    }
  else
    {
      gcc_assert (REG_P (src) && GET_MODE (src) == TImode);
      s0 = gen_reg_rtx (TImode);
      emit_move_insn (s0, src);
    }

  /* Now s0 is TImode and contains the bits to extract at start.  */

  if (start)
    emit_insn (gen_rotlti3 (s0, s0, GEN_INT (start)));

  if (128 - width)
    s0 = expand_shift (RSHIFT_EXPR, TImode, s0, 128 - width, s0, unsignedp);

  emit_move_insn (dst, s0);
}

void
spu_expand_insv (rtx ops[])
{
  HOST_WIDE_INT width = INTVAL (ops[1]);
  HOST_WIDE_INT start = INTVAL (ops[2]);
  HOST_WIDE_INT maskbits;
  machine_mode dst_mode;
  rtx dst = ops[0], src = ops[3];
  int dst_size;
  rtx mask;
  rtx shift_reg;
  int shift;


  if (GET_CODE (ops[0]) == MEM)
    dst = gen_reg_rtx (TImode);
  else
    dst = adjust_operand (dst, &start);
  dst_mode = GET_MODE (dst);
  dst_size = GET_MODE_BITSIZE (GET_MODE (dst));

  if (CONSTANT_P (src))
    {
      machine_mode m =
	(width <= 32 ? SImode : width <= 64 ? DImode : TImode);
      src = force_reg (m, convert_to_mode (m, src, 0));
    }
  src = adjust_operand (src, 0);

  mask = gen_reg_rtx (dst_mode);
  shift_reg = gen_reg_rtx (dst_mode);
  shift = dst_size - start - width;

  /* It's not safe to use subreg here because the compiler assumes
     that the SUBREG_REG is right justified in the SUBREG.  */
  convert_move (shift_reg, src, 1);

  if (shift > 0)
    {
      switch (dst_mode)
	{
	case SImode:
	  emit_insn (gen_ashlsi3 (shift_reg, shift_reg, GEN_INT (shift)));
	  break;
	case DImode:
	  emit_insn (gen_ashldi3 (shift_reg, shift_reg, GEN_INT (shift)));
	  break;
	case TImode:
	  emit_insn (gen_ashlti3 (shift_reg, shift_reg, GEN_INT (shift)));
	  break;
	default:
	  abort ();
	}
    }
  else if (shift < 0)
    abort ();

  switch (dst_size)
    {
    case 32:
      maskbits = (-1ll << (32 - width - start));
      if (start)
	maskbits += (1ll << (32 - start));
      emit_move_insn (mask, GEN_INT (maskbits));
      break;
    case 64:
      maskbits = (-1ll << (64 - width - start));
      if (start)
	maskbits += (1ll << (64 - start));
      emit_move_insn (mask, GEN_INT (maskbits));
      break;
    case 128:
      {
	unsigned char arr[16];
	int i = start / 8;
	memset (arr, 0, sizeof (arr));
	arr[i] = 0xff >> (start & 7);
	for (i++; i <= (start + width - 1) / 8; i++)
	  arr[i] = 0xff;
	arr[i - 1] &= 0xff << (7 - ((start + width - 1) & 7));
	emit_move_insn (mask, array_to_constant (TImode, arr));
      }
      break;
    default:
      abort ();
    }
  if (GET_CODE (ops[0]) == MEM)
    {
      rtx low = gen_reg_rtx (SImode);
      rtx rotl = gen_reg_rtx (SImode);
      rtx mask0 = gen_reg_rtx (TImode);
      rtx addr;
      rtx addr0;
      rtx addr1;
      rtx mem;

      addr = force_reg (Pmode, XEXP (ops[0], 0));
      addr0 = gen_rtx_AND (Pmode, addr, GEN_INT (-16));
      emit_insn (gen_andsi3 (low, addr, GEN_INT (15)));
      emit_insn (gen_negsi2 (rotl, low));
      emit_insn (gen_rotqby_ti (shift_reg, shift_reg, rotl));
      emit_insn (gen_rotqmby_ti (mask0, mask, rotl));
      mem = change_address (ops[0], TImode, addr0);
      set_mem_alias_set (mem, 0);
      emit_move_insn (dst, mem);
      emit_insn (gen_selb (dst, dst, shift_reg, mask0));
      if (start + width > MEM_ALIGN (ops[0]))
	{
	  rtx shl = gen_reg_rtx (SImode);
	  rtx mask1 = gen_reg_rtx (TImode);
	  rtx dst1 = gen_reg_rtx (TImode);
	  rtx mem1;
	  addr1 = plus_constant (Pmode, addr, 16);
	  addr1 = gen_rtx_AND (Pmode, addr1, GEN_INT (-16));
	  emit_insn (gen_subsi3 (shl, GEN_INT (16), low));
	  emit_insn (gen_shlqby_ti (mask1, mask, shl));
	  mem1 = change_address (ops[0], TImode, addr1);
	  set_mem_alias_set (mem1, 0);
	  emit_move_insn (dst1, mem1);
	  emit_insn (gen_selb (dst1, dst1, shift_reg, mask1));
	  emit_move_insn (mem1, dst1);
	}
      emit_move_insn (mem, dst);
    }
  else
    emit_insn (gen_selb (dst, copy_rtx (dst), shift_reg, mask));
}


int
spu_expand_block_move (rtx ops[])
{
  HOST_WIDE_INT bytes, align, offset;
  rtx src, dst, sreg, dreg, target;
  int i;
  if (GET_CODE (ops[2]) != CONST_INT
      || GET_CODE (ops[3]) != CONST_INT
      || INTVAL (ops[2]) > (HOST_WIDE_INT) (MOVE_RATIO (optimize_insn_for_speed_p ()) * 8))
    return 0;

  bytes = INTVAL (ops[2]);
  align = INTVAL (ops[3]);

  if (bytes <= 0)
    return 1;

  dst = ops[0];
  src = ops[1];

  if (align == 16)
    {
      for (offset = 0; offset + 16 <= bytes; offset += 16)
	{
	  dst = adjust_address (ops[0], V16QImode, offset);
	  src = adjust_address (ops[1], V16QImode, offset);
	  emit_move_insn (dst, src);
	}
      if (offset < bytes)
	{
	  rtx mask;
	  unsigned char arr[16] = { 0 };
	  for (i = 0; i < bytes - offset; i++)
	    arr[i] = 0xff;
	  dst = adjust_address (ops[0], V16QImode, offset);
	  src = adjust_address (ops[1], V16QImode, offset);
	  mask = gen_reg_rtx (V16QImode);
	  sreg = gen_reg_rtx (V16QImode);
	  dreg = gen_reg_rtx (V16QImode);
	  target = gen_reg_rtx (V16QImode);
	  emit_move_insn (mask, array_to_constant (V16QImode, arr));
	  emit_move_insn (dreg, dst);
	  emit_move_insn (sreg, src);
	  emit_insn (gen_selb (target, dreg, sreg, mask));
	  emit_move_insn (dst, target);
	}
      return 1;
    }
  return 0;
}

enum spu_comp_code
{ SPU_EQ, SPU_GT, SPU_GTU };

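/* Compare instruction codes, indexed by operand mode (using the
   mode-to-index mapping computed in spu_emit_branch_or_set) and by
   enum spu_comp_code.  A zero entry means the comparison is not
   directly available for that mode.  */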
int spu_comp_icode[12][3] = {
  {CODE_FOR_ceq_qi, CODE_FOR_cgt_qi, CODE_FOR_clgt_qi},
  {CODE_FOR_ceq_hi, CODE_FOR_cgt_hi, CODE_FOR_clgt_hi},
  {CODE_FOR_ceq_si, CODE_FOR_cgt_si, CODE_FOR_clgt_si},
  {CODE_FOR_ceq_di, CODE_FOR_cgt_di, CODE_FOR_clgt_di},
  {CODE_FOR_ceq_ti, CODE_FOR_cgt_ti, CODE_FOR_clgt_ti},
  {CODE_FOR_ceq_sf, CODE_FOR_cgt_sf, 0},
  {CODE_FOR_ceq_df, CODE_FOR_cgt_df, 0},
  {CODE_FOR_ceq_v16qi, CODE_FOR_cgt_v16qi, CODE_FOR_clgt_v16qi},
  {CODE_FOR_ceq_v8hi, CODE_FOR_cgt_v8hi, CODE_FOR_clgt_v8hi},
  {CODE_FOR_ceq_v4si, CODE_FOR_cgt_v4si, CODE_FOR_clgt_v4si},
  {CODE_FOR_ceq_v4sf, CODE_FOR_cgt_v4sf, 0},
  {CODE_FOR_ceq_v2df, CODE_FOR_cgt_v2df, 0},
};

/* Generate a compare for CODE.  Return a brand-new rtx that represents
   the result of the compare.  GCC can figure this out too if we don't
   provide all variations of compares, but since GCC always wants to use
   WORD_MODE, we can generate better code in most cases if we do it
   ourselves.  */
void
spu_emit_branch_or_set (int is_set, rtx cmp, rtx operands[])
{
  int reverse_compare = 0;
  int reverse_test = 0;
  rtx compare_result, eq_result;
  rtx comp_rtx, eq_rtx;
  machine_mode comp_mode;
  machine_mode op_mode;
  enum spu_comp_code scode, eq_code;
  enum insn_code ior_code;
  enum rtx_code code = GET_CODE (cmp);
  rtx op0 = XEXP (cmp, 0);
  rtx op1 = XEXP (cmp, 1);
  int index;
  int eq_test = 0;

  /* When op1 is a CONST_INT change (X >= C) to (X > C-1),
     and so on, to keep the constant in operand 1.  */
  if (GET_CODE (op1) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (op1) - 1;
      if (trunc_int_for_mode (val, GET_MODE (op0)) == val)
	switch (code)
	  {
	  case GE:
	    op1 = GEN_INT (val);
	    code = GT;
	    break;
	  case LT:
	    op1 = GEN_INT (val);
	    code = LE;
	    break;
	  case GEU:
	    op1 = GEN_INT (val);
	    code = GTU;
	    break;
	  case LTU:
	    op1 = GEN_INT (val);
	    code = LEU;
	    break;
	  default:
	    break;
	  }
    }

  /* However, if we generate an integer result, performing a reverse test
     would require an extra negation, so avoid that where possible.  */
  if (GET_CODE (op1) == CONST_INT && is_set == 1)
    {
      HOST_WIDE_INT val = INTVAL (op1) + 1;
      if (trunc_int_for_mode (val, GET_MODE (op0)) == val)
	switch (code)
	  {
	  case LE:
	    op1 = GEN_INT (val);
	    code = LT;
	    break;
	  case LEU:
	    op1 = GEN_INT (val);
	    code = LTU;
	    break;
	  default:
	    break;
	  }
    }

  comp_mode = SImode;
  op_mode = GET_MODE (op0);

  switch (code)
    {
    case GE:
      scode = SPU_GT;
      if (HONOR_NANS (op_mode))
	{
	  reverse_compare = 0;
	  reverse_test = 0;
	  eq_test = 1;
	  eq_code = SPU_EQ;
	}
      else
	{
	  reverse_compare = 1;
	  reverse_test = 1;
	}
      break;
    case LE:
      scode = SPU_GT;
      if (HONOR_NANS (op_mode))
	{
	  reverse_compare = 1;
	  reverse_test = 0;
	  eq_test = 1;
	  eq_code = SPU_EQ;
	}
      else
	{
	  reverse_compare = 0;
	  reverse_test = 1;
	}
      break;
    case LT:
      reverse_compare = 1;
      reverse_test = 0;
      scode = SPU_GT;
      break;
    case GEU:
      reverse_compare = 1;
      reverse_test = 1;
      scode = SPU_GTU;
      break;
    case LEU:
      reverse_compare = 0;
      reverse_test = 1;
      scode = SPU_GTU;
      break;
    case LTU:
      reverse_compare = 1;
      reverse_test = 0;
      scode = SPU_GTU;
      break;
    case NE:
      reverse_compare = 0;
      reverse_test = 1;
      scode = SPU_EQ;
      break;

    case EQ:
      scode = SPU_EQ;
      break;
    case GT:
      scode = SPU_GT;
      break;
    case GTU:
      scode = SPU_GTU;
      break;
    default:
      scode = SPU_EQ;
      break;
    }

  switch (op_mode)
    {
    case QImode:
      index = 0;
      comp_mode = QImode;
      break;
    case HImode:
      index = 1;
      comp_mode = HImode;
      break;
    case SImode:
      index = 2;
      break;
    case DImode:
      index = 3;
      break;
    case TImode:
      index = 4;
      break;
    case SFmode:
      index = 5;
      break;
    case DFmode:
      index = 6;
      break;
    case V16QImode:
      index = 7;
      comp_mode = op_mode;
      break;
    case V8HImode:
      index = 8;
      comp_mode = op_mode;
      break;
    case V4SImode:
      index = 9;
      comp_mode = op_mode;
      break;
    case V4SFmode:
      index = 10;
      comp_mode = V4SImode;
      break;
    case V2DFmode:
      index = 11;
      comp_mode = V2DImode;
      break;
    case V2DImode:
    default:
      abort ();
    }

  if (GET_MODE (op1) == DFmode
      && (scode != SPU_GT && scode != SPU_EQ))
    abort ();

  if (is_set == 0 && op1 == const0_rtx
      && (GET_MODE (op0) == SImode
	  || GET_MODE (op0) == HImode
	  || GET_MODE (op0) == QImode) && scode == SPU_EQ)
    {
      /* Don't need to set a register with the result when we are
         comparing against zero and branching.  */
      reverse_test = !reverse_test;
      compare_result = op0;
    }
  else
    {
      compare_result = gen_reg_rtx (comp_mode);

      if (reverse_compare)
	{
	  rtx t = op1;
	  op1 = op0;
	  op0 = t;
	}

      if (spu_comp_icode[index][scode] == 0)
	abort ();

      if (!(*insn_data[spu_comp_icode[index][scode]].operand[1].predicate)
	  (op0, op_mode))
	op0 = force_reg (op_mode, op0);
      if (!(*insn_data[spu_comp_icode[index][scode]].operand[2].predicate)
	  (op1, op_mode))
	op1 = force_reg (op_mode, op1);
      comp_rtx = GEN_FCN (spu_comp_icode[index][scode]) (compare_result,
							 op0, op1);
      if (comp_rtx == 0)
	abort ();
      emit_insn (comp_rtx);

      if (eq_test)
	{
	  eq_result = gen_reg_rtx (comp_mode);
	  eq_rtx = GEN_FCN (spu_comp_icode[index][eq_code]) (eq_result,
							     op0, op1);
	  if (eq_rtx == 0)
	    abort ();
	  emit_insn (eq_rtx);
	  ior_code = optab_handler (ior_optab, comp_mode);
	  gcc_assert (ior_code != CODE_FOR_nothing);
	  emit_insn (GEN_FCN (ior_code)
		     (compare_result, compare_result, eq_result));
	}
    }

  if (is_set == 0)
    {
      rtx bcomp;
      rtx loc_ref;

      /* We don't have branch on QI compare insns, so we convert the
         QI compare result to a HI result.  */
      if (comp_mode == QImode)
	{
	  rtx old_res = compare_result;
	  compare_result = gen_reg_rtx (HImode);
	  comp_mode = HImode;
	  emit_insn (gen_extendqihi2 (compare_result, old_res));
	}

      if (reverse_test)
	bcomp = gen_rtx_EQ (comp_mode, compare_result, const0_rtx);
      else
	bcomp = gen_rtx_NE (comp_mode, compare_result, const0_rtx);

      loc_ref = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
      emit_jump_insn (gen_rtx_SET (pc_rtx,
				   gen_rtx_IF_THEN_ELSE (VOIDmode, bcomp,
							 loc_ref, pc_rtx)));
    }
  else if (is_set == 2)
    {
      rtx target = operands[0];
      int compare_size = GET_MODE_BITSIZE (comp_mode);
      int target_size = GET_MODE_BITSIZE (GET_MODE (target));
      machine_mode mode = mode_for_size (target_size, MODE_INT, 0);
      rtx select_mask;
      rtx op_t = operands[2];
      rtx op_f = operands[3];

      /* The result of the comparison can be SI, HI or QI mode.  Create a
         mask based on that result.  */
      if (target_size > compare_size)
	{
	  select_mask = gen_reg_rtx (mode);
	  emit_insn (gen_extend_compare (select_mask, compare_result));
	}
      else if (target_size < compare_size)
	select_mask =
	  gen_rtx_SUBREG (mode, compare_result,
			  (compare_size - target_size) / BITS_PER_UNIT);
      else if (comp_mode != mode)
	select_mask = gen_rtx_SUBREG (mode, compare_result, 0);
      else
	select_mask = compare_result;

      if (GET_MODE (target) != GET_MODE (op_t)
	  || GET_MODE (target) != GET_MODE (op_f))
	abort ();

      if (reverse_test)
	emit_insn (gen_selb (target, op_t, op_f, select_mask));
      else
	emit_insn (gen_selb (target, op_f, op_t, select_mask));
    }
  else
    {
      rtx target = operands[0];
      if (reverse_test)
	emit_insn (gen_rtx_SET (compare_result,
				gen_rtx_NOT (comp_mode, compare_result)));
      if (GET_MODE (target) == SImode && GET_MODE (compare_result) == HImode)
	emit_insn (gen_extendhisi2 (target, compare_result));
      else if (GET_MODE (target) == SImode
	       && GET_MODE (compare_result) == QImode)
	emit_insn (gen_extend_compare (target, compare_result));
      else
	emit_move_insn (target, compare_result);
    }
}
996HOST_WIDE_INT
997const_double_to_hwint (rtx x)
998{
999 HOST_WIDE_INT val;
1000 REAL_VALUE_TYPE rv;
1001 if (GET_MODE (x) == SFmode)
1002 {
1003 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
1004 REAL_VALUE_TO_TARGET_SINGLE (rv, val);
1005 }
1006 else if (GET_MODE (x) == DFmode)
1007 {
1008 long l[2];
1009 REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
1010 REAL_VALUE_TO_TARGET_DOUBLE (rv, l);
1011 val = l[0];
1012 val = (val << 32) | (l[1] & 0xffffffff);
1013 }
1014 else
1015 abort ();
1016 return val;
1017}
1018
1019rtx
ef4bddc2 1020hwint_to_const_double (machine_mode mode, HOST_WIDE_INT v)
85d9c13c
TS
1021{
1022 long tv[2];
1023 REAL_VALUE_TYPE rv;
1024 gcc_assert (mode == SFmode || mode == DFmode);
1025
1026 if (mode == SFmode)
1027 tv[0] = (v << 32) >> 32;
1028 else if (mode == DFmode)
1029 {
1030 tv[1] = (v << 32) >> 32;
1031 tv[0] = v >> 32;
1032 }
1033 real_from_target (&rv, tv, mode);
1034 return CONST_DOUBLE_FROM_REAL_VALUE (rv, mode);
1035}
1036
1037void
1038print_operand_address (FILE * file, register rtx addr)
1039{
1040 rtx reg;
1041 rtx offset;
1042
09aad82b
TS
1043 if (GET_CODE (addr) == AND
1044 && GET_CODE (XEXP (addr, 1)) == CONST_INT
1045 && INTVAL (XEXP (addr, 1)) == -16)
1046 addr = XEXP (addr, 0);
1047
85d9c13c
TS
1048 switch (GET_CODE (addr))
1049 {
1050 case REG:
1051 fprintf (file, "0(%s)", reg_names[REGNO (addr)]);
1052 break;
1053
1054 case PLUS:
1055 reg = XEXP (addr, 0);
1056 offset = XEXP (addr, 1);
1057 if (GET_CODE (offset) == REG)
1058 {
1059 fprintf (file, "%s,%s", reg_names[REGNO (reg)],
1060 reg_names[REGNO (offset)]);
1061 }
1062 else if (GET_CODE (offset) == CONST_INT)
1063 {
1064 fprintf (file, HOST_WIDE_INT_PRINT_DEC "(%s)",
1065 INTVAL (offset), reg_names[REGNO (reg)]);
1066 }
1067 else
1068 abort ();
1069 break;
1070
1071 case CONST:
1072 case LABEL_REF:
1073 case SYMBOL_REF:
1074 case CONST_INT:
1075 output_addr_const (file, addr);
1076 break;
1077
1078 default:
1079 debug_rtx (addr);
1080 abort ();
1081 }
1082}

void
print_operand (FILE * file, rtx x, int code)
{
  machine_mode mode = GET_MODE (x);
  HOST_WIDE_INT val;
  unsigned char arr[16];
  int xcode = GET_CODE (x);
  int i, info;
  if (GET_MODE (x) == VOIDmode)
    switch (code)
      {
      case 'L':			/* 128 bits, signed */
      case 'm':			/* 128 bits, signed */
      case 'T':			/* 128 bits, signed */
      case 't':			/* 128 bits, signed */
	mode = TImode;
	break;
      case 'K':			/* 64 bits, signed */
      case 'k':			/* 64 bits, signed */
      case 'D':			/* 64 bits, signed */
      case 'd':			/* 64 bits, signed */
	mode = DImode;
	break;
      case 'J':			/* 32 bits, signed */
      case 'j':			/* 32 bits, signed */
      case 's':			/* 32 bits, signed */
      case 'S':			/* 32 bits, signed */
	mode = SImode;
	break;
      }
  switch (code)
    {

    case 'j':			/* 32 bits, signed */
    case 'k':			/* 64 bits, signed */
    case 'm':			/* 128 bits, signed */
      if (xcode == CONST_INT
	  || xcode == CONST_DOUBLE || xcode == CONST_VECTOR)
	{
	  gcc_assert (logical_immediate_p (x, mode));
	  constant_to_array (mode, x, arr);
	  val = (arr[0] << 24) | (arr[1] << 16) | (arr[2] << 8) | arr[3];
	  val = trunc_int_for_mode (val, SImode);
	  switch (which_logical_immediate (val))
	    {
	    case SPU_ORI:
	      break;
	    case SPU_ORHI:
	      fprintf (file, "h");
	      break;
	    case SPU_ORBI:
	      fprintf (file, "b");
	      break;
	    default:
	      gcc_unreachable ();
	    }
	}
      else
	gcc_unreachable ();
      return;

    case 'J':			/* 32 bits, signed */
    case 'K':			/* 64 bits, signed */
    case 'L':			/* 128 bits, signed */
      if (xcode == CONST_INT
	  || xcode == CONST_DOUBLE || xcode == CONST_VECTOR)
	{
	  gcc_assert (logical_immediate_p (x, mode)
		      || iohl_immediate_p (x, mode));
	  constant_to_array (mode, x, arr);
	  val = (arr[0] << 24) | (arr[1] << 16) | (arr[2] << 8) | arr[3];
	  val = trunc_int_for_mode (val, SImode);
	  switch (which_logical_immediate (val))
	    {
	    case SPU_ORI:
	    case SPU_IOHL:
	      break;
	    case SPU_ORHI:
	      val = trunc_int_for_mode (val, HImode);
	      break;
	    case SPU_ORBI:
	      val = trunc_int_for_mode (val, QImode);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  fprintf (file, HOST_WIDE_INT_PRINT_DEC, val);
	}
      else
	gcc_unreachable ();
      return;

    case 't':			/* 128 bits, signed */
    case 'd':			/* 64 bits, signed */
    case 's':			/* 32 bits, signed */
      if (CONSTANT_P (x))
	{
	  enum immediate_class c = classify_immediate (x, mode);
	  switch (c)
	    {
	    case IC_IL1:
	      constant_to_array (mode, x, arr);
	      val = (arr[0] << 24) | (arr[1] << 16) | (arr[2] << 8) | arr[3];
	      val = trunc_int_for_mode (val, SImode);
	      switch (which_immediate_load (val))
		{
		case SPU_IL:
		  break;
		case SPU_ILA:
		  fprintf (file, "a");
		  break;
		case SPU_ILH:
		  fprintf (file, "h");
		  break;
		case SPU_ILHU:
		  fprintf (file, "hu");
		  break;
		default:
		  gcc_unreachable ();
		}
	      break;
	    case IC_CPAT:
	      constant_to_array (mode, x, arr);
	      cpat_info (arr, GET_MODE_SIZE (mode), &info, 0);
	      if (info == 1)
		fprintf (file, "b");
	      else if (info == 2)
		fprintf (file, "h");
	      else if (info == 4)
		fprintf (file, "w");
	      else if (info == 8)
		fprintf (file, "d");
	      break;
	    case IC_IL1s:
	      if (xcode == CONST_VECTOR)
		{
		  x = CONST_VECTOR_ELT (x, 0);
		  xcode = GET_CODE (x);
		}
	      if (xcode == SYMBOL_REF || xcode == LABEL_REF || xcode == CONST)
		fprintf (file, "a");
	      else if (xcode == HIGH)
		fprintf (file, "hu");
	      break;
	    case IC_FSMBI:
	    case IC_FSMBI2:
	    case IC_IL2:
	    case IC_IL2s:
	    case IC_POOL:
	      abort ();
	    }
	}
      else
	gcc_unreachable ();
      return;

    case 'T':			/* 128 bits, signed */
    case 'D':			/* 64 bits, signed */
    case 'S':			/* 32 bits, signed */
      if (CONSTANT_P (x))
	{
	  enum immediate_class c = classify_immediate (x, mode);
	  switch (c)
	    {
	    case IC_IL1:
	      constant_to_array (mode, x, arr);
	      val = (arr[0] << 24) | (arr[1] << 16) | (arr[2] << 8) | arr[3];
	      val = trunc_int_for_mode (val, SImode);
	      switch (which_immediate_load (val))
		{
		case SPU_IL:
		case SPU_ILA:
		  break;
		case SPU_ILH:
		case SPU_ILHU:
		  val = trunc_int_for_mode (((arr[0] << 8) | arr[1]), HImode);
		  break;
		default:
		  gcc_unreachable ();
		}
	      fprintf (file, HOST_WIDE_INT_PRINT_DEC, val);
	      break;
	    case IC_FSMBI:
	      constant_to_array (mode, x, arr);
	      val = 0;
	      for (i = 0; i < 16; i++)
		{
		  val <<= 1;
		  val |= arr[i] & 1;
		}
	      print_operand (file, GEN_INT (val), 0);
	      break;
	    case IC_CPAT:
	      constant_to_array (mode, x, arr);
	      cpat_info (arr, GET_MODE_SIZE (mode), 0, &info);
	      fprintf (file, HOST_WIDE_INT_PRINT_DEC, (HOST_WIDE_INT)info);
	      break;
	    case IC_IL1s:
	      if (xcode == HIGH)
		x = XEXP (x, 0);
	      if (GET_CODE (x) == CONST_VECTOR)
		x = CONST_VECTOR_ELT (x, 0);
	      output_addr_const (file, x);
	      if (xcode == HIGH)
		fprintf (file, "@h");
	      break;
	    case IC_IL2:
	    case IC_IL2s:
	    case IC_FSMBI2:
	    case IC_POOL:
	      abort ();
	    }
	}
      else
	gcc_unreachable ();
      return;

    case 'C':
      if (xcode == CONST_INT)
	{
	  /* Only 4 least significant bits are relevant for generate
	     control word instructions.  */
	  fprintf (file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x) & 15);
	  return;
	}
      break;

    case 'M':			/* print code for c*d */
      if (GET_CODE (x) == CONST_INT)
	switch (INTVAL (x))
	  {
	  case 1:
	    fprintf (file, "b");
	    break;
	  case 2:
	    fprintf (file, "h");
	    break;
	  case 4:
	    fprintf (file, "w");
	    break;
	  case 8:
	    fprintf (file, "d");
	    break;
	  default:
	    gcc_unreachable ();
	  }
      else
	gcc_unreachable ();
      return;

    case 'N':			/* Negate the operand */
      if (xcode == CONST_INT)
	fprintf (file, HOST_WIDE_INT_PRINT_DEC, -INTVAL (x));
      else if (xcode == CONST_VECTOR)
	fprintf (file, HOST_WIDE_INT_PRINT_DEC,
		 -INTVAL (CONST_VECTOR_ELT (x, 0)));
      return;

    case 'I':			/* enable/disable interrupts */
      if (xcode == CONST_INT)
	fprintf (file, "%s", INTVAL (x) == 0 ? "d" : "e");
      return;

    case 'b':			/* branch modifiers */
      if (xcode == REG)
	fprintf (file, "%s", GET_MODE (x) == HImode ? "h" : "");
      else if (COMPARISON_P (x))
	fprintf (file, "%s", xcode == NE ? "n" : "");
      return;

    case 'i':			/* indirect call */
      if (xcode == MEM)
	{
	  if (GET_CODE (XEXP (x, 0)) == REG)
	    /* Used in indirect function calls.  */
	    fprintf (file, "%s", reg_names[REGNO (XEXP (x, 0))]);
	  else
	    output_address (XEXP (x, 0));
	}
      return;

    case 'p':			/* load/store */
      if (xcode == MEM)
	{
	  x = XEXP (x, 0);
	  xcode = GET_CODE (x);
	}
      if (xcode == AND)
	{
	  x = XEXP (x, 0);
	  xcode = GET_CODE (x);
	}
      if (xcode == REG)
	fprintf (file, "d");
      else if (xcode == CONST_INT)
	fprintf (file, "a");
      else if (xcode == CONST || xcode == SYMBOL_REF || xcode == LABEL_REF)
	fprintf (file, "r");
      else if (xcode == PLUS || xcode == LO_SUM)
	{
	  if (GET_CODE (XEXP (x, 1)) == REG)
	    fprintf (file, "x");
	  else
	    fprintf (file, "d");
	}
      return;

    case 'e':
      val = xcode == CONST_INT ? INTVAL (x) : INTVAL (CONST_VECTOR_ELT (x, 0));
      val &= 0x7;
      output_addr_const (file, GEN_INT (val));
      return;

    case 'f':
      val = xcode == CONST_INT ? INTVAL (x) : INTVAL (CONST_VECTOR_ELT (x, 0));
      val &= 0x1f;
      output_addr_const (file, GEN_INT (val));
      return;

    case 'g':
      val = xcode == CONST_INT ? INTVAL (x) : INTVAL (CONST_VECTOR_ELT (x, 0));
      val &= 0x3f;
      output_addr_const (file, GEN_INT (val));
      return;

    case 'h':
      val = xcode == CONST_INT ? INTVAL (x) : INTVAL (CONST_VECTOR_ELT (x, 0));
      val = (val >> 3) & 0x1f;
      output_addr_const (file, GEN_INT (val));
      return;

    case 'E':
      val = xcode == CONST_INT ? INTVAL (x) : INTVAL (CONST_VECTOR_ELT (x, 0));
      val = -val;
      val &= 0x7;
      output_addr_const (file, GEN_INT (val));
      return;

    case 'F':
      val = xcode == CONST_INT ? INTVAL (x) : INTVAL (CONST_VECTOR_ELT (x, 0));
      val = -val;
      val &= 0x1f;
      output_addr_const (file, GEN_INT (val));
      return;

    case 'G':
      val = xcode == CONST_INT ? INTVAL (x) : INTVAL (CONST_VECTOR_ELT (x, 0));
      val = -val;
      val &= 0x3f;
      output_addr_const (file, GEN_INT (val));
      return;

    case 'H':
      val = xcode == CONST_INT ? INTVAL (x) : INTVAL (CONST_VECTOR_ELT (x, 0));
      val = -(val & -8ll);
      val = (val >> 3) & 0x1f;
      output_addr_const (file, GEN_INT (val));
      return;

    case 'v':
    case 'w':
      constant_to_array (mode, x, arr);
      val = (((arr[0] << 1) + (arr[1] >> 7)) & 0xff) - 127;
      output_addr_const (file, GEN_INT (code == 'w' ? -val : val));
      return;

    case 0:
      if (xcode == REG)
	fprintf (file, "%s", reg_names[REGNO (x)]);
      else if (xcode == MEM)
	output_address (XEXP (x, 0));
      else if (xcode == CONST_VECTOR)
	print_operand (file, CONST_VECTOR_ELT (x, 0), 0);
      else
	output_addr_const (file, x);
      return;

      /* unused letters
                      o qr  u   yz
         AB           OPQR  UVWXYZ */
    default:
      output_operand_lossage ("invalid %%xn code");
    }
  gcc_unreachable ();
}

/* For PIC mode we've reserved PIC_OFFSET_TABLE_REGNUM, which is a
   caller saved register.  For leaf functions it is more efficient to
   use a volatile register because we won't need to save and restore the
   pic register.  This routine is only valid after register allocation
   is completed, so we can pick an unused register.  */
static rtx
get_pic_reg (void)
{
  if (!reload_completed && !reload_in_progress)
    abort ();

  /* If we've already made the decision, we need to keep with it.  Once we've
     decided to use LAST_ARG_REGNUM, future calls to df_regs_ever_live_p may
     return true since the register is now live; this should not cause us to
     "switch back" to using pic_offset_table_rtx.  */
  if (!cfun->machine->pic_reg)
    {
      if (crtl->is_leaf && !df_regs_ever_live_p (LAST_ARG_REGNUM))
	cfun->machine->pic_reg = gen_rtx_REG (SImode, LAST_ARG_REGNUM);
      else
	cfun->machine->pic_reg = pic_offset_table_rtx;
    }

  return cfun->machine->pic_reg;
}

/* Split constant addresses to handle cases that are too large.
   Add in the pic register when in PIC mode.
   Split immediates that require more than 1 instruction.  */
int
spu_split_immediate (rtx * ops)
{
  machine_mode mode = GET_MODE (ops[0]);
  enum immediate_class c = classify_immediate (ops[1], mode);

  switch (c)
    {
    case IC_IL2:
      {
	unsigned char arrhi[16];
	unsigned char arrlo[16];
	rtx to, temp, hi, lo;
	int i;
	machine_mode imode = mode;
	/* We need to do reals as ints because the constant used in the
	   IOR might not be a legitimate real constant.  */
	imode = int_mode_for_mode (mode);
	constant_to_array (mode, ops[1], arrhi);
	if (imode != mode)
	  to = simplify_gen_subreg (imode, ops[0], mode, 0);
	else
	  to = ops[0];
	temp = !can_create_pseudo_p () ? to : gen_reg_rtx (imode);
	for (i = 0; i < 16; i += 4)
	  {
	    arrlo[i + 2] = arrhi[i + 2];
	    arrlo[i + 3] = arrhi[i + 3];
	    arrlo[i + 0] = arrlo[i + 1] = 0;
	    arrhi[i + 2] = arrhi[i + 3] = 0;
	  }
	hi = array_to_constant (imode, arrhi);
	lo = array_to_constant (imode, arrlo);
	emit_move_insn (temp, hi);
	emit_insn (gen_rtx_SET (to, gen_rtx_IOR (imode, temp, lo)));
	return 1;
      }
    case IC_FSMBI2:
      {
	unsigned char arr_fsmbi[16];
	unsigned char arr_andbi[16];
	rtx to, reg_fsmbi, reg_and;
	int i;
	machine_mode imode = mode;
	/* We need to do reals as ints because the constant used in the
	   AND might not be a legitimate real constant.  */
	imode = int_mode_for_mode (mode);
	constant_to_array (mode, ops[1], arr_fsmbi);
	if (imode != mode)
	  to = simplify_gen_subreg (imode, ops[0], GET_MODE (ops[0]), 0);
	else
	  to = ops[0];
	for (i = 0; i < 16; i++)
	  if (arr_fsmbi[i] != 0)
	    {
	      arr_andbi[0] = arr_fsmbi[i];
	      arr_fsmbi[i] = 0xff;
	    }
	for (i = 1; i < 16; i++)
	  arr_andbi[i] = arr_andbi[0];
	reg_fsmbi = array_to_constant (imode, arr_fsmbi);
	reg_and = array_to_constant (imode, arr_andbi);
	emit_move_insn (to, reg_fsmbi);
	emit_insn (gen_rtx_SET (to, gen_rtx_AND (imode, to, reg_and)));
	return 1;
      }
    case IC_POOL:
      if (reload_in_progress || reload_completed)
	{
	  rtx mem = force_const_mem (mode, ops[1]);
	  if (TARGET_LARGE_MEM)
	    {
	      rtx addr = gen_rtx_REG (Pmode, REGNO (ops[0]));
	      emit_move_insn (addr, XEXP (mem, 0));
	      mem = replace_equiv_address (mem, addr);
	    }
	  emit_move_insn (ops[0], mem);
	  return 1;
	}
      break;
    case IC_IL1s:
    case IC_IL2s:
      if (reload_completed && GET_CODE (ops[1]) != HIGH)
	{
	  if (c == IC_IL2s)
	    {
	      emit_move_insn (ops[0], gen_rtx_HIGH (mode, ops[1]));
	      emit_move_insn (ops[0], gen_rtx_LO_SUM (mode, ops[0], ops[1]));
	    }
	  else if (flag_pic)
	    emit_insn (gen_pic (ops[0], ops[1]));
	  if (flag_pic)
	    {
	      rtx pic_reg = get_pic_reg ();
	      emit_insn (gen_addsi3 (ops[0], ops[0], pic_reg));
	    }
	  return flag_pic || c == IC_IL2s;
	}
      break;
    case IC_IL1:
    case IC_FSMBI:
    case IC_CPAT:
      break;
    }
  return 0;
}

/* SAVING is TRUE when we are generating the actual load and store
   instructions for REGNO.  When determining the size of the stack
   needed for saving register we must allocate enough space for the
   worst case, because we don't always have the information early enough
   to not allocate it.  But we can at least eliminate the actual loads
   and stores during the prologue/epilogue.  */
static int
need_to_save_reg (int regno, int saving)
{
  if (df_regs_ever_live_p (regno) && !call_used_regs[regno])
    return 1;
  if (flag_pic
      && regno == PIC_OFFSET_TABLE_REGNUM
      && (!saving || cfun->machine->pic_reg == pic_offset_table_rtx))
    return 1;
  return 0;
}

/* This function is only correct starting with local register
   allocation.  */
int
spu_saved_regs_size (void)
{
  int reg_save_size = 0;
  int regno;

  for (regno = FIRST_PSEUDO_REGISTER - 1; regno >= 0; --regno)
    if (need_to_save_reg (regno, 0))
      reg_save_size += 0x10;
  return reg_save_size;
}

static rtx_insn *
frame_emit_store (int regno, rtx addr, HOST_WIDE_INT offset)
{
  rtx reg = gen_rtx_REG (V4SImode, regno);
  rtx mem =
    gen_frame_mem (V4SImode, gen_rtx_PLUS (Pmode, addr, GEN_INT (offset)));
  return emit_insn (gen_movv4si (mem, reg));
}

static rtx_insn *
frame_emit_load (int regno, rtx addr, HOST_WIDE_INT offset)
{
  rtx reg = gen_rtx_REG (V4SImode, regno);
  rtx mem =
    gen_frame_mem (V4SImode, gen_rtx_PLUS (Pmode, addr, GEN_INT (offset)));
  return emit_insn (gen_movv4si (reg, mem));
}

/* This happens after reload, so we need to expand it.  */
static rtx_insn *
frame_emit_add_imm (rtx dst, rtx src, HOST_WIDE_INT imm, rtx scratch)
{
  rtx_insn *insn;
  if (satisfies_constraint_K (GEN_INT (imm)))
    {
      insn = emit_insn (gen_addsi3 (dst, src, GEN_INT (imm)));
    }
  else
    {
      emit_insn (gen_movsi (scratch, gen_int_mode (imm, SImode)));
      insn = emit_insn (gen_addsi3 (dst, src, scratch));
      if (REGNO (src) == REGNO (scratch))
	abort ();
    }
  return insn;
}

/* Return nonzero if this function is known to have a null epilogue.  */

int
direct_return (void)
{
  if (reload_completed)
    {
      if (cfun->static_chain_decl == 0
	  && (spu_saved_regs_size ()
	      + get_frame_size ()
	      + crtl->outgoing_args_size
	      + crtl->args.pretend_args_size == 0)
	  && crtl->is_leaf)
	return 1;
    }
  return 0;
}

/*
   The stack frame looks like this:
         +-------------+
         |  incoming   |
         |    args     |
   AP -> +-------------+
         | $lr save    |
         +-------------+
 prev SP | back chain  |
         +-------------+
         |  var args   |
         |  reg save   | crtl->args.pretend_args_size bytes
         +-------------+
         |    ...      |
         | saved regs  | spu_saved_regs_size() bytes
   FP -> +-------------+
         |    ...      |
         |   vars      | get_frame_size() bytes
  HFP -> +-------------+
         |    ...      |
         |  outgoing   |
         |    args     | crtl->outgoing_args_size bytes
         +-------------+
         | $lr of next |
         |   frame     |
         +-------------+
         | back chain  |
   SP -> +-------------+

*/
void
spu_expand_prologue (void)
{
  HOST_WIDE_INT size = get_frame_size (), offset, regno;
  HOST_WIDE_INT total_size;
  HOST_WIDE_INT saved_regs_size;
  rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx scratch_reg_0, scratch_reg_1;
  rtx_insn *insn;
  rtx real;

  if (flag_pic && optimize == 0 && !cfun->machine->pic_reg)
    cfun->machine->pic_reg = pic_offset_table_rtx;

  if (spu_naked_function_p (current_function_decl))
    return;

  scratch_reg_0 = gen_rtx_REG (SImode, LAST_ARG_REGNUM + 1);
  scratch_reg_1 = gen_rtx_REG (SImode, LAST_ARG_REGNUM + 2);

  saved_regs_size = spu_saved_regs_size ();
  total_size = size + saved_regs_size
    + crtl->outgoing_args_size
    + crtl->args.pretend_args_size;

  if (!crtl->is_leaf
      || cfun->calls_alloca || total_size > 0)
    total_size += STACK_POINTER_OFFSET;

  /* Save this first because code after this might use the link
     register as a scratch register.  */
  if (!crtl->is_leaf)
    {
      insn = frame_emit_store (LINK_REGISTER_REGNUM, sp_reg, 16);
      RTX_FRAME_RELATED_P (insn) = 1;
    }

  if (total_size > 0)
    {
      offset = -crtl->args.pretend_args_size;
      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
	if (need_to_save_reg (regno, 1))
	  {
	    offset -= 16;
	    insn = frame_emit_store (regno, sp_reg, offset);
	    RTX_FRAME_RELATED_P (insn) = 1;
	  }
    }

  if (flag_pic && cfun->machine->pic_reg)
    {
      rtx pic_reg = cfun->machine->pic_reg;
      insn = emit_insn (gen_load_pic_offset (pic_reg, scratch_reg_0));
      insn = emit_insn (gen_subsi3 (pic_reg, pic_reg, scratch_reg_0));
    }

  if (total_size > 0)
    {
      if (flag_stack_check)
	{
	  /* We compare against total_size-1 because
	     ($sp >= total_size) <=> ($sp > total_size-1)  */
	  rtx scratch_v4si = gen_rtx_REG (V4SImode, REGNO (scratch_reg_0));
	  rtx sp_v4si = gen_rtx_REG (V4SImode, STACK_POINTER_REGNUM);
	  rtx size_v4si = spu_const (V4SImode, total_size - 1);
	  if (!satisfies_constraint_K (GEN_INT (total_size - 1)))
	    {
	      emit_move_insn (scratch_v4si, size_v4si);
	      size_v4si = scratch_v4si;
	    }
	  emit_insn (gen_cgt_v4si (scratch_v4si, sp_v4si, size_v4si));
	  emit_insn (gen_vec_extractv4si
		     (scratch_reg_0, scratch_v4si, GEN_INT (1)));
	  emit_insn (gen_spu_heq (scratch_reg_0, GEN_INT (0)));
	}

      /* Adjust the stack pointer, and make sure scratch_reg_0 contains
         the value of the previous $sp because we save it as the back
         chain.  */
      if (total_size <= 2000)
	{
	  /* In this case we save the back chain first.  */
	  insn = frame_emit_store (STACK_POINTER_REGNUM, sp_reg, -total_size);
	  insn =
	    frame_emit_add_imm (sp_reg, sp_reg, -total_size, scratch_reg_0);
	}
      else
	{
	  insn = emit_move_insn (scratch_reg_0, sp_reg);
	  insn =
	    frame_emit_add_imm (sp_reg, sp_reg, -total_size, scratch_reg_1);
	}
      RTX_FRAME_RELATED_P (insn) = 1;
      real = gen_addsi3 (sp_reg, sp_reg, GEN_INT (-total_size));
      add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);

      if (total_size > 2000)
	{
	  /* Save the back chain ptr.  */
	  insn = frame_emit_store (REGNO (scratch_reg_0), sp_reg, 0);
	}

      if (frame_pointer_needed)
	{
	  rtx fp_reg = gen_rtx_REG (Pmode, HARD_FRAME_POINTER_REGNUM);
	  HOST_WIDE_INT fp_offset = STACK_POINTER_OFFSET
	    + crtl->outgoing_args_size;
	  /* Set the new frame_pointer.  */
	  insn = frame_emit_add_imm (fp_reg, sp_reg, fp_offset, scratch_reg_0);
	  RTX_FRAME_RELATED_P (insn) = 1;
	  real = gen_addsi3 (fp_reg, sp_reg, GEN_INT (fp_offset));
	  add_reg_note (insn, REG_FRAME_RELATED_EXPR, real);
	  REGNO_POINTER_ALIGN (HARD_FRAME_POINTER_REGNUM) = STACK_BOUNDARY;
	}
    }

  if (flag_stack_usage_info)
    current_function_static_stack_size = total_size;
}

void
spu_expand_epilogue (bool sibcall_p)
{
  int size = get_frame_size (), offset, regno;
  HOST_WIDE_INT saved_regs_size, total_size;
  rtx sp_reg = gen_rtx_REG (Pmode, STACK_POINTER_REGNUM);
  rtx scratch_reg_0;

  if (spu_naked_function_p (current_function_decl))
    return;

  scratch_reg_0 = gen_rtx_REG (SImode, LAST_ARG_REGNUM + 1);

  saved_regs_size = spu_saved_regs_size ();
  total_size = size + saved_regs_size
    + crtl->outgoing_args_size
    + crtl->args.pretend_args_size;

  if (!crtl->is_leaf
      || cfun->calls_alloca || total_size > 0)
    total_size += STACK_POINTER_OFFSET;

  if (total_size > 0)
    {
      if (cfun->calls_alloca)
	frame_emit_load (STACK_POINTER_REGNUM, sp_reg, 0);
      else
	frame_emit_add_imm (sp_reg, sp_reg, total_size, scratch_reg_0);


      if (saved_regs_size > 0)
	{
	  offset = -crtl->args.pretend_args_size;
	  for (regno = 0; regno < FIRST_PSEUDO_REGISTER; ++regno)
	    if (need_to_save_reg (regno, 1))
	      {
		offset -= 0x10;
		frame_emit_load (regno, sp_reg, offset);
	      }
	}
    }

  if (!crtl->is_leaf)
    frame_emit_load (LINK_REGISTER_REGNUM, sp_reg, 16);

  if (!sibcall_p)
    {
      emit_use (gen_rtx_REG (SImode, LINK_REGISTER_REGNUM));
      emit_jump_insn (gen__return ());
    }
}

rtx
spu_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
{
  if (count != 0)
    return 0;
  /* This is inefficient because it ends up copying to a save-register
     which then gets saved even though $lr has already been saved.  But
     it does generate better code for leaf functions and we don't need
     to use RETURN_ADDRESS_POINTER_REGNUM to get it working.  It's only
     used for __builtin_return_address anyway, so maybe we don't care if
     it's inefficient.  */
  return get_hard_reg_initial_val (Pmode, LINK_REGISTER_REGNUM);
}

1908\f
1909
1910/* Given VAL, generate a constant appropriate for MODE.
1911 If MODE is a vector mode, every element will be VAL.
1912 For TImode, VAL will be zero extended to 128 bits. */
1913rtx
ef4bddc2 1914spu_const (machine_mode mode, HOST_WIDE_INT val)
85d9c13c
TS
1915{
1916 rtx inner;
1917 rtvec v;
1918 int units, i;
1919
1920 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
1921 || GET_MODE_CLASS (mode) == MODE_FLOAT
1922 || GET_MODE_CLASS (mode) == MODE_VECTOR_INT
1923 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT);
1924
1925 if (GET_MODE_CLASS (mode) == MODE_INT)
1926 return immed_double_const (val, 0, mode);
1927
1928 /* val is the bit representation of the float */
1929 if (GET_MODE_CLASS (mode) == MODE_FLOAT)
1930 return hwint_to_const_double (mode, val);
1931
1932 if (GET_MODE_CLASS (mode) == MODE_VECTOR_INT)
1933 inner = immed_double_const (val, 0, GET_MODE_INNER (mode));
1934 else
1935 inner = hwint_to_const_double (GET_MODE_INNER (mode), val);
1936
1937 units = GET_MODE_NUNITS (mode);
1938
1939 v = rtvec_alloc (units);
1940
1941 for (i = 0; i < units; ++i)
1942 RTVEC_ELT (v, i) = inner;
1943
1944 return gen_rtx_CONST_VECTOR (mode, v);
1945}
85d9c13c 1946
39aeae85
SL
1947/* Create a MODE vector constant from 4 ints. */
1948rtx
ef4bddc2 1949spu_const_from_ints(machine_mode mode, int a, int b, int c, int d)
39aeae85
SL
1950{
1951 unsigned char arr[16];
1952 arr[0] = (a >> 24) & 0xff;
1953 arr[1] = (a >> 16) & 0xff;
1954 arr[2] = (a >> 8) & 0xff;
1955 arr[3] = (a >> 0) & 0xff;
1956 arr[4] = (b >> 24) & 0xff;
1957 arr[5] = (b >> 16) & 0xff;
1958 arr[6] = (b >> 8) & 0xff;
1959 arr[7] = (b >> 0) & 0xff;
1960 arr[8] = (c >> 24) & 0xff;
1961 arr[9] = (c >> 16) & 0xff;
1962 arr[10] = (c >> 8) & 0xff;
1963 arr[11] = (c >> 0) & 0xff;
1964 arr[12] = (d >> 24) & 0xff;
1965 arr[13] = (d >> 16) & 0xff;
1966 arr[14] = (d >> 8) & 0xff;
1967 arr[15] = (d >> 0) & 0xff;
1968 return array_to_constant(mode, arr);
1969}
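/* For example (illustrative only), spu_const_from_ints (V16QImode,
   0x00010203, 0x04050607, 0x08090a0b, 0x0c0d0e0f) builds the byte
   sequence 00 01 02 ... 0f, i.e. each int argument supplies four
   big-endian bytes of the 16-byte constant.  */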
9dcc2e87
TS
1970\f
1971/* Branch hint support.  */
39aeae85 1972
85d9c13c
TS
1973/* An array of these is used to propagate hints to predecessor blocks. */
1974struct spu_bb_info
1975{
23c39aaa 1976 rtx_insn *prop_jump; /* propagated from another block */
9dcc2e87 1977 int bb_index; /* the original block. */
85d9c13c 1978};
9dcc2e87 1979static struct spu_bb_info *spu_bb_info;
85d9c13c 1980
9dcc2e87 1981#define STOP_HINT_P(INSN) \
b64925dc 1982 (CALL_P(INSN) \
9dcc2e87
TS
1983 || INSN_CODE(INSN) == CODE_FOR_divmodsi4 \
1984 || INSN_CODE(INSN) == CODE_FOR_udivmodsi4)
1985
1986/* 1 when RTX is a hinted branch or its target. We keep track of
1987 what has been hinted so the safe-hint code can test it easily. */
1988#define HINTED_P(RTX) \
1989 (RTL_FLAG_CHECK3("HINTED_P", (RTX), CODE_LABEL, JUMP_INSN, CALL_INSN)->unchanging)
1990
1991/* 1 when RTX is an insn that must be scheduled on an even boundary. */
1992#define SCHED_ON_EVEN_P(RTX) \
1993 (RTL_FLAG_CHECK2("SCHED_ON_EVEN_P", (RTX), JUMP_INSN, CALL_INSN)->in_struct)
1994
1995/* Emit a nop for INSN such that the two will dual issue. This assumes
1996 INSN is 8-byte aligned. When INSN is inline asm we emit an lnop.
1997 We check for TImode to handle a MULTI1 insn which has dual issued its
b3d45ff0 1998 first instruction. get_pipe returns -1 for MULTI0 or inline asm. */
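/* Illustrative reading of the cases below (not in the original source):
   for a pipe-1 INSN already marked TImode (it would start a new fetch
   group), the even-pipe nopn is emitted *before* it and inherits the
   TImode marking, so INSN drops into the odd slot; otherwise an lnop
   is emitted after INSN to fill the odd slot.  */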
9dcc2e87 1999static void
23c39aaa 2000emit_nop_for_insn (rtx_insn *insn)
85d9c13c 2001{
9dcc2e87 2002 int p;
23c39aaa 2003 rtx_insn *new_insn;
b3d45ff0
UW
2004
2005 /* We need to handle JUMP_TABLE_DATA separately. */
2006 if (JUMP_TABLE_DATA_P (insn))
2007 {
2008 new_insn = emit_insn_after (gen_lnop(), insn);
2009 recog_memoized (new_insn);
2010 INSN_LOCATION (new_insn) = UNKNOWN_LOCATION;
2011 return;
2012 }
2013
9dcc2e87
TS
2014 p = get_pipe (insn);
2015 if ((CALL_P (insn) || JUMP_P (insn)) && SCHED_ON_EVEN_P (insn))
2016 new_insn = emit_insn_after (gen_lnop (), insn);
2017 else if (p == 1 && GET_MODE (insn) == TImode)
85d9c13c 2018 {
9dcc2e87
TS
2019 new_insn = emit_insn_before (gen_nopn (GEN_INT (127)), insn);
2020 PUT_MODE (new_insn, TImode);
2021 PUT_MODE (insn, VOIDmode);
2022 }
2023 else
2024 new_insn = emit_insn_after (gen_lnop (), insn);
2025 recog_memoized (new_insn);
9d12bc68 2026 INSN_LOCATION (new_insn) = INSN_LOCATION (insn);
9dcc2e87
TS
2027}
2028
2029/* Insert nops in basic blocks to meet dual issue alignment
2030 requirements. Also make sure hbrp and hint instructions are at least
2031 one cycle apart, possibly inserting a nop. */
2032static void
2033pad_bb(void)
2034{
23c39aaa 2035 rtx_insn *insn, *next_insn, *prev_insn, *hbr_insn = 0;
9dcc2e87
TS
2036 int length;
2037 int addr;
2038
2039 /* This sets up INSN_ADDRESSES. */
2040 shorten_branches (get_insns ());
2041
2042 /* Keep track of length added by nops. */
2043 length = 0;
2044
2045 prev_insn = 0;
2046 insn = get_insns ();
2047 if (!active_insn_p (insn))
2048 insn = next_active_insn (insn);
2049 for (; insn; insn = next_insn)
2050 {
2051 next_insn = next_active_insn (insn);
2052 if (INSN_CODE (insn) == CODE_FOR_iprefetch
2053 || INSN_CODE (insn) == CODE_FOR_hbr)
85d9c13c 2054 {
9dcc2e87
TS
2055 if (hbr_insn)
2056 {
2057 int a0 = INSN_ADDRESSES (INSN_UID (hbr_insn));
2058 int a1 = INSN_ADDRESSES (INSN_UID (insn));
2059 if ((a1 - a0 == 8 && GET_MODE (insn) != TImode)
2060 || (a1 - a0 == 4))
2061 {
2062 prev_insn = emit_insn_before (gen_lnop (), insn);
2063 PUT_MODE (prev_insn, GET_MODE (insn));
2064 PUT_MODE (insn, TImode);
9d12bc68 2065 INSN_LOCATION (prev_insn) = INSN_LOCATION (insn);
9dcc2e87
TS
2066 length += 4;
2067 }
2068 }
2069 hbr_insn = insn;
2070 }
7c40228a 2071 if (INSN_CODE (insn) == CODE_FOR_blockage && next_insn)
9dcc2e87
TS
2072 {
2073 if (GET_MODE (insn) == TImode)
2074 PUT_MODE (next_insn, TImode);
2075 insn = next_insn;
2076 next_insn = next_active_insn (insn);
2077 }
2078 addr = INSN_ADDRESSES (INSN_UID (insn));
2079 if ((CALL_P (insn) || JUMP_P (insn)) && SCHED_ON_EVEN_P (insn))
2080 {
2081 if (((addr + length) & 7) != 0)
2082 {
2083 emit_nop_for_insn (prev_insn);
2084 length += 4;
2085 }
85d9c13c 2086 }
9dcc2e87
TS
2087 else if (GET_MODE (insn) == TImode
2088 && ((next_insn && GET_MODE (next_insn) != TImode)
2089 || get_attr_type (insn) == TYPE_MULTI0)
2090 && ((addr + length) & 7) != 0)
2091 {
2092 /* prev_insn will always be set because the first insn is
2093 always 8-byte aligned. */
2094 emit_nop_for_insn (prev_insn);
2095 length += 4;
2096 }
2097 prev_insn = insn;
85d9c13c 2098 }
85d9c13c
TS
2099}
2100
9dcc2e87
TS
2101\f
2102/* Routines for branch hints. */
2103
85d9c13c 2104static void
23c39aaa 2105spu_emit_branch_hint (rtx_insn *before, rtx_insn *branch, rtx target,
9dcc2e87 2106 int distance, sbitmap blocks)
85d9c13c 2107{
9dcc2e87 2108 rtx branch_label = 0;
23c39aaa
DM
2109 rtx_insn *hint;
2110 rtx_insn *insn;
8942ee0f 2111 rtx_jump_table_data *table;
85d9c13c
TS
2112
2113 if (before == 0 || branch == 0 || target == 0)
2114 return;
2115
9dcc2e87
TS
2116  /* While scheduling we require hints to be no further than 600 bytes
2117     from the branch, so we need to enforce that here too.  */
85d9c13c
TS
2118 if (distance > 600)
2119 return;
2120
9dcc2e87 2121  /* If BEFORE is a basic block note, emit the hint after the note.  */
051de0eb 2122 if (NOTE_INSN_BASIC_BLOCK_P (before))
9dcc2e87 2123 before = NEXT_INSN (before);
85d9c13c
TS
2124
2125 branch_label = gen_label_rtx ();
2126 LABEL_NUSES (branch_label)++;
2127 LABEL_PRESERVE_P (branch_label) = 1;
2128 insn = emit_label_before (branch_label, branch);
2129 branch_label = gen_rtx_LABEL_REF (VOIDmode, branch_label);
d7c028c0 2130 bitmap_set_bit (blocks, BLOCK_FOR_INSN (branch)->index);
9dcc2e87
TS
2131
2132 hint = emit_insn_before (gen_hbr (branch_label, target), before);
2133 recog_memoized (hint);
9d12bc68 2134 INSN_LOCATION (hint) = INSN_LOCATION (branch);
9dcc2e87 2135 HINTED_P (branch) = 1;
85d9c13c 2136
9dcc2e87
TS
2137 if (GET_CODE (target) == LABEL_REF)
2138 HINTED_P (XEXP (target, 0)) = 1;
2139 else if (tablejump_p (branch, 0, &table))
85d9c13c 2140 {
9dcc2e87
TS
2141 rtvec vec;
2142 int j;
2143 if (GET_CODE (PATTERN (table)) == ADDR_VEC)
2144 vec = XVEC (PATTERN (table), 0);
2145 else
2146 vec = XVEC (PATTERN (table), 1);
2147 for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j)
2148 HINTED_P (XEXP (RTVEC_ELT (vec, j), 0)) = 1;
85d9c13c 2149 }
9dcc2e87
TS
2150
2151 if (distance >= 588)
85d9c13c 2152 {
9dcc2e87
TS
2153 /* Make sure the hint isn't scheduled any earlier than this point,
2154	 which could make it too far for the branch offset to fit.  */
f626b979
UW
2155 insn = emit_insn_before (gen_blockage (), hint);
2156 recog_memoized (insn);
9d12bc68 2157 INSN_LOCATION (insn) = INSN_LOCATION (hint);
9dcc2e87
TS
2158 }
2159 else if (distance <= 8 * 4)
2160 {
2161 /* To guarantee at least 8 insns between the hint and branch we
2162 insert nops. */
2163 int d;
2164 for (d = distance; d < 8 * 4; d += 4)
2165 {
2166 insn =
2167 emit_insn_after (gen_nopn_nv (gen_rtx_REG (SImode, 127)), hint);
2168 recog_memoized (insn);
9d12bc68 2169 INSN_LOCATION (insn) = INSN_LOCATION (hint);
9dcc2e87
TS
2170 }
2171
2172 /* Make sure any nops inserted aren't scheduled before the hint. */
f626b979
UW
2173 insn = emit_insn_after (gen_blockage (), hint);
2174 recog_memoized (insn);
9d12bc68 2175 INSN_LOCATION (insn) = INSN_LOCATION (hint);
9dcc2e87
TS
2176
2177 /* Make sure any nops inserted aren't scheduled after the call. */
2178 if (CALL_P (branch) && distance < 8 * 4)
f626b979
UW
2179 {
2180 insn = emit_insn_before (gen_blockage (), branch);
2181 recog_memoized (insn);
9d12bc68 2182 INSN_LOCATION (insn) = INSN_LOCATION (branch);
f626b979 2183 }
85d9c13c 2184 }
85d9c13c
TS
2185}
2186
2187/* Returns 0 if we don't want a hint for this branch. Otherwise return
2188 the rtx for the branch target. */
2189static rtx
23c39aaa 2190get_branch_target (rtx_insn *branch)
85d9c13c 2191{
b64925dc 2192 if (JUMP_P (branch))
85d9c13c
TS
2193 {
2194 rtx set, src;
2195
2196 /* Return statements */
2197 if (GET_CODE (PATTERN (branch)) == RETURN)
2198 return gen_rtx_REG (SImode, LINK_REGISTER_REGNUM);
2199
aa633255 2200 /* ASM GOTOs. */
3f254607 2201 if (extract_asm_operands (PATTERN (branch)) != NULL)
aa633255
AP
2202 return NULL;
2203
85d9c13c
TS
2204 set = single_set (branch);
2205 src = SET_SRC (set);
2206 if (GET_CODE (SET_DEST (set)) != PC)
2207 abort ();
2208
2209 if (GET_CODE (src) == IF_THEN_ELSE)
2210 {
2211 rtx lab = 0;
2212 rtx note = find_reg_note (branch, REG_BR_PROB, 0);
2213 if (note)
2214 {
2215 /* If the more probable case is not a fall through, then
2216 try a branch hint. */
e5af9ddd 2217 int prob = XINT (note, 0);
85d9c13c
TS
2218 if (prob > (REG_BR_PROB_BASE * 6 / 10)
2219 && GET_CODE (XEXP (src, 1)) != PC)
2220 lab = XEXP (src, 1);
2221 else if (prob < (REG_BR_PROB_BASE * 4 / 10)
2222 && GET_CODE (XEXP (src, 2)) != PC)
2223 lab = XEXP (src, 2);
2224 }
2225 if (lab)
2226 {
2227 if (GET_CODE (lab) == RETURN)
2228 return gen_rtx_REG (SImode, LINK_REGISTER_REGNUM);
2229 return lab;
2230 }
2231 return 0;
2232 }
2233
2234 return src;
2235 }
b64925dc 2236 else if (CALL_P (branch))
85d9c13c
TS
2237 {
2238 rtx call;
2239 /* All of our call patterns are in a PARALLEL and the CALL is
2240 the first pattern in the PARALLEL. */
2241 if (GET_CODE (PATTERN (branch)) != PARALLEL)
2242 abort ();
2243 call = XVECEXP (PATTERN (branch), 0, 0);
2244 if (GET_CODE (call) == SET)
2245 call = SET_SRC (call);
2246 if (GET_CODE (call) != CALL)
2247 abort ();
2248 return XEXP (XEXP (call, 0), 0);
2249 }
2250 return 0;
2251}
2252
9dcc2e87
TS
2253/* The special $hbr register is used to prevent the insn scheduler from
2254 moving hbr insns across instructions which invalidate them. It
2255 should only be used in a clobber, and this function searches for
2256 insns which clobber it. */
2257static bool
23c39aaa 2258insn_clobbers_hbr (rtx_insn *insn)
9dcc2e87
TS
2259{
2260 if (INSN_P (insn)
2261 && GET_CODE (PATTERN (insn)) == PARALLEL)
2262 {
2263 rtx parallel = PATTERN (insn);
2264 rtx clobber;
2265 int j;
2266 for (j = XVECLEN (parallel, 0) - 1; j >= 0; j--)
2267 {
2268 clobber = XVECEXP (parallel, 0, j);
2269 if (GET_CODE (clobber) == CLOBBER
2270 && GET_CODE (XEXP (clobber, 0)) == REG
2271 && REGNO (XEXP (clobber, 0)) == HBR_REGNUM)
2272 return 1;
2273 }
2274 }
2275 return 0;
2276}
2277
2278/* Search up to 32 insns starting at FIRST:
2279 - at any kind of hinted branch, just return
2280 - at any unconditional branch in the first 15 insns, just return
2281 - at a call or indirect branch, after the first 15 insns, force it to
2282 an even address and return
2283 - at any unconditional branch, after the first 15 insns, force it to
2284 an even address.
2285   At the end of the search, insert an hbrp within 4 insns of FIRST,
2286 and an hbrp within 16 instructions of FIRST.
2287 */
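/* Illustrative numbers (not in the original source): if FIRST starts a
   run of plain 4-byte insns, before_4 ends up at the 4th insn
   (addr + length first reaches 4*4) and before_16 at the 14th insn
   (addr + length first reaches 14*4), so the two hbrps land within 4
   and 16 insns of FIRST once their own size is accounted for.  */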
85d9c13c 2288static void
23c39aaa 2289insert_hbrp_for_ilb_runout (rtx_insn *first)
85d9c13c 2290{
23c39aaa 2291 rtx_insn *insn, *before_4 = 0, *before_16 = 0;
9dcc2e87
TS
2292 int addr = 0, length, first_addr = -1;
2293 int hbrp_addr0 = 128 * 4, hbrp_addr1 = 128 * 4;
2294 int insert_lnop_after = 0;
2295 for (insn = first; insn; insn = NEXT_INSN (insn))
2296 if (INSN_P (insn))
2297 {
2298 if (first_addr == -1)
2299 first_addr = INSN_ADDRESSES (INSN_UID (insn));
2300 addr = INSN_ADDRESSES (INSN_UID (insn)) - first_addr;
2301 length = get_attr_length (insn);
2302
2303 if (before_4 == 0 && addr + length >= 4 * 4)
2304 before_4 = insn;
2305 /* We test for 14 instructions because the first hbrp will add
2306 up to 2 instructions. */
2307 if (before_16 == 0 && addr + length >= 14 * 4)
2308 before_16 = insn;
2309
2310 if (INSN_CODE (insn) == CODE_FOR_hbr)
2311 {
2312 /* Make sure an hbrp is at least 2 cycles away from a hint.
2313 Insert an lnop after the hbrp when necessary. */
2314 if (before_4 == 0 && addr > 0)
2315 {
2316 before_4 = insn;
2317 insert_lnop_after |= 1;
2318 }
2319 else if (before_4 && addr <= 4 * 4)
2320 insert_lnop_after |= 1;
2321 if (before_16 == 0 && addr > 10 * 4)
2322 {
2323 before_16 = insn;
2324 insert_lnop_after |= 2;
2325 }
2326 else if (before_16 && addr <= 14 * 4)
2327 insert_lnop_after |= 2;
2328 }
85d9c13c 2329
9dcc2e87
TS
2330 if (INSN_CODE (insn) == CODE_FOR_iprefetch)
2331 {
2332 if (addr < hbrp_addr0)
2333 hbrp_addr0 = addr;
2334 else if (addr < hbrp_addr1)
2335 hbrp_addr1 = addr;
2336 }
85d9c13c 2337
9dcc2e87
TS
2338 if (CALL_P (insn) || JUMP_P (insn))
2339 {
2340 if (HINTED_P (insn))
2341 return;
2342
2343 /* Any branch after the first 15 insns should be on an even
2344 address to avoid a special case branch. There might be
2345 some nops and/or hbrps inserted, so we test after 10
2346 insns. */
2347 if (addr > 10 * 4)
2348 SCHED_ON_EVEN_P (insn) = 1;
2349 }
85d9c13c 2350
9dcc2e87
TS
2351 if (CALL_P (insn) || tablejump_p (insn, 0, 0))
2352 return;
2353
2354
2355 if (addr + length >= 32 * 4)
85d9c13c 2356 {
9dcc2e87
TS
2357 gcc_assert (before_4 && before_16);
2358 if (hbrp_addr0 > 4 * 4)
85d9c13c 2359 {
9dcc2e87
TS
2360 insn =
2361 emit_insn_before (gen_iprefetch (GEN_INT (1)), before_4);
2362 recog_memoized (insn);
9d12bc68 2363 INSN_LOCATION (insn) = INSN_LOCATION (before_4);
9dcc2e87
TS
2364 INSN_ADDRESSES_NEW (insn,
2365 INSN_ADDRESSES (INSN_UID (before_4)));
2366 PUT_MODE (insn, GET_MODE (before_4));
2367 PUT_MODE (before_4, TImode);
2368 if (insert_lnop_after & 1)
85d9c13c 2369 {
9dcc2e87
TS
2370 insn = emit_insn_before (gen_lnop (), before_4);
2371 recog_memoized (insn);
9d12bc68 2372 INSN_LOCATION (insn) = INSN_LOCATION (before_4);
9dcc2e87
TS
2373 INSN_ADDRESSES_NEW (insn,
2374 INSN_ADDRESSES (INSN_UID (before_4)));
2375 PUT_MODE (insn, TImode);
85d9c13c 2376 }
85d9c13c 2377 }
9dcc2e87
TS
2378 if ((hbrp_addr0 <= 4 * 4 || hbrp_addr0 > 16 * 4)
2379 && hbrp_addr1 > 16 * 4)
85d9c13c 2380 {
9dcc2e87
TS
2381 insn =
2382 emit_insn_before (gen_iprefetch (GEN_INT (2)), before_16);
2383 recog_memoized (insn);
9d12bc68 2384 INSN_LOCATION (insn) = INSN_LOCATION (before_16);
9dcc2e87
TS
2385 INSN_ADDRESSES_NEW (insn,
2386 INSN_ADDRESSES (INSN_UID (before_16)));
2387 PUT_MODE (insn, GET_MODE (before_16));
2388 PUT_MODE (before_16, TImode);
2389 if (insert_lnop_after & 2)
85d9c13c 2390 {
9dcc2e87
TS
2391 insn = emit_insn_before (gen_lnop (), before_16);
2392 recog_memoized (insn);
9d12bc68 2393 INSN_LOCATION (insn) = INSN_LOCATION (before_16);
9dcc2e87
TS
2394 INSN_ADDRESSES_NEW (insn,
2395 INSN_ADDRESSES (INSN_UID
2396 (before_16)));
2397 PUT_MODE (insn, TImode);
85d9c13c
TS
2398 }
2399 }
9dcc2e87 2400 return;
85d9c13c 2401 }
85d9c13c 2402 }
9dcc2e87
TS
2403 else if (BARRIER_P (insn))
2404 return;
85d9c13c 2405
85d9c13c 2406}
9dcc2e87
TS
2407
2408/* The SPU might hang when it executes 48 inline instructions after a
2409 hinted branch jumps to its hinted target. The beginning of a
dd5a833e
MS
2410 function and the return from a call might have been hinted, and
2411 must be handled as well. To prevent a hang we insert 2 hbrps. The
2412 first should be within 6 insns of the branch target. The second
2413 should be within 22 insns of the branch target. When determining
2414 if hbrps are necessary, we look for only 32 inline instructions,
2415   because up to 12 nops and 4 hbrps could be inserted.  Similarly,
2416 when inserting new hbrps, we insert them within 4 and 16 insns of
2417 the target. */
85d9c13c 2418static void
9dcc2e87 2419insert_hbrp (void)
85d9c13c 2420{
23c39aaa 2421 rtx_insn *insn;
9dcc2e87 2422 if (TARGET_SAFE_HINTS)
85d9c13c 2423 {
9dcc2e87
TS
2424 shorten_branches (get_insns ());
2425 /* Insert hbrp at beginning of function */
2426 insn = next_active_insn (get_insns ());
2427 if (insn)
2428 insert_hbrp_for_ilb_runout (insn);
2429 /* Insert hbrp after hinted targets. */
2430 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2431 if ((LABEL_P (insn) && HINTED_P (insn)) || CALL_P (insn))
2432 insert_hbrp_for_ilb_runout (next_active_insn (insn));
85d9c13c 2433 }
85d9c13c
TS
2434}
2435
9dcc2e87
TS
2436static int in_spu_reorg;
2437
2ba42841
AO
2438static void
2439spu_var_tracking (void)
2440{
2441 if (flag_var_tracking)
2442 {
2443 df_analyze ();
2444 timevar_push (TV_VAR_TRACKING);
2445 variable_tracking_main ();
2446 timevar_pop (TV_VAR_TRACKING);
2447 df_finish_pass (false);
2448 }
2449}
2450
9dcc2e87
TS
2451/* Insert branch hints. There are no branch optimizations after this
2452 pass, so it's safe to set our branch hints now. */
85d9c13c 2453static void
9dcc2e87 2454spu_machine_dependent_reorg (void)
85d9c13c 2455{
9dcc2e87
TS
2456 sbitmap blocks;
2457 basic_block bb;
23c39aaa 2458 rtx_insn *branch, *insn;
9dcc2e87
TS
2459 rtx branch_target = 0;
2460 int branch_addr = 0, insn_addr, required_dist = 0;
2461 int i;
2462 unsigned int j;
85d9c13c 2463
9dcc2e87
TS
2464 if (!TARGET_BRANCH_HINTS || optimize == 0)
2465 {
2466 /* We still do it for unoptimized code because an external
2467 function might have hinted a call or return. */
b4d80e56 2468 compute_bb_for_insn ();
9dcc2e87
TS
2469 insert_hbrp ();
2470 pad_bb ();
2ba42841 2471 spu_var_tracking ();
b4d80e56 2472 free_bb_for_insn ();
9dcc2e87
TS
2473 return;
2474 }
85d9c13c 2475
8b1c6fd7 2476 blocks = sbitmap_alloc (last_basic_block_for_fn (cfun));
f61e445a 2477 bitmap_clear (blocks);
85d9c13c 2478
9dcc2e87
TS
2479 in_spu_reorg = 1;
2480 compute_bb_for_insn ();
2481
66b038ce
UW
2482 /* (Re-)discover loops so that bb->loop_father can be used
2483 in the analysis below. */
2484 loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
2485
9dcc2e87
TS
2486 compact_blocks ();
2487
2488 spu_bb_info =
0cae8d31 2489 (struct spu_bb_info *) xcalloc (n_basic_blocks_for_fn (cfun),
9dcc2e87
TS
2490 sizeof (struct spu_bb_info));
2491
2492 /* We need exact insn addresses and lengths. */
2493 shorten_branches (get_insns ());
2494
0cae8d31 2495 for (i = n_basic_blocks_for_fn (cfun) - 1; i >= 0; i--)
85d9c13c 2496 {
06e28de2 2497 bb = BASIC_BLOCK_FOR_FN (cfun, i);
9dcc2e87
TS
2498 branch = 0;
2499 if (spu_bb_info[i].prop_jump)
85d9c13c 2500 {
9dcc2e87
TS
2501 branch = spu_bb_info[i].prop_jump;
2502 branch_target = get_branch_target (branch);
2503 branch_addr = INSN_ADDRESSES (INSN_UID (branch));
2504 required_dist = spu_hint_dist;
2505 }
2506 /* Search from end of a block to beginning. In this loop, find
2507	 jumps which need a branch hint and emit the hint only when:
2508 - it's an indirect branch and we're at the insn which sets
2509 the register
2510 - we're at an insn that will invalidate the hint. e.g., a
2511 call, another hint insn, inline asm that clobbers $hbr, and
2512 some inlined operations (divmodsi4). Don't consider jumps
2513 because they are only at the end of a block and are
2514 considered when we are deciding whether to propagate
2515 - we're getting too far away from the branch. The hbr insns
2516 only have a signed 10 bit offset
2517 We go back as far as possible so the branch will be considered
2518 for propagation when we get to the beginning of the block. */
2519 for (insn = BB_END (bb); insn; insn = PREV_INSN (insn))
2520 {
2521 if (INSN_P (insn))
2522 {
2523 insn_addr = INSN_ADDRESSES (INSN_UID (insn));
2524 if (branch
2525 && ((GET_CODE (branch_target) == REG
2526 && set_of (branch_target, insn) != NULL_RTX)
2527 || insn_clobbers_hbr (insn)
2528 || branch_addr - insn_addr > 600))
2529 {
23c39aaa 2530 rtx_insn *next = NEXT_INSN (insn);
9dcc2e87
TS
2531 int next_addr = INSN_ADDRESSES (INSN_UID (next));
2532 if (insn != BB_END (bb)
2533 && branch_addr - next_addr >= required_dist)
2534 {
2535 if (dump_file)
2536 fprintf (dump_file,
2537 "hint for %i in block %i before %i\n",
2538 INSN_UID (branch), bb->index,
2539 INSN_UID (next));
2540 spu_emit_branch_hint (next, branch, branch_target,
2541 branch_addr - next_addr, blocks);
2542 }
2543 branch = 0;
2544 }
2545
2546 /* JUMP_P will only be true at the end of a block. When
2547 branch is already set it means we've previously decided
2548 to propagate a hint for that branch into this block. */
2549 if (CALL_P (insn) || (JUMP_P (insn) && !branch))
2550 {
2551 branch = 0;
2552 if ((branch_target = get_branch_target (insn)))
2553 {
2554 branch = insn;
2555 branch_addr = insn_addr;
2556 required_dist = spu_hint_dist;
2557 }
2558 }
2559 }
2560 if (insn == BB_HEAD (bb))
2561 break;
2562 }
2563
2564 if (branch)
2565 {
2566 /* If we haven't emitted a hint for this branch yet, it might
2567 be profitable to emit it in one of the predecessor blocks,
2568 especially for loops. */
23c39aaa 2569 rtx_insn *bbend;
9dcc2e87
TS
2570 basic_block prev = 0, prop = 0, prev2 = 0;
2571 int loop_exit = 0, simple_loop = 0;
2572 int next_addr = INSN_ADDRESSES (INSN_UID (NEXT_INSN (insn)));
2573
2574 for (j = 0; j < EDGE_COUNT (bb->preds); j++)
2575 if (EDGE_PRED (bb, j)->flags & EDGE_FALLTHRU)
2576 prev = EDGE_PRED (bb, j)->src;
2577 else
2578 prev2 = EDGE_PRED (bb, j)->src;
2579
2580 for (j = 0; j < EDGE_COUNT (bb->succs); j++)
2581 if (EDGE_SUCC (bb, j)->flags & EDGE_LOOP_EXIT)
2582 loop_exit = 1;
2583 else if (EDGE_SUCC (bb, j)->dest == bb)
2584 simple_loop = 1;
2585
2586 /* If this branch is a loop exit then propagate to previous
2587 fallthru block. This catches the cases when it is a simple
2588 loop or when there is an initial branch into the loop. */
2589 if (prev && (loop_exit || simple_loop)
66b038ce 2590 && bb_loop_depth (prev) <= bb_loop_depth (bb))
9dcc2e87
TS
2591 prop = prev;
2592
2593	  /* If there is only one adjacent predecessor, don't propagate
66b038ce 2594 outside this loop. */
9dcc2e87 2595 else if (prev && single_pred_p (bb)
66b038ce 2596 && prev->loop_father == bb->loop_father)
9dcc2e87
TS
2597 prop = prev;
2598
2599 /* If this is the JOIN block of a simple IF-THEN then
073a8998 2600 propagate the hint to the HEADER block. */
9dcc2e87
TS
2601 else if (prev && prev2
2602 && EDGE_COUNT (bb->preds) == 2
2603 && EDGE_COUNT (prev->preds) == 1
2604 && EDGE_PRED (prev, 0)->src == prev2
66b038ce 2605 && prev2->loop_father == bb->loop_father
9dcc2e87
TS
2606 && GET_CODE (branch_target) != REG)
2607 prop = prev;
2608
2609 /* Don't propagate when:
2610 - this is a simple loop and the hint would be too far
2611 - this is not a simple loop and there are 16 insns in
2612 this block already
2613 - the predecessor block ends in a branch that will be
2614 hinted
2615 - the predecessor block ends in an insn that invalidates
2616 the hint */
2617 if (prop
2618 && prop->index >= 0
2619 && (bbend = BB_END (prop))
2620 && branch_addr - INSN_ADDRESSES (INSN_UID (bbend)) <
2621 (simple_loop ? 600 : 16 * 4) && get_branch_target (bbend) == 0
2622 && (JUMP_P (bbend) || !insn_clobbers_hbr (bbend)))
2623 {
2624 if (dump_file)
2625 fprintf (dump_file, "propagate from %i to %i (loop depth %i) "
2626 "for %i (loop_exit %i simple_loop %i dist %i)\n",
66b038ce 2627 bb->index, prop->index, bb_loop_depth (bb),
9dcc2e87
TS
2628 INSN_UID (branch), loop_exit, simple_loop,
2629 branch_addr - INSN_ADDRESSES (INSN_UID (bbend)));
2630
2631 spu_bb_info[prop->index].prop_jump = branch;
2632 spu_bb_info[prop->index].bb_index = i;
2633 }
2634 else if (branch_addr - next_addr >= required_dist)
2635 {
2636 if (dump_file)
2637 fprintf (dump_file, "hint for %i in block %i before %i\n",
2638 INSN_UID (branch), bb->index,
2639 INSN_UID (NEXT_INSN (insn)));
2640 spu_emit_branch_hint (NEXT_INSN (insn), branch, branch_target,
2641 branch_addr - next_addr, blocks);
2642 }
2643 branch = 0;
85d9c13c 2644 }
85d9c13c 2645 }
9dcc2e87 2646 free (spu_bb_info);
85d9c13c 2647
f61e445a 2648 if (!bitmap_empty_p (blocks))
9dcc2e87
TS
2649 find_many_sub_basic_blocks (blocks);
2650
2651 /* We have to schedule to make sure alignment is ok. */
11cd3bed 2652 FOR_EACH_BB_FN (bb, cfun) bb->flags &= ~BB_DISABLE_SCHEDULE;
9dcc2e87
TS
2653
2654 /* The hints need to be scheduled, so call it again. */
2655 schedule_insns ();
f626b979 2656 df_finish_pass (true);
9dcc2e87
TS
2657
2658 insert_hbrp ();
2659
2660 pad_bb ();
2661
6e37f6d4
TS
2662 for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
2663 if (NONJUMP_INSN_P (insn) && INSN_CODE (insn) == CODE_FOR_hbr)
2664 {
2665 /* Adjust the LABEL_REF in a hint when we have inserted a nop
2666	 between its branch label and the branch.  We don't move the
2667 label because GCC expects it at the beginning of the block. */
2668 rtx unspec = SET_SRC (XVECEXP (PATTERN (insn), 0, 0));
2669 rtx label_ref = XVECEXP (unspec, 0, 0);
dc01c3d1
DM
2670 rtx_insn *label = as_a <rtx_insn *> (XEXP (label_ref, 0));
2671 rtx_insn *branch;
6e37f6d4
TS
2672 int offset = 0;
2673 for (branch = NEXT_INSN (label);
2674 !JUMP_P (branch) && !CALL_P (branch);
2675 branch = NEXT_INSN (branch))
2676 if (NONJUMP_INSN_P (branch))
2677 offset += get_attr_length (branch);
2678 if (offset > 0)
0a81f074 2679 XVECEXP (unspec, 0, 0) = plus_constant (Pmode, label_ref, offset);
6e37f6d4 2680 }
9dcc2e87 2681
2ba42841 2682 spu_var_tracking ();
9dcc2e87 2683
66b038ce
UW
2684 loop_optimizer_finalize ();
2685
9dcc2e87
TS
2686 free_bb_for_insn ();
2687
2688 in_spu_reorg = 0;
85d9c13c
TS
2689}
2690\f
2691
2692/* Insn scheduling routines, primarily for dual issue. */
2693static int
2694spu_sched_issue_rate (void)
2695{
2696 return 2;
2697}
2698
2699static int
23c39aaa 2700uses_ls_unit(rtx_insn *insn)
85d9c13c 2701{
9dcc2e87
TS
2702 rtx set = single_set (insn);
2703 if (set != 0
2704 && (GET_CODE (SET_DEST (set)) == MEM
2705 || GET_CODE (SET_SRC (set)) == MEM))
2706 return 1;
2707 return 0;
85d9c13c
TS
2708}
2709
2710static int
23c39aaa 2711get_pipe (rtx_insn *insn)
85d9c13c
TS
2712{
2713 enum attr_type t;
2714 /* Handle inline asm */
2715 if (INSN_CODE (insn) == -1)
2716 return -1;
2717 t = get_attr_type (insn);
2718 switch (t)
2719 {
2720 case TYPE_CONVERT:
2721 return -2;
2722 case TYPE_MULTI0:
2723 return -1;
2724
2725 case TYPE_FX2:
2726 case TYPE_FX3:
2727 case TYPE_SPR:
2728 case TYPE_NOP:
2729 case TYPE_FXB:
2730 case TYPE_FPD:
2731 case TYPE_FP6:
2732 case TYPE_FP7:
85d9c13c
TS
2733 return 0;
2734
2735 case TYPE_LNOP:
2736 case TYPE_SHUF:
2737 case TYPE_LOAD:
2738 case TYPE_STORE:
2739 case TYPE_BR:
2740 case TYPE_MULTI1:
2741 case TYPE_HBR:
9dcc2e87 2742 case TYPE_IPREFETCH:
85d9c13c
TS
2743 return 1;
2744 default:
2745 abort ();
2746 }
2747}
2748
9dcc2e87
TS
2749
2750/* haifa-sched.c has a static variable that keeps track of the current
2751 cycle. It is passed to spu_sched_reorder, and we record it here for
2752 use by spu_sched_variable_issue. It won't be accurate if the
2753   scheduler updates its clock_var between the two calls. */
2754static int clock_var;
2755
2756/* This is used to keep track of insn alignment. Set to 0 at the
2757 beginning of each block and increased by the "length" attr of each
2758 insn scheduled. */
2759static int spu_sched_length;
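/* Illustrative note (not in the original source): spu_sched_reorder
   below treats (spu_sched_length & 7) == 0 as "the next insn issues at
   an 8-byte, dual-issue boundary" and (spu_sched_length & 7) == 4 as
   "the next insn issues at an odd word address".  */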
2760
2761/* Record when we've issued pipe0 and pipe1 insns so we can reorder the
2762 ready list appropriately in spu_sched_reorder(). */
2763static int pipe0_clock;
2764static int pipe1_clock;
2765
2766static int prev_clock_var;
2767
2768static int prev_priority;
2769
2770/* The SPU needs to load the next ilb sometime during the execution of
2771 the previous ilb. There is a potential conflict if every cycle has a
2772 load or store. To avoid the conflict we make sure the load/store
2773 unit is free for at least one cycle during the execution of insns in
2774 the previous ilb. */
2775static int spu_ls_first;
2776static int prev_ls_clock;
2777
2778static void
2779spu_sched_init_global (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
2780 int max_ready ATTRIBUTE_UNUSED)
2781{
2782 spu_sched_length = 0;
2783}
2784
2785static void
2786spu_sched_init (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
2787 int max_ready ATTRIBUTE_UNUSED)
2788{
2789 if (align_labels > 4 || align_loops > 4 || align_jumps > 4)
2790 {
2791 /* When any block might be at least 8-byte aligned, assume they
2792 will all be at least 8-byte aligned to make sure dual issue
2793 works out correctly. */
2794 spu_sched_length = 0;
2795 }
2796 spu_ls_first = INT_MAX;
2797 clock_var = -1;
2798 prev_ls_clock = -1;
2799 pipe0_clock = -1;
2800 pipe1_clock = -1;
2801 prev_clock_var = -1;
2802 prev_priority = -1;
2803}
2804
85d9c13c 2805static int
9dcc2e87 2806spu_sched_variable_issue (FILE *file ATTRIBUTE_UNUSED,
23c39aaa 2807 int verbose ATTRIBUTE_UNUSED,
ac44248e 2808 rtx_insn *insn, int more)
85d9c13c 2809{
9dcc2e87
TS
2810 int len;
2811 int p;
85d9c13c
TS
2812 if (GET_CODE (PATTERN (insn)) == USE
2813 || GET_CODE (PATTERN (insn)) == CLOBBER
9dcc2e87
TS
2814 || (len = get_attr_length (insn)) == 0)
2815 return more;
2816
2817 spu_sched_length += len;
2818
2819 /* Reset on inline asm */
2820 if (INSN_CODE (insn) == -1)
2821 {
2822 spu_ls_first = INT_MAX;
2823 pipe0_clock = -1;
2824 pipe1_clock = -1;
2825 return 0;
2826 }
2827 p = get_pipe (insn);
2828 if (p == 0)
2829 pipe0_clock = clock_var;
2830 else
2831 pipe1_clock = clock_var;
2832
2833 if (in_spu_reorg)
2834 {
2835 if (clock_var - prev_ls_clock > 1
2836 || INSN_CODE (insn) == CODE_FOR_iprefetch)
2837 spu_ls_first = INT_MAX;
2838 if (uses_ls_unit (insn))
2839 {
2840 if (spu_ls_first == INT_MAX)
2841 spu_ls_first = spu_sched_length;
2842 prev_ls_clock = clock_var;
2843 }
2844
2845 /* The scheduler hasn't inserted the nop, but we will later on.
2846 Include those nops in spu_sched_length. */
2847 if (prev_clock_var == clock_var && (spu_sched_length & 7))
2848 spu_sched_length += 4;
2849 prev_clock_var = clock_var;
2850
2851 /* more is -1 when called from spu_sched_reorder for new insns
2852 that don't have INSN_PRIORITY */
2853 if (more >= 0)
2854 prev_priority = INSN_PRIORITY (insn);
2855 }
2856
073a8998 2857 /* Always try issuing more insns. spu_sched_reorder will decide
9dcc2e87
TS
2858 when the cycle should be advanced. */
2859 return 1;
2860}
2861
2862/* This function is called for both TARGET_SCHED_REORDER and
2863 TARGET_SCHED_REORDER2. */
2864static int
2865spu_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
ce1ce33a 2866 rtx_insn **ready, int *nreadyp, int clock)
9dcc2e87
TS
2867{
2868 int i, nready = *nreadyp;
2869 int pipe_0, pipe_1, pipe_hbrp, pipe_ls, schedule_i;
ce1ce33a 2870 rtx_insn *insn;
9dcc2e87
TS
2871
2872 clock_var = clock;
2873
2874 if (nready <= 0 || pipe1_clock >= clock)
2875 return 0;
2876
2877 /* Find any rtl insns that don't generate assembly insns and schedule
2878 them first. */
2879 for (i = nready - 1; i >= 0; i--)
2880 {
2881 insn = ready[i];
2882 if (INSN_CODE (insn) == -1
2883 || INSN_CODE (insn) == CODE_FOR_blockage
eec9405e 2884 || (INSN_P (insn) && get_attr_length (insn) == 0))
9dcc2e87
TS
2885 {
2886 ready[i] = ready[nready - 1];
2887 ready[nready - 1] = insn;
2888 return 1;
2889 }
2890 }
2891
2892 pipe_0 = pipe_1 = pipe_hbrp = pipe_ls = schedule_i = -1;
2893 for (i = 0; i < nready; i++)
2894 if (INSN_CODE (ready[i]) != -1)
2895 {
2896 insn = ready[i];
2897 switch (get_attr_type (insn))
2898 {
2899 default:
2900 case TYPE_MULTI0:
2901 case TYPE_CONVERT:
2902 case TYPE_FX2:
2903 case TYPE_FX3:
2904 case TYPE_SPR:
2905 case TYPE_NOP:
2906 case TYPE_FXB:
2907 case TYPE_FPD:
2908 case TYPE_FP6:
2909 case TYPE_FP7:
2910 pipe_0 = i;
2911 break;
2912 case TYPE_LOAD:
2913 case TYPE_STORE:
2914 pipe_ls = i;
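	  /* Fall through: loads and stores also issue on pipe 1
	     (see get_pipe), so they can serve as the pipe_1 candidate
	     too.  (Comment is illustrative; the fallthrough is in the
	     original code.)  */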
2915 case TYPE_LNOP:
2916 case TYPE_SHUF:
2917 case TYPE_BR:
2918 case TYPE_MULTI1:
2919 case TYPE_HBR:
2920 pipe_1 = i;
2921 break;
2922 case TYPE_IPREFETCH:
2923 pipe_hbrp = i;
2924 break;
2925 }
2926 }
2927
2928 /* In the first scheduling phase, schedule loads and stores together
2929 to increase the chance they will get merged during postreload CSE. */
2930 if (!reload_completed && pipe_ls >= 0)
2931 {
2932 insn = ready[pipe_ls];
2933 ready[pipe_ls] = ready[nready - 1];
2934 ready[nready - 1] = insn;
2935 return 1;
2936 }
2937
2938 /* If there is an hbrp ready, prefer it over other pipe 1 insns. */
2939 if (pipe_hbrp >= 0)
2940 pipe_1 = pipe_hbrp;
2941
2942 /* When we have loads/stores in every cycle of the last 15 insns and
2943 we are about to schedule another load/store, emit an hbrp insn
2944 instead. */
2945 if (in_spu_reorg
2946 && spu_sched_length - spu_ls_first >= 4 * 15
2947 && !(pipe0_clock < clock && pipe_0 >= 0) && pipe_1 == pipe_ls)
2948 {
2949 insn = sched_emit_insn (gen_iprefetch (GEN_INT (3)));
2950 recog_memoized (insn);
2951 if (pipe0_clock < clock)
2952 PUT_MODE (insn, TImode);
2953 spu_sched_variable_issue (file, verbose, insn, -1);
2954 return 0;
2955 }
2956
2957 /* In general, we want to emit nops to increase dual issue, but dual
2958 issue isn't faster when one of the insns could be scheduled later
2959   without affecting the critical path.  We look at INSN_PRIORITY to
2960 make a good guess, but it isn't perfect so -mdual-nops=n can be
2961   used to adjust it. */
2962 if (in_spu_reorg && spu_dual_nops < 10)
2963 {
073a8998 2964 /* When we are at an even address and we are not issuing nops to
9dcc2e87
TS
2965 improve scheduling then we need to advance the cycle. */
2966 if ((spu_sched_length & 7) == 0 && prev_clock_var == clock
2967 && (spu_dual_nops == 0
2968 || (pipe_1 != -1
2969 && prev_priority >
2970 INSN_PRIORITY (ready[pipe_1]) + spu_dual_nops)))
2971 return 0;
2972
2973 /* When at an odd address, schedule the highest priority insn
2974 without considering pipeline. */
2975 if ((spu_sched_length & 7) == 4 && prev_clock_var != clock
2976 && (spu_dual_nops == 0
2977 || (prev_priority >
2978 INSN_PRIORITY (ready[nready - 1]) + spu_dual_nops)))
2979 return 1;
2980 }
2981
2982
2983 /* We haven't issued a pipe0 insn yet this cycle, if there is a
2984 pipe0 insn in the ready list, schedule it. */
2985 if (pipe0_clock < clock && pipe_0 >= 0)
2986 schedule_i = pipe_0;
2987
2988 /* Either we've scheduled a pipe0 insn already or there is no pipe0
2989 insn to schedule. Put a pipe1 insn at the front of the ready list. */
2990 else
2991 schedule_i = pipe_1;
2992
2993 if (schedule_i > -1)
2994 {
2995 insn = ready[schedule_i];
2996 ready[schedule_i] = ready[nready - 1];
2997 ready[nready - 1] = insn;
2998 return 1;
2999 }
3000 return 0;
85d9c13c
TS
3001}
3002
3003/* INSN is dependent on DEP_INSN. */
3004static int
ac44248e 3005spu_sched_adjust_cost (rtx_insn *insn, rtx link, rtx_insn *dep_insn, int cost)
85d9c13c 3006{
9dcc2e87
TS
3007 rtx set;
3008
3009 /* The blockage pattern is used to prevent instructions from being
3010 moved across it and has no cost. */
3011 if (INSN_CODE (insn) == CODE_FOR_blockage
3012 || INSN_CODE (dep_insn) == CODE_FOR_blockage)
3013 return 0;
3014
eec9405e
TS
3015 if ((INSN_P (insn) && get_attr_length (insn) == 0)
3016 || (INSN_P (dep_insn) && get_attr_length (dep_insn) == 0))
9dcc2e87
TS
3017 return 0;
3018
3019 /* Make sure hbrps are spread out. */
3020 if (INSN_CODE (insn) == CODE_FOR_iprefetch
3021 && INSN_CODE (dep_insn) == CODE_FOR_iprefetch)
3022 return 8;
3023
3024 /* Make sure hints and hbrps are 2 cycles apart. */
3025 if ((INSN_CODE (insn) == CODE_FOR_iprefetch
3026 || INSN_CODE (insn) == CODE_FOR_hbr)
3027 && (INSN_CODE (dep_insn) == CODE_FOR_iprefetch
3028 || INSN_CODE (dep_insn) == CODE_FOR_hbr))
3029 return 2;
3030
3031 /* An hbrp has no real dependency on other insns. */
3032 if (INSN_CODE (insn) == CODE_FOR_iprefetch
3033 || INSN_CODE (dep_insn) == CODE_FOR_iprefetch)
3034 return 0;
3035
3036 /* Assuming that it is unlikely an argument register will be used in
3037 the first cycle of the called function, we reduce the cost for
3038 slightly better scheduling of dep_insn. When not hinted, the
3039 mispredicted branch would hide the cost as well. */
3040 if (CALL_P (insn))
3041 {
3042 rtx target = get_branch_target (insn);
3043 if (GET_CODE (target) != REG || !set_of (target, insn))
3044 return cost - 2;
3045 return cost;
3046 }
3047
3048 /* And when returning from a function, let's assume the return values
3049 are completed sooner too. */
3050 if (CALL_P (dep_insn))
85d9c13c 3051 return cost - 2;
9dcc2e87
TS
3052
3053  /* Make sure an instruction that loads from the back chain is scheduled
3054 away from the return instruction so a hint is more likely to get
3055 issued. */
3056 if (INSN_CODE (insn) == CODE_FOR__return
3057 && (set = single_set (dep_insn))
3058 && GET_CODE (SET_DEST (set)) == REG
3059 && REGNO (SET_DEST (set)) == LINK_REGISTER_REGNUM)
3060 return 20;
3061
85d9c13c
TS
3062 /* The dfa scheduler sets cost to 0 for all anti-dependencies and the
3063 scheduler makes every insn in a block anti-dependent on the final
3064 jump_insn. We adjust here so higher cost insns will get scheduled
3065 earlier. */
9dcc2e87 3066 if (JUMP_P (insn) && REG_NOTE_KIND (link) == REG_DEP_ANTI)
b198261f 3067 return insn_cost (dep_insn) - 3;
9dcc2e87 3068
85d9c13c
TS
3069 return cost;
3070}
3071\f
3072/* Create a CONST_DOUBLE from a string. */
984514ac 3073rtx
ef4bddc2 3074spu_float_const (const char *string, machine_mode mode)
85d9c13c
TS
3075{
3076 REAL_VALUE_TYPE value;
3077 value = REAL_VALUE_ATOF (string, mode);
3078 return CONST_DOUBLE_FROM_REAL_VALUE (value, mode);
3079}
3080
85d9c13c
TS
3081int
3082spu_constant_address_p (rtx x)
3083{
3084 return (GET_CODE (x) == LABEL_REF || GET_CODE (x) == SYMBOL_REF
3085 || GET_CODE (x) == CONST_INT || GET_CODE (x) == CONST
3086 || GET_CODE (x) == HIGH);
3087}
3088
3089static enum spu_immediate
3090which_immediate_load (HOST_WIDE_INT val)
3091{
3092 gcc_assert (val == trunc_int_for_mode (val, SImode));
3093
3094 if (val >= -0x8000 && val <= 0x7fff)
3095 return SPU_IL;
3096 if (val >= 0 && val <= 0x3ffff)
3097 return SPU_ILA;
3098 if ((val & 0xffff) == ((val >> 16) & 0xffff))
3099 return SPU_ILH;
3100 if ((val & 0xffff) == 0)
3101 return SPU_ILHU;
3102
3103 return SPU_NONE;
3104}
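/* Worked examples (illustrative, not in the original source):
     0x00001234  -> SPU_IL   (fits the signed 16-bit il range)
     0x00020000  -> SPU_ILA  (fits the unsigned 18-bit ila range)
     0x12341234  -> SPU_ILH  (both halfwords identical)
     0x56780000  -> SPU_ILHU (low halfword is zero)
     0x12345678  -> SPU_NONE (needs ilhu followed by iohl)  */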
3105
a1c6e4b8
TS
3106/* Return true when OP can be loaded by one of the il instructions, or
3107 when flow2 is not completed and OP can be loaded using ilhu and iohl. */
85d9c13c 3108int
ef4bddc2 3109immediate_load_p (rtx op, machine_mode mode)
a1c6e4b8
TS
3110{
3111 if (CONSTANT_P (op))
3112 {
3113 enum immediate_class c = classify_immediate (op, mode);
73701e27 3114 return c == IC_IL1 || c == IC_IL1s
6fb5fa3c 3115 || (!epilogue_completed && (c == IC_IL2 || c == IC_IL2s));
a1c6e4b8
TS
3116 }
3117 return 0;
3118}
3119
3120/* Return true if the first SIZE bytes of ARR form a constant that can be
3121 generated with cbd, chd, cwd or cdd. When non-NULL, PRUN and PSTART
3122 represent the size and offset of the instruction to use. */
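/* Illustrative example (not in the original source): the byte pattern
   (hex) 10 11 12 13 00 01 02 03 18 19 1a 1b 1c 1d 1e 1f is the
   identity shuffle except for the 4-byte run 00..03 starting at byte 4,
   so cpat_info returns 1 with *prun = 4 and *pstart = 4, i.e. it can be
   generated with a cwd-style instruction.  */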
3123static int
3124cpat_info(unsigned char *arr, int size, int *prun, int *pstart)
3125{
3126 int cpat, run, i, start;
3127 cpat = 1;
3128 run = 0;
3129 start = -1;
3130 for (i = 0; i < size && cpat; i++)
3131 if (arr[i] != i+16)
3132 {
3133 if (!run)
3134 {
3135 start = i;
3136 if (arr[i] == 3)
3137 run = 1;
3138 else if (arr[i] == 2 && arr[i+1] == 3)
3139 run = 2;
3140 else if (arr[i] == 0)
3141 {
3142 while (arr[i+run] == run && i+run < 16)
3143 run++;
3144 if (run != 4 && run != 8)
3145 cpat = 0;
3146 }
3147 else
3148 cpat = 0;
3149 if ((i & (run-1)) != 0)
3150 cpat = 0;
3151 i += run;
3152 }
3153 else
3154 cpat = 0;
3155 }
1f49ae6e 3156 if (cpat && (run || size < 16))
a1c6e4b8
TS
3157 {
3158 if (run == 0)
3159 run = 1;
3160 if (prun)
3161 *prun = run;
3162 if (pstart)
3163 *pstart = start == -1 ? 16-run : start;
3164 return 1;
3165 }
3166 return 0;
3167}
3168
3169/* OP is a CONSTANT_P. Determine what instructions can be used to load
24fc18b9 3170 it into a register. MODE is only valid when OP is a CONST_INT. */
a1c6e4b8 3171static enum immediate_class
ef4bddc2 3172classify_immediate (rtx op, machine_mode mode)
85d9c13c
TS
3173{
3174 HOST_WIDE_INT val;
3175 unsigned char arr[16];
73701e27 3176 int i, j, repeated, fsmbi, repeat;
a1c6e4b8
TS
3177
3178 gcc_assert (CONSTANT_P (op));
3179
85d9c13c
TS
3180 if (GET_MODE (op) != VOIDmode)
3181 mode = GET_MODE (op);
3182
a1c6e4b8 3183 /* A V4SI const_vector with all identical symbols is ok. */
73701e27
TS
3184 if (!flag_pic
3185 && mode == V4SImode
a1c6e4b8
TS
3186 && GET_CODE (op) == CONST_VECTOR
3187 && GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_INT
3188 && GET_CODE (CONST_VECTOR_ELT (op, 0)) != CONST_DOUBLE
3189 && CONST_VECTOR_ELT (op, 0) == CONST_VECTOR_ELT (op, 1)
3190 && CONST_VECTOR_ELT (op, 1) == CONST_VECTOR_ELT (op, 2)
3191 && CONST_VECTOR_ELT (op, 2) == CONST_VECTOR_ELT (op, 3))
3192 op = CONST_VECTOR_ELT (op, 0);
85d9c13c 3193
a1c6e4b8
TS
3194 switch (GET_CODE (op))
3195 {
3196 case SYMBOL_REF:
3197 case LABEL_REF:
3198 return TARGET_LARGE_MEM ? IC_IL2s : IC_IL1s;
85d9c13c 3199
a1c6e4b8 3200 case CONST:
3f61b42f
UW
3201 /* We can never know if the resulting address fits in 18 bits and can be
3202 loaded with ila. For now, assume the address will not overflow if
3203 the displacement is "small" (fits 'K' constraint). */
3204 if (!TARGET_LARGE_MEM && GET_CODE (XEXP (op, 0)) == PLUS)
3205 {
3206 rtx sym = XEXP (XEXP (op, 0), 0);
3207 rtx cst = XEXP (XEXP (op, 0), 1);
3208
3209 if (GET_CODE (sym) == SYMBOL_REF
3210 && GET_CODE (cst) == CONST_INT
3211 && satisfies_constraint_K (cst))
3212 return IC_IL1s;
3213 }
3214 return IC_IL2s;
85d9c13c 3215
a1c6e4b8
TS
3216 case HIGH:
3217 return IC_IL1s;
3218
3219 case CONST_VECTOR:
3220 for (i = 0; i < GET_MODE_NUNITS (mode); i++)
3221 if (GET_CODE (CONST_VECTOR_ELT (op, i)) != CONST_INT
3222 && GET_CODE (CONST_VECTOR_ELT (op, i)) != CONST_DOUBLE)
3223 return IC_POOL;
3224 /* Fall through. */
3225
3226 case CONST_INT:
3227 case CONST_DOUBLE:
3228 constant_to_array (mode, op, arr);
85d9c13c 3229
a1c6e4b8
TS
3230 /* Check that each 4-byte slot is identical. */
3231 repeated = 1;
3232 for (i = 4; i < 16; i += 4)
3233 for (j = 0; j < 4; j++)
3234 if (arr[j] != arr[i + j])
3235 repeated = 0;
3236
3237 if (repeated)
3238 {
3239 val = (arr[0] << 24) | (arr[1] << 16) | (arr[2] << 8) | arr[3];
3240 val = trunc_int_for_mode (val, SImode);
3241
3242 if (which_immediate_load (val) != SPU_NONE)
3243 return IC_IL1;
3244 }
3245
3246 /* Any mode of 2 bytes or smaller can be loaded with an il
3247 instruction. */
3248 gcc_assert (GET_MODE_SIZE (mode) > 2);
3249
3250 fsmbi = 1;
73701e27 3251 repeat = 0;
a1c6e4b8 3252 for (i = 0; i < 16 && fsmbi; i++)
73701e27
TS
3253 if (arr[i] != 0 && repeat == 0)
3254 repeat = arr[i];
3255 else if (arr[i] != 0 && arr[i] != repeat)
a1c6e4b8
TS
3256 fsmbi = 0;
3257 if (fsmbi)
73701e27 3258 return repeat == 0xff ? IC_FSMBI : IC_FSMBI2;
a1c6e4b8
TS
3259
3260 if (cpat_info (arr, GET_MODE_SIZE (mode), 0, 0))
3261 return IC_CPAT;
3262
3263 if (repeated)
3264 return IC_IL2;
3265
3266 return IC_POOL;
3267 default:
3268 break;
3269 }
3270 gcc_unreachable ();
85d9c13c
TS
3271}
3272
3273static enum spu_immediate
3274which_logical_immediate (HOST_WIDE_INT val)
3275{
3276 gcc_assert (val == trunc_int_for_mode (val, SImode));
3277
3278 if (val >= -0x200 && val <= 0x1ff)
3279 return SPU_ORI;
3280 if (val >= 0 && val <= 0xffff)
3281 return SPU_IOHL;
3282 if ((val & 0xffff) == ((val >> 16) & 0xffff))
3283 {
3284 val = trunc_int_for_mode (val, HImode);
3285 if (val >= -0x200 && val <= 0x1ff)
3286 return SPU_ORHI;
3287 if ((val & 0xff) == ((val >> 8) & 0xff))
3288 {
3289 val = trunc_int_for_mode (val, QImode);
3290 if (val >= -0x200 && val <= 0x1ff)
3291 return SPU_ORBI;
3292 }
3293 }
3294 return SPU_NONE;
3295}
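/* Worked examples (illustrative, not in the original source), given as
   32-bit bit patterns:
     0x00000030  -> SPU_ORI  (fits the signed 10-bit ori field)
     0x00001234  -> SPU_IOHL (fits the unsigned 16-bit iohl field)
     0xfff0fff0  -> SPU_ORHI (repeated halfword; -16 fits orhi)
     0x01010101  -> SPU_ORBI (repeated byte; 1 fits orbi)  */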
3296
73701e27
TS
3297/* Return TRUE when X, a CONST_VECTOR, only contains CONST_INTs or
3298 CONST_DOUBLEs. */
3299static int
3300const_vector_immediate_p (rtx x)
3301{
3302 int i;
3303 gcc_assert (GET_CODE (x) == CONST_VECTOR);
3304 for (i = 0; i < GET_MODE_NUNITS (GET_MODE (x)); i++)
3305 if (GET_CODE (CONST_VECTOR_ELT (x, i)) != CONST_INT
3306 && GET_CODE (CONST_VECTOR_ELT (x, i)) != CONST_DOUBLE)
3307 return 0;
3308 return 1;
3309}
3310
85d9c13c 3311int
ef4bddc2 3312logical_immediate_p (rtx op, machine_mode mode)
85d9c13c
TS
3313{
3314 HOST_WIDE_INT val;
3315 unsigned char arr[16];
3316 int i, j;
3317
3318 gcc_assert (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
3319 || GET_CODE (op) == CONST_VECTOR);
3320
73701e27
TS
3321 if (GET_CODE (op) == CONST_VECTOR
3322 && !const_vector_immediate_p (op))
3323 return 0;
3324
85d9c13c
TS
3325 if (GET_MODE (op) != VOIDmode)
3326 mode = GET_MODE (op);
3327
3328 constant_to_array (mode, op, arr);
3329
3330 /* Check that bytes are repeated. */
3331 for (i = 4; i < 16; i += 4)
3332 for (j = 0; j < 4; j++)
3333 if (arr[j] != arr[i + j])
3334 return 0;
3335
3336 val = (arr[0] << 24) | (arr[1] << 16) | (arr[2] << 8) | arr[3];
3337 val = trunc_int_for_mode (val, SImode);
3338
3339 i = which_logical_immediate (val);
3340 return i != SPU_NONE && i != SPU_IOHL;
3341}
3342
3343int
ef4bddc2 3344iohl_immediate_p (rtx op, machine_mode mode)
85d9c13c
TS
3345{
3346 HOST_WIDE_INT val;
3347 unsigned char arr[16];
3348 int i, j;
3349
3350 gcc_assert (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
3351 || GET_CODE (op) == CONST_VECTOR);
3352
73701e27
TS
3353 if (GET_CODE (op) == CONST_VECTOR
3354 && !const_vector_immediate_p (op))
3355 return 0;
3356
85d9c13c
TS
3357 if (GET_MODE (op) != VOIDmode)
3358 mode = GET_MODE (op);
3359
3360 constant_to_array (mode, op, arr);
3361
3362 /* Check that bytes are repeated. */
3363 for (i = 4; i < 16; i += 4)
3364 for (j = 0; j < 4; j++)
3365 if (arr[j] != arr[i + j])
3366 return 0;
3367
3368 val = (arr[0] << 24) | (arr[1] << 16) | (arr[2] << 8) | arr[3];
3369 val = trunc_int_for_mode (val, SImode);
3370
3371 return val >= 0 && val <= 0xffff;
3372}
3373
3374int
ef4bddc2 3375arith_immediate_p (rtx op, machine_mode mode,
85d9c13c
TS
3376 HOST_WIDE_INT low, HOST_WIDE_INT high)
3377{
3378 HOST_WIDE_INT val;
3379 unsigned char arr[16];
3380 int bytes, i, j;
3381
3382 gcc_assert (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
3383 || GET_CODE (op) == CONST_VECTOR);
3384
73701e27
TS
3385 if (GET_CODE (op) == CONST_VECTOR
3386 && !const_vector_immediate_p (op))
3387 return 0;
3388
85d9c13c
TS
3389 if (GET_MODE (op) != VOIDmode)
3390 mode = GET_MODE (op);
3391
3392 constant_to_array (mode, op, arr);
3393
3394 if (VECTOR_MODE_P (mode))
3395 mode = GET_MODE_INNER (mode);
3396
3397 bytes = GET_MODE_SIZE (mode);
3398 mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
3399
3400 /* Check that bytes are repeated. */
3401 for (i = bytes; i < 16; i += bytes)
3402 for (j = 0; j < bytes; j++)
3403 if (arr[j] != arr[i + j])
3404 return 0;
3405
3406 val = arr[0];
3407 for (j = 1; j < bytes; j++)
3408 val = (val << 8) | arr[j];
3409
3410 val = trunc_int_for_mode (val, mode);
3411
3412 return val >= low && val <= high;
3413}
3414
5345cf68
TS
3415/* TRUE when op is an immediate and an exact power of 2, and given that
3416 OP is 2^scale, scale >= LOW && scale <= HIGH. When OP is a vector,
3417 all entries must be the same. */
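/* Illustrative SFmode example (not in the original source): 2.0f has
   the bit pattern 0x40000000, so val > 0, the mantissa bits are zero,
   and exp = (0x40000000 >> 23) - 127 = 1; the test then accepts it
   whenever LOW <= 1 <= HIGH.  */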
3418bool
ef4bddc2 3419exp2_immediate_p (rtx op, machine_mode mode, int low, int high)
5345cf68 3420{
ef4bddc2 3421 machine_mode int_mode;
5345cf68
TS
3422 HOST_WIDE_INT val;
3423 unsigned char arr[16];
3424 int bytes, i, j;
3425
3426 gcc_assert (GET_CODE (op) == CONST_INT || GET_CODE (op) == CONST_DOUBLE
3427 || GET_CODE (op) == CONST_VECTOR);
3428
3429 if (GET_CODE (op) == CONST_VECTOR
3430 && !const_vector_immediate_p (op))
3431 return 0;
3432
3433 if (GET_MODE (op) != VOIDmode)
3434 mode = GET_MODE (op);
3435
3436 constant_to_array (mode, op, arr);
3437
3438 if (VECTOR_MODE_P (mode))
3439 mode = GET_MODE_INNER (mode);
3440
3441 bytes = GET_MODE_SIZE (mode);
3442 int_mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
3443
3444 /* Check that bytes are repeated. */
3445 for (i = bytes; i < 16; i += bytes)
3446 for (j = 0; j < bytes; j++)
3447 if (arr[j] != arr[i + j])
3448 return 0;
3449
3450 val = arr[0];
3451 for (j = 1; j < bytes; j++)
3452 val = (val << 8) | arr[j];
3453
3454 val = trunc_int_for_mode (val, int_mode);
3455
3456 /* Currently, we only handle SFmode */
3457 gcc_assert (mode == SFmode);
3458 if (mode == SFmode)
3459 {
3460 int exp = (val >> 23) - 127;
3461 return val > 0 && (val & 0x007fffff) == 0
3462 && exp >= low && exp <= high;
3463 }
3464 return FALSE;
3465}
3466
299456f3
BE
3467/* Return true if X is a SYMBOL_REF to an __ea qualified variable. */
3468
3dfc96ea
RS
3469static bool
3470ea_symbol_ref_p (const_rtx x)
299456f3 3471{
299456f3
BE
3472 tree decl;
3473
3474 if (GET_CODE (x) == CONST && GET_CODE (XEXP (x, 0)) == PLUS)
3475 {
3476 rtx plus = XEXP (x, 0);
3477 rtx op0 = XEXP (plus, 0);
3478 rtx op1 = XEXP (plus, 1);
3479 if (GET_CODE (op1) == CONST_INT)
3480 x = op0;
3481 }
3482
3483 return (GET_CODE (x) == SYMBOL_REF
3484 && (decl = SYMBOL_REF_DECL (x)) != 0
3485 && TREE_CODE (decl) == VAR_DECL
3486 && TYPE_ADDR_SPACE (TREE_TYPE (decl)));
3487}
3488
85d9c13c 3489/* We accept:
a7b376ee 3490 - any 32-bit constant (SImode, SFmode)
85d9c13c 3491 - any constant that can be generated with fsmbi (any mode)
a7b376ee 3492 - a 64-bit constant where the high and low bits are identical
85d9c13c 3493 (DImode, DFmode)
a7b376ee 3494 - a 128-bit constant where the four 32-bit words match. */
1a627b35 3495bool
ef4bddc2 3496spu_legitimate_constant_p (machine_mode mode, rtx x)
85d9c13c 3497{
3dfc96ea 3498 subrtx_iterator::array_type array;
73701e27
TS
3499 if (GET_CODE (x) == HIGH)
3500 x = XEXP (x, 0);
299456f3
BE
3501
3502 /* Reject any __ea qualified reference. These can't appear in
3503 instructions but must be forced to the constant pool. */
3dfc96ea
RS
3504 FOR_EACH_SUBRTX (iter, array, x, ALL)
3505 if (ea_symbol_ref_p (*iter))
3506 return 0;
299456f3 3507
85d9c13c 3508 /* V4SI with all identical symbols is valid. */
73701e27 3509 if (!flag_pic
1a627b35 3510 && mode == V4SImode
85d9c13c
TS
3511 && (GET_CODE (CONST_VECTOR_ELT (x, 0)) == SYMBOL_REF
3512 || GET_CODE (CONST_VECTOR_ELT (x, 0)) == LABEL_REF
73701e27 3513 || GET_CODE (CONST_VECTOR_ELT (x, 0)) == CONST))
85d9c13c
TS
3514 return CONST_VECTOR_ELT (x, 0) == CONST_VECTOR_ELT (x, 1)
3515 && CONST_VECTOR_ELT (x, 1) == CONST_VECTOR_ELT (x, 2)
3516 && CONST_VECTOR_ELT (x, 2) == CONST_VECTOR_ELT (x, 3);
3517
73701e27
TS
3518 if (GET_CODE (x) == CONST_VECTOR
3519 && !const_vector_immediate_p (x))
3520 return 0;
85d9c13c
TS
3521 return 1;
3522}
3523
3524/* Valid addresses are:
3525 - symbol_ref, label_ref, const
3526 - reg
eec9405e 3527 - reg + const_int, where const_int is 16 byte aligned
85d9c13c
TS
3528 - reg + reg, alignment doesn't matter
3529 The alignment matters in the reg+const case because lqd and stqd
eec9405e
TS
3530 ignore the 4 least significant bits of the const. We only care about
3531 16 byte modes because the expand phase will change all smaller MEM
3532 references to TImode. */
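/* For example (illustrative only): for a TImode access,
   (plus (reg) (const_int 32)) is accepted because 32 is a multiple of
   16, while (plus (reg) (const_int 20)) is rejected -- lqd/stqd would
   silently drop the low 4 bits of the displacement.  */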
3533static bool
ef4bddc2 3534spu_legitimate_address_p (machine_mode mode,
c6c3dba9 3535 rtx x, bool reg_ok_strict)
85d9c13c 3536{
eec9405e
TS
3537 int aligned = GET_MODE_SIZE (mode) >= 16;
3538 if (aligned
3539 && GET_CODE (x) == AND
85d9c13c 3540 && GET_CODE (XEXP (x, 1)) == CONST_INT
eec9405e 3541 && INTVAL (XEXP (x, 1)) == (HOST_WIDE_INT) - 16)
85d9c13c
TS
3542 x = XEXP (x, 0);
3543 switch (GET_CODE (x))
3544 {
85d9c13c 3545 case LABEL_REF:
299456f3
BE
3546 return !TARGET_LARGE_MEM;
3547
eec9405e 3548 case SYMBOL_REF:
85d9c13c 3549 case CONST:
299456f3
BE
3550 /* Keep __ea references until reload so that spu_expand_mov can see them
3551 in MEMs. */
3dfc96ea 3552 if (ea_symbol_ref_p (x))
299456f3 3553 return !reload_in_progress && !reload_completed;
eec9405e 3554 return !TARGET_LARGE_MEM;
85d9c13c
TS
3555
3556 case CONST_INT:
3557 return INTVAL (x) >= 0 && INTVAL (x) <= 0x3ffff;
3558
3559 case SUBREG:
3560 x = XEXP (x, 0);
eec9405e
TS
3561 if (REG_P (x))
3562 return 0;
85d9c13c
TS
3563
3564 case REG:
3565 return INT_REG_OK_FOR_BASE_P (x, reg_ok_strict);
3566
3567 case PLUS:
3568 case LO_SUM:
3569 {
3570 rtx op0 = XEXP (x, 0);
3571 rtx op1 = XEXP (x, 1);
3572 if (GET_CODE (op0) == SUBREG)
3573 op0 = XEXP (op0, 0);
3574 if (GET_CODE (op1) == SUBREG)
3575 op1 = XEXP (op1, 0);
85d9c13c
TS
3576 if (GET_CODE (op0) == REG
3577 && INT_REG_OK_FOR_BASE_P (op0, reg_ok_strict)
3578 && GET_CODE (op1) == CONST_INT
2ea0be59
UW
3579 && ((INTVAL (op1) >= -0x2000 && INTVAL (op1) <= 0x1fff)
3580 /* If virtual registers are involved, the displacement will
3581 change later on anyway, so checking would be premature.
3582 Reload will make sure the final displacement after
3583 register elimination is OK. */
3584 || op0 == arg_pointer_rtx
3585 || op0 == frame_pointer_rtx
3586 || op0 == virtual_stack_vars_rtx)
eec9405e
TS
3587 && (!aligned || (INTVAL (op1) & 15) == 0))
3588 return TRUE;
85d9c13c
TS
3589 if (GET_CODE (op0) == REG
3590 && INT_REG_OK_FOR_BASE_P (op0, reg_ok_strict)
3591 && GET_CODE (op1) == REG
3592 && INT_REG_OK_FOR_INDEX_P (op1, reg_ok_strict))
eec9405e 3593 return TRUE;
85d9c13c
TS
3594 }
3595 break;
3596
3597 default:
3598 break;
3599 }
eec9405e 3600 return FALSE;
85d9c13c
TS
3601}
3602
299456f3
BE
3603/* Like spu_legitimate_address_p, except with named addresses. */
3604static bool
ef4bddc2 3605spu_addr_space_legitimate_address_p (machine_mode mode, rtx x,
299456f3
BE
3606 bool reg_ok_strict, addr_space_t as)
3607{
3608 if (as == ADDR_SPACE_EA)
3609 return (REG_P (x) && (GET_MODE (x) == EAmode));
3610
3611 else if (as != ADDR_SPACE_GENERIC)
3612 gcc_unreachable ();
3613
3614 return spu_legitimate_address_p (mode, x, reg_ok_strict);
3615}
3616
85d9c13c 3617/* When the address is reg + const_int, force the const_int into a
2f8e468b 3618 register. */
c9c72699 3619static rtx
85d9c13c 3620spu_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
ef4bddc2 3621 machine_mode mode ATTRIBUTE_UNUSED)
85d9c13c
TS
3622{
3623 rtx op0, op1;
3624 /* Make sure both operands are registers. */
3625 if (GET_CODE (x) == PLUS)
3626 {
3627 op0 = XEXP (x, 0);
3628 op1 = XEXP (x, 1);
3629 if (ALIGNED_SYMBOL_REF_P (op0))
3630 {
3631 op0 = force_reg (Pmode, op0);
3632 mark_reg_pointer (op0, 128);
3633 }
3634 else if (GET_CODE (op0) != REG)
3635 op0 = force_reg (Pmode, op0);
3636 if (ALIGNED_SYMBOL_REF_P (op1))
3637 {
3638 op1 = force_reg (Pmode, op1);
3639 mark_reg_pointer (op1, 128);
3640 }
3641 else if (GET_CODE (op1) != REG)
3642 op1 = force_reg (Pmode, op1);
3643 x = gen_rtx_PLUS (Pmode, op0, op1);
85d9c13c 3644 }
506d7b68 3645 return x;
85d9c13c
TS
3646}
3647
299456f3
BE
3648/* Like spu_legitimize_address, except with named address support. */
3649static rtx
ef4bddc2 3650spu_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
299456f3
BE
3651 addr_space_t as)
3652{
3653 if (as != ADDR_SPACE_GENERIC)
3654 return x;
3655
3656 return spu_legitimize_address (x, oldx, mode);
3657}
3658
2ea0be59
UW
3659/* Reload reg + const_int for out-of-range displacements. */
3660rtx
ef4bddc2 3661spu_legitimize_reload_address (rtx ad, machine_mode mode ATTRIBUTE_UNUSED,
2ea0be59
UW
3662 int opnum, int type)
3663{
3664 bool removed_and = false;
3665
3666 if (GET_CODE (ad) == AND
3667 && CONST_INT_P (XEXP (ad, 1))
3668 && INTVAL (XEXP (ad, 1)) == (HOST_WIDE_INT) - 16)
3669 {
3670 ad = XEXP (ad, 0);
3671 removed_and = true;
3672 }
3673
3674 if (GET_CODE (ad) == PLUS
3675 && REG_P (XEXP (ad, 0))
3676 && CONST_INT_P (XEXP (ad, 1))
3677 && !(INTVAL (XEXP (ad, 1)) >= -0x2000
3678 && INTVAL (XEXP (ad, 1)) <= 0x1fff))
3679 {
3680 /* Unshare the sum. */
3681 ad = copy_rtx (ad);
3682
3683 /* Reload the displacement. */
3684 push_reload (XEXP (ad, 1), NULL_RTX, &XEXP (ad, 1), NULL,
3685 BASE_REG_CLASS, GET_MODE (ad), VOIDmode, 0, 0,
3686 opnum, (enum reload_type) type);
3687
3688 /* Add back AND for alignment if we stripped it. */
3689 if (removed_and)
3690 ad = gen_rtx_AND (GET_MODE (ad), ad, GEN_INT (-16));
3691
3692 return ad;
3693 }
3694
3695 return NULL_RTX;
3696}
3697
85d9c13c
TS
3698/* Handle an attribute requiring a FUNCTION_DECL; arguments as in
3699 struct attribute_spec.handler. */
3700static tree
3701spu_handle_fndecl_attribute (tree * node,
3702 tree name,
3703 tree args ATTRIBUTE_UNUSED,
3704 int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
3705{
3706 if (TREE_CODE (*node) != FUNCTION_DECL)
3707 {
29d08eba
JM
3708 warning (0, "%qE attribute only applies to functions",
3709 name);
85d9c13c
TS
3710 *no_add_attrs = true;
3711 }
3712
3713 return NULL_TREE;
3714}
3715
3716/* Handle the "vector" attribute. */
3717static tree
3718spu_handle_vector_attribute (tree * node, tree name,
3719 tree args ATTRIBUTE_UNUSED,
3720 int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
3721{
3722 tree type = *node, result = NULL_TREE;
ef4bddc2 3723 machine_mode mode;
85d9c13c
TS
3724 int unsigned_p;
3725
3726 while (POINTER_TYPE_P (type)
3727 || TREE_CODE (type) == FUNCTION_TYPE
3728 || TREE_CODE (type) == METHOD_TYPE || TREE_CODE (type) == ARRAY_TYPE)
3729 type = TREE_TYPE (type);
3730
3731 mode = TYPE_MODE (type);
3732
3733 unsigned_p = TYPE_UNSIGNED (type);
3734 switch (mode)
3735 {
3736 case DImode:
3737 result = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
3738 break;
3739 case SImode:
3740 result = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
3741 break;
3742 case HImode:
3743 result = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
3744 break;
3745 case QImode:
3746 result = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
3747 break;
3748 case SFmode:
3749 result = V4SF_type_node;
3750 break;
3751 case DFmode:
3752 result = V2DF_type_node;
3753 break;
3754 default:
3755 break;
3756 }
3757
3758 /* Propagate qualifiers attached to the element type
3759 onto the vector type. */
3760 if (result && result != type && TYPE_QUALS (type))
3761 result = build_qualified_type (result, TYPE_QUALS (type));
3762
3763 *no_add_attrs = true; /* No need to hang on to the attribute. */
3764
3765 if (!result)
29d08eba 3766 warning (0, "%qE attribute ignored", name);
85d9c13c 3767 else
5dc11954 3768 *node = lang_hooks.types.reconstruct_complex_type (*node, result);
85d9c13c
TS
3769
3770 return NULL_TREE;
3771}
3772
9f5ed61a 3773/* Return nonzero if FUNC is a naked function. */
85d9c13c
TS
3774static int
3775spu_naked_function_p (tree func)
3776{
3777 tree a;
3778
3779 if (TREE_CODE (func) != FUNCTION_DECL)
3780 abort ();
3781
3782 a = lookup_attribute ("naked", DECL_ATTRIBUTES (func));
3783 return a != NULL_TREE;
3784}
3785
3786int
3787spu_initial_elimination_offset (int from, int to)
3788{
3789 int saved_regs_size = spu_saved_regs_size ();
3790 int sp_offset = 0;
416ff32e 3791 if (!crtl->is_leaf || crtl->outgoing_args_size
85d9c13c
TS
3792 || get_frame_size () || saved_regs_size)
3793 sp_offset = STACK_POINTER_OFFSET;
3794 if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
7310a2da 3795 return get_frame_size () + crtl->outgoing_args_size + sp_offset;
85d9c13c 3796 else if (from == FRAME_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
7310a2da 3797 return get_frame_size ();
85d9c13c 3798 else if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
38173d38 3799 return sp_offset + crtl->outgoing_args_size
85d9c13c
TS
3800 + get_frame_size () + saved_regs_size + STACK_POINTER_OFFSET;
3801 else if (from == ARG_POINTER_REGNUM && to == HARD_FRAME_POINTER_REGNUM)
3802 return get_frame_size () + saved_regs_size + sp_offset;
7310a2da
SSF
3803 else
3804 gcc_unreachable ();
85d9c13c
TS
3805}
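
/* Illustrative sketch (hypothetical helper, not used by the port): the
   ARG_POINTER -> STACK_POINTER case of spu_initial_elimination_offset
   restated as plain arithmetic, with the target macros and crtl fields
   replaced by parameters.  */
static int
arg_to_sp_offset_sketch (int frame_size, int outgoing_args_size,
                         int saved_regs_size, int sp_offset,
                         int stack_pointer_offset)
{
  return sp_offset + outgoing_args_size + frame_size
         + saved_regs_size + stack_pointer_offset;
}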
3806
3807rtx
586de218 3808spu_function_value (const_tree type, const_tree func ATTRIBUTE_UNUSED)
85d9c13c 3809{
ef4bddc2 3810 machine_mode mode = TYPE_MODE (type);
85d9c13c
TS
3811 int byte_size = ((mode == BLKmode)
3812 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3813
3814 /* Make sure small structs are left justified in a register. */
3815 if ((mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
3816 && byte_size <= UNITS_PER_WORD * MAX_REGISTER_RETURN && byte_size > 0)
3817 {
ef4bddc2 3818 machine_mode smode;
85d9c13c
TS
3819 rtvec v;
3820 int i;
3821 int nregs = (byte_size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3822 int n = byte_size / UNITS_PER_WORD;
3823 v = rtvec_alloc (nregs);
3824 for (i = 0; i < n; i++)
3825 {
3826 RTVEC_ELT (v, i) = gen_rtx_EXPR_LIST (VOIDmode,
3827 gen_rtx_REG (TImode,
3828 FIRST_RETURN_REGNUM
3829 + i),
3830 GEN_INT (UNITS_PER_WORD * i));
3831 byte_size -= UNITS_PER_WORD;
3832 }
3833
3834 if (n < nregs)
3835 {
3836 if (byte_size < 4)
3837 byte_size = 4;
3838 smode =
3839 smallest_mode_for_size (byte_size * BITS_PER_UNIT, MODE_INT);
3840 RTVEC_ELT (v, n) =
3841 gen_rtx_EXPR_LIST (VOIDmode,
3842 gen_rtx_REG (smode, FIRST_RETURN_REGNUM + n),
3843 GEN_INT (UNITS_PER_WORD * n));
3844 }
3845 return gen_rtx_PARALLEL (mode, v);
3846 }
3847 return gen_rtx_REG (mode, FIRST_RETURN_REGNUM);
3848}
3849
925ed112 3850static rtx
d5cc9181 3851spu_function_arg (cumulative_args_t cum_v,
ef4bddc2 3852 machine_mode mode,
925ed112 3853 const_tree type, bool named ATTRIBUTE_UNUSED)
85d9c13c 3854{
d5cc9181 3855 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
85d9c13c
TS
3856 int byte_size;
3857
0aa88287 3858 if (*cum >= MAX_REGISTER_ARGS)
85d9c13c
TS
3859 return 0;
3860
3861 byte_size = ((mode == BLKmode)
3862 ? int_size_in_bytes (type) : GET_MODE_SIZE (mode));
3863
3864 /* The ABI does not allow parameters to be passed partially in
3865 a register and partially on the stack. */
0aa88287 3866 if ((*cum + (byte_size + 15) / 16) > MAX_REGISTER_ARGS)
85d9c13c
TS
3867 return 0;
3868
3869 /* Make sure small structs are left justified in a register. */
3870 if ((mode == BLKmode || (type && AGGREGATE_TYPE_P (type)))
3871 && byte_size < UNITS_PER_WORD && byte_size > 0)
3872 {
ef4bddc2 3873 machine_mode smode;
85d9c13c
TS
3874 rtx gr_reg;
3875 if (byte_size < 4)
3876 byte_size = 4;
3877 smode = smallest_mode_for_size (byte_size * BITS_PER_UNIT, MODE_INT);
3878 gr_reg = gen_rtx_EXPR_LIST (VOIDmode,
0aa88287 3879 gen_rtx_REG (smode, FIRST_ARG_REGNUM + *cum),
85d9c13c
TS
3880 const0_rtx);
3881 return gen_rtx_PARALLEL (mode, gen_rtvec (1, gr_reg));
3882 }
3883 else
0aa88287 3884 return gen_rtx_REG (mode, FIRST_ARG_REGNUM + *cum);
85d9c13c
TS
3885}
3886
925ed112 3887static void
ef4bddc2 3888spu_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
925ed112
NF
3889 const_tree type, bool named ATTRIBUTE_UNUSED)
3890{
d5cc9181
JR
3891 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
3892
925ed112
NF
3893 *cum += (type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
3894 ? 1
3895 : mode == BLKmode
3896 ? ((int_size_in_bytes (type) + 15) / 16)
3897 : mode == VOIDmode
3898 ? 1
3899 : HARD_REGNO_NREGS (cum, mode));
3900}
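
/* Illustrative sketch (hypothetical helper, not used by the port): the
   register-slot accounting behind spu_function_arg_advance.  Arguments
   passed in registers occupy whole 16-byte slots, and a variable-sized
   argument is passed as a single pointer slot.  */
static int
arg_slots_sketch (int byte_size, int variable_sized)
{
  if (variable_sized)
    return 1;                        /* passed by reference: one pointer */
  return (byte_size + 15) / 16;      /* one slot per started quadword */
}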
3901
85d9c13c
TS
3902/* Variable sized types are passed by reference. */
3903static bool
d5cc9181 3904spu_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
ef4bddc2 3905 machine_mode mode ATTRIBUTE_UNUSED,
586de218 3906 const_tree type, bool named ATTRIBUTE_UNUSED)
85d9c13c
TS
3907{
3908 return type && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST;
3909}
3910\f
3911
3912/* Var args. */
3913
3914/* Create and return the va_list datatype.
3915
3916 On SPU, va_list is an array type equivalent to
3917
3918 typedef struct __va_list_tag
3919 {
3920 void *__args __attribute__((__aligned(16)));
3921 void *__skip __attribute__((__aligned(16)));
3922
3923 } va_list[1];
3924
2f8e468b 3925 where __args points to the arg that will be returned by the next
85d9c13c
TS
3926 va_arg(), and __skip points to the previous stack frame such that
3927 when __args == __skip we should advance __args by 32 bytes. */
3928static tree
3929spu_build_builtin_va_list (void)
3930{
3931 tree f_args, f_skip, record, type_decl;
3932 bool owp;
3933
3934 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
3935
3936 type_decl =
4c4bde29
AH
3937 build_decl (BUILTINS_LOCATION,
3938 TYPE_DECL, get_identifier ("__va_list_tag"), record);
85d9c13c 3939
4c4bde29
AH
3940 f_args = build_decl (BUILTINS_LOCATION,
3941 FIELD_DECL, get_identifier ("__args"), ptr_type_node);
3942 f_skip = build_decl (BUILTINS_LOCATION,
3943 FIELD_DECL, get_identifier ("__skip"), ptr_type_node);
85d9c13c
TS
3944
3945 DECL_FIELD_CONTEXT (f_args) = record;
3946 DECL_ALIGN (f_args) = 128;
3947 DECL_USER_ALIGN (f_args) = 1;
3948
3949 DECL_FIELD_CONTEXT (f_skip) = record;
3950 DECL_ALIGN (f_skip) = 128;
3951 DECL_USER_ALIGN (f_skip) = 1;
3952
0fd2eac2 3953 TYPE_STUB_DECL (record) = type_decl;
85d9c13c
TS
3954 TYPE_NAME (record) = type_decl;
3955 TYPE_FIELDS (record) = f_args;
910ad8de 3956 DECL_CHAIN (f_args) = f_skip;
85d9c13c
TS
3957
3958 /* We know this is being padded and that is what we want. It is an internal
3959 type so hide the warnings from the user. */
3960 owp = warn_padded;
3961 warn_padded = false;
3962
3963 layout_type (record);
3964
3965 warn_padded = owp;
3966
3967 /* The correct type is an array type of one element. */
3968 return build_array_type (record, build_index_type (size_zero_node));
3969}
3970
3971/* Implement va_start by filling the va_list structure VALIST.
3972 NEXTARG points to the first anonymous stack argument.
3973
3974 The following global variables are used to initialize
3975 the va_list structure:
3976
38173d38 3977 crtl->args.info;
85d9c13c
TS
3978 the CUMULATIVE_ARGS for this function
3979
38173d38 3980 crtl->args.arg_offset_rtx:
85d9c13c
TS
3981 holds the offset of the first anonymous stack argument
3982 (relative to the virtual arg pointer). */
3983
d7bd8aeb 3984static void
85d9c13c
TS
3985spu_va_start (tree valist, rtx nextarg)
3986{
3987 tree f_args, f_skip;
3988 tree args, skip, t;
3989
3990 f_args = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
910ad8de 3991 f_skip = DECL_CHAIN (f_args);
85d9c13c 3992
86710a8b 3993 valist = build_simple_mem_ref (valist);
85d9c13c
TS
3994 args =
3995 build3 (COMPONENT_REF, TREE_TYPE (f_args), valist, f_args, NULL_TREE);
3996 skip =
3997 build3 (COMPONENT_REF, TREE_TYPE (f_skip), valist, f_skip, NULL_TREE);
3998
3999 /* Find the __args area. */
4000 t = make_tree (TREE_TYPE (args), nextarg);
38173d38 4001 if (crtl->args.pretend_args_size > 0)
5d49b6a7 4002 t = fold_build_pointer_plus_hwi (t, -STACK_POINTER_OFFSET);
726a989a 4003 t = build2 (MODIFY_EXPR, TREE_TYPE (args), args, t);
85d9c13c
TS
4004 TREE_SIDE_EFFECTS (t) = 1;
4005 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4006
4007 /* Find the __skip area. */
4008 t = make_tree (TREE_TYPE (skip), virtual_incoming_args_rtx);
5d49b6a7
RG
4009 t = fold_build_pointer_plus_hwi (t, (crtl->args.pretend_args_size
4010 - STACK_POINTER_OFFSET));
726a989a 4011 t = build2 (MODIFY_EXPR, TREE_TYPE (skip), skip, t);
85d9c13c
TS
4012 TREE_SIDE_EFFECTS (t) = 1;
4013 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4014}
4015
4016/* Gimplify va_arg by updating the va_list structure
4017 VALIST as required to retrieve an argument of type
4018 TYPE, and returning that argument.
4019
4020 ret = va_arg(VALIST, TYPE);
4021
4022 generates code equivalent to:
4023
4024 paddedsize = (sizeof(TYPE) + 15) & -16;
4025 if (VALIST.__args + paddedsize > VALIST.__skip
4026 && VALIST.__args <= VALIST.__skip)
4027 addr = VALIST.__skip + 32;
4028 else
4029 addr = VALIST.__args;
4030 VALIST.__args = addr + paddedsize;
4031 ret = *(TYPE *)addr;
4032 */
4033static tree
726a989a
RB
4034spu_gimplify_va_arg_expr (tree valist, tree type, gimple_seq * pre_p,
4035 gimple_seq * post_p ATTRIBUTE_UNUSED)
85d9c13c
TS
4036{
4037 tree f_args, f_skip;
4038 tree args, skip;
4039 HOST_WIDE_INT size, rsize;
5d49b6a7 4040 tree addr, tmp;
85d9c13c
TS
4041 bool pass_by_reference_p;
4042
4043 f_args = TYPE_FIELDS (TREE_TYPE (va_list_type_node));
910ad8de 4044 f_skip = DECL_CHAIN (f_args);
85d9c13c 4045
85d9c13c
TS
4046 args =
4047 build3 (COMPONENT_REF, TREE_TYPE (f_args), valist, f_args, NULL_TREE);
4048 skip =
4049 build3 (COMPONENT_REF, TREE_TYPE (f_skip), valist, f_skip, NULL_TREE);
4050
4051 addr = create_tmp_var (ptr_type_node, "va_arg");
85d9c13c
TS
4052
4053 /* if an object is dynamically sized, a pointer to it is passed
4054 instead of the object itself. */
a207915a
UW
4055 pass_by_reference_p = pass_by_reference (NULL, TYPE_MODE (type), type,
4056 false);
85d9c13c
TS
4057 if (pass_by_reference_p)
4058 type = build_pointer_type (type);
4059 size = int_size_in_bytes (type);
4060 rsize = ((size + UNITS_PER_WORD - 1) / UNITS_PER_WORD) * UNITS_PER_WORD;
4061
4062 /* build conditional expression to calculate addr. The expression
4063 will be gimplified later. */
5d49b6a7 4064 tmp = fold_build_pointer_plus_hwi (unshare_expr (args), rsize);
85d9c13c 4065 tmp = build2 (TRUTH_AND_EXPR, boolean_type_node,
726a989a
RB
4066 build2 (GT_EXPR, boolean_type_node, tmp, unshare_expr (skip)),
4067 build2 (LE_EXPR, boolean_type_node, unshare_expr (args),
4068 unshare_expr (skip)));
85d9c13c
TS
4069
4070 tmp = build3 (COND_EXPR, ptr_type_node, tmp,
5d49b6a7
RG
4071 fold_build_pointer_plus_hwi (unshare_expr (skip), 32),
4072 unshare_expr (args));
85d9c13c 4073
726a989a 4074 gimplify_assign (addr, tmp, pre_p);
85d9c13c
TS
4075
4076 /* update VALIST.__args */
5d49b6a7 4077 tmp = fold_build_pointer_plus_hwi (addr, rsize);
726a989a 4078 gimplify_assign (unshare_expr (args), tmp, pre_p);
85d9c13c 4079
5b21f0f3
RG
4080 addr = fold_convert (build_pointer_type_for_mode (type, ptr_mode, true),
4081 addr);
85d9c13c
TS
4082
4083 if (pass_by_reference_p)
4084 addr = build_va_arg_indirect_ref (addr);
4085
4086 return build_va_arg_indirect_ref (addr);
4087}
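
/* Illustrative sketch (hypothetical helper, not used by the port): the
   pointer arithmetic from the va_arg pseudocode above, written over a
   plain struct instead of trees and gimple.  */
struct spu_va_list_sketch { char *args; char *skip; };

static void *
va_arg_sketch (struct spu_va_list_sketch *ap, unsigned long size)
{
  unsigned long padded = (size + 15) & ~15UL;   /* paddedsize */
  char *addr;
  if (ap->args + padded > ap->skip && ap->args <= ap->skip)
    addr = ap->skip + 32;            /* advance past __skip by 32 bytes */
  else
    addr = ap->args;
  ap->args = addr + padded;          /* VALIST.__args = addr + paddedsize */
  return addr;                       /* ret = *(TYPE *)addr */
}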
4088
4089/* Save parameter registers starting with the register that corresponds
4090 to the first unnamed parameters. If the first unnamed parameter is
4091 in the stack then save no registers. Set pretend_args_size to the
4092 amount of space needed to save the registers. */
d5cc9181 4093static void
ef4bddc2 4094spu_setup_incoming_varargs (cumulative_args_t cum, machine_mode mode,
85d9c13c
TS
4095 tree type, int *pretend_size, int no_rtl)
4096{
4097 if (!no_rtl)
4098 {
4099 rtx tmp;
4100 int regno;
4101 int offset;
d5cc9181 4102 int ncum = *get_cumulative_args (cum);
85d9c13c
TS
4103
4104 /* cum currently points to the last named argument; we want to
4105 start at the next argument. */
d5cc9181 4106 spu_function_arg_advance (pack_cumulative_args (&ncum), mode, type, true);
85d9c13c
TS
4107
4108 offset = -STACK_POINTER_OFFSET;
4109 for (regno = ncum; regno < MAX_REGISTER_ARGS; regno++)
4110 {
4111 tmp = gen_frame_mem (V4SImode,
0a81f074 4112 plus_constant (Pmode, virtual_incoming_args_rtx,
85d9c13c
TS
4113 offset));
4114 emit_move_insn (tmp,
4115 gen_rtx_REG (V4SImode, FIRST_ARG_REGNUM + regno));
4116 offset += 16;
4117 }
4118 *pretend_size = offset + STACK_POINTER_OFFSET;
4119 }
4120}
4121\f
5efd84c5 4122static void
85d9c13c
TS
4123spu_conditional_register_usage (void)
4124{
4125 if (flag_pic)
4126 {
4127 fixed_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
4128 call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
4129 }
85d9c13c
TS
4130}
4131
eec9405e
TS
4132/* This is called any time we inspect the alignment of a register for
4133 addresses. */
85d9c13c 4134static int
eec9405e 4135reg_aligned_for_addr (rtx x)
85d9c13c 4136{
eec9405e
TS
4137 int regno =
4138 REGNO (x) < FIRST_PSEUDO_REGISTER ? ORIGINAL_REGNO (x) : REGNO (x);
4139 return REGNO_POINTER_ALIGN (regno) >= 128;
85d9c13c
TS
4140}
4141
3d9cd79a
UW
4142/* Encode symbol attributes (local vs. global, tls model) of a SYMBOL_REF
4143 into its SYMBOL_REF_FLAGS. */
4144static void
4145spu_encode_section_info (tree decl, rtx rtl, int first)
4146{
4147 default_encode_section_info (decl, rtl, first);
4148
4149 /* If a variable has a forced alignment to < 16 bytes, mark it with
4150 SYMBOL_FLAG_ALIGN1. */
4151 if (TREE_CODE (decl) == VAR_DECL
4152 && DECL_USER_ALIGN (decl) && DECL_ALIGN (decl) < 128)
4153 SYMBOL_REF_FLAGS (XEXP (rtl, 0)) |= SYMBOL_FLAG_ALIGN1;
4154}
4155
85d9c13c
TS
4156/* Return TRUE if we are certain the mem refers to a complete object
4157 which is both 16-byte aligned and padded to a 16-byte boundary. This
4158 would make it safe to store with a single instruction.
4159 We guarantee the alignment and padding for static objects by aligning
4160 all of them to 16 bytes. (DATA_ALIGNMENT and CONSTANT_ALIGNMENT.)
4161 FIXME: We currently cannot guarantee this for objects on the stack
4162 because assign_parm_setup_stack calls assign_stack_local with the
4163 alignment of the parameter mode and in that case the alignment never
4164 gets adjusted by LOCAL_ALIGNMENT. */
4165static int
4166store_with_one_insn_p (rtx mem)
4167{
ef4bddc2 4168 machine_mode mode = GET_MODE (mem);
85d9c13c 4169 rtx addr = XEXP (mem, 0);
eec9405e 4170 if (mode == BLKmode)
85d9c13c 4171 return 0;
eec9405e
TS
4172 if (GET_MODE_SIZE (mode) >= 16)
4173 return 1;
85d9c13c
TS
4174 /* Only static objects. */
4175 if (GET_CODE (addr) == SYMBOL_REF)
4176 {
4177 /* We use the associated declaration to make sure the access is
2f8e468b 4178 referring to the whole object.
dd5a833e 4179 We check both MEM_EXPR and SYMBOL_REF_DECL. I'm not sure
85d9c13c
TS
4180 if it is necessary. Will there be cases where one exists, and
4181 the other does not? Will there be cases where both exist, but
4182 have different types? */
4183 tree decl = MEM_EXPR (mem);
4184 if (decl
4185 && TREE_CODE (decl) == VAR_DECL
4186 && GET_MODE (mem) == TYPE_MODE (TREE_TYPE (decl)))
4187 return 1;
4188 decl = SYMBOL_REF_DECL (addr);
4189 if (decl
4190 && TREE_CODE (decl) == VAR_DECL
4191 && GET_MODE (mem) == TYPE_MODE (TREE_TYPE (decl)))
4192 return 1;
4193 }
4194 return 0;
4195}
4196
eec9405e
TS
4197/* Return 1 when the address is not valid for a simple load and store as
4198 required by the '_mov*' patterns. We could make this less strict
4199 for loads, but we prefer MEMs to look the same so they are more
4200 likely to be merged. */
4201static int
4202address_needs_split (rtx mem)
4203{
4204 if (GET_MODE_SIZE (GET_MODE (mem)) < 16
4205 && (GET_MODE_SIZE (GET_MODE (mem)) < 4
4206 || !(store_with_one_insn_p (mem)
4207 || mem_is_padded_component_ref (mem))))
4208 return 1;
4209
4210 return 0;
4211}
4212
299456f3
BE
4213static GTY(()) rtx cache_fetch; /* __cache_fetch function */
4214static GTY(()) rtx cache_fetch_dirty; /* __cache_fetch_dirty function */
4215static alias_set_type ea_alias_set = -1; /* alias set for __ea memory */
4216
4217/* MEM is known to be an __ea qualified memory access. Emit a call to
4218 fetch the ppu memory to local store, and return its address in local
4219 store. */
4220
4221static void
4222ea_load_store (rtx mem, bool is_store, rtx ea_addr, rtx data_addr)
4223{
4224 if (is_store)
4225 {
4226 rtx ndirty = GEN_INT (GET_MODE_SIZE (GET_MODE (mem)));
4227 if (!cache_fetch_dirty)
4228 cache_fetch_dirty = init_one_libfunc ("__cache_fetch_dirty");
4229 emit_library_call_value (cache_fetch_dirty, data_addr, LCT_NORMAL, Pmode,
4230 2, ea_addr, EAmode, ndirty, SImode);
4231 }
4232 else
4233 {
4234 if (!cache_fetch)
4235 cache_fetch = init_one_libfunc ("__cache_fetch");
4236 emit_library_call_value (cache_fetch, data_addr, LCT_NORMAL, Pmode,
4237 1, ea_addr, EAmode);
4238 }
4239}
4240
4241/* Like ea_load_store, but do the cache tag comparison and, for stores,
4242 dirty bit marking, inline.
4243
4244 The cache control data structure is an array of
4245
4246 struct __cache_tag_array
4247 {
4248 unsigned int tag_lo[4];
4249 unsigned int tag_hi[4];
4250 void *data_pointer[4];
4251 int reserved[4];
4252 vector unsigned short dirty_bits[4];
4253 } */
4254
4255static void
4256ea_load_store_inline (rtx mem, bool is_store, rtx ea_addr, rtx data_addr)
4257{
4258 rtx ea_addr_si;
4259 HOST_WIDE_INT v;
4260 rtx tag_size_sym = gen_rtx_SYMBOL_REF (Pmode, "__cache_tag_array_size");
4261 rtx tag_arr_sym = gen_rtx_SYMBOL_REF (Pmode, "__cache_tag_array");
4262 rtx index_mask = gen_reg_rtx (SImode);
4263 rtx tag_arr = gen_reg_rtx (Pmode);
4264 rtx splat_mask = gen_reg_rtx (TImode);
4265 rtx splat = gen_reg_rtx (V4SImode);
4266 rtx splat_hi = NULL_RTX;
4267 rtx tag_index = gen_reg_rtx (Pmode);
4268 rtx block_off = gen_reg_rtx (SImode);
4269 rtx tag_addr = gen_reg_rtx (Pmode);
4270 rtx tag = gen_reg_rtx (V4SImode);
4271 rtx cache_tag = gen_reg_rtx (V4SImode);
4272 rtx cache_tag_hi = NULL_RTX;
4273 rtx cache_ptrs = gen_reg_rtx (TImode);
4274 rtx cache_ptrs_si = gen_reg_rtx (SImode);
4275 rtx tag_equal = gen_reg_rtx (V4SImode);
4276 rtx tag_equal_hi = NULL_RTX;
4277 rtx tag_eq_pack = gen_reg_rtx (V4SImode);
4278 rtx tag_eq_pack_si = gen_reg_rtx (SImode);
4279 rtx eq_index = gen_reg_rtx (SImode);
23c39aaa
DM
4280 rtx bcomp, hit_label, hit_ref, cont_label;
4281 rtx_insn *insn;
299456f3
BE
4282
4283 if (spu_ea_model != 32)
4284 {
4285 splat_hi = gen_reg_rtx (V4SImode);
4286 cache_tag_hi = gen_reg_rtx (V4SImode);
4287 tag_equal_hi = gen_reg_rtx (V4SImode);
4288 }
4289
0a81f074 4290 emit_move_insn (index_mask, plus_constant (Pmode, tag_size_sym, -128));
299456f3
BE
4291 emit_move_insn (tag_arr, tag_arr_sym);
4292 v = 0x0001020300010203LL;
4293 emit_move_insn (splat_mask, immed_double_const (v, v, TImode));
4294 ea_addr_si = ea_addr;
4295 if (spu_ea_model != 32)
4296 ea_addr_si = convert_to_mode (SImode, ea_addr, 1);
4297
4298 /* tag_index = ea_addr & (tag_array_size - 128) */
4299 emit_insn (gen_andsi3 (tag_index, ea_addr_si, index_mask));
4300
4301 /* splat ea_addr to all 4 slots. */
4302 emit_insn (gen_shufb (splat, ea_addr_si, ea_addr_si, splat_mask));
4303 /* Similarly for high 32 bits of ea_addr. */
4304 if (spu_ea_model != 32)
4305 emit_insn (gen_shufb (splat_hi, ea_addr, ea_addr, splat_mask));
4306
4307 /* block_off = ea_addr & 127 */
4308 emit_insn (gen_andsi3 (block_off, ea_addr_si, spu_const (SImode, 127)));
4309
4310 /* tag_addr = tag_arr + tag_index */
4311 emit_insn (gen_addsi3 (tag_addr, tag_arr, tag_index));
4312
4313 /* Read cache tags. */
4314 emit_move_insn (cache_tag, gen_rtx_MEM (V4SImode, tag_addr));
4315 if (spu_ea_model != 32)
4316 emit_move_insn (cache_tag_hi, gen_rtx_MEM (V4SImode,
0a81f074
RS
4317 plus_constant (Pmode,
4318 tag_addr, 16)));
299456f3
BE
4319
4320 /* tag = ea_addr & -128 */
4321 emit_insn (gen_andv4si3 (tag, splat, spu_const (V4SImode, -128)));
4322
4323 /* Read all four cache data pointers. */
4324 emit_move_insn (cache_ptrs, gen_rtx_MEM (TImode,
0a81f074
RS
4325 plus_constant (Pmode,
4326 tag_addr, 32)));
299456f3
BE
4327
4328 /* Compare tags. */
4329 emit_insn (gen_ceq_v4si (tag_equal, tag, cache_tag));
4330 if (spu_ea_model != 32)
4331 {
4332 emit_insn (gen_ceq_v4si (tag_equal_hi, splat_hi, cache_tag_hi));
4333 emit_insn (gen_andv4si3 (tag_equal, tag_equal, tag_equal_hi));
4334 }
4335
4336 /* At most one of the tags compares equal, so tag_equal has one
4337 32-bit slot set to all 1's, with the other slots all zero.
4338 gbb picks off the low bit from each byte in the 128-bit registers,
4339 so tag_eq_pack is one of 0xf000, 0x0f00, 0x00f0, 0x000f, assuming
4340 we have a hit. */
4341 emit_insn (gen_spu_gbb (tag_eq_pack, spu_gen_subreg (V16QImode, tag_equal)));
4342 emit_insn (gen_spu_convert (tag_eq_pack_si, tag_eq_pack));
4343
4344 /* So counting leading zeros will set eq_index to 16, 20, 24 or 28. */
4345 emit_insn (gen_clzsi2 (eq_index, tag_eq_pack_si));
4346
4347 /* This allows us to rotate the corresponding cache data pointer to slot 0
4348 (rotating by eq_index mod 16 bytes). */
4349 emit_insn (gen_rotqby_ti (cache_ptrs, cache_ptrs, eq_index));
4350 emit_insn (gen_spu_convert (cache_ptrs_si, cache_ptrs));
4351
4352 /* Add block offset to form final data address. */
4353 emit_insn (gen_addsi3 (data_addr, cache_ptrs_si, block_off));
4354
4355 /* Check that we did hit. */
4356 hit_label = gen_label_rtx ();
4357 hit_ref = gen_rtx_LABEL_REF (VOIDmode, hit_label);
4358 bcomp = gen_rtx_NE (SImode, tag_eq_pack_si, const0_rtx);
f7df4a84 4359 insn = emit_jump_insn (gen_rtx_SET (pc_rtx,
299456f3
BE
4360 gen_rtx_IF_THEN_ELSE (VOIDmode, bcomp,
4361 hit_ref, pc_rtx)));
4362 /* Say that this branch is very likely to happen. */
4363 v = REG_BR_PROB_BASE - REG_BR_PROB_BASE / 100 - 1;
e5af9ddd 4364 add_int_reg_note (insn, REG_BR_PROB, v);
299456f3
BE
4365
4366 ea_load_store (mem, is_store, ea_addr, data_addr);
4367 cont_label = gen_label_rtx ();
4368 emit_jump_insn (gen_jump (cont_label));
4369 emit_barrier ();
4370
4371 emit_label (hit_label);
4372
4373 if (is_store)
4374 {
4375 HOST_WIDE_INT v_hi;
4376 rtx dirty_bits = gen_reg_rtx (TImode);
4377 rtx dirty_off = gen_reg_rtx (SImode);
4378 rtx dirty_128 = gen_reg_rtx (TImode);
4379 rtx neg_block_off = gen_reg_rtx (SImode);
4380
4381 /* Set up mask with one dirty bit per byte of the mem we are
4382 writing, starting from top bit. */
4383 v_hi = v = -1;
4384 v <<= (128 - GET_MODE_SIZE (GET_MODE (mem))) & 63;
4385 if ((128 - GET_MODE_SIZE (GET_MODE (mem))) >= 64)
4386 {
4387 v_hi = v;
4388 v = 0;
4389 }
4390 emit_move_insn (dirty_bits, immed_double_const (v, v_hi, TImode));
4391
4392 /* Form index into cache dirty_bits. eq_index is one of
4393 0x10, 0x14, 0x18 or 0x1c. Multiplying by 4 gives us
4394 0x40, 0x50, 0x60 or 0x70 which just happens to be the
4395 offset to each of the four dirty_bits elements. */
4396 emit_insn (gen_ashlsi3 (dirty_off, eq_index, spu_const (SImode, 2)));
4397
4398 emit_insn (gen_spu_lqx (dirty_128, tag_addr, dirty_off));
4399
4400 /* Rotate bit mask to proper bit. */
4401 emit_insn (gen_negsi2 (neg_block_off, block_off));
4402 emit_insn (gen_rotqbybi_ti (dirty_bits, dirty_bits, neg_block_off));
4403 emit_insn (gen_rotqbi_ti (dirty_bits, dirty_bits, neg_block_off));
4404
4405 /* Or in the new dirty bits. */
4406 emit_insn (gen_iorti3 (dirty_128, dirty_bits, dirty_128));
4407
4408 /* Store. */
4409 emit_insn (gen_spu_stqx (dirty_128, tag_addr, dirty_off));
4410 }
4411
4412 emit_label (cont_label);
4413}
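
/* Illustrative sketch (hypothetical helper, not used by the port): the
   4-way lookup that the inline sequence above performs with ceq/gbb/clz,
   written as plain C over the documented __cache_tag_array layout.  Only
   the 32-bit-EA hit path is shown; a miss falls back to __cache_fetch.  */
struct cache_way_sketch
{
  unsigned int tag_lo[4];
  void *data_pointer[4];
};

static void *
cache_lookup_sketch (struct cache_way_sketch *set, unsigned int ea_addr)
{
  unsigned int tag = ea_addr & -128u;       /* tag = ea_addr & -128 */
  unsigned int block_off = ea_addr & 127;   /* offset within the 128B block */
  int way;
  for (way = 0; way < 4; way++)             /* done in parallel by ceq/gbb */
    if (set->tag_lo[way] == tag)
      return (char *) set->data_pointer[way] + block_off;
  return 0;                                 /* miss */
}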
4414
4415static rtx
4416expand_ea_mem (rtx mem, bool is_store)
4417{
4418 rtx ea_addr;
4419 rtx data_addr = gen_reg_rtx (Pmode);
4420 rtx new_mem;
4421
4422 ea_addr = force_reg (EAmode, XEXP (mem, 0));
4423 if (optimize_size || optimize == 0)
4424 ea_load_store (mem, is_store, ea_addr, data_addr);
4425 else
4426 ea_load_store_inline (mem, is_store, ea_addr, data_addr);
4427
4428 if (ea_alias_set == -1)
4429 ea_alias_set = new_alias_set ();
4430
4431 /* We generate a new MEM RTX to refer to the copy of the data
4432 in the cache. We do not copy memory attributes (except the
4433 alignment) from the original MEM, as they may no longer apply
4434 to the cache copy. */
4435 new_mem = gen_rtx_MEM (GET_MODE (mem), data_addr);
4436 set_mem_alias_set (new_mem, ea_alias_set);
4437 set_mem_align (new_mem, MIN (MEM_ALIGN (mem), 128 * 8));
4438
4439 return new_mem;
4440}
4441
85d9c13c 4442int
ef4bddc2 4443spu_expand_mov (rtx * ops, machine_mode mode)
85d9c13c
TS
4444{
4445 if (GET_CODE (ops[0]) == SUBREG && !valid_subreg (ops[0]))
46fc2305
UW
4446 {
4447 /* Perform the move in the destination SUBREG's inner mode. */
4448 ops[0] = SUBREG_REG (ops[0]);
4449 mode = GET_MODE (ops[0]);
4450 ops[1] = gen_lowpart_common (mode, ops[1]);
4451 gcc_assert (ops[1]);
4452 }
85d9c13c
TS
4453
4454 if (GET_CODE (ops[1]) == SUBREG && !valid_subreg (ops[1]))
4455 {
4456 rtx from = SUBREG_REG (ops[1]);
ef4bddc2 4457 machine_mode imode = int_mode_for_mode (GET_MODE (from));
85d9c13c
TS
4458
4459 gcc_assert (GET_MODE_CLASS (mode) == MODE_INT
4460 && GET_MODE_CLASS (imode) == MODE_INT
4461 && subreg_lowpart_p (ops[1]));
4462
4463 if (GET_MODE_SIZE (imode) < 4)
4caab5ba
UW
4464 imode = SImode;
4465 if (imode != GET_MODE (from))
4466 from = gen_rtx_SUBREG (imode, from, 0);
85d9c13c
TS
4467
4468 if (GET_MODE_SIZE (mode) < GET_MODE_SIZE (imode))
4469 {
947131ba
RS
4470 enum insn_code icode = convert_optab_handler (trunc_optab,
4471 mode, imode);
85d9c13c
TS
4472 emit_insn (GEN_FCN (icode) (ops[0], from));
4473 }
4474 else
4475 emit_insn (gen_extend_insn (ops[0], from, mode, imode, 1));
4476 return 1;
4477 }
4478
4479 /* At least one of the operands needs to be a register. */
4480 if ((reload_in_progress | reload_completed) == 0
4481 && !register_operand (ops[0], mode) && !register_operand (ops[1], mode))
4482 {
4483 rtx temp = force_reg (mode, ops[1]);
4484 emit_move_insn (ops[0], temp);
4485 return 1;
4486 }
4487 if (reload_in_progress || reload_completed)
4488 {
a1c6e4b8
TS
4489 if (CONSTANT_P (ops[1]))
4490 return spu_split_immediate (ops);
85d9c13c
TS
4491 return 0;
4492 }
eec9405e
TS
4493
4494 /* Catch the SImode immediates greater than 0x7fffffff, and sign
4495 extend them. */
4496 if (GET_CODE (ops[1]) == CONST_INT)
85d9c13c 4497 {
eec9405e
TS
4498 HOST_WIDE_INT val = trunc_int_for_mode (INTVAL (ops[1]), mode);
4499 if (val != INTVAL (ops[1]))
85d9c13c 4500 {
eec9405e
TS
4501 emit_move_insn (ops[0], GEN_INT (val));
4502 return 1;
85d9c13c
TS
4503 }
4504 }
eec9405e 4505 if (MEM_P (ops[0]))
299456f3
BE
4506 {
4507 if (MEM_ADDR_SPACE (ops[0]))
4508 ops[0] = expand_ea_mem (ops[0], true);
4509 return spu_split_store (ops);
4510 }
eec9405e 4511 if (MEM_P (ops[1]))
299456f3
BE
4512 {
4513 if (MEM_ADDR_SPACE (ops[1]))
4514 ops[1] = expand_ea_mem (ops[1], false);
4515 return spu_split_load (ops);
4516 }
eec9405e 4517
85d9c13c
TS
4518 return 0;
4519}
4520
eec9405e
TS
4521static void
4522spu_convert_move (rtx dst, rtx src)
85d9c13c 4523{
ef4bddc2
RS
4524 machine_mode mode = GET_MODE (dst);
4525 machine_mode int_mode = mode_for_size (GET_MODE_BITSIZE (mode), MODE_INT, 0);
eec9405e
TS
4526 rtx reg;
4527 gcc_assert (GET_MODE (src) == TImode);
4528 reg = int_mode != mode ? gen_reg_rtx (int_mode) : dst;
f7df4a84 4529 emit_insn (gen_rtx_SET (reg,
eec9405e
TS
4530 gen_rtx_TRUNCATE (int_mode,
4531 gen_rtx_LSHIFTRT (TImode, src,
4532 GEN_INT (int_mode == DImode ? 64 : 96)))));
4533 if (int_mode != mode)
4534 {
4535 reg = simplify_gen_subreg (mode, reg, int_mode, 0);
4536 emit_move_insn (dst, reg);
4537 }
4538}
85d9c13c 4539
eec9405e
TS
4540/* Load TImode values into DST0 and DST1 (when it is non-NULL) using
4541 the address from SRC and SRC+16. Return a REG or CONST_INT that
4542 specifies how many bytes to rotate the loaded registers, plus any
4543 extra from EXTRA_ROTQBY. The address and rotate amounts are
4544 normalized to improve merging of loads and rotate computations. */
4545static rtx
4546spu_expand_load (rtx dst0, rtx dst1, rtx src, int extra_rotby)
4547{
4548 rtx addr = XEXP (src, 0);
4549 rtx p0, p1, rot, addr0, addr1;
4550 int rot_amt;
85d9c13c
TS
4551
4552 rot = 0;
4553 rot_amt = 0;
eec9405e
TS
4554
4555 if (MEM_ALIGN (src) >= 128)
4556 /* Address is already aligned; simply perform a TImode load. */ ;
4557 else if (GET_CODE (addr) == PLUS)
85d9c13c
TS
4558 {
4559 /* 8 cases:
4560 aligned reg + aligned reg => lqx
4561 aligned reg + unaligned reg => lqx, rotqby
4562 aligned reg + aligned const => lqd
4563 aligned reg + unaligned const => lqd, rotqbyi
4564 unaligned reg + aligned reg => lqx, rotqby
4565 unaligned reg + unaligned reg => lqx, a, rotqby (1 scratch)
4567 unaligned reg + unaligned const => not allowed by legitimate address
4567 unaligned reg + unaligned const -> not allowed by legitimate address
4568 */
4569 p0 = XEXP (addr, 0);
4570 p1 = XEXP (addr, 1);
eec9405e 4571 if (!reg_aligned_for_addr (p0))
85d9c13c 4572 {
eec9405e 4573 if (REG_P (p1) && !reg_aligned_for_addr (p1))
85d9c13c 4574 {
eec9405e
TS
4575 rot = gen_reg_rtx (SImode);
4576 emit_insn (gen_addsi3 (rot, p0, p1));
4577 }
4578 else if (GET_CODE (p1) == CONST_INT && (INTVAL (p1) & 15))
4579 {
4580 if (INTVAL (p1) > 0
4581 && REG_POINTER (p0)
4582 && INTVAL (p1) * BITS_PER_UNIT
4583 < REGNO_POINTER_ALIGN (REGNO (p0)))
4584 {
4585 rot = gen_reg_rtx (SImode);
4586 emit_insn (gen_addsi3 (rot, p0, p1));
4587 addr = p0;
4588 }
4589 else
4590 {
4591 rtx x = gen_reg_rtx (SImode);
4592 emit_move_insn (x, p1);
4593 if (!spu_arith_operand (p1, SImode))
4594 p1 = x;
4595 rot = gen_reg_rtx (SImode);
4596 emit_insn (gen_addsi3 (rot, p0, p1));
4597 addr = gen_rtx_PLUS (Pmode, p0, x);
4598 }
85d9c13c
TS
4599 }
4600 else
4601 rot = p0;
4602 }
4603 else
4604 {
4605 if (GET_CODE (p1) == CONST_INT && (INTVAL (p1) & 15))
4606 {
4607 rot_amt = INTVAL (p1) & 15;
eec9405e
TS
4608 if (INTVAL (p1) & -16)
4609 {
4610 p1 = GEN_INT (INTVAL (p1) & -16);
4611 addr = gen_rtx_PLUS (SImode, p0, p1);
4612 }
4613 else
4614 addr = p0;
85d9c13c 4615 }
eec9405e 4616 else if (REG_P (p1) && !reg_aligned_for_addr (p1))
85d9c13c
TS
4617 rot = p1;
4618 }
4619 }
eec9405e 4620 else if (REG_P (addr))
85d9c13c 4621 {
eec9405e 4622 if (!reg_aligned_for_addr (addr))
85d9c13c
TS
4623 rot = addr;
4624 }
4625 else if (GET_CODE (addr) == CONST)
4626 {
4627 if (GET_CODE (XEXP (addr, 0)) == PLUS
4628 && ALIGNED_SYMBOL_REF_P (XEXP (XEXP (addr, 0), 0))
4629 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
4630 {
4631 rot_amt = INTVAL (XEXP (XEXP (addr, 0), 1));
4632 if (rot_amt & -16)
4633 addr = gen_rtx_CONST (Pmode,
4634 gen_rtx_PLUS (Pmode,
4635 XEXP (XEXP (addr, 0), 0),
4636 GEN_INT (rot_amt & -16)));
4637 else
4638 addr = XEXP (XEXP (addr, 0), 0);
4639 }
4640 else
eec9405e
TS
4641 {
4642 rot = gen_reg_rtx (Pmode);
4643 emit_move_insn (rot, addr);
4644 }
85d9c13c
TS
4645 }
4646 else if (GET_CODE (addr) == CONST_INT)
4647 {
4648 rot_amt = INTVAL (addr);
4649 addr = GEN_INT (rot_amt & -16);
4650 }
4651 else if (!ALIGNED_SYMBOL_REF_P (addr))
eec9405e
TS
4652 {
4653 rot = gen_reg_rtx (Pmode);
4654 emit_move_insn (rot, addr);
4655 }
85d9c13c 4656
eec9405e 4657 rot_amt += extra_rotby;
85d9c13c
TS
4658
4659 rot_amt &= 15;
4660
4661 if (rot && rot_amt)
4662 {
eec9405e
TS
4663 rtx x = gen_reg_rtx (SImode);
4664 emit_insn (gen_addsi3 (x, rot, GEN_INT (rot_amt)));
4665 rot = x;
85d9c13c
TS
4666 rot_amt = 0;
4667 }
eec9405e
TS
4668 if (!rot && rot_amt)
4669 rot = GEN_INT (rot_amt);
4670
4671 addr0 = copy_rtx (addr);
4672 addr0 = gen_rtx_AND (SImode, copy_rtx (addr), GEN_INT (-16));
4673 emit_insn (gen__movti (dst0, change_address (src, TImode, addr0)));
4674
4675 if (dst1)
4676 {
0a81f074 4677 addr1 = plus_constant (SImode, copy_rtx (addr), 16);
eec9405e
TS
4678 addr1 = gen_rtx_AND (SImode, addr1, GEN_INT (-16));
4679 emit_insn (gen__movti (dst1, change_address (src, TImode, addr1)));
4680 }
85d9c13c 4681
eec9405e
TS
4682 return rot;
4683}
4684
4685int
4686spu_split_load (rtx * ops)
4687{
ef4bddc2 4688 machine_mode mode = GET_MODE (ops[0]);
eec9405e
TS
4689 rtx addr, load, rot;
4690 int rot_amt;
85d9c13c 4691
eec9405e
TS
4692 if (GET_MODE_SIZE (mode) >= 16)
4693 return 0;
85d9c13c 4694
eec9405e
TS
4695 addr = XEXP (ops[1], 0);
4696 gcc_assert (GET_CODE (addr) != AND);
4697
4698 if (!address_needs_split (ops[1]))
4699 {
4700 ops[1] = change_address (ops[1], TImode, addr);
4701 load = gen_reg_rtx (TImode);
4702 emit_insn (gen__movti (load, ops[1]));
4703 spu_convert_move (ops[0], load);
4704 return 1;
4705 }
4706
4707 rot_amt = GET_MODE_SIZE (mode) < 4 ? GET_MODE_SIZE (mode) - 4 : 0;
4708
4709 load = gen_reg_rtx (TImode);
4710 rot = spu_expand_load (load, 0, ops[1], rot_amt);
85d9c13c
TS
4711
4712 if (rot)
4713 emit_insn (gen_rotqby_ti (load, load, rot));
85d9c13c 4714
eec9405e
TS
4715 spu_convert_move (ops[0], load);
4716 return 1;
85d9c13c
TS
4717}
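
/* Illustrative sketch (hypothetical helper, not used by the port): the
   effect of the lq + rotqby sequence emitted for an unaligned scalar load,
   assuming the access does not cross a 16-byte boundary (which legitimate
   addresses guarantee).  */
static void
split_load_sketch (const unsigned char *quad, int offset,
                   unsigned char *value, int size)
{
  int i;
  for (i = 0; i < size; i++)
    value[i] = quad[(offset + i) & 15];   /* rotate so the addressed bytes
                                             end up at byte 0 */
}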
4718
eec9405e 4719int
85d9c13c
TS
4720spu_split_store (rtx * ops)
4721{
ef4bddc2 4722 machine_mode mode = GET_MODE (ops[0]);
eec9405e 4723 rtx reg;
85d9c13c
TS
4724 rtx addr, p0, p1, p1_lo, smem;
4725 int aform;
4726 int scalar;
4727
eec9405e
TS
4728 if (GET_MODE_SIZE (mode) >= 16)
4729 return 0;
4730
85d9c13c 4731 addr = XEXP (ops[0], 0);
eec9405e
TS
4732 gcc_assert (GET_CODE (addr) != AND);
4733
4734 if (!address_needs_split (ops[0]))
4735 {
4736 reg = gen_reg_rtx (TImode);
4737 emit_insn (gen_spu_convert (reg, ops[1]));
4738 ops[0] = change_address (ops[0], TImode, addr);
4739 emit_move_insn (ops[0], reg);
4740 return 1;
4741 }
85d9c13c
TS
4742
4743 if (GET_CODE (addr) == PLUS)
4744 {
4745 /* 8 cases:
4746 aligned reg + aligned reg => lqx, c?x, shuf, stqx
4747 aligned reg + unaligned reg => lqx, c?x, shuf, stqx
4748 aligned reg + aligned const => lqd, c?d, shuf, stqx
4749 aligned reg + unaligned const => lqd, c?d, shuf, stqx
4750 unaligned reg + aligned reg => lqx, c?x, shuf, stqx
4751 unaligned reg + unaligned reg => lqx, c?x, shuf, stqx
4752 unaligned reg + aligned const => lqd, c?d, shuf, stqx
eec9405e 4753 unaligned reg + unaligned const -> lqx, c?d, shuf, stqx
85d9c13c
TS
4754 */
4755 aform = 0;
4756 p0 = XEXP (addr, 0);
4757 p1 = p1_lo = XEXP (addr, 1);
eec9405e 4758 if (REG_P (p0) && GET_CODE (p1) == CONST_INT)
85d9c13c
TS
4759 {
4760 p1_lo = GEN_INT (INTVAL (p1) & 15);
eec9405e
TS
4761 if (reg_aligned_for_addr (p0))
4762 {
4763 p1 = GEN_INT (INTVAL (p1) & -16);
4764 if (p1 == const0_rtx)
4765 addr = p0;
4766 else
4767 addr = gen_rtx_PLUS (SImode, p0, p1);
4768 }
4769 else
4770 {
4771 rtx x = gen_reg_rtx (SImode);
4772 emit_move_insn (x, p1);
4773 addr = gen_rtx_PLUS (SImode, p0, x);
4774 }
85d9c13c
TS
4775 }
4776 }
eec9405e 4777 else if (REG_P (addr))
85d9c13c
TS
4778 {
4779 aform = 0;
4780 p0 = addr;
4781 p1 = p1_lo = const0_rtx;
4782 }
4783 else
4784 {
4785 aform = 1;
4786 p0 = gen_rtx_REG (SImode, STACK_POINTER_REGNUM);
4787 p1 = 0; /* aform doesn't use p1 */
4788 p1_lo = addr;
4789 if (ALIGNED_SYMBOL_REF_P (addr))
4790 p1_lo = const0_rtx;
eec9405e
TS
4791 else if (GET_CODE (addr) == CONST
4792 && GET_CODE (XEXP (addr, 0)) == PLUS
4793 && ALIGNED_SYMBOL_REF_P (XEXP (XEXP (addr, 0), 0))
4794 && GET_CODE (XEXP (XEXP (addr, 0), 1)) == CONST_INT)
85d9c13c 4795 {
eec9405e
TS
4796 HOST_WIDE_INT v = INTVAL (XEXP (XEXP (addr, 0), 1));
4797 if ((v & -16) != 0)
4798 addr = gen_rtx_CONST (Pmode,
4799 gen_rtx_PLUS (Pmode,
4800 XEXP (XEXP (addr, 0), 0),
4801 GEN_INT (v & -16)));
4802 else
4803 addr = XEXP (XEXP (addr, 0), 0);
4804 p1_lo = GEN_INT (v & 15);
85d9c13c
TS
4805 }
4806 else if (GET_CODE (addr) == CONST_INT)
4807 {
4808 p1_lo = GEN_INT (INTVAL (addr) & 15);
4809 addr = GEN_INT (INTVAL (addr) & -16);
4810 }
eec9405e
TS
4811 else
4812 {
4813 p1_lo = gen_reg_rtx (SImode);
4814 emit_move_insn (p1_lo, addr);
4815 }
85d9c13c
TS
4816 }
4817
d707fc77 4818 gcc_assert (aform == 0 || aform == 1);
eec9405e 4819 reg = gen_reg_rtx (TImode);
09aad82b 4820
85d9c13c
TS
4821 scalar = store_with_one_insn_p (ops[0]);
4822 if (!scalar)
4823 {
4824 We could copy the flags from the ops[0] MEM to mem here.
4825 We don't because we want this load to be optimized away if
4826 possible, and copying the flags will prevent that in certain
4827 cases, e.g. consider the volatile flag. */
4828
eec9405e 4829 rtx pat = gen_reg_rtx (TImode);
09aad82b
TS
4830 rtx lmem = change_address (ops[0], TImode, copy_rtx (addr));
4831 set_mem_alias_set (lmem, 0);
4832 emit_insn (gen_movti (reg, lmem));
85d9c13c 4833
eec9405e 4834 if (!p0 || reg_aligned_for_addr (p0))
85d9c13c
TS
4835 p0 = stack_pointer_rtx;
4836 if (!p1_lo)
4837 p1_lo = const0_rtx;
4838
4839 emit_insn (gen_cpat (pat, p0, p1_lo, GEN_INT (GET_MODE_SIZE (mode))));
4840 emit_insn (gen_shufb (reg, ops[1], reg, pat));
4841 }
85d9c13c
TS
4842 else
4843 {
4844 if (GET_CODE (ops[1]) == REG)
4845 emit_insn (gen_spu_convert (reg, ops[1]));
4846 else if (GET_CODE (ops[1]) == SUBREG)
4847 emit_insn (gen_spu_convert (reg, SUBREG_REG (ops[1])));
4848 else
4849 abort ();
4850 }
4851
4852 if (GET_MODE_SIZE (mode) < 4 && scalar)
eec9405e
TS
4853 emit_insn (gen_ashlti3
4854 (reg, reg, GEN_INT (32 - GET_MODE_BITSIZE (mode))));
85d9c13c 4855
eec9405e 4856 smem = change_address (ops[0], TImode, copy_rtx (addr));
85d9c13c
TS
4857 /* We can't use the previous alias set because the memory has changed
4858 size and can potentially overlap objects of other types. */
4859 set_mem_alias_set (smem, 0);
4860
09aad82b 4861 emit_insn (gen_movti (smem, reg));
eec9405e 4862 return 1;
85d9c13c
TS
4863}
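
/* Illustrative sketch (hypothetical helper, not used by the port): the
   read-modify-write that the lq + cpat + shufb + stq sequence above
   performs for a store narrower than 16 bytes.  */
static void
split_store_sketch (unsigned char *quad, int offset,
                    const unsigned char *value, int size)
{
  unsigned char tmp[16];
  int i;
  for (i = 0; i < 16; i++)
    tmp[i] = quad[i];                  /* lq: fetch the whole quadword */
  for (i = 0; i < size; i++)
    tmp[(offset + i) & 15] = value[i]; /* cpat + shufb: insert new bytes */
  for (i = 0; i < 16; i++)
    quad[i] = tmp[i];                  /* stq: write the quadword back */
}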
4864
4865/* Return TRUE if X is MEM which is a struct member reference
4866 and the member can safely be loaded and stored with a single
4867 instruction because it is padded. */
4868static int
4869mem_is_padded_component_ref (rtx x)
4870{
4871 tree t = MEM_EXPR (x);
4872 tree r;
4873 if (!t || TREE_CODE (t) != COMPONENT_REF)
4874 return 0;
4875 t = TREE_OPERAND (t, 1);
4876 if (!t || TREE_CODE (t) != FIELD_DECL
4877 || DECL_ALIGN (t) < 128 || AGGREGATE_TYPE_P (TREE_TYPE (t)))
4878 return 0;
4879 /* Only do this for RECORD_TYPEs, not UNION_TYPEs. */
4880 r = DECL_FIELD_CONTEXT (t);
4881 if (!r || TREE_CODE (r) != RECORD_TYPE)
4882 return 0;
4883 /* Make sure they are the same mode. */
4884 if (GET_MODE (x) != TYPE_MODE (TREE_TYPE (t)))
4885 return 0;
4886 /* If there are no following fields, then the field alignment assures
2f8e468b
KH
4887 the structure is padded to that alignment, which means this field is
4888 padded too. */
85d9c13c
TS
4889 if (TREE_CHAIN (t) == 0)
4890 return 1;
4891 /* If the following field is also aligned then this field will be
4892 padded. */
4893 t = TREE_CHAIN (t);
4894 if (TREE_CODE (t) == FIELD_DECL && DECL_ALIGN (t) >= 128)
4895 return 1;
4896 return 0;
4897}
4898
32fb22af
SL
4899/* Parse the -mfixed-range= option string. */
4900static void
4901fix_range (const char *const_str)
4902{
4903 int i, first, last;
4904 char *str, *dash, *comma;
4905
4906 /* str must be of the form REG1'-'REG2{,REG1'-'REG2} where REG1 and
4907 REG2 are either register names or register numbers. The effect
4908 of this option is to mark the registers in the range from REG1 to
4909 REG2 as ``fixed'' so they won't be used by the compiler. */
4910
4911 i = strlen (const_str);
4912 str = (char *) alloca (i + 1);
4913 memcpy (str, const_str, i + 1);
4914
4915 while (1)
4916 {
4917 dash = strchr (str, '-');
4918 if (!dash)
4919 {
4920 warning (0, "value of -mfixed-range must have form REG1-REG2");
4921 return;
4922 }
4923 *dash = '\0';
4924 comma = strchr (dash + 1, ',');
4925 if (comma)
4926 *comma = '\0';
4927
4928 first = decode_reg_name (str);
4929 if (first < 0)
4930 {
4931 warning (0, "unknown register name: %s", str);
4932 return;
4933 }
4934
4935 last = decode_reg_name (dash + 1);
4936 if (last < 0)
4937 {
4938 warning (0, "unknown register name: %s", dash + 1);
4939 return;
4940 }
4941
4942 *dash = '-';
4943
4944 if (first > last)
4945 {
4946 warning (0, "%s-%s is an empty range", str, dash + 1);
4947 return;
4948 }
4949
4950 for (i = first; i <= last; ++i)
4951 fixed_regs[i] = call_used_regs[i] = 1;
4952
4953 if (!comma)
4954 break;
4955
4956 *comma = ',';
4957 str = comma + 1;
4958 }
4959}
4960
85d9c13c
TS
4961/* Return TRUE if x is a CONST_INT, CONST_DOUBLE or CONST_VECTOR that
4962 can be generated using the fsmbi instruction. */
4963int
4964fsmbi_const_p (rtx x)
4965{
a1c6e4b8
TS
4966 if (CONSTANT_P (x))
4967 {
73701e27 4968 /* We can always choose TImode for CONST_INT because the high bits
a1c6e4b8 4969 of an SImode will always be all 1s, i.e., valid for fsmbi. */
73701e27 4970 enum immediate_class c = classify_immediate (x, TImode);
6fb5fa3c 4971 return c == IC_FSMBI || (!epilogue_completed && c == IC_FSMBI2);
a1c6e4b8
TS
4972 }
4973 return 0;
4974}
4975
4976/* Return TRUE if x is a CONST_INT, CONST_DOUBLE or CONST_VECTOR that
4977 can be generated using the cbd, chd, cwd or cdd instruction. */
4978int
ef4bddc2 4979cpat_const_p (rtx x, machine_mode mode)
a1c6e4b8
TS
4980{
4981 if (CONSTANT_P (x))
4982 {
4983 enum immediate_class c = classify_immediate (x, mode);
4984 return c == IC_CPAT;
4985 }
4986 return 0;
4987}
85d9c13c 4988
a1c6e4b8
TS
4989rtx
4990gen_cpat_const (rtx * ops)
4991{
4992 unsigned char dst[16];
4993 int i, offset, shift, isize;
4994 if (GET_CODE (ops[3]) != CONST_INT
4995 || GET_CODE (ops[2]) != CONST_INT
4996 || (GET_CODE (ops[1]) != CONST_INT
4997 && GET_CODE (ops[1]) != REG))
4998 return 0;
4999 if (GET_CODE (ops[1]) == REG
5000 && (!REG_POINTER (ops[1])
5001 || REGNO_POINTER_ALIGN (ORIGINAL_REGNO (ops[1])) < 128))
5002 return 0;
85d9c13c
TS
5003
5004 for (i = 0; i < 16; i++)
a1c6e4b8
TS
5005 dst[i] = i + 16;
5006 isize = INTVAL (ops[3]);
5007 if (isize == 1)
5008 shift = 3;
5009 else if (isize == 2)
5010 shift = 2;
5011 else
5012 shift = 0;
5013 offset = (INTVAL (ops[2]) +
5014 (GET_CODE (ops[1]) ==
5015 CONST_INT ? INTVAL (ops[1]) : 0)) & 15;
5016 for (i = 0; i < isize; i++)
5017 dst[offset + i] = i + shift;
5018 return array_to_constant (TImode, dst);
85d9c13c
TS
5019}
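
/* Illustrative sketch (hypothetical helper, not used by the port): the
   shuffle-pattern bytes built by gen_cpat_const.  Bytes 16..31 select the
   old quadword unchanged; the scalar's bytes are punched in at OFFSET.  */
static void
cpat_bytes_sketch (int offset, int isize, unsigned char dst[16])
{
  int i, shift = (isize == 1) ? 3 : (isize == 2) ? 2 : 0;
  for (i = 0; i < 16; i++)
    dst[i] = i + 16;                 /* pass the old quadword through */
  for (i = 0; i < isize; i++)
    dst[offset + i] = i + shift;     /* insert the new scalar's bytes */
}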
5020
5021/* Convert a CONST_INT, CONST_DOUBLE, or CONST_VECTOR into a 16 byte
5022 array. Use MODE for CONST_INTs. When the constant's mode is smaller
5023 than 16 bytes, the value is repeated across the rest of the array. */
5024void
ef4bddc2 5025constant_to_array (machine_mode mode, rtx x, unsigned char arr[16])
85d9c13c
TS
5026{
5027 HOST_WIDE_INT val;
5028 int i, j, first;
5029
5030 memset (arr, 0, 16);
5031 mode = GET_MODE (x) != VOIDmode ? GET_MODE (x) : mode;
5032 if (GET_CODE (x) == CONST_INT
5033 || (GET_CODE (x) == CONST_DOUBLE
5034 && (mode == SFmode || mode == DFmode)))
5035 {
5036 gcc_assert (mode != VOIDmode && mode != BLKmode);
5037
5038 if (GET_CODE (x) == CONST_DOUBLE)
5039 val = const_double_to_hwint (x);
5040 else
5041 val = INTVAL (x);
5042 first = GET_MODE_SIZE (mode) - 1;
5043 for (i = first; i >= 0; i--)
5044 {
5045 arr[i] = val & 0xff;
5046 val >>= 8;
5047 }
5048 /* Splat the constant across the whole array. */
5049 for (j = 0, i = first + 1; i < 16; i++)
5050 {
5051 arr[i] = arr[j];
5052 j = (j == first) ? 0 : j + 1;
5053 }
5054 }
5055 else if (GET_CODE (x) == CONST_DOUBLE)
5056 {
5057 val = CONST_DOUBLE_LOW (x);
5058 for (i = 15; i >= 8; i--)
5059 {
5060 arr[i] = val & 0xff;
5061 val >>= 8;
5062 }
5063 val = CONST_DOUBLE_HIGH (x);
5064 for (i = 7; i >= 0; i--)
5065 {
5066 arr[i] = val & 0xff;
5067 val >>= 8;
5068 }
5069 }
5070 else if (GET_CODE (x) == CONST_VECTOR)
5071 {
5072 int units;
5073 rtx elt;
5074 mode = GET_MODE_INNER (mode);
5075 units = CONST_VECTOR_NUNITS (x);
5076 for (i = 0; i < units; i++)
5077 {
5078 elt = CONST_VECTOR_ELT (x, i);
5079 if (GET_CODE (elt) == CONST_INT || GET_CODE (elt) == CONST_DOUBLE)
5080 {
5081 if (GET_CODE (elt) == CONST_DOUBLE)
5082 val = const_double_to_hwint (elt);
5083 else
5084 val = INTVAL (elt);
5085 first = GET_MODE_SIZE (mode) - 1;
5086 if (first + i * GET_MODE_SIZE (mode) > 16)
5087 abort ();
5088 for (j = first; j >= 0; j--)
5089 {
5090 arr[j + i * GET_MODE_SIZE (mode)] = val & 0xff;
5091 val >>= 8;
5092 }
5093 }
5094 }
5095 }
5096 else
5097 gcc_unreachable();
5098}
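
/* Illustrative sketch (hypothetical helper, not used by the port): the
   CONST_INT path of constant_to_array.  E.g. a 4-byte value 0x01020304
   becomes 01 02 03 04 repeated four times across the 16-byte array.  */
static void
int_to_array_sketch (int size, unsigned long long val, unsigned char arr[16])
{
  int i, j, first = size - 1;
  for (i = first; i >= 0; i--)
    {
      arr[i] = val & 0xff;           /* big-endian: MSB lands in arr[0] */
      val >>= 8;
    }
  for (j = 0, i = first + 1; i < 16; i++)
    {
      arr[i] = arr[j];               /* splat across the rest of the array */
      j = (j == first) ? 0 : j + 1;
    }
}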
5099
5100/* Convert a 16 byte array to a constant of mode MODE. When MODE is
5101 smaller than 16 bytes, use the bytes that would represent that value
5102 in a register, e.g., for QImode return the value of arr[3]. */
5103rtx
ef4bddc2 5104array_to_constant (machine_mode mode, const unsigned char arr[16])
85d9c13c 5105{
ef4bddc2 5106 machine_mode inner_mode;
85d9c13c
TS
5107 rtvec v;
5108 int units, size, i, j, k;
5109 HOST_WIDE_INT val;
5110
5111 if (GET_MODE_CLASS (mode) == MODE_INT
5112 && GET_MODE_BITSIZE (mode) <= HOST_BITS_PER_WIDE_INT)
5113 {
5114 j = GET_MODE_SIZE (mode);
5115 i = j < 4 ? 4 - j : 0;
5116 for (val = 0; i < j; i++)
5117 val = (val << 8) | arr[i];
5118 val = trunc_int_for_mode (val, mode);
5119 return GEN_INT (val);
5120 }
5121
5122 if (mode == TImode)
5123 {
5124 HOST_WIDE_INT high;
5125 for (i = high = 0; i < 8; i++)
5126 high = (high << 8) | arr[i];
5127 for (i = 8, val = 0; i < 16; i++)
5128 val = (val << 8) | arr[i];
5129 return immed_double_const (val, high, TImode);
5130 }
5131 if (mode == SFmode)
5132 {
5133 val = (arr[0] << 24) | (arr[1] << 16) | (arr[2] << 8) | arr[3];
5134 val = trunc_int_for_mode (val, SImode);
9dc5f9ba 5135 return hwint_to_const_double (SFmode, val);
85d9c13c
TS
5136 }
5137 if (mode == DFmode)
5138 {
e41e2ab4
UW
5139 for (i = 0, val = 0; i < 8; i++)
5140 val = (val << 8) | arr[i];
9dc5f9ba 5141 return hwint_to_const_double (DFmode, val);
85d9c13c
TS
5142 }
5143
5144 if (!VECTOR_MODE_P (mode))
5145 abort ();
5146
5147 units = GET_MODE_NUNITS (mode);
5148 size = GET_MODE_UNIT_SIZE (mode);
5149 inner_mode = GET_MODE_INNER (mode);
5150 v = rtvec_alloc (units);
5151
5152 for (k = i = 0; i < units; ++i)
5153 {
5154 val = 0;
5155 for (j = 0; j < size; j++, k++)
5156 val = (val << 8) | arr[k];
5157
5158 if (GET_MODE_CLASS (inner_mode) == MODE_FLOAT)
5159 RTVEC_ELT (v, i) = hwint_to_const_double (inner_mode, val);
5160 else
5161 RTVEC_ELT (v, i) = GEN_INT (trunc_int_for_mode (val, inner_mode));
5162 }
5163 if (k > 16)
5164 abort ();
5165
5166 return gen_rtx_CONST_VECTOR (mode, v);
5167}
5168
5169static void
5170reloc_diagnostic (rtx x)
5171{
c5d75364 5172 tree decl = 0;
85d9c13c
TS
5173 if (!flag_pic || !(TARGET_WARN_RELOC || TARGET_ERROR_RELOC))
5174 return;
5175
5176 if (GET_CODE (x) == SYMBOL_REF)
5177 decl = SYMBOL_REF_DECL (x);
5178 else if (GET_CODE (x) == CONST
5179 && GET_CODE (XEXP (XEXP (x, 0), 0)) == SYMBOL_REF)
5180 decl = SYMBOL_REF_DECL (XEXP (XEXP (x, 0), 0));
5181
5182 /* SYMBOL_REF_DECL is not necessarily a DECL. */
5183 if (decl && !DECL_P (decl))
5184 decl = 0;
5185
85d9c13c
TS
5186 /* The decl could be a string constant. */
5187 if (decl && DECL_P (decl))
c5d75364
MLI
5188 {
5189 location_t loc;
5190 /* We use last_assemble_variable_decl to get line information. It's
5191 not always going to be right and might not even be close, but will
5192 be right for the more common cases. */
5193 if (!last_assemble_variable_decl || in_section == ctors_section)
5194 loc = DECL_SOURCE_LOCATION (decl);
5195 else
5196 loc = DECL_SOURCE_LOCATION (last_assemble_variable_decl);
85d9c13c 5197
c5d75364
MLI
5198 if (TARGET_WARN_RELOC)
5199 warning_at (loc, 0,
5200 "creating run-time relocation for %qD", decl);
5201 else
5202 error_at (loc,
5203 "creating run-time relocation for %qD", decl);
5204 }
5205 else
5206 {
5207 if (TARGET_WARN_RELOC)
5208 warning_at (input_location, 0, "creating run-time relocation");
5209 else
5210 error_at (input_location, "creating run-time relocation");
5211 }
85d9c13c
TS
5212}
5213
5214/* Hook into assemble_integer so we can generate an error for run-time
5215 relocations. The SPU ABI disallows them. */
5216static bool
5217spu_assemble_integer (rtx x, unsigned int size, int aligned_p)
5218{
5219 /* By default run-time relocations aren't supported, but we allow them
5220 in case users support them in their own run-time loader. We provide
5221 a warning for those users who don't. */
5222 if ((GET_CODE (x) == SYMBOL_REF)
5223 || GET_CODE (x) == LABEL_REF || GET_CODE (x) == CONST)
5224 reloc_diagnostic (x);
5225
5226 return default_assemble_integer (x, size, aligned_p);
5227}
5228
5229static void
5230spu_asm_globalize_label (FILE * file, const char *name)
5231{
5232 fputs ("\t.global\t", file);
5233 assemble_name (file, name);
5234 fputs ("\n", file);
5235}
5236
5237static bool
e548c9df 5238spu_rtx_costs (rtx x, machine_mode mode, int outer_code ATTRIBUTE_UNUSED,
68f932c4 5239 int opno ATTRIBUTE_UNUSED, int *total,
f40751dd 5240 bool speed ATTRIBUTE_UNUSED)
85d9c13c 5241{
e548c9df 5242 int code = GET_CODE (x);
85d9c13c
TS
5243 int cost = COSTS_N_INSNS (2);
5244
5245 /* Folding to a CONST_VECTOR will use extra space but there might
5246 be only a small savings in cycles. We'd like to use a CONST_VECTOR
9fc4da9d 5247 only if it allows us to fold away multiple insns. Changing the cost
85d9c13c
TS
5248 of a CONST_VECTOR here (or in CONST_COSTS) doesn't help though
5249 because this cost will only be compared against a single insn.
5250 if (code == CONST_VECTOR)
1a627b35 5251 return spu_legitimate_constant_p (mode, x) ? cost : COSTS_N_INSNS (6);
85d9c13c
TS
5252 */
5253
5254 /* Use defaults for float operations. Not accurate but good enough. */
5255 if (mode == DFmode)
5256 {
5257 *total = COSTS_N_INSNS (13);
5258 return true;
5259 }
5260 if (mode == SFmode)
5261 {
5262 *total = COSTS_N_INSNS (6);
5263 return true;
5264 }
5265 switch (code)
5266 {
5267 case CONST_INT:
5268 if (satisfies_constraint_K (x))
5269 *total = 0;
5270 else if (INTVAL (x) >= -0x80000000ll && INTVAL (x) <= 0xffffffffll)
5271 *total = COSTS_N_INSNS (1);
5272 else
5273 *total = COSTS_N_INSNS (3);
5274 return true;
5275
5276 case CONST:
5277 *total = COSTS_N_INSNS (3);
5278 return true;
5279
5280 case LABEL_REF:
5281 case SYMBOL_REF:
5282 *total = COSTS_N_INSNS (0);
5283 return true;
5284
5285 case CONST_DOUBLE:
5286 *total = COSTS_N_INSNS (5);
5287 return true;
5288
5289 case FLOAT_EXTEND:
5290 case FLOAT_TRUNCATE:
5291 case FLOAT:
5292 case UNSIGNED_FLOAT:
5293 case FIX:
5294 case UNSIGNED_FIX:
5295 *total = COSTS_N_INSNS (7);
5296 return true;
5297
5298 case PLUS:
5299 if (mode == TImode)
5300 {
5301 *total = COSTS_N_INSNS (9);
5302 return true;
5303 }
5304 break;
5305
5306 case MULT:
5307 cost =
5308 GET_CODE (XEXP (x, 0)) ==
5309 REG ? COSTS_N_INSNS (12) : COSTS_N_INSNS (7);
5310 if (mode == SImode && GET_CODE (XEXP (x, 0)) == REG)
5311 {
5312 if (GET_CODE (XEXP (x, 1)) == CONST_INT)
5313 {
5314 HOST_WIDE_INT val = INTVAL (XEXP (x, 1));
5315 cost = COSTS_N_INSNS (14);
5316 if ((val & 0xffff) == 0)
5317 cost = COSTS_N_INSNS (9);
5318 else if (val > 0 && val < 0x10000)
5319 cost = COSTS_N_INSNS (11);
5320 }
5321 }
5322 *total = cost;
5323 return true;
5324 case DIV:
5325 case UDIV:
5326 case MOD:
5327 case UMOD:
5328 *total = COSTS_N_INSNS (20);
5329 return true;
5330 case ROTATE:
5331 case ROTATERT:
5332 case ASHIFT:
5333 case ASHIFTRT:
5334 case LSHIFTRT:
5335 *total = COSTS_N_INSNS (4);
5336 return true;
5337 case UNSPEC:
5338 if (XINT (x, 1) == UNSPEC_CONVERT)
5339 *total = COSTS_N_INSNS (0);
5340 else
5341 *total = COSTS_N_INSNS (4);
5342 return true;
5343 }
5344 /* Scale cost by mode size. Except when initializing (cfun->decl == 0). */
5345 if (GET_MODE_CLASS (mode) == MODE_INT
5346 && GET_MODE_SIZE (mode) > GET_MODE_SIZE (SImode) && cfun && cfun->decl)
5347 cost = cost * (GET_MODE_SIZE (mode) / GET_MODE_SIZE (SImode))
5348 * (GET_MODE_SIZE (mode) / GET_MODE_SIZE (SImode));
5349 *total = cost;
5350 return true;
5351}
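
/* Illustrative sketch (hypothetical helper, not used by the port): the
   final scaling in spu_rtx_costs.  Integer operations wider than SImode
   are scaled quadratically by the number of 32-bit words, so a DImode op
   costs 4x and a TImode op 16x its SImode cost.  */
static int
scale_cost_sketch (int cost, int mode_size)
{
  if (mode_size > 4)
    cost = cost * (mode_size / 4) * (mode_size / 4);
  return cost;
}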
5352
ef4bddc2 5353static machine_mode
7b0518e3 5354spu_unwind_word_mode (void)
85d9c13c 5355{
7b0518e3 5356 return SImode;
85d9c13c
TS
5357}
5358
5359/* Decide whether we can make a sibling call to a function. DECL is the
5360 declaration of the function being targeted by the call and EXP is the
5361 CALL_EXPR representing the call. */
5362static bool
5363spu_function_ok_for_sibcall (tree decl, tree exp ATTRIBUTE_UNUSED)
5364{
5365 return decl && !TARGET_LARGE_MEM;
5366}
5367
5368/* We need to correctly update the back chain pointer and the Available
5369 Stack Size (which is in the second slot of the sp register). */
5370void
5371spu_allocate_stack (rtx op0, rtx op1)
5372{
5373 HOST_WIDE_INT v;
5374 rtx chain = gen_reg_rtx (V4SImode);
5375 rtx stack_bot = gen_frame_mem (V4SImode, stack_pointer_rtx);
5376 rtx sp = gen_reg_rtx (V4SImode);
5377 rtx splatted = gen_reg_rtx (V4SImode);
5378 rtx pat = gen_reg_rtx (TImode);
5379
5380 /* copy the back chain so we can save it back again. */
5381 emit_move_insn (chain, stack_bot);
5382
5383 op1 = force_reg (SImode, op1);
5384
5385 v = 0x1020300010203ll;
5386 emit_move_insn (pat, immed_double_const (v, v, TImode));
5387 emit_insn (gen_shufb (splatted, op1, op1, pat));
5388
5389 emit_insn (gen_spu_convert (sp, stack_pointer_rtx));
5390 emit_insn (gen_subv4si3 (sp, sp, splatted));
5391
5392 if (flag_stack_check)
5393 {
5394 rtx avail = gen_reg_rtx(SImode);
5395 rtx result = gen_reg_rtx(SImode);
5396 emit_insn (gen_vec_extractv4si (avail, sp, GEN_INT (1)));
5397 emit_insn (gen_cgt_si(result, avail, GEN_INT (-1)));
5398 emit_insn (gen_spu_heq (result, GEN_INT(0) ));
5399 }
5400
5401 emit_insn (gen_spu_convert (stack_pointer_rtx, sp));
5402
5403 emit_move_insn (stack_bot, chain);
5404
5405 emit_move_insn (op0, virtual_stack_dynamic_rtx);
5406}
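
/* Illustrative sketch (hypothetical helper, not used by the port): the
   run-time effect spu_allocate_stack arranges.  The stack pointer (slot 0)
   and the Available Stack Size (slot 1) both drop by the allocation size,
   and the back-chain quadword is copied down to the new stack bottom.  */
struct spu_stack_regs_sketch { int sp; int avail; };

static void
allocate_stack_sketch (struct spu_stack_regs_sketch *r1,
                       unsigned char back_chain[16],
                       unsigned char *memory, int size)
{
  int i;
  for (i = 0; i < 16; i++)
    back_chain[i] = memory[r1->sp + i];   /* save the old back chain */
  r1->sp -= size;                         /* subv4si3 of the splatted size */
  r1->avail -= size;
  for (i = 0; i < 16; i++)
    memory[r1->sp + i] = back_chain[i];   /* re-store it at the new bottom */
}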
5407
5408void
5409spu_restore_stack_nonlocal (rtx op0 ATTRIBUTE_UNUSED, rtx op1)
5410{
5411 static unsigned char arr[16] =
5412 { 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3 };
5413 rtx temp = gen_reg_rtx (SImode);
5414 rtx temp2 = gen_reg_rtx (SImode);
5415 rtx temp3 = gen_reg_rtx (V4SImode);
5416 rtx temp4 = gen_reg_rtx (V4SImode);
5417 rtx pat = gen_reg_rtx (TImode);
5418 rtx sp = gen_rtx_REG (V4SImode, STACK_POINTER_REGNUM);
5419
5420 /* Restore the backchain from the first word, sp from the second. */
5421 emit_move_insn (temp2, adjust_address_nv (op1, SImode, 0));
5422 emit_move_insn (temp, adjust_address_nv (op1, SImode, 4));
5423
5424 emit_move_insn (pat, array_to_constant (TImode, arr));
5425
5426 /* Compute Available Stack Size for sp */
5427 emit_insn (gen_subsi3 (temp, temp, stack_pointer_rtx));
5428 emit_insn (gen_shufb (temp3, temp, temp, pat));
5429
5430 /* Compute Available Stack Size for back chain */
5431 emit_insn (gen_subsi3 (temp2, temp2, stack_pointer_rtx));
5432 emit_insn (gen_shufb (temp4, temp2, temp2, pat));
5433 emit_insn (gen_addv4si3 (temp4, sp, temp4));
5434
5435 emit_insn (gen_addv4si3 (sp, sp, temp3));
5436 emit_move_insn (gen_frame_mem (V4SImode, stack_pointer_rtx), temp4);
5437}
5438
5439static void
5440spu_init_libfuncs (void)
5441{
5442 set_optab_libfunc (smul_optab, DImode, "__muldi3");
5443 set_optab_libfunc (sdiv_optab, DImode, "__divdi3");
5444 set_optab_libfunc (smod_optab, DImode, "__moddi3");
5445 set_optab_libfunc (udiv_optab, DImode, "__udivdi3");
5446 set_optab_libfunc (umod_optab, DImode, "__umoddi3");
5447 set_optab_libfunc (udivmod_optab, DImode, "__udivmoddi4");
5448 set_optab_libfunc (ffs_optab, DImode, "__ffsdi2");
5449 set_optab_libfunc (clz_optab, DImode, "__clzdi2");
5450 set_optab_libfunc (ctz_optab, DImode, "__ctzdi2");
4dfe3ad5 5451 set_optab_libfunc (clrsb_optab, DImode, "__clrsbdi2");
85d9c13c
TS
5452 set_optab_libfunc (popcount_optab, DImode, "__popcountdi2");
5453 set_optab_libfunc (parity_optab, DImode, "__paritydi2");
5454
5455 set_conv_libfunc (ufloat_optab, DFmode, SImode, "__float_unssidf");
5456 set_conv_libfunc (ufloat_optab, DFmode, DImode, "__float_unsdidf");
9bf85028 5457
b46ae6da
UW
5458 set_optab_libfunc (addv_optab, SImode, "__addvsi3");
5459 set_optab_libfunc (subv_optab, SImode, "__subvsi3");
5460 set_optab_libfunc (smulv_optab, SImode, "__mulvsi3");
5461 set_optab_libfunc (sdivv_optab, SImode, "__divvsi3");
5462 set_optab_libfunc (negv_optab, SImode, "__negvsi2");
5463 set_optab_libfunc (absv_optab, SImode, "__absvsi2");
5464 set_optab_libfunc (addv_optab, DImode, "__addvdi3");
5465 set_optab_libfunc (subv_optab, DImode, "__subvdi3");
5466 set_optab_libfunc (smulv_optab, DImode, "__mulvdi3");
5467 set_optab_libfunc (sdivv_optab, DImode, "__divvdi3");
5468 set_optab_libfunc (negv_optab, DImode, "__negvdi2");
5469 set_optab_libfunc (absv_optab, DImode, "__absvdi2");
5470
9bf85028
TS
5471 set_optab_libfunc (smul_optab, TImode, "__multi3");
5472 set_optab_libfunc (sdiv_optab, TImode, "__divti3");
5473 set_optab_libfunc (smod_optab, TImode, "__modti3");
5474 set_optab_libfunc (udiv_optab, TImode, "__udivti3");
5475 set_optab_libfunc (umod_optab, TImode, "__umodti3");
5476 set_optab_libfunc (udivmod_optab, TImode, "__udivmodti4");
85d9c13c
TS
5477}
5478
5479/* Make a subreg, stripping any existing subreg. We could possibly just
5480 call simplify_subreg, but in this case we know what we want. */
5481rtx
ef4bddc2 5482spu_gen_subreg (machine_mode mode, rtx x)
85d9c13c
TS
5483{
5484 if (GET_CODE (x) == SUBREG)
5485 x = SUBREG_REG (x);
5486 if (GET_MODE (x) == mode)
5487 return x;
5488 return gen_rtx_SUBREG (mode, x, 0);
5489}
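/* For example, spu_gen_subreg (TImode, <reg:V4SI 123>) simply returns
   (subreg:TI (reg:V4SI 123) 0); any SUBREG already wrapping the operand is
   stripped first so we never build nested subregs.  */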
5490
5491static bool
586de218 5492spu_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
85d9c13c
TS
5493{
5494 return (TYPE_MODE (type) == BLKmode
5495 && ((type) == 0
5496 || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST
5497 || int_size_in_bytes (type) >
5498 (MAX_REGISTER_RETURN * UNITS_PER_WORD)));
5499}
5500\f
5501/* Create the built-in types and functions */
5502
4a3a2376
UW
5503enum spu_function_code
5504{
5505#define DEF_BUILTIN(fcode, icode, name, type, params) fcode,
5506#include "spu-builtins.def"
5507#undef DEF_BUILTIN
5508 NUM_SPU_BUILTINS
5509};
5510
5511extern GTY(()) struct spu_builtin_description spu_builtins[NUM_SPU_BUILTINS];
5512
85d9c13c
TS
5513struct spu_builtin_description spu_builtins[] = {
5514#define DEF_BUILTIN(fcode, icode, name, type, params) \
8dc9f5bd 5515 {fcode, icode, name, type, params},
85d9c13c
TS
5516#include "spu-builtins.def"
5517#undef DEF_BUILTIN
5518};
5519
8dc9f5bd
UW
5520static GTY(()) tree spu_builtin_decls[NUM_SPU_BUILTINS];
5521
5522/* Returns the spu builtin decl for CODE. */
2c93399f
AP
5523
5524static tree
5525spu_builtin_decl (unsigned code, bool initialize_p ATTRIBUTE_UNUSED)
5526{
5527 if (code >= NUM_SPU_BUILTINS)
5528 return error_mark_node;
5529
8dc9f5bd 5530 return spu_builtin_decls[code];
2c93399f
AP
5531}
5532
5533
85d9c13c
TS
5534static void
5535spu_init_builtins (void)
5536{
5537 struct spu_builtin_description *d;
5538 unsigned int i;
5539
5540 V16QI_type_node = build_vector_type (intQI_type_node, 16);
5541 V8HI_type_node = build_vector_type (intHI_type_node, 8);
5542 V4SI_type_node = build_vector_type (intSI_type_node, 4);
5543 V2DI_type_node = build_vector_type (intDI_type_node, 2);
5544 V4SF_type_node = build_vector_type (float_type_node, 4);
5545 V2DF_type_node = build_vector_type (double_type_node, 2);
5546
5547 unsigned_V16QI_type_node = build_vector_type (unsigned_intQI_type_node, 16);
5548 unsigned_V8HI_type_node = build_vector_type (unsigned_intHI_type_node, 8);
5549 unsigned_V4SI_type_node = build_vector_type (unsigned_intSI_type_node, 4);
5550 unsigned_V2DI_type_node = build_vector_type (unsigned_intDI_type_node, 2);
5551
60c9cf8d 5552 spu_builtin_types[SPU_BTI_QUADWORD] = V16QI_type_node;
85d9c13c
TS
5553
5554 spu_builtin_types[SPU_BTI_7] = global_trees[TI_INTSI_TYPE];
5555 spu_builtin_types[SPU_BTI_S7] = global_trees[TI_INTSI_TYPE];
5556 spu_builtin_types[SPU_BTI_U7] = global_trees[TI_INTSI_TYPE];
5557 spu_builtin_types[SPU_BTI_S10] = global_trees[TI_INTSI_TYPE];
5558 spu_builtin_types[SPU_BTI_S10_4] = global_trees[TI_INTSI_TYPE];
5559 spu_builtin_types[SPU_BTI_U14] = global_trees[TI_INTSI_TYPE];
5560 spu_builtin_types[SPU_BTI_16] = global_trees[TI_INTSI_TYPE];
5561 spu_builtin_types[SPU_BTI_S16] = global_trees[TI_INTSI_TYPE];
5562 spu_builtin_types[SPU_BTI_S16_2] = global_trees[TI_INTSI_TYPE];
5563 spu_builtin_types[SPU_BTI_U16] = global_trees[TI_INTSI_TYPE];
5564 spu_builtin_types[SPU_BTI_U16_2] = global_trees[TI_INTSI_TYPE];
5565 spu_builtin_types[SPU_BTI_U18] = global_trees[TI_INTSI_TYPE];
5566
5567 spu_builtin_types[SPU_BTI_INTQI] = global_trees[TI_INTQI_TYPE];
5568 spu_builtin_types[SPU_BTI_INTHI] = global_trees[TI_INTHI_TYPE];
5569 spu_builtin_types[SPU_BTI_INTSI] = global_trees[TI_INTSI_TYPE];
5570 spu_builtin_types[SPU_BTI_INTDI] = global_trees[TI_INTDI_TYPE];
5571 spu_builtin_types[SPU_BTI_UINTQI] = global_trees[TI_UINTQI_TYPE];
5572 spu_builtin_types[SPU_BTI_UINTHI] = global_trees[TI_UINTHI_TYPE];
5573 spu_builtin_types[SPU_BTI_UINTSI] = global_trees[TI_UINTSI_TYPE];
5574 spu_builtin_types[SPU_BTI_UINTDI] = global_trees[TI_UINTDI_TYPE];
5575
5576 spu_builtin_types[SPU_BTI_FLOAT] = global_trees[TI_FLOAT_TYPE];
5577 spu_builtin_types[SPU_BTI_DOUBLE] = global_trees[TI_DOUBLE_TYPE];
5578
5579 spu_builtin_types[SPU_BTI_VOID] = global_trees[TI_VOID_TYPE];
5580
5581 spu_builtin_types[SPU_BTI_PTR] =
5582 build_pointer_type (build_qualified_type
5583 (void_type_node,
5584 TYPE_QUAL_CONST | TYPE_QUAL_VOLATILE));
5585
5586 /* For each builtin we build a new prototype. The tree code will make
5587 sure nodes are shared. */
5588 for (i = 0, d = spu_builtins; i < NUM_SPU_BUILTINS; i++, d++)
5589 {
5590 tree p;
5591 char name[64]; /* build_function will make a copy. */
5592 int parm;
5593
5594 if (d->name == 0)
5595 continue;
5596
e47f8bba 5597 /* Find last parm. */
85d9c13c 5598 for (parm = 1; d->parm[parm] != SPU_BTI_END_OF_PARAMS; parm++)
e47f8bba 5599 ;
85d9c13c
TS
5600
5601 p = void_list_node;
5602 while (parm > 1)
5603 p = tree_cons (NULL_TREE, spu_builtin_types[d->parm[--parm]], p);
5604
5605 p = build_function_type (spu_builtin_types[d->parm[0]], p);
5606
5607 sprintf (name, "__builtin_%s", d->name);
8dc9f5bd 5608 spu_builtin_decls[i] =
fec6e65b 5609 add_builtin_function (name, p, i, BUILT_IN_MD, NULL, NULL_TREE);
bbea461b 5610 if (d->fcode == SPU_MASK_FOR_LOAD)
8dc9f5bd 5611 TREE_READONLY (spu_builtin_decls[i]) = 1;
e47f8bba
BE
5612
5613 /* These builtins don't throw. */
8dc9f5bd 5614 TREE_NOTHROW (spu_builtin_decls[i]) = 1;
85d9c13c
TS
5615 }
5616}
5617
e1f1d97f
SL
5618void
5619spu_restore_stack_block (rtx op0 ATTRIBUTE_UNUSED, rtx op1)
5620{
5621 static unsigned char arr[16] =
5622 { 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3 };
5623
5624 rtx temp = gen_reg_rtx (Pmode);
5625 rtx temp2 = gen_reg_rtx (V4SImode);
5626 rtx temp3 = gen_reg_rtx (V4SImode);
5627 rtx pat = gen_reg_rtx (TImode);
5628 rtx sp = gen_rtx_REG (V4SImode, STACK_POINTER_REGNUM);
5629
5630 emit_move_insn (pat, array_to_constant (TImode, arr));
5631
5632 /* Restore the sp. */
5633 emit_move_insn (temp, op1);
5634 emit_move_insn (temp2, gen_frame_mem (V4SImode, stack_pointer_rtx));
5635
5636 /* Compute available stack size for sp. */
5637 emit_insn (gen_subsi3 (temp, temp, stack_pointer_rtx));
5638 emit_insn (gen_shufb (temp3, temp, temp, pat));
5639
5640 emit_insn (gen_addv4si3 (sp, sp, temp3));
5641 emit_move_insn (gen_frame_mem (V4SImode, stack_pointer_rtx), temp2);
5642}
5643
85d9c13c
TS
5644int
5645spu_safe_dma (HOST_WIDE_INT channel)
5646{
4230d0fe 5647 return TARGET_SAFE_DMA && channel >= 21 && channel <= 27;
85d9c13c
TS
5648}
5649
5650void
5651spu_builtin_splats (rtx ops[])
5652{
ef4bddc2 5653 machine_mode mode = GET_MODE (ops[0]);
85d9c13c
TS
5654 if (GET_CODE (ops[1]) == CONST_INT || GET_CODE (ops[1]) == CONST_DOUBLE)
5655 {
5656 unsigned char arr[16];
5657 constant_to_array (GET_MODE_INNER (mode), ops[1], arr);
5658 emit_move_insn (ops[0], array_to_constant (mode, arr));
5659 }
85d9c13c
TS
5660 else
5661 {
5662 rtx reg = gen_reg_rtx (TImode);
5663 rtx shuf;
5664 if (GET_CODE (ops[1]) != REG
5665 && GET_CODE (ops[1]) != SUBREG)
5666 ops[1] = force_reg (GET_MODE_INNER (mode), ops[1]);
5667 switch (mode)
5668 {
5669 case V2DImode:
5670 case V2DFmode:
5671 shuf =
5672 immed_double_const (0x0001020304050607ll, 0x1011121314151617ll,
5673 TImode);
5674 break;
5675 case V4SImode:
5676 case V4SFmode:
5677 shuf =
5678 immed_double_const (0x0001020300010203ll, 0x0001020300010203ll,
5679 TImode);
5680 break;
5681 case V8HImode:
5682 shuf =
5683 immed_double_const (0x0203020302030203ll, 0x0203020302030203ll,
5684 TImode);
5685 break;
5686 case V16QImode:
5687 shuf =
5688 immed_double_const (0x0303030303030303ll, 0x0303030303030303ll,
5689 TImode);
5690 break;
5691 default:
5692 abort ();
5693 }
5694 emit_move_insn (reg, shuf);
5695 emit_insn (gen_shufb (ops[0], ops[1], ops[1], reg));
5696 }
5697}
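/* The shuffle constants above splat the scalar from its preferred slot:
   e.g. for V4SImode the pattern repeats bytes 00..03 in every word, so
   each word of the result receives the SImode value in bytes 0-3 of
   ops[1]; for V16QImode every pattern byte is 0x03, the preferred slot of
   a QImode value.  */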
5698
5699void
5700spu_builtin_extract (rtx ops[])
5701{
ef4bddc2 5702 machine_mode mode;
85d9c13c
TS
5703 rtx rot, from, tmp;
5704
5705 mode = GET_MODE (ops[1]);
5706
5707 if (GET_CODE (ops[2]) == CONST_INT)
5708 {
5709 switch (mode)
5710 {
5711 case V16QImode:
5712 emit_insn (gen_vec_extractv16qi (ops[0], ops[1], ops[2]));
5713 break;
5714 case V8HImode:
5715 emit_insn (gen_vec_extractv8hi (ops[0], ops[1], ops[2]));
5716 break;
5717 case V4SFmode:
5718 emit_insn (gen_vec_extractv4sf (ops[0], ops[1], ops[2]));
5719 break;
5720 case V4SImode:
5721 emit_insn (gen_vec_extractv4si (ops[0], ops[1], ops[2]));
5722 break;
5723 case V2DImode:
5724 emit_insn (gen_vec_extractv2di (ops[0], ops[1], ops[2]));
5725 break;
5726 case V2DFmode:
5727 emit_insn (gen_vec_extractv2df (ops[0], ops[1], ops[2]));
5728 break;
5729 default:
5730 abort ();
5731 }
5732 return;
5733 }
5734
5735 from = spu_gen_subreg (TImode, ops[1]);
5736 rot = gen_reg_rtx (TImode);
5737 tmp = gen_reg_rtx (SImode);
5738
5739 switch (mode)
5740 {
5741 case V16QImode:
5742 emit_insn (gen_addsi3 (tmp, ops[2], GEN_INT (-3)));
5743 break;
5744 case V8HImode:
5745 emit_insn (gen_addsi3 (tmp, ops[2], ops[2]));
5746 emit_insn (gen_addsi3 (tmp, tmp, GEN_INT (-2)));
5747 break;
5748 case V4SFmode:
5749 case V4SImode:
5750 emit_insn (gen_ashlsi3 (tmp, ops[2], GEN_INT (2)));
5751 break;
5752 case V2DImode:
5753 case V2DFmode:
5754 emit_insn (gen_ashlsi3 (tmp, ops[2], GEN_INT (3)));
5755 break;
5756 default:
5757 abort ();
5758 }
5759 emit_insn (gen_rotqby_ti (rot, from, tmp));
5760
5761 emit_insn (gen_spu_convert (ops[0], rot));
5762}
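/* In the variable-index path above, TMP is the byte rotate count that
   brings the requested element into its preferred slot: e.g. for V4SImode
   the element at index i starts at byte 4*i and the preferred SImode slot
   is bytes 0-3, so rotating the quadword left by 4*i bytes (rotqby) lines
   it up for the final spu_convert.  The -3 and -2 adjustments for QImode
   and HImode account for their preferred slots being byte 3 and bytes 2-3
   respectively.  */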
5763
5764void
5765spu_builtin_insert (rtx ops[])
5766{
ef4bddc2
RS
5767 machine_mode mode = GET_MODE (ops[0]);
5768 machine_mode imode = GET_MODE_INNER (mode);
85d9c13c
TS
5769 rtx mask = gen_reg_rtx (TImode);
5770 rtx offset;
5771
5772 if (GET_CODE (ops[3]) == CONST_INT)
5773 offset = GEN_INT (INTVAL (ops[3]) * GET_MODE_SIZE (imode));
5774 else
5775 {
5776 offset = gen_reg_rtx (SImode);
5777 emit_insn (gen_mulsi3
5778 (offset, ops[3], GEN_INT (GET_MODE_SIZE (imode))));
5779 }
5780 emit_insn (gen_cpat
5781 (mask, stack_pointer_rtx, offset,
5782 GEN_INT (GET_MODE_SIZE (imode))));
5783 emit_insn (gen_shufb (ops[0], ops[1], ops[2], mask));
5784}
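/* Sketch of the mechanism above: cpat builds a shuffle control that copies
   the vector operand ops[2] through unchanged except for the
   GET_MODE_SIZE (imode) bytes at OFFSET, which are taken from the scalar
   ops[1] instead.  E.g. inserting an SImode value at index 2 of a V4SImode
   vector uses offset 8 and a 4-byte pattern.  */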
5785
5786void
5787spu_builtin_promote (rtx ops[])
5788{
ef4bddc2 5789 machine_mode mode, imode;
85d9c13c
TS
5790 rtx rot, from, offset;
5791 HOST_WIDE_INT pos;
5792
5793 mode = GET_MODE (ops[0]);
5794 imode = GET_MODE_INNER (mode);
5795
5796 from = gen_reg_rtx (TImode);
5797 rot = spu_gen_subreg (TImode, ops[0]);
5798
5799 emit_insn (gen_spu_convert (from, ops[1]));
5800
5801 if (GET_CODE (ops[2]) == CONST_INT)
5802 {
5803 pos = -GET_MODE_SIZE (imode) * INTVAL (ops[2]);
5804 if (GET_MODE_SIZE (imode) < 4)
5805 pos += 4 - GET_MODE_SIZE (imode);
5806 offset = GEN_INT (pos & 15);
5807 }
5808 else
5809 {
5810 offset = gen_reg_rtx (SImode);
5811 switch (mode)
5812 {
5813 case V16QImode:
5814 emit_insn (gen_subsi3 (offset, GEN_INT (3), ops[2]));
5815 break;
5816 case V8HImode:
5817 emit_insn (gen_subsi3 (offset, GEN_INT (1), ops[2]));
5818 emit_insn (gen_addsi3 (offset, offset, offset));
5819 break;
5820 case V4SFmode:
5821 case V4SImode:
5822 emit_insn (gen_subsi3 (offset, GEN_INT (0), ops[2]));
5823 emit_insn (gen_ashlsi3 (offset, offset, GEN_INT (2)));
5824 break;
5825 case V2DImode:
5826 case V2DFmode:
5827 emit_insn (gen_ashlsi3 (offset, ops[2], GEN_INT (3)));
5828 break;
5829 default:
5830 abort ();
5831 }
5832 }
5833 emit_insn (gen_rotqby_ti (rot, from, offset));
5834}
5835
a85b4c91
RH
5836static void
5837spu_trampoline_init (rtx m_tramp, tree fndecl, rtx cxt)
85d9c13c 5838{
a85b4c91 5839 rtx fnaddr = XEXP (DECL_RTL (fndecl), 0);
85d9c13c
TS
5840 rtx shuf = gen_reg_rtx (V4SImode);
5841 rtx insn = gen_reg_rtx (V4SImode);
5842 rtx shufc;
5843 rtx insnc;
5844 rtx mem;
5845
5846 fnaddr = force_reg (SImode, fnaddr);
5847 cxt = force_reg (SImode, cxt);
5848
5849 if (TARGET_LARGE_MEM)
5850 {
5851 rtx rotl = gen_reg_rtx (V4SImode);
5852 rtx mask = gen_reg_rtx (V4SImode);
5853 rtx bi = gen_reg_rtx (SImode);
a85b4c91 5854 static unsigned char const shufa[16] = {
85d9c13c
TS
5855 2, 3, 0, 1, 18, 19, 16, 17,
5856 0, 1, 2, 3, 16, 17, 18, 19
5857 };
a85b4c91 5858 static unsigned char const insna[16] = {
85d9c13c
TS
5859 0x41, 0, 0, 79,
5860 0x41, 0, 0, STATIC_CHAIN_REGNUM,
5861 0x60, 0x80, 0, 79,
5862 0x60, 0x80, 0, STATIC_CHAIN_REGNUM
5863 };
5864
5865 shufc = force_reg (TImode, array_to_constant (TImode, shufa));
5866 insnc = force_reg (V4SImode, array_to_constant (V4SImode, insna));
5867
5868 emit_insn (gen_shufb (shuf, fnaddr, cxt, shufc));
71d46ca5 5869 emit_insn (gen_vrotlv4si3 (rotl, shuf, spu_const (V4SImode, 7)));
85d9c13c
TS
5870 emit_insn (gen_movv4si (mask, spu_const (V4SImode, 0xffff << 7)));
5871 emit_insn (gen_selb (insn, insnc, rotl, mask));
5872
a85b4c91
RH
5873 mem = adjust_address (m_tramp, V4SImode, 0);
5874 emit_move_insn (mem, insn);
85d9c13c
TS
5875
5876 emit_move_insn (bi, GEN_INT (0x35000000 + (79 << 7)));
a85b4c91
RH
5877 mem = adjust_address (m_tramp, Pmode, 16);
5878 emit_move_insn (mem, bi);
85d9c13c
TS
5879 }
5880 else
5881 {
5882 rtx scxt = gen_reg_rtx (SImode);
5883 rtx sfnaddr = gen_reg_rtx (SImode);
a85b4c91 5884 static unsigned char const insna[16] = {
85d9c13c
TS
5885 0x42, 0, 0, STATIC_CHAIN_REGNUM,
5886 0x30, 0, 0, 0,
5887 0, 0, 0, 0,
5888 0, 0, 0, 0
5889 };
5890
5891 shufc = gen_reg_rtx (TImode);
5892 insnc = force_reg (V4SImode, array_to_constant (V4SImode, insna));
5893
 5894 /* By or'ing all of cxt with the ila opcode we are assuming cxt
 5895 fits in 18 bits and the last 4 bits are zeros. This will be true
 5896 if the stack pointer is initialized to 0x3fff0 at program start;
 5897 otherwise the ila instruction will be garbage. */
5898
5899 emit_insn (gen_ashlsi3 (scxt, cxt, GEN_INT (7)));
5900 emit_insn (gen_ashlsi3 (sfnaddr, fnaddr, GEN_INT (5)));
5901 emit_insn (gen_cpat
5902 (shufc, stack_pointer_rtx, GEN_INT (4), GEN_INT (4)));
5903 emit_insn (gen_shufb (shuf, sfnaddr, scxt, shufc));
5904 emit_insn (gen_iorv4si3 (insn, insnc, shuf));
5905
a85b4c91
RH
5906 mem = adjust_address (m_tramp, V4SImode, 0);
5907 emit_move_insn (mem, insn);
85d9c13c
TS
5908 }
5909 emit_insn (gen_sync ());
5910}
5911
d45eae79
SL
5912static bool
5913spu_warn_func_return (tree decl)
5914{
5915 /* Naked functions are implemented entirely in assembly, including the
5916 return sequence, so suppress warnings about this. */
5917 return !spu_naked_function_p (decl);
5918}
5919
85d9c13c
TS
5920void
5921spu_expand_sign_extend (rtx ops[])
5922{
5923 unsigned char arr[16];
5924 rtx pat = gen_reg_rtx (TImode);
5925 rtx sign, c;
5926 int i, last;
5927 last = GET_MODE (ops[0]) == DImode ? 7 : 15;
5928 if (GET_MODE (ops[1]) == QImode)
5929 {
5930 sign = gen_reg_rtx (HImode);
5931 emit_insn (gen_extendqihi2 (sign, ops[1]));
5932 for (i = 0; i < 16; i++)
5933 arr[i] = 0x12;
5934 arr[last] = 0x13;
5935 }
5936 else
5937 {
5938 for (i = 0; i < 16; i++)
5939 arr[i] = 0x10;
5940 switch (GET_MODE (ops[1]))
5941 {
5942 case HImode:
5943 sign = gen_reg_rtx (SImode);
5944 emit_insn (gen_extendhisi2 (sign, ops[1]));
5945 arr[last] = 0x03;
5946 arr[last - 1] = 0x02;
5947 break;
5948 case SImode:
5949 sign = gen_reg_rtx (SImode);
5950 emit_insn (gen_ashrsi3 (sign, ops[1], GEN_INT (31)));
5951 for (i = 0; i < 4; i++)
5952 arr[last - i] = 3 - i;
5953 break;
5954 case DImode:
5955 sign = gen_reg_rtx (SImode);
5956 c = gen_reg_rtx (SImode);
5957 emit_insn (gen_spu_convert (c, ops[1]));
5958 emit_insn (gen_ashrsi3 (sign, c, GEN_INT (31)));
5959 for (i = 0; i < 8; i++)
5960 arr[last - i] = 7 - i;
5961 break;
5962 default:
5963 abort ();
5964 }
5965 }
5966 emit_move_insn (pat, array_to_constant (TImode, arr));
5967 emit_insn (gen_shufb (ops[0], ops[1], sign, pat));
5968}
5969
 5970/* Expand vector initialization. If there are any constant parts,
5971 load constant parts first. Then load any non-constant parts. */
5972void
5973spu_expand_vector_init (rtx target, rtx vals)
5974{
ef4bddc2 5975 machine_mode mode = GET_MODE (target);
85d9c13c
TS
5976 int n_elts = GET_MODE_NUNITS (mode);
5977 int n_var = 0;
5978 bool all_same = true;
b509487e 5979 rtx first, x = NULL_RTX, first_constant = NULL_RTX;
85d9c13c
TS
5980 int i;
5981
5982 first = XVECEXP (vals, 0, 0);
5983 for (i = 0; i < n_elts; ++i)
5984 {
5985 x = XVECEXP (vals, 0, i);
d74032d9
UW
5986 if (!(CONST_INT_P (x)
5987 || GET_CODE (x) == CONST_DOUBLE
5988 || GET_CODE (x) == CONST_FIXED))
85d9c13c
TS
5989 ++n_var;
5990 else
5991 {
5992 if (first_constant == NULL_RTX)
5993 first_constant = x;
5994 }
5995 if (i > 0 && !rtx_equal_p (x, first))
5996 all_same = false;
5997 }
5998
5999 /* if all elements are the same, use splats to repeat elements */
6000 if (all_same)
6001 {
6002 if (!CONSTANT_P (first)
6003 && !register_operand (first, GET_MODE (x)))
6004 first = force_reg (GET_MODE (first), first);
6005 emit_insn (gen_spu_splats (target, first));
6006 return;
6007 }
6008
6009 /* load constant parts */
6010 if (n_var != n_elts)
6011 {
6012 if (n_var == 0)
6013 {
6014 emit_move_insn (target,
6015 gen_rtx_CONST_VECTOR (mode, XVEC (vals, 0)));
6016 }
6017 else
6018 {
6019 rtx constant_parts_rtx = copy_rtx (vals);
6020
6021 gcc_assert (first_constant != NULL_RTX);
6022 /* fill empty slots with the first constant, this increases
6023 our chance of using splats in the recursive call below. */
6024 for (i = 0; i < n_elts; ++i)
d74032d9
UW
6025 {
6026 x = XVECEXP (constant_parts_rtx, 0, i);
6027 if (!(CONST_INT_P (x)
6028 || GET_CODE (x) == CONST_DOUBLE
6029 || GET_CODE (x) == CONST_FIXED))
6030 XVECEXP (constant_parts_rtx, 0, i) = first_constant;
6031 }
85d9c13c
TS
6032
6033 spu_expand_vector_init (target, constant_parts_rtx);
6034 }
6035 }
6036
6037 /* load variable parts */
6038 if (n_var != 0)
6039 {
6040 rtx insert_operands[4];
6041
6042 insert_operands[0] = target;
6043 insert_operands[2] = target;
6044 for (i = 0; i < n_elts; ++i)
6045 {
6046 x = XVECEXP (vals, 0, i);
d74032d9
UW
6047 if (!(CONST_INT_P (x)
6048 || GET_CODE (x) == CONST_DOUBLE
6049 || GET_CODE (x) == CONST_FIXED))
85d9c13c
TS
6050 {
6051 if (!register_operand (x, GET_MODE (x)))
6052 x = force_reg (GET_MODE (x), x);
6053 insert_operands[1] = x;
6054 insert_operands[3] = GEN_INT (i);
6055 spu_builtin_insert (insert_operands);
6056 }
6057 }
6058 }
6059}
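/* For example, initializing a V4SImode vector with {x, 1, 2, 3}, where x
   is in a register, first emits a move of the constant vector {1, 1, 2, 3}
   (the variable slot padded with the first constant) and then a single
   spu_builtin_insert that overwrites element 0 with x.  */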
b66b813d 6060
39aeae85
SL
6061/* Return insn index for the vector compare instruction for given CODE,
6062 and DEST_MODE, OP_MODE. Return -1 if valid insn is not available. */
6063
6064static int
6065get_vec_cmp_insn (enum rtx_code code,
ef4bddc2
RS
6066 machine_mode dest_mode,
6067 machine_mode op_mode)
39aeae85
SL
6068
6069{
6070 switch (code)
6071 {
6072 case EQ:
6073 if (dest_mode == V16QImode && op_mode == V16QImode)
6074 return CODE_FOR_ceq_v16qi;
6075 if (dest_mode == V8HImode && op_mode == V8HImode)
6076 return CODE_FOR_ceq_v8hi;
6077 if (dest_mode == V4SImode && op_mode == V4SImode)
6078 return CODE_FOR_ceq_v4si;
6079 if (dest_mode == V4SImode && op_mode == V4SFmode)
6080 return CODE_FOR_ceq_v4sf;
6081 if (dest_mode == V2DImode && op_mode == V2DFmode)
6082 return CODE_FOR_ceq_v2df;
6083 break;
6084 case GT:
6085 if (dest_mode == V16QImode && op_mode == V16QImode)
6086 return CODE_FOR_cgt_v16qi;
6087 if (dest_mode == V8HImode && op_mode == V8HImode)
6088 return CODE_FOR_cgt_v8hi;
6089 if (dest_mode == V4SImode && op_mode == V4SImode)
6090 return CODE_FOR_cgt_v4si;
6091 if (dest_mode == V4SImode && op_mode == V4SFmode)
6092 return CODE_FOR_cgt_v4sf;
6093 if (dest_mode == V2DImode && op_mode == V2DFmode)
6094 return CODE_FOR_cgt_v2df;
6095 break;
6096 case GTU:
6097 if (dest_mode == V16QImode && op_mode == V16QImode)
6098 return CODE_FOR_clgt_v16qi;
6099 if (dest_mode == V8HImode && op_mode == V8HImode)
6100 return CODE_FOR_clgt_v8hi;
6101 if (dest_mode == V4SImode && op_mode == V4SImode)
6102 return CODE_FOR_clgt_v4si;
6103 break;
6104 default:
6105 break;
6106 }
6107 return -1;
6108}
6109
6110/* Emit vector compare for operands OP0 and OP1 using code RCODE.
6111 DMODE is expected destination mode. This is a recursive function. */
6112
6113static rtx
6114spu_emit_vector_compare (enum rtx_code rcode,
6115 rtx op0, rtx op1,
ef4bddc2 6116 machine_mode dmode)
39aeae85
SL
6117{
6118 int vec_cmp_insn;
6119 rtx mask;
ef4bddc2
RS
6120 machine_mode dest_mode;
6121 machine_mode op_mode = GET_MODE (op1);
39aeae85
SL
6122
6123 gcc_assert (GET_MODE (op0) == GET_MODE (op1));
6124
 6125 /* Floating point vector compare instructions use destination V4SImode.
 6126 Double floating point vector compare instructions use destination V2DImode.
 6127 Move the destination to the appropriate mode later. */
6128 if (dmode == V4SFmode)
6129 dest_mode = V4SImode;
6130 else if (dmode == V2DFmode)
6131 dest_mode = V2DImode;
6132 else
6133 dest_mode = dmode;
6134
6135 mask = gen_reg_rtx (dest_mode);
6136 vec_cmp_insn = get_vec_cmp_insn (rcode, dest_mode, op_mode);
6137
6138 if (vec_cmp_insn == -1)
6139 {
6140 bool swap_operands = false;
6141 bool try_again = false;
6142 switch (rcode)
6143 {
6144 case LT:
6145 rcode = GT;
6146 swap_operands = true;
6147 try_again = true;
6148 break;
6149 case LTU:
6150 rcode = GTU;
6151 swap_operands = true;
6152 try_again = true;
6153 break;
6154 case NE:
7f9a3dcd
UW
6155 case UNEQ:
6156 case UNLE:
6157 case UNLT:
6158 case UNGE:
6159 case UNGT:
6160 case UNORDERED:
39aeae85
SL
6161 /* Treat A != B as ~(A==B). */
6162 {
7f9a3dcd 6163 enum rtx_code rev_code;
39aeae85 6164 enum insn_code nor_code;
7f9a3dcd
UW
6165 rtx rev_mask;
6166
6167 rev_code = reverse_condition_maybe_unordered (rcode);
6168 rev_mask = spu_emit_vector_compare (rev_code, op0, op1, dest_mode);
6169
947131ba 6170 nor_code = optab_handler (one_cmpl_optab, dest_mode);
39aeae85 6171 gcc_assert (nor_code != CODE_FOR_nothing);
7f9a3dcd 6172 emit_insn (GEN_FCN (nor_code) (mask, rev_mask));
39aeae85
SL
6173 if (dmode != dest_mode)
6174 {
6175 rtx temp = gen_reg_rtx (dest_mode);
6176 convert_move (temp, mask, 0);
6177 return temp;
6178 }
6179 return mask;
6180 }
6181 break;
6182 case GE:
6183 case GEU:
6184 case LE:
6185 case LEU:
6186 /* Try GT/GTU/LT/LTU OR EQ */
6187 {
6188 rtx c_rtx, eq_rtx;
6189 enum insn_code ior_code;
6190 enum rtx_code new_code;
6191
6192 switch (rcode)
6193 {
6194 case GE: new_code = GT; break;
6195 case GEU: new_code = GTU; break;
6196 case LE: new_code = LT; break;
6197 case LEU: new_code = LTU; break;
6198 default:
6199 gcc_unreachable ();
6200 }
6201
6202 c_rtx = spu_emit_vector_compare (new_code, op0, op1, dest_mode);
6203 eq_rtx = spu_emit_vector_compare (EQ, op0, op1, dest_mode);
6204
947131ba 6205 ior_code = optab_handler (ior_optab, dest_mode);
39aeae85
SL
6206 gcc_assert (ior_code != CODE_FOR_nothing);
6207 emit_insn (GEN_FCN (ior_code) (mask, c_rtx, eq_rtx));
6208 if (dmode != dest_mode)
6209 {
6210 rtx temp = gen_reg_rtx (dest_mode);
6211 convert_move (temp, mask, 0);
6212 return temp;
6213 }
6214 return mask;
6215 }
6216 break;
7f9a3dcd
UW
6217 case LTGT:
6218 /* Try LT OR GT */
6219 {
6220 rtx lt_rtx, gt_rtx;
6221 enum insn_code ior_code;
6222
6223 lt_rtx = spu_emit_vector_compare (LT, op0, op1, dest_mode);
6224 gt_rtx = spu_emit_vector_compare (GT, op0, op1, dest_mode);
6225
6226 ior_code = optab_handler (ior_optab, dest_mode);
6227 gcc_assert (ior_code != CODE_FOR_nothing);
6228 emit_insn (GEN_FCN (ior_code) (mask, lt_rtx, gt_rtx));
6229 if (dmode != dest_mode)
6230 {
6231 rtx temp = gen_reg_rtx (dest_mode);
6232 convert_move (temp, mask, 0);
6233 return temp;
6234 }
6235 return mask;
6236 }
6237 break;
6238 case ORDERED:
6239 /* Implement as (A==A) & (B==B) */
6240 {
6241 rtx a_rtx, b_rtx;
6242 enum insn_code and_code;
6243
6244 a_rtx = spu_emit_vector_compare (EQ, op0, op0, dest_mode);
6245 b_rtx = spu_emit_vector_compare (EQ, op1, op1, dest_mode);
6246
6247 and_code = optab_handler (and_optab, dest_mode);
6248 gcc_assert (and_code != CODE_FOR_nothing);
6249 emit_insn (GEN_FCN (and_code) (mask, a_rtx, b_rtx));
6250 if (dmode != dest_mode)
6251 {
6252 rtx temp = gen_reg_rtx (dest_mode);
6253 convert_move (temp, mask, 0);
6254 return temp;
6255 }
6256 return mask;
6257 }
6258 break;
39aeae85
SL
6259 default:
6260 gcc_unreachable ();
6261 }
6262
6263 /* You only get two chances. */
6264 if (try_again)
6265 vec_cmp_insn = get_vec_cmp_insn (rcode, dest_mode, op_mode);
6266
6267 gcc_assert (vec_cmp_insn != -1);
6268
6269 if (swap_operands)
6270 {
6271 rtx tmp;
6272 tmp = op0;
6273 op0 = op1;
6274 op1 = tmp;
6275 }
6276 }
6277
6278 emit_insn (GEN_FCN (vec_cmp_insn) (mask, op0, op1));
6279 if (dmode != dest_mode)
6280 {
6281 rtx temp = gen_reg_rtx (dest_mode);
6282 convert_move (temp, mask, 0);
6283 return temp;
6284 }
6285 return mask;
6286}
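/* For instance, a V4SImode LE compare is not directly available: the GE/LE
   case above rewrites it as (LT | EQ), the recursive LT call swaps the
   operands and uses the GT pattern, and the two masks are combined with
   the vector ior.  */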
6287
6288
6289/* Emit vector conditional expression.
6290 DEST is destination. OP1 and OP2 are two VEC_COND_EXPR operands.
6291 CC_OP0 and CC_OP1 are the two operands for the relation operation COND. */
6292
6293int
6294spu_emit_vector_cond_expr (rtx dest, rtx op1, rtx op2,
6295 rtx cond, rtx cc_op0, rtx cc_op1)
6296{
ef4bddc2 6297 machine_mode dest_mode = GET_MODE (dest);
39aeae85
SL
6298 enum rtx_code rcode = GET_CODE (cond);
6299 rtx mask;
6300
6301 /* Get the vector mask for the given relational operations. */
6302 mask = spu_emit_vector_compare (rcode, cc_op0, cc_op1, dest_mode);
6303
6304 emit_insn(gen_selb (dest, op2, op1, mask));
6305
6306 return 1;
6307}
6308
b66b813d 6309static rtx
ef4bddc2 6310spu_force_reg (machine_mode mode, rtx op)
b66b813d
AP
6311{
6312 rtx x, r;
6313 if (GET_MODE (op) == VOIDmode || GET_MODE (op) == BLKmode)
6314 {
6315 if ((SCALAR_INT_MODE_P (mode) && GET_CODE (op) == CONST_INT)
6316 || GET_MODE (op) == BLKmode)
6317 return force_reg (mode, convert_to_mode (mode, op, 0));
6318 abort ();
6319 }
6320
6321 r = force_reg (GET_MODE (op), op);
6322 if (GET_MODE_SIZE (GET_MODE (op)) == GET_MODE_SIZE (mode))
6323 {
6324 x = simplify_gen_subreg (mode, r, GET_MODE (op), 0);
6325 if (x)
6326 return x;
6327 }
6328
6329 x = gen_reg_rtx (mode);
6330 emit_insn (gen_spu_convert (x, r));
6331 return x;
6332}
6333
6334static void
6335spu_check_builtin_parm (struct spu_builtin_description *d, rtx op, int p)
6336{
6337 HOST_WIDE_INT v = 0;
6338 int lsbits;
6339 /* Check the range of immediate operands. */
6340 if (p >= SPU_BTI_7 && p <= SPU_BTI_U18)
6341 {
6342 int range = p - SPU_BTI_7;
73701e27
TS
6343
6344 if (!CONSTANT_P (op))
d8a07487 6345 error ("%s expects an integer literal in the range [%d, %d]",
b66b813d
AP
6346 d->name,
6347 spu_builtin_range[range].low, spu_builtin_range[range].high);
6348
6349 if (GET_CODE (op) == CONST
6350 && (GET_CODE (XEXP (op, 0)) == PLUS
6351 || GET_CODE (XEXP (op, 0)) == MINUS))
6352 {
6353 v = INTVAL (XEXP (XEXP (op, 0), 1));
6354 op = XEXP (XEXP (op, 0), 0);
6355 }
6356 else if (GET_CODE (op) == CONST_INT)
6357 v = INTVAL (op);
73701e27
TS
6358 else if (GET_CODE (op) == CONST_VECTOR
6359 && GET_CODE (CONST_VECTOR_ELT (op, 0)) == CONST_INT)
6360 v = INTVAL (CONST_VECTOR_ELT (op, 0));
6361
6362 /* The default for v is 0 which is valid in every range. */
6363 if (v < spu_builtin_range[range].low
6364 || v > spu_builtin_range[range].high)
d8a07487 6365 error ("%s expects an integer literal in the range [%d, %d]. (%wd)",
73701e27
TS
6366 d->name,
6367 spu_builtin_range[range].low, spu_builtin_range[range].high,
6368 v);
b66b813d
AP
6369
6370 switch (p)
6371 {
6372 case SPU_BTI_S10_4:
6373 lsbits = 4;
6374 break;
6375 case SPU_BTI_U16_2:
 6376 /* This is only used in lqa and stqa. Even though the insns
6377 encode 16 bits of the address (all but the 2 least
6378 significant), only 14 bits are used because it is masked to
6379 be 16 byte aligned. */
6380 lsbits = 4;
6381 break;
6382 case SPU_BTI_S16_2:
6383 /* This is used for lqr and stqr. */
6384 lsbits = 2;
6385 break;
6386 default:
6387 lsbits = 0;
6388 }
6389
6390 if (GET_CODE (op) == LABEL_REF
6391 || (GET_CODE (op) == SYMBOL_REF
6392 && SYMBOL_REF_FUNCTION_P (op))
73701e27 6393 || (v & ((1 << lsbits) - 1)) != 0)
d8a07487 6394 warning (0, "%d least significant bits of %s are ignored", lsbits,
b66b813d
AP
6395 d->name);
6396 }
6397}
6398
6399
d7815554 6400static int
73701e27 6401expand_builtin_args (struct spu_builtin_description *d, tree exp,
b66b813d
AP
6402 rtx target, rtx ops[])
6403{
81f40b79 6404 enum insn_code icode = (enum insn_code) d->icode;
73701e27 6405 int i = 0, a;
b66b813d
AP
6406
6407 /* Expand the arguments into rtl. */
6408
6409 if (d->parm[0] != SPU_BTI_VOID)
6410 ops[i++] = target;
6411
d7815554 6412 for (a = 0; d->parm[a+1] != SPU_BTI_END_OF_PARAMS; i++, a++)
b66b813d 6413 {
73701e27 6414 tree arg = CALL_EXPR_ARG (exp, a);
b66b813d
AP
6415 if (arg == 0)
6416 abort ();
bbbbb16a 6417 ops[i] = expand_expr (arg, NULL_RTX, VOIDmode, EXPAND_NORMAL);
b66b813d 6418 }
d7815554 6419
f04713ee 6420 gcc_assert (i == insn_data[icode].n_generator_args);
d7815554 6421 return i;
b66b813d
AP
6422}
6423
6424static rtx
6425spu_expand_builtin_1 (struct spu_builtin_description *d,
73701e27 6426 tree exp, rtx target)
b66b813d
AP
6427{
6428 rtx pat;
6429 rtx ops[8];
81f40b79 6430 enum insn_code icode = (enum insn_code) d->icode;
ef4bddc2 6431 machine_mode mode, tmode;
b66b813d 6432 int i, p;
d7815554 6433 int n_operands;
b66b813d
AP
6434 tree return_type;
6435
6436 /* Set up ops[] with values from arglist. */
d7815554 6437 n_operands = expand_builtin_args (d, exp, target, ops);
b66b813d
AP
6438
6439 /* Handle the target operand which must be operand 0. */
6440 i = 0;
6441 if (d->parm[0] != SPU_BTI_VOID)
6442 {
6443
6444 /* We prefer the mode specified for the match_operand otherwise
6445 use the mode from the builtin function prototype. */
6446 tmode = insn_data[d->icode].operand[0].mode;
6447 if (tmode == VOIDmode)
6448 tmode = TYPE_MODE (spu_builtin_types[d->parm[0]]);
6449
6450 /* Try to use target because not using it can lead to extra copies
6451 and when we are using all of the registers extra copies leads
6452 to extra spills. */
6453 if (target && GET_CODE (target) == REG && GET_MODE (target) == tmode)
6454 ops[0] = target;
6455 else
6456 target = ops[0] = gen_reg_rtx (tmode);
6457
6458 if (!(*insn_data[icode].operand[0].predicate) (ops[0], tmode))
6459 abort ();
6460
6461 i++;
6462 }
6463
bbea461b
DN
6464 if (d->fcode == SPU_MASK_FOR_LOAD)
6465 {
ef4bddc2 6466 machine_mode mode = insn_data[icode].operand[1].mode;
bbea461b
DN
6467 tree arg;
6468 rtx addr, op, pat;
6469
6470 /* get addr */
73701e27 6471 arg = CALL_EXPR_ARG (exp, 0);
643afedb 6472 gcc_assert (POINTER_TYPE_P (TREE_TYPE (arg)));
bbea461b
DN
6473 op = expand_expr (arg, NULL_RTX, Pmode, EXPAND_NORMAL);
6474 addr = memory_address (mode, op);
6475
6476 /* negate addr */
6477 op = gen_reg_rtx (GET_MODE (addr));
f7df4a84 6478 emit_insn (gen_rtx_SET (op, gen_rtx_NEG (GET_MODE (addr), addr)));
bbea461b
DN
6479 op = gen_rtx_MEM (mode, op);
6480
6481 pat = GEN_FCN (icode) (target, op);
6482 if (!pat)
6483 return 0;
6484 emit_insn (pat);
6485 return target;
6486 }
6487
b66b813d
AP
 6488 /* Ignore align_hint, but still expand its args in case they have
 6489 side effects. */
6490 if (icode == CODE_FOR_spu_align_hint)
6491 return 0;
6492
6493 /* Handle the rest of the operands. */
d7815554 6494 for (p = 1; i < n_operands; i++, p++)
b66b813d
AP
6495 {
6496 if (insn_data[d->icode].operand[i].mode != VOIDmode)
6497 mode = insn_data[d->icode].operand[i].mode;
6498 else
6499 mode = TYPE_MODE (spu_builtin_types[d->parm[i]]);
6500
6501 /* mode can be VOIDmode here for labels */
6502
6503 /* For specific intrinsics with an immediate operand, e.g.,
6504 si_ai(), we sometimes need to convert the scalar argument to a
6505 vector argument by splatting the scalar. */
6506 if (VECTOR_MODE_P (mode)
6507 && (GET_CODE (ops[i]) == CONST_INT
6508 || GET_MODE_CLASS (GET_MODE (ops[i])) == MODE_INT
6717c544 6509 || GET_MODE_CLASS (GET_MODE (ops[i])) == MODE_FLOAT))
b66b813d
AP
6510 {
6511 if (GET_CODE (ops[i]) == CONST_INT)
6512 ops[i] = spu_const (mode, INTVAL (ops[i]));
6513 else
6514 {
6515 rtx reg = gen_reg_rtx (mode);
ef4bddc2 6516 machine_mode imode = GET_MODE_INNER (mode);
b66b813d
AP
6517 if (!spu_nonmem_operand (ops[i], GET_MODE (ops[i])))
6518 ops[i] = force_reg (GET_MODE (ops[i]), ops[i]);
6519 if (imode != GET_MODE (ops[i]))
6520 ops[i] = convert_to_mode (imode, ops[i],
6521 TYPE_UNSIGNED (spu_builtin_types
6522 [d->parm[i]]));
6523 emit_insn (gen_spu_splats (reg, ops[i]));
6524 ops[i] = reg;
6525 }
6526 }
6527
73701e27
TS
6528 spu_check_builtin_parm (d, ops[i], d->parm[p]);
6529
b66b813d
AP
6530 if (!(*insn_data[icode].operand[i].predicate) (ops[i], mode))
6531 ops[i] = spu_force_reg (mode, ops[i]);
b66b813d
AP
6532 }
6533
d7815554 6534 switch (n_operands)
b66b813d
AP
6535 {
6536 case 0:
6537 pat = GEN_FCN (icode) (0);
6538 break;
6539 case 1:
6540 pat = GEN_FCN (icode) (ops[0]);
6541 break;
6542 case 2:
6543 pat = GEN_FCN (icode) (ops[0], ops[1]);
6544 break;
6545 case 3:
6546 pat = GEN_FCN (icode) (ops[0], ops[1], ops[2]);
6547 break;
6548 case 4:
6549 pat = GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]);
6550 break;
6551 case 5:
6552 pat = GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3], ops[4]);
6553 break;
6554 case 6:
6555 pat = GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3], ops[4], ops[5]);
6556 break;
6557 default:
6558 abort ();
6559 }
6560
6561 if (!pat)
6562 abort ();
6563
6564 if (d->type == B_CALL || d->type == B_BISLED)
6565 emit_call_insn (pat);
6566 else if (d->type == B_JUMP)
6567 {
6568 emit_jump_insn (pat);
6569 emit_barrier ();
6570 }
6571 else
6572 emit_insn (pat);
6573
6574 return_type = spu_builtin_types[d->parm[0]];
6575 if (d->parm[0] != SPU_BTI_VOID
6576 && GET_MODE (target) != TYPE_MODE (return_type))
6577 {
6578 /* target is the return value. It should always be the mode of
6579 the builtin function prototype. */
6580 target = spu_force_reg (TYPE_MODE (return_type), target);
6581 }
6582
6583 return target;
6584}
6585
6586rtx
6587spu_expand_builtin (tree exp,
6588 rtx target,
6589 rtx subtarget ATTRIBUTE_UNUSED,
ef4bddc2 6590 machine_mode mode ATTRIBUTE_UNUSED,
b66b813d
AP
6591 int ignore ATTRIBUTE_UNUSED)
6592{
73701e27 6593 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
fec6e65b 6594 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
b66b813d
AP
6595 struct spu_builtin_description *d;
6596
6597 if (fcode < NUM_SPU_BUILTINS)
6598 {
6599 d = &spu_builtins[fcode];
6600
73701e27 6601 return spu_expand_builtin_1 (d, exp, target);
b66b813d
AP
6602 }
6603 abort ();
6604}
6605
bbea461b
DN
6606/* Implement targetm.vectorize.builtin_mask_for_load. */
6607static tree
6608spu_builtin_mask_for_load (void)
6609{
8dc9f5bd 6610 return spu_builtin_decls[SPU_MASK_FOR_LOAD];
bbea461b 6611}
73701e27 6612
e95b59d2
DN
6613/* Implement targetm.vectorize.builtin_vectorization_cost. */
6614static int
720f5239 6615spu_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
a21892ad 6616 tree vectype,
720f5239 6617 int misalign ATTRIBUTE_UNUSED)
35e1a5e7 6618{
a21892ad
BS
6619 unsigned elements;
6620
35e1a5e7
IR
6621 switch (type_of_cost)
6622 {
6623 case scalar_stmt:
6624 case vector_stmt:
6625 case vector_load:
6626 case vector_store:
6627 case vec_to_scalar:
6628 case scalar_to_vec:
6629 case cond_branch_not_taken:
6630 case vec_perm:
8bd37302 6631 case vec_promote_demote:
35e1a5e7
IR
6632 return 1;
6633
6634 case scalar_store:
6635 return 10;
6636
6637 case scalar_load:
6638 /* Load + rotate. */
6639 return 2;
6640
6641 case unaligned_load:
6642 return 2;
6643
6644 case cond_branch_taken:
6645 return 6;
6646
a21892ad
BS
6647 case vec_construct:
6648 elements = TYPE_VECTOR_SUBPARTS (vectype);
6649 return elements / 2 + 1;
6650
35e1a5e7
IR
6651 default:
6652 gcc_unreachable ();
6653 }
e95b59d2
DN
6654}
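/* E.g. a vec_construct of a V4SImode vector (4 subparts) is costed at
   4 / 2 + 1 = 3, while a scalar load is costed at 2 to account for the
   extra rotate needed to place the value in its preferred slot.  */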
6655
c3e7ee41
BS
6656/* Implement targetm.vectorize.init_cost. */
6657
cf0d4e15 6658static void *
c3e7ee41
BS
6659spu_init_cost (struct loop *loop_info ATTRIBUTE_UNUSED)
6660{
92345349
BS
6661 unsigned *cost = XNEWVEC (unsigned, 3);
6662 cost[vect_prologue] = cost[vect_body] = cost[vect_epilogue] = 0;
c3e7ee41
BS
6663 return cost;
6664}
6665
6666/* Implement targetm.vectorize.add_stmt_cost. */
6667
cf0d4e15 6668static unsigned
c3e7ee41 6669spu_add_stmt_cost (void *data, int count, enum vect_cost_for_stmt kind,
92345349
BS
6670 struct _stmt_vec_info *stmt_info, int misalign,
6671 enum vect_cost_model_location where)
c3e7ee41
BS
6672{
6673 unsigned *cost = (unsigned *) data;
6674 unsigned retval = 0;
6675
6676 if (flag_vect_cost_model)
6677 {
92345349 6678 tree vectype = stmt_info ? stmt_vectype (stmt_info) : NULL_TREE;
c3e7ee41
BS
6679 int stmt_cost = spu_builtin_vectorization_cost (kind, vectype, misalign);
6680
6681 /* Statements in an inner loop relative to the loop being
6682 vectorized are weighted more heavily. The value here is
6683 arbitrary and could potentially be improved with analysis. */
92345349 6684 if (where == vect_body && stmt_info && stmt_in_inner_loop_p (stmt_info))
c3e7ee41
BS
6685 count *= 50; /* FIXME. */
6686
6687 retval = (unsigned) (count * stmt_cost);
92345349 6688 cost[where] += retval;
c3e7ee41
BS
6689 }
6690
6691 return retval;
6692}
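/* As an example, one vector_store statement in the loop body contributes
   1 to cost[vect_body]; the same statement inside an inner loop of the
   loop being vectorized is weighted by the factor of 50 above and
   contributes 50 instead.  */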
6693
6694/* Implement targetm.vectorize.finish_cost. */
6695
92345349
BS
6696static void
6697spu_finish_cost (void *data, unsigned *prologue_cost,
6698 unsigned *body_cost, unsigned *epilogue_cost)
c3e7ee41 6699{
92345349
BS
6700 unsigned *cost = (unsigned *) data;
6701 *prologue_cost = cost[vect_prologue];
6702 *body_cost = cost[vect_body];
6703 *epilogue_cost = cost[vect_epilogue];
c3e7ee41
BS
6704}
6705
6706/* Implement targetm.vectorize.destroy_cost_data. */
6707
cf0d4e15 6708static void
c3e7ee41
BS
6709spu_destroy_cost_data (void *data)
6710{
6711 free (data);
6712}
6713
99c9c69a
DN
 6714/* Return true iff a data reference of TYPE can reach vector alignment (16)
 6715 after applying N iterations. This routine does not determine
 6716 how many iterations are required to reach the desired alignment. */
6717
6718static bool
3101faab 6719spu_vector_alignment_reachable (const_tree type ATTRIBUTE_UNUSED, bool is_packed)
99c9c69a
DN
6720{
6721 if (is_packed)
6722 return false;
6723
6724 /* All other types are naturally aligned. */
6725 return true;
6726}
6727
299456f3 6728/* Return the appropriate mode for a named address pointer. */
ef4bddc2 6729static machine_mode
299456f3
BE
6730spu_addr_space_pointer_mode (addr_space_t addrspace)
6731{
6732 switch (addrspace)
6733 {
6734 case ADDR_SPACE_GENERIC:
6735 return ptr_mode;
6736 case ADDR_SPACE_EA:
6737 return EAmode;
6738 default:
6739 gcc_unreachable ();
6740 }
6741}
6742
6743/* Return the appropriate mode for a named address address. */
ef4bddc2 6744static machine_mode
299456f3
BE
6745spu_addr_space_address_mode (addr_space_t addrspace)
6746{
6747 switch (addrspace)
6748 {
6749 case ADDR_SPACE_GENERIC:
6750 return Pmode;
6751 case ADDR_SPACE_EA:
6752 return EAmode;
6753 default:
6754 gcc_unreachable ();
6755 }
6756}
6757
6758/* Determine if one named address space is a subset of another. */
6759
6760static bool
6761spu_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
6762{
6763 gcc_assert (subset == ADDR_SPACE_GENERIC || subset == ADDR_SPACE_EA);
6764 gcc_assert (superset == ADDR_SPACE_GENERIC || superset == ADDR_SPACE_EA);
6765
6766 if (subset == superset)
6767 return true;
6768
6769 /* If we have -mno-address-space-conversion, treat __ea and generic as not
6770 being subsets but instead as disjoint address spaces. */
6771 else if (!TARGET_ADDRESS_SPACE_CONVERSION)
6772 return false;
6773
6774 else
6775 return (subset == ADDR_SPACE_GENERIC && superset == ADDR_SPACE_EA);
6776}
6777
6778/* Convert from one address space to another. */
6779static rtx
6780spu_addr_space_convert (rtx op, tree from_type, tree to_type)
6781{
6782 addr_space_t from_as = TYPE_ADDR_SPACE (TREE_TYPE (from_type));
6783 addr_space_t to_as = TYPE_ADDR_SPACE (TREE_TYPE (to_type));
6784
6785 gcc_assert (from_as == ADDR_SPACE_GENERIC || from_as == ADDR_SPACE_EA);
6786 gcc_assert (to_as == ADDR_SPACE_GENERIC || to_as == ADDR_SPACE_EA);
6787
6788 if (to_as == ADDR_SPACE_GENERIC && from_as == ADDR_SPACE_EA)
6789 {
6790 rtx result, ls;
6791
6792 ls = gen_const_mem (DImode,
6793 gen_rtx_SYMBOL_REF (Pmode, "__ea_local_store"));
6794 set_mem_align (ls, 128);
6795
6796 result = gen_reg_rtx (Pmode);
6797 ls = force_reg (Pmode, convert_modes (Pmode, DImode, ls, 1));
6798 op = force_reg (Pmode, convert_modes (Pmode, EAmode, op, 1));
6799 ls = emit_conditional_move (ls, NE, op, const0_rtx, Pmode,
6800 ls, const0_rtx, Pmode, 1);
6801
6802 emit_insn (gen_subsi3 (result, op, ls));
6803
6804 return result;
6805 }
6806
6807 else if (to_as == ADDR_SPACE_EA && from_as == ADDR_SPACE_GENERIC)
6808 {
6809 rtx result, ls;
6810
6811 ls = gen_const_mem (DImode,
6812 gen_rtx_SYMBOL_REF (Pmode, "__ea_local_store"));
6813 set_mem_align (ls, 128);
6814
6815 result = gen_reg_rtx (EAmode);
6816 ls = force_reg (EAmode, convert_modes (EAmode, DImode, ls, 1));
6817 op = force_reg (Pmode, op);
6818 ls = emit_conditional_move (ls, NE, op, const0_rtx, Pmode,
6819 ls, const0_rtx, EAmode, 1);
6820 op = force_reg (EAmode, convert_modes (EAmode, Pmode, op, 1));
6821
6822 if (EAmode == SImode)
6823 emit_insn (gen_addsi3 (result, op, ls));
6824 else
6825 emit_insn (gen_adddi3 (result, op, ls));
6826
6827 return result;
6828 }
6829
6830 else
6831 gcc_unreachable ();
6832}
6833
6834
67186a97
TS
6835/* Count the total number of instructions in each pipe and return the
6836 maximum, which is used as the Minimum Iteration Interval (MII)
 6837 in the modulo scheduler. get_pipe() will return -2, -1, 0, or 1;
 6838 a value of -2 means the instruction can go in either pipe0 or pipe1. */
6839static int
6840spu_sms_res_mii (struct ddg *g)
6841{
6842 int i;
6843 unsigned t[4] = {0, 0, 0, 0};
6844
6845 for (i = 0; i < g->num_nodes; i++)
6846 {
23c39aaa 6847 rtx_insn *insn = g->nodes[i].insn;
67186a97
TS
6848 int p = get_pipe (insn) + 2;
6849
819bfe0e
JM
6850 gcc_assert (p >= 0);
6851 gcc_assert (p < 4);
67186a97
TS
6852
6853 t[p]++;
6854 if (dump_file && INSN_P (insn))
6855 fprintf (dump_file, "i%d %s %d %d\n",
6856 INSN_UID (insn),
6857 insn_data[INSN_CODE(insn)].name,
6858 p, t[p]);
6859 }
6860 if (dump_file)
6861 fprintf (dump_file, "%d %d %d %d\n", t[0], t[1], t[2], t[3]);
6862
6863 return MAX ((t[0] + t[2] + t[3] + 1) / 2, MAX (t[2], t[3]));
6864}
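/* Example of the formula above: with t[] = {3, 0, 2, 1} (three instructions
   that can issue in either pipe, two pipe0 instructions, one pipe1
   instruction) the result is MAX ((3 + 2 + 1 + 1) / 2, MAX (2, 1)) = 3.  */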
6865
6866
73701e27
TS
6867void
6868spu_init_expanders (void)
eec9405e 6869{
73701e27 6870 if (cfun)
eec9405e
TS
6871 {
6872 rtx r0, r1;
 6873 /* The hard frame pointer register is only 128 bit aligned when
6874 frame_pointer_needed is true. We don't know that until we're
6875 expanding the prologue. */
6876 REGNO_POINTER_ALIGN (HARD_FRAME_POINTER_REGNUM) = 8;
6877
6878 /* A number of passes use LAST_VIRTUAL_REGISTER+1 and
6879 LAST_VIRTUAL_REGISTER+2 to test the back-end. We want them
6880 to be treated as aligned, so generate them here. */
6881 r0 = gen_reg_rtx (SImode);
6882 r1 = gen_reg_rtx (SImode);
6883 mark_reg_pointer (r0, 128);
6884 mark_reg_pointer (r1, 128);
6885 gcc_assert (REGNO (r0) == LAST_VIRTUAL_REGISTER + 1
6886 && REGNO (r1) == LAST_VIRTUAL_REGISTER + 2);
6887 }
7fecf2c7
AP
6888}
6889
ef4bddc2 6890static machine_mode
7fecf2c7
AP
6891spu_libgcc_cmp_return_mode (void)
6892{
6893
 6894/* For SPU, word mode is TImode, so it is better to use SImode
 6895 for compare returns. */
6896 return SImode;
6897}
6898
ef4bddc2 6899static machine_mode
7fecf2c7
AP
6900spu_libgcc_shift_count_mode (void)
6901{
 6902/* For SPU, word mode is TImode, so it is better to use SImode
 6903 for shift counts. */
6904 return SImode;
6905}
9dcc2e87 6906
500a1f85
UW
6907/* Implement targetm.section_type_flags. */
6908static unsigned int
6909spu_section_type_flags (tree decl, const char *name, int reloc)
6910{
6911 /* .toe needs to have type @nobits. */
6912 if (strcmp (name, ".toe") == 0)
6913 return SECTION_BSS;
299456f3
BE
6914 /* Don't load _ea into the current address space. */
6915 if (strcmp (name, "._ea") == 0)
6916 return SECTION_WRITE | SECTION_DEBUG;
500a1f85
UW
6917 return default_section_type_flags (decl, name, reloc);
6918}
4a3a2376 6919
299456f3
BE
6920/* Implement targetm.select_section. */
6921static section *
6922spu_select_section (tree decl, int reloc, unsigned HOST_WIDE_INT align)
6923{
6924 /* Variables and constants defined in the __ea address space
6925 go into a special section named "._ea". */
6926 if (TREE_TYPE (decl) != error_mark_node
6927 && TYPE_ADDR_SPACE (TREE_TYPE (decl)) == ADDR_SPACE_EA)
6928 {
6929 /* We might get called with string constants, but get_named_section
6930 doesn't like them as they are not DECLs. Also, we need to set
6931 flags in that case. */
6932 if (!DECL_P (decl))
6933 return get_section ("._ea", SECTION_WRITE | SECTION_DEBUG, NULL);
6934
6935 return get_named_section (decl, "._ea", reloc);
6936 }
6937
6938 return default_elf_select_section (decl, reloc, align);
6939}
6940
6941/* Implement targetm.unique_section. */
6942static void
6943spu_unique_section (tree decl, int reloc)
6944{
6945 /* We don't support unique section names in the __ea address
6946 space for now. */
6947 if (TREE_TYPE (decl) != error_mark_node
6948 && TYPE_ADDR_SPACE (TREE_TYPE (decl)) != 0)
6949 return;
6950
6951 default_unique_section (decl, reloc);
6952}
6953
5345cf68
TS
6954/* Generate a constant or register which contains 2^SCALE. We assume
6955 the result is valid for MODE. Currently, MODE must be V4SFmode and
6956 SCALE must be SImode. */
6957rtx
ef4bddc2 6958spu_gen_exp2 (machine_mode mode, rtx scale)
5345cf68
TS
6959{
6960 gcc_assert (mode == V4SFmode);
6961 gcc_assert (GET_MODE (scale) == SImode || GET_CODE (scale) == CONST_INT);
6962 if (GET_CODE (scale) != CONST_INT)
6963 {
6964 /* unsigned int exp = (127 + scale) << 23;
6965 __vector float m = (__vector float) spu_splats (exp); */
6966 rtx reg = force_reg (SImode, scale);
6967 rtx exp = gen_reg_rtx (SImode);
6968 rtx mul = gen_reg_rtx (mode);
6969 emit_insn (gen_addsi3 (exp, reg, GEN_INT (127)));
6970 emit_insn (gen_ashlsi3 (exp, exp, GEN_INT (23)));
6971 emit_insn (gen_spu_splats (mul, gen_rtx_SUBREG (GET_MODE_INNER (mode), exp, 0)));
6972 return mul;
6973 }
6974 else
6975 {
6976 HOST_WIDE_INT exp = 127 + INTVAL (scale);
6977 unsigned char arr[16];
6978 arr[0] = arr[4] = arr[8] = arr[12] = exp >> 1;
6979 arr[1] = arr[5] = arr[9] = arr[13] = exp << 7;
6980 arr[2] = arr[6] = arr[10] = arr[14] = 0;
6981 arr[3] = arr[7] = arr[11] = arr[15] = 0;
6982 return array_to_constant (mode, arr);
6983 }
6984}
6985
eec9405e
TS
6986/* After reload, just change the convert into a move instruction
6987 or a dead instruction. */
6988void
6989spu_split_convert (rtx ops[])
6990{
6991 if (REGNO (ops[0]) == REGNO (ops[1]))
6992 emit_note (NOTE_INSN_DELETED);
6993 else
6994 {
6995 /* Use TImode always as this might help hard reg copyprop. */
6996 rtx op0 = gen_rtx_REG (TImode, REGNO (ops[0]));
6997 rtx op1 = gen_rtx_REG (TImode, REGNO (ops[1]));
6998 emit_insn (gen_move_insn (op0, op1));
6999 }
7000}
7001
75741fed 7002void
d707fc77 7003spu_function_profiler (FILE * file, int labelno ATTRIBUTE_UNUSED)
75741fed
KW
7004{
7005 fprintf (file, "# profile\n");
7006 fprintf (file, "brsl $75, _mcount\n");
7007}
7008
d4f2460a
UW
7009/* Implement targetm.ref_may_alias_errno. */
7010static bool
7011spu_ref_may_alias_errno (ao_ref *ref)
7012{
7013 tree base = ao_ref_base (ref);
7014
7015 /* With SPU newlib, errno is defined as something like
7016 _impure_data._errno
7017 The default implementation of this target macro does not
 7018 recognize such expressions, so we special-case it here. */
7019
7020 if (TREE_CODE (base) == VAR_DECL
7021 && !TREE_STATIC (base)
7022 && DECL_EXTERNAL (base)
7023 && TREE_CODE (TREE_TYPE (base)) == RECORD_TYPE
7024 && strcmp (IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (base)),
7025 "_impure_data") == 0
7026 /* _errno is the first member of _impure_data. */
7027 && ref->offset == 0)
7028 return true;
7029
7030 return default_ref_may_alias_errno (ref);
7031}
7032
07ea0048
UW
7033/* Output thunk to FILE that implements a C++ virtual function call (with
7034 multiple inheritance) to FUNCTION. The thunk adjusts the this pointer
7035 by DELTA, and unless VCALL_OFFSET is zero, applies an additional adjustment
7036 stored at VCALL_OFFSET in the vtable whose address is located at offset 0
7037 relative to the resulting this pointer. */
7038
7039static void
7040spu_output_mi_thunk (FILE *file, tree thunk ATTRIBUTE_UNUSED,
7041 HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
7042 tree function)
7043{
7044 rtx op[8];
7045
7046 /* Make sure unwind info is emitted for the thunk if needed. */
7047 final_start_function (emit_barrier (), file, 1);
7048
7049 /* Operand 0 is the target function. */
7050 op[0] = XEXP (DECL_RTL (function), 0);
7051
7052 /* Operand 1 is the 'this' pointer. */
7053 if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
7054 op[1] = gen_rtx_REG (Pmode, FIRST_ARG_REGNUM + 1);
7055 else
7056 op[1] = gen_rtx_REG (Pmode, FIRST_ARG_REGNUM);
7057
7058 /* Operands 2/3 are the low/high halfwords of delta. */
7059 op[2] = GEN_INT (trunc_int_for_mode (delta, HImode));
7060 op[3] = GEN_INT (trunc_int_for_mode (delta >> 16, HImode));
7061
7062 /* Operands 4/5 are the low/high halfwords of vcall_offset. */
7063 op[4] = GEN_INT (trunc_int_for_mode (vcall_offset, HImode));
7064 op[5] = GEN_INT (trunc_int_for_mode (vcall_offset >> 16, HImode));
7065
7066 /* Operands 6/7 are temporary registers. */
7067 op[6] = gen_rtx_REG (Pmode, 79);
7068 op[7] = gen_rtx_REG (Pmode, 78);
7069
7070 /* Add DELTA to this pointer. */
7071 if (delta)
7072 {
7073 if (delta >= -0x200 && delta < 0x200)
7074 output_asm_insn ("ai\t%1,%1,%2", op);
7075 else if (delta >= -0x8000 && delta < 0x8000)
7076 {
7077 output_asm_insn ("il\t%6,%2", op);
7078 output_asm_insn ("a\t%1,%1,%6", op);
7079 }
7080 else
7081 {
7082 output_asm_insn ("ilhu\t%6,%3", op);
7083 output_asm_insn ("iohl\t%6,%2", op);
7084 output_asm_insn ("a\t%1,%1,%6", op);
7085 }
7086 }
7087
7088 /* Perform vcall adjustment. */
7089 if (vcall_offset)
7090 {
7091 output_asm_insn ("lqd\t%7,0(%1)", op);
7092 output_asm_insn ("rotqby\t%7,%7,%1", op);
7093
7094 if (vcall_offset >= -0x200 && vcall_offset < 0x200)
7095 output_asm_insn ("ai\t%7,%7,%4", op);
7096 else if (vcall_offset >= -0x8000 && vcall_offset < 0x8000)
7097 {
7098 output_asm_insn ("il\t%6,%4", op);
7099 output_asm_insn ("a\t%7,%7,%6", op);
7100 }
7101 else
7102 {
7103 output_asm_insn ("ilhu\t%6,%5", op);
7104 output_asm_insn ("iohl\t%6,%4", op);
7105 output_asm_insn ("a\t%7,%7,%6", op);
7106 }
7107
7108 output_asm_insn ("lqd\t%6,0(%7)", op);
7109 output_asm_insn ("rotqby\t%6,%6,%7", op);
7110 output_asm_insn ("a\t%1,%1,%6", op);
7111 }
7112
7113 /* Jump to target. */
7114 output_asm_insn ("br\t%0", op);
7115
7116 final_end_function ();
7117}
7118
c354951b
AK
7119/* Canonicalize a comparison from one we don't have to one we do have. */
7120static void
7121spu_canonicalize_comparison (int *code, rtx *op0, rtx *op1,
7122 bool op0_preserve_value)
7123{
7124 if (!op0_preserve_value
7125 && (*code == LE || *code == LT || *code == LEU || *code == LTU))
7126 {
7127 rtx tem = *op0;
7128 *op0 = *op1;
7129 *op1 = tem;
7130 *code = (int)swap_condition ((enum rtx_code)*code);
7131 }
7132}
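/* For example, (lt:SI a b) with !op0_preserve_value becomes (gt:SI b a),
   since the SPU compare patterns are the equality / greater-than family
   (ceq, cgt, clgt; see get_vec_cmp_insn above).  */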
c9c72699
UW
7133\f
7134/* Table of machine attributes. */
7135static const struct attribute_spec spu_attribute_table[] =
7136{
7137 /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler,
7138 affects_type_identity } */
7139 { "naked", 0, 0, true, false, false, spu_handle_fndecl_attribute,
7140 false },
7141 { "spu_vector", 0, 0, false, true, false, spu_handle_vector_attribute,
7142 false },
7143 { NULL, 0, 0, false, false, false, NULL, false }
7144};
7145
7146/* TARGET overrides. */
7147
7148#undef TARGET_ADDR_SPACE_POINTER_MODE
7149#define TARGET_ADDR_SPACE_POINTER_MODE spu_addr_space_pointer_mode
7150
7151#undef TARGET_ADDR_SPACE_ADDRESS_MODE
7152#define TARGET_ADDR_SPACE_ADDRESS_MODE spu_addr_space_address_mode
7153
7154#undef TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P
7155#define TARGET_ADDR_SPACE_LEGITIMATE_ADDRESS_P \
7156 spu_addr_space_legitimate_address_p
7157
7158#undef TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS
7159#define TARGET_ADDR_SPACE_LEGITIMIZE_ADDRESS spu_addr_space_legitimize_address
7160
7161#undef TARGET_ADDR_SPACE_SUBSET_P
7162#define TARGET_ADDR_SPACE_SUBSET_P spu_addr_space_subset_p
7163
7164#undef TARGET_ADDR_SPACE_CONVERT
7165#define TARGET_ADDR_SPACE_CONVERT spu_addr_space_convert
7166
7167#undef TARGET_INIT_BUILTINS
7168#define TARGET_INIT_BUILTINS spu_init_builtins
7169#undef TARGET_BUILTIN_DECL
7170#define TARGET_BUILTIN_DECL spu_builtin_decl
7171
7172#undef TARGET_EXPAND_BUILTIN
7173#define TARGET_EXPAND_BUILTIN spu_expand_builtin
7174
7175#undef TARGET_UNWIND_WORD_MODE
7176#define TARGET_UNWIND_WORD_MODE spu_unwind_word_mode
7177
7178#undef TARGET_LEGITIMIZE_ADDRESS
7179#define TARGET_LEGITIMIZE_ADDRESS spu_legitimize_address
7180
7181/* The current assembler doesn't like .4byte foo@ppu, so use the normal .long
7182 and .quad for the debugger. When it is known that the assembler is fixed,
7183 these can be removed. */
7184#undef TARGET_ASM_UNALIGNED_SI_OP
7185#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
7186
7187#undef TARGET_ASM_ALIGNED_DI_OP
7188#define TARGET_ASM_ALIGNED_DI_OP "\t.quad\t"
7189
7190/* The .8byte directive doesn't seem to work well for a 32 bit
7191 architecture. */
7192#undef TARGET_ASM_UNALIGNED_DI_OP
7193#define TARGET_ASM_UNALIGNED_DI_OP NULL
7194
7195#undef TARGET_RTX_COSTS
7196#define TARGET_RTX_COSTS spu_rtx_costs
7197
7198#undef TARGET_ADDRESS_COST
b413068c 7199#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0
c9c72699
UW
7200
7201#undef TARGET_SCHED_ISSUE_RATE
7202#define TARGET_SCHED_ISSUE_RATE spu_sched_issue_rate
7203
7204#undef TARGET_SCHED_INIT_GLOBAL
7205#define TARGET_SCHED_INIT_GLOBAL spu_sched_init_global
7206
7207#undef TARGET_SCHED_INIT
7208#define TARGET_SCHED_INIT spu_sched_init
7209
7210#undef TARGET_SCHED_VARIABLE_ISSUE
7211#define TARGET_SCHED_VARIABLE_ISSUE spu_sched_variable_issue
7212
7213#undef TARGET_SCHED_REORDER
7214#define TARGET_SCHED_REORDER spu_sched_reorder
7215
7216#undef TARGET_SCHED_REORDER2
7217#define TARGET_SCHED_REORDER2 spu_sched_reorder
7218
7219#undef TARGET_SCHED_ADJUST_COST
7220#define TARGET_SCHED_ADJUST_COST spu_sched_adjust_cost
7221
7222#undef TARGET_ATTRIBUTE_TABLE
7223#define TARGET_ATTRIBUTE_TABLE spu_attribute_table
7224
7225#undef TARGET_ASM_INTEGER
7226#define TARGET_ASM_INTEGER spu_assemble_integer
7227
7228#undef TARGET_SCALAR_MODE_SUPPORTED_P
7229#define TARGET_SCALAR_MODE_SUPPORTED_P spu_scalar_mode_supported_p
7230
7231#undef TARGET_VECTOR_MODE_SUPPORTED_P
7232#define TARGET_VECTOR_MODE_SUPPORTED_P spu_vector_mode_supported_p
7233
7234#undef TARGET_FUNCTION_OK_FOR_SIBCALL
7235#define TARGET_FUNCTION_OK_FOR_SIBCALL spu_function_ok_for_sibcall
7236
7237#undef TARGET_ASM_GLOBALIZE_LABEL
7238#define TARGET_ASM_GLOBALIZE_LABEL spu_asm_globalize_label
7239
7240#undef TARGET_PASS_BY_REFERENCE
7241#define TARGET_PASS_BY_REFERENCE spu_pass_by_reference
7242
7243#undef TARGET_FUNCTION_ARG
7244#define TARGET_FUNCTION_ARG spu_function_arg
7245
7246#undef TARGET_FUNCTION_ARG_ADVANCE
7247#define TARGET_FUNCTION_ARG_ADVANCE spu_function_arg_advance
7248
7249#undef TARGET_MUST_PASS_IN_STACK
7250#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
7251
7252#undef TARGET_BUILD_BUILTIN_VA_LIST
7253#define TARGET_BUILD_BUILTIN_VA_LIST spu_build_builtin_va_list
7254
7255#undef TARGET_EXPAND_BUILTIN_VA_START
7256#define TARGET_EXPAND_BUILTIN_VA_START spu_va_start
7257
7258#undef TARGET_SETUP_INCOMING_VARARGS
7259#define TARGET_SETUP_INCOMING_VARARGS spu_setup_incoming_varargs
7260
7261#undef TARGET_MACHINE_DEPENDENT_REORG
7262#define TARGET_MACHINE_DEPENDENT_REORG spu_machine_dependent_reorg
7263
7264#undef TARGET_GIMPLIFY_VA_ARG_EXPR
7265#define TARGET_GIMPLIFY_VA_ARG_EXPR spu_gimplify_va_arg_expr
7266
7267#undef TARGET_INIT_LIBFUNCS
7268#define TARGET_INIT_LIBFUNCS spu_init_libfuncs
7269
7270#undef TARGET_RETURN_IN_MEMORY
7271#define TARGET_RETURN_IN_MEMORY spu_return_in_memory
7272
7273#undef TARGET_ENCODE_SECTION_INFO
7274#define TARGET_ENCODE_SECTION_INFO spu_encode_section_info
7275
7276#undef TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD
7277#define TARGET_VECTORIZE_BUILTIN_MASK_FOR_LOAD spu_builtin_mask_for_load
7278
7279#undef TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST
7280#define TARGET_VECTORIZE_BUILTIN_VECTORIZATION_COST spu_builtin_vectorization_cost
7281
7282#undef TARGET_VECTORIZE_INIT_COST
7283#define TARGET_VECTORIZE_INIT_COST spu_init_cost
7284
7285#undef TARGET_VECTORIZE_ADD_STMT_COST
7286#define TARGET_VECTORIZE_ADD_STMT_COST spu_add_stmt_cost
7287
7288#undef TARGET_VECTORIZE_FINISH_COST
7289#define TARGET_VECTORIZE_FINISH_COST spu_finish_cost
7290
7291#undef TARGET_VECTORIZE_DESTROY_COST_DATA
7292#define TARGET_VECTORIZE_DESTROY_COST_DATA spu_destroy_cost_data
7293
7294#undef TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE
7295#define TARGET_VECTORIZE_VECTOR_ALIGNMENT_REACHABLE spu_vector_alignment_reachable
7296
7297#undef TARGET_LIBGCC_CMP_RETURN_MODE
7298#define TARGET_LIBGCC_CMP_RETURN_MODE spu_libgcc_cmp_return_mode
7299
7300#undef TARGET_LIBGCC_SHIFT_COUNT_MODE
7301#define TARGET_LIBGCC_SHIFT_COUNT_MODE spu_libgcc_shift_count_mode
7302
7303#undef TARGET_SCHED_SMS_RES_MII
7304#define TARGET_SCHED_SMS_RES_MII spu_sms_res_mii
7305
7306#undef TARGET_SECTION_TYPE_FLAGS
7307#define TARGET_SECTION_TYPE_FLAGS spu_section_type_flags
7308
7309#undef TARGET_ASM_SELECT_SECTION
7310#define TARGET_ASM_SELECT_SECTION spu_select_section
7311
7312#undef TARGET_ASM_UNIQUE_SECTION
7313#define TARGET_ASM_UNIQUE_SECTION spu_unique_section
7314
7315#undef TARGET_LEGITIMATE_ADDRESS_P
7316#define TARGET_LEGITIMATE_ADDRESS_P spu_legitimate_address_p
7317
7318#undef TARGET_LEGITIMATE_CONSTANT_P
7319#define TARGET_LEGITIMATE_CONSTANT_P spu_legitimate_constant_p
7320
7321#undef TARGET_TRAMPOLINE_INIT
7322#define TARGET_TRAMPOLINE_INIT spu_trampoline_init
7323
7324#undef TARGET_WARN_FUNC_RETURN
7325#define TARGET_WARN_FUNC_RETURN spu_warn_func_return
7326
7327#undef TARGET_OPTION_OVERRIDE
7328#define TARGET_OPTION_OVERRIDE spu_option_override
7329
7330#undef TARGET_CONDITIONAL_REGISTER_USAGE
7331#define TARGET_CONDITIONAL_REGISTER_USAGE spu_conditional_register_usage
7332
7333#undef TARGET_REF_MAY_ALIAS_ERRNO
7334#define TARGET_REF_MAY_ALIAS_ERRNO spu_ref_may_alias_errno
7335
7336#undef TARGET_ASM_OUTPUT_MI_THUNK
7337#define TARGET_ASM_OUTPUT_MI_THUNK spu_output_mi_thunk
7338#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
7339#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_const_tree_hwi_hwi_const_tree_true
7340
7341/* Variable tracking should be run after all optimizations which
7342 change the order of insns. It also needs a valid CFG. */
7343#undef TARGET_DELAY_VARTRACK
7344#define TARGET_DELAY_VARTRACK true
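/* Editor's note, hedged: with TARGET_DELAY_VARTRACK set to true, the
   generic variable-tracking pass does not run on its own; the back end
   is expected to invoke variable tracking itself (presumably from
   spu_machine_dependent_reorg) once its insn reordering is finished.  */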
7345
7346#undef TARGET_CANONICALIZE_COMPARISON
7347#define TARGET_CANONICALIZE_COMPARISON spu_canonicalize_comparison
7348
7349#undef TARGET_CAN_USE_DOLOOP_P
7350#define TARGET_CAN_USE_DOLOOP_P can_use_doloop_if_innermost
7351
7352struct gcc_target targetm = TARGET_INITIALIZER;
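/* Editor's sketch, not part of the original source: TARGET_INITIALIZER
   (from target-def.h) expands using the TARGET_* macros defined above,
   so generic code that dispatches through this table, e.g.

     ok = targetm.legitimate_address_p (mode, x, strict);

   ends up in spu_legitimate_address_p for this back end.  */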
7353
4a3a2376 7354#include "gt-spu.h"