/* Subroutines for insn-output.c for Tensilica's Xtensa architecture.
   Copyright 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011,
   2012 Free Software Foundation, Inc.
   Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "basic-block.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-flags.h"
#include "insn-attr.h"
#include "insn-codes.h"
#include "recog.h"
#include "output.h"
#include "tree.h"
#include "expr.h"
#include "flags.h"
#include "reload.h"
#include "tm_p.h"
#include "function.h"
#include "diagnostic-core.h"
#include "optabs.h"
#include "libfuncs.h"
#include "ggc.h"
#include "target.h"
#include "target-def.h"
#include "langhooks.h"
#include "gimple.h"
#include "df.h"


/* Enumeration for all of the relational tests, so that we can build
   arrays indexed by the test type, and not worry about the order
   of EQ, NE, etc.  */

enum internal_test
{
  ITEST_EQ,
  ITEST_NE,
  ITEST_GT,
  ITEST_GE,
  ITEST_LT,
  ITEST_LE,
  ITEST_GTU,
  ITEST_GEU,
  ITEST_LTU,
  ITEST_LEU,
  ITEST_MAX
};

/* Array giving truth value on whether or not a given hard register
   can support a given mode.  */
char xtensa_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];

/* Current frame size calculated by compute_frame_size.  */
unsigned xtensa_current_frame_size;

/* Largest block move to handle in-line.  */
#define LARGEST_MOVE_RATIO 15

/* Define the structure for the machine field in struct function.  */
struct GTY(()) machine_function
{
  int accesses_prev_frame;
  bool need_a7_copy;
  bool vararg_a7;
  rtx vararg_a7_copy;
  rtx set_frame_ptr_insn;
};

/* Vector, indexed by hard register number, which contains 1 for a
   register that is allowable in a candidate for leaf function
   treatment.  */

const char xtensa_leaf_regs[FIRST_PSEUDO_REGISTER] =
{
  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  1
};

/* Map hard register number to register class.  */
const enum reg_class xtensa_regno_to_class[FIRST_PSEUDO_REGISTER] =
{
  RL_REGS, SP_REG, RL_REGS, RL_REGS,
  RL_REGS, RL_REGS, RL_REGS, GR_REGS,
  RL_REGS, RL_REGS, RL_REGS, RL_REGS,
  RL_REGS, RL_REGS, RL_REGS, RL_REGS,
  AR_REGS, AR_REGS, BR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ACC_REG,
};

static void xtensa_option_override (void);
static enum internal_test map_test_to_internal_test (enum rtx_code);
static rtx gen_int_relational (enum rtx_code, rtx, rtx, int *);
static rtx gen_float_relational (enum rtx_code, rtx, rtx);
static rtx gen_conditional_move (enum rtx_code, enum machine_mode, rtx, rtx);
static rtx fixup_subreg_mem (rtx);
static struct machine_function * xtensa_init_machine_status (void);
static rtx xtensa_legitimize_tls_address (rtx);
static rtx xtensa_legitimize_address (rtx, rtx, enum machine_mode);
static bool xtensa_mode_dependent_address_p (const_rtx, addr_space_t);
static bool xtensa_return_in_msb (const_tree);
static void printx (FILE *, signed int);
static void xtensa_function_epilogue (FILE *, HOST_WIDE_INT);
static rtx xtensa_builtin_saveregs (void);
static bool xtensa_legitimate_address_p (enum machine_mode, rtx, bool);
static unsigned int xtensa_multibss_section_type_flags (tree, const char *,
                                                        int) ATTRIBUTE_UNUSED;
static section *xtensa_select_rtx_section (enum machine_mode, rtx,
                                           unsigned HOST_WIDE_INT);
static bool xtensa_rtx_costs (rtx, int, int, int, int *, bool);
static int xtensa_register_move_cost (enum machine_mode, reg_class_t,
                                      reg_class_t);
static int xtensa_memory_move_cost (enum machine_mode, reg_class_t, bool);
static tree xtensa_build_builtin_va_list (void);
static bool xtensa_return_in_memory (const_tree, const_tree);
static tree xtensa_gimplify_va_arg_expr (tree, tree, gimple_seq *,
                                         gimple_seq *);
static void xtensa_function_arg_advance (cumulative_args_t, enum machine_mode,
                                         const_tree, bool);
static rtx xtensa_function_arg (cumulative_args_t, enum machine_mode,
                                const_tree, bool);
static rtx xtensa_function_incoming_arg (cumulative_args_t,
                                         enum machine_mode, const_tree, bool);
static rtx xtensa_function_value (const_tree, const_tree, bool);
static rtx xtensa_libcall_value (enum machine_mode, const_rtx);
static bool xtensa_function_value_regno_p (const unsigned int);
static unsigned int xtensa_function_arg_boundary (enum machine_mode,
                                                  const_tree);
static void xtensa_init_builtins (void);
static tree xtensa_fold_builtin (tree, int, tree *, bool);
static rtx xtensa_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
static void xtensa_va_start (tree, rtx);
static bool xtensa_frame_pointer_required (void);
static rtx xtensa_static_chain (const_tree, bool);
static void xtensa_asm_trampoline_template (FILE *);
static void xtensa_trampoline_init (rtx, tree, rtx);
static bool xtensa_output_addr_const_extra (FILE *, rtx);
static bool xtensa_cannot_force_const_mem (enum machine_mode, rtx);

static reg_class_t xtensa_preferred_reload_class (rtx, reg_class_t);
static reg_class_t xtensa_preferred_output_reload_class (rtx, reg_class_t);
static reg_class_t xtensa_secondary_reload (bool, rtx, reg_class_t,
                                            enum machine_mode,
                                            struct secondary_reload_info *);

static bool constantpool_address_p (const_rtx addr);
static bool xtensa_legitimate_constant_p (enum machine_mode, rtx);

static bool xtensa_member_type_forces_blk (const_tree,
                                           enum machine_mode mode);

static const int reg_nonleaf_alloc_order[FIRST_PSEUDO_REGISTER] =
  REG_ALLOC_ORDER;


/* This macro generates the assembly code for function exit,
   on machines that need it.  If FUNCTION_EPILOGUE is not defined
   then individual return instructions are generated for each
   return statement.  Args are same as for FUNCTION_PROLOGUE.  */

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE xtensa_function_epilogue

/* These hooks specify assembly directives for creating certain kinds
   of integer object.  */

#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"

#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION xtensa_select_rtx_section

#undef TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS xtensa_legitimize_address
#undef TARGET_MODE_DEPENDENT_ADDRESS_P
#define TARGET_MODE_DEPENDENT_ADDRESS_P xtensa_mode_dependent_address_p

#undef TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST xtensa_register_move_cost
#undef TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST xtensa_memory_move_cost
#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS xtensa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_mode_as_bool_0

#undef TARGET_MEMBER_TYPE_FORCES_BLK
#define TARGET_MEMBER_TYPE_FORCES_BLK xtensa_member_type_forces_blk

#undef TARGET_BUILD_BUILTIN_VA_LIST
#define TARGET_BUILD_BUILTIN_VA_LIST xtensa_build_builtin_va_list

#undef TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START xtensa_va_start

#undef TARGET_PROMOTE_FUNCTION_MODE
#define TARGET_PROMOTE_FUNCTION_MODE default_promote_function_mode_always_promote
#undef TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true

#undef TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY xtensa_return_in_memory
#undef TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE xtensa_function_value
#undef TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE xtensa_libcall_value
#undef TARGET_FUNCTION_VALUE_REGNO_P
#define TARGET_FUNCTION_VALUE_REGNO_P xtensa_function_value_regno_p

#undef TARGET_SPLIT_COMPLEX_ARG
#define TARGET_SPLIT_COMPLEX_ARG hook_bool_const_tree_true
#undef TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
#undef TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE xtensa_function_arg_advance
#undef TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG xtensa_function_arg
#undef TARGET_FUNCTION_INCOMING_ARG
#define TARGET_FUNCTION_INCOMING_ARG xtensa_function_incoming_arg
#undef TARGET_FUNCTION_ARG_BOUNDARY
#define TARGET_FUNCTION_ARG_BOUNDARY xtensa_function_arg_boundary

#undef TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS xtensa_builtin_saveregs
#undef TARGET_GIMPLIFY_VA_ARG_EXPR
#define TARGET_GIMPLIFY_VA_ARG_EXPR xtensa_gimplify_va_arg_expr

#undef TARGET_RETURN_IN_MSB
#define TARGET_RETURN_IN_MSB xtensa_return_in_msb

#undef TARGET_INIT_BUILTINS
#define TARGET_INIT_BUILTINS xtensa_init_builtins
#undef TARGET_FOLD_BUILTIN
#define TARGET_FOLD_BUILTIN xtensa_fold_builtin
#undef TARGET_EXPAND_BUILTIN
#define TARGET_EXPAND_BUILTIN xtensa_expand_builtin

#undef TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS xtensa_preferred_reload_class
#undef TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS xtensa_preferred_output_reload_class

#undef TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD xtensa_secondary_reload

#undef TARGET_HAVE_TLS
#define TARGET_HAVE_TLS (TARGET_THREADPTR && HAVE_AS_TLS)

#undef TARGET_CANNOT_FORCE_CONST_MEM
#define TARGET_CANNOT_FORCE_CONST_MEM xtensa_cannot_force_const_mem

#undef TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P xtensa_legitimate_address_p

#undef TARGET_FRAME_POINTER_REQUIRED
#define TARGET_FRAME_POINTER_REQUIRED xtensa_frame_pointer_required

#undef TARGET_STATIC_CHAIN
#define TARGET_STATIC_CHAIN xtensa_static_chain
#undef TARGET_ASM_TRAMPOLINE_TEMPLATE
#define TARGET_ASM_TRAMPOLINE_TEMPLATE xtensa_asm_trampoline_template
#undef TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT xtensa_trampoline_init

#undef TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE xtensa_option_override

#undef TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA xtensa_output_addr_const_extra

#undef TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P xtensa_legitimate_constant_p

struct gcc_target targetm = TARGET_INITIALIZER;


/* Functions to test Xtensa immediate operand validity.  */

bool
xtensa_simm8 (HOST_WIDE_INT v)
{
  return v >= -128 && v <= 127;
}


bool
xtensa_simm8x256 (HOST_WIDE_INT v)
{
  return (v & 255) == 0 && (v >= -32768 && v <= 32512);
}
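
/* For illustration: xtensa_simm8x256 accepts exactly the multiples of
   256 in [-32768, 32512], i.e. the constants reachable with a single
   ADDMI instruction.  0x100 passes, while 0x80 (low byte nonzero) and
   32768 (out of range) do not.  */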


bool
xtensa_simm12b (HOST_WIDE_INT v)
{
  return v >= -2048 && v <= 2047;
}


static bool
xtensa_uimm8 (HOST_WIDE_INT v)
{
  return v >= 0 && v <= 255;
}


static bool
xtensa_uimm8x2 (HOST_WIDE_INT v)
{
  return (v & 1) == 0 && (v >= 0 && v <= 510);
}


static bool
xtensa_uimm8x4 (HOST_WIDE_INT v)
{
  return (v & 3) == 0 && (v >= 0 && v <= 1020);
}


static bool
xtensa_b4const (HOST_WIDE_INT v)
{
  switch (v)
    {
    case -1:
    case 1:
    case 2:
    case 3:
    case 4:
    case 5:
    case 6:
    case 7:
    case 8:
    case 10:
    case 12:
    case 16:
    case 32:
    case 64:
    case 128:
    case 256:
      return true;
    }
  return false;
}
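
/* The cases above are the 16 constants encodable in the 4-bit
   "b4const" field of the immediate-operand conditional branches
   (BEQI, BNEI, BLTI, BGEI).  */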


bool
xtensa_b4const_or_zero (HOST_WIDE_INT v)
{
  if (v == 0)
    return true;
  return xtensa_b4const (v);
}


bool
xtensa_b4constu (HOST_WIDE_INT v)
{
  switch (v)
    {
    case 32768:
    case 65536:
    case 2:
    case 3:
    case 4:
    case 5:
    case 6:
    case 7:
    case 8:
    case 10:
    case 12:
    case 16:
    case 32:
    case 64:
    case 128:
    case 256:
      return true;
    }
  return false;
}
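
/* Likewise for the unsigned branches (BLTUI, BGEUI): the "b4constu"
   table is the same as "b4const" except that the -1 and 1 entries are
   replaced by 32768 and 65536.  */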


bool
xtensa_mask_immediate (HOST_WIDE_INT v)
{
#define MAX_MASK_SIZE 16
  int mask_size;

  for (mask_size = 1; mask_size <= MAX_MASK_SIZE; mask_size++)
    {
      if ((v & 1) == 0)
        return false;
      v = v >> 1;
      if (v == 0)
        return true;
    }

  return false;
}
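
/* In other words, the legal masks are 2^n - 1 for 1 <= n <= 16:
   0x1, 0x3, ..., 0xffff.  Values such as 0x6 (the mask does not start
   at bit 0) or 0x1ffff (17 bits wide) are rejected.  */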


/* This is just like the standard true_regnum() function except that it
   works even when reg_renumber is not initialized.  */

int
xt_true_regnum (rtx x)
{
  if (GET_CODE (x) == REG)
    {
      if (reg_renumber
          && REGNO (x) >= FIRST_PSEUDO_REGISTER
          && reg_renumber[REGNO (x)] >= 0)
        return reg_renumber[REGNO (x)];
      return REGNO (x);
    }
  if (GET_CODE (x) == SUBREG)
    {
      int base = xt_true_regnum (SUBREG_REG (x));
      if (base >= 0 && base < FIRST_PSEUDO_REGISTER)
        return base + subreg_regno_offset (REGNO (SUBREG_REG (x)),
                                           GET_MODE (SUBREG_REG (x)),
                                           SUBREG_BYTE (x), GET_MODE (x));
    }
  return -1;
}


int
xtensa_valid_move (enum machine_mode mode, rtx *operands)
{
  /* Either the destination or source must be a register, and the
     MAC16 accumulator doesn't count.  */

  if (register_operand (operands[0], mode))
    {
      int dst_regnum = xt_true_regnum (operands[0]);

      /* The stack pointer can only be assigned with a MOVSP opcode.  */
      if (dst_regnum == STACK_POINTER_REGNUM)
        return (mode == SImode
                && register_operand (operands[1], mode)
                && !ACC_REG_P (xt_true_regnum (operands[1])));

      if (!ACC_REG_P (dst_regnum))
        return true;
    }
  if (register_operand (operands[1], mode))
    {
      int src_regnum = xt_true_regnum (operands[1]);
      if (!ACC_REG_P (src_regnum))
        return true;
    }
  return FALSE;
}


int
smalloffset_mem_p (rtx op)
{
  if (GET_CODE (op) == MEM)
    {
      rtx addr = XEXP (op, 0);
      if (GET_CODE (addr) == REG)
        return BASE_REG_P (addr, 0);
      if (GET_CODE (addr) == PLUS)
        {
          rtx offset = XEXP (addr, 0);
          HOST_WIDE_INT val;
          if (GET_CODE (offset) != CONST_INT)
            offset = XEXP (addr, 1);
          if (GET_CODE (offset) != CONST_INT)
            return FALSE;

          val = INTVAL (offset);
          return (val & 3) == 0 && (val >= 0 && val <= 60);
        }
    }
  return FALSE;
}


static bool
constantpool_address_p (const_rtx addr)
{
  const_rtx sym = addr;

  if (GET_CODE (addr) == CONST)
    {
      rtx offset;

      /* Only handle (PLUS (SYM, OFFSET)) form.  */
      addr = XEXP (addr, 0);
      if (GET_CODE (addr) != PLUS)
        return false;

      /* Make sure the address is word aligned.  */
      offset = XEXP (addr, 1);
      if ((!CONST_INT_P (offset))
          || ((INTVAL (offset) & 3) != 0))
        return false;

      sym = XEXP (addr, 0);
    }

  if ((GET_CODE (sym) == SYMBOL_REF)
      && CONSTANT_POOL_ADDRESS_P (sym))
    return true;
  return false;
}


int
constantpool_mem_p (rtx op)
{
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == MEM)
    return constantpool_address_p (XEXP (op, 0));
  return FALSE;
}


/* Return TRUE if X is a thread-local symbol.  */

static bool
xtensa_tls_symbol_p (rtx x)
{
  if (! TARGET_HAVE_TLS)
    return false;

  return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
}


void
xtensa_extend_reg (rtx dst, rtx src)
{
  rtx temp = gen_reg_rtx (SImode);
  rtx shift = GEN_INT (BITS_PER_WORD - GET_MODE_BITSIZE (GET_MODE (src)));

  /* Generate paradoxical subregs as needed so that the modes match.  */
  src = simplify_gen_subreg (SImode, src, GET_MODE (src), 0);
  dst = simplify_gen_subreg (SImode, dst, GET_MODE (dst), 0);

  emit_insn (gen_ashlsi3 (temp, src, shift));
  emit_insn (gen_ashrsi3 (dst, temp, shift));
}


bool
xtensa_mem_offset (unsigned v, enum machine_mode mode)
{
  switch (mode)
    {
    case BLKmode:
      /* Handle the worst case for block moves.  See xtensa_expand_block_move
         where we emit an optimized block move operation if the block can be
         moved in < "move_ratio" pieces.  The worst case is when the block is
         aligned but has a size of (3 mod 4) (does this happen?) so that the
         last piece requires a byte load/store.  */
      return (xtensa_uimm8 (v)
              && xtensa_uimm8 (v + MOVE_MAX * LARGEST_MOVE_RATIO));

    case QImode:
      return xtensa_uimm8 (v);

    case HImode:
      return xtensa_uimm8x2 (v);

    case DFmode:
      return (xtensa_uimm8x4 (v) && xtensa_uimm8x4 (v + 4));

    default:
      break;
    }

  return xtensa_uimm8x4 (v);
}
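
/* The checks above mirror the 8-bit unsigned offset field of the
   load/store instructions, scaled by the access size: 0..255 for byte
   accesses, even offsets up to 510 for 16-bit accesses, and multiples
   of 4 up to 1020 for 32-bit accesses.  */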


/* Make normal rtx_code into something we can index from an array.  */

static enum internal_test
map_test_to_internal_test (enum rtx_code test_code)
{
  enum internal_test test = ITEST_MAX;

  switch (test_code)
    {
    default:  break;
    case EQ:  test = ITEST_EQ;  break;
    case NE:  test = ITEST_NE;  break;
    case GT:  test = ITEST_GT;  break;
    case GE:  test = ITEST_GE;  break;
    case LT:  test = ITEST_LT;  break;
    case LE:  test = ITEST_LE;  break;
    case GTU: test = ITEST_GTU; break;
    case GEU: test = ITEST_GEU; break;
    case LTU: test = ITEST_LTU; break;
    case LEU: test = ITEST_LEU; break;
    }

  return test;
}


/* Generate the code to compare two integer values.  The return value is
   the comparison expression.  */

static rtx
gen_int_relational (enum rtx_code test_code, /* relational test (EQ, etc) */
                    rtx cmp0, /* first operand to compare */
                    rtx cmp1, /* second operand to compare */
                    int *p_invert /* whether branch needs to reverse test */)
{
  struct cmp_info
  {
    enum rtx_code test_code;  /* test code to use in insn */
    bool (*const_range_p) (HOST_WIDE_INT); /* range check function */
    int const_add;            /* constant to add (convert LE -> LT) */
    int reverse_regs;         /* reverse registers in test */
    int invert_const;         /* != 0 if invert value if cmp1 is constant */
    int invert_reg;           /* != 0 if invert value if cmp1 is register */
    int unsignedp;            /* != 0 for unsigned comparisons.  */
  };

  static struct cmp_info info[ (int)ITEST_MAX ] = {

    { EQ,  xtensa_b4const_or_zero, 0, 0, 0, 0, 0 }, /* EQ  */
    { NE,  xtensa_b4const_or_zero, 0, 0, 0, 0, 0 }, /* NE  */

    { LT,  xtensa_b4const_or_zero, 1, 1, 1, 0, 0 }, /* GT  */
    { GE,  xtensa_b4const_or_zero, 0, 0, 0, 0, 0 }, /* GE  */
    { LT,  xtensa_b4const_or_zero, 0, 0, 0, 0, 0 }, /* LT  */
    { GE,  xtensa_b4const_or_zero, 1, 1, 1, 0, 0 }, /* LE  */

    { LTU, xtensa_b4constu, 1, 1, 1, 0, 1 },        /* GTU */
    { GEU, xtensa_b4constu, 0, 0, 0, 0, 1 },        /* GEU */
    { LTU, xtensa_b4constu, 0, 0, 0, 0, 1 },        /* LTU */
    { GEU, xtensa_b4constu, 1, 1, 1, 0, 1 },        /* LEU */
  };
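
  /* For example, (GT x 5) selects the GT row above: with a constant
     operand the test emitted is (LT x 6) with the branch sense
     inverted, since x > 5 <=> !(x < 6); with a register operand,
     (GT x y) instead becomes (LT y x) with no inversion.  */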

  enum internal_test test;
  enum machine_mode mode;
  struct cmp_info *p_info;

  test = map_test_to_internal_test (test_code);
  gcc_assert (test != ITEST_MAX);

  p_info = &info[ (int)test ];

  mode = GET_MODE (cmp0);
  if (mode == VOIDmode)
    mode = GET_MODE (cmp1);

  /* Make sure we can handle any constants given to us.  */
  if (GET_CODE (cmp1) == CONST_INT)
    {
      HOST_WIDE_INT value = INTVAL (cmp1);
      unsigned HOST_WIDE_INT uvalue = (unsigned HOST_WIDE_INT)value;

      /* If the immediate overflows or does not fit in the immediate
         field, spill it to a register.  */

      if ((p_info->unsignedp ?
           (uvalue + p_info->const_add > uvalue) :
           (value + p_info->const_add > value)) != (p_info->const_add > 0))
        {
          cmp1 = force_reg (mode, cmp1);
        }
      else if (!(p_info->const_range_p) (value + p_info->const_add))
        {
          cmp1 = force_reg (mode, cmp1);
        }
    }
  else if ((GET_CODE (cmp1) != REG) && (GET_CODE (cmp1) != SUBREG))
    {
      cmp1 = force_reg (mode, cmp1);
    }

  /* See if we need to invert the result.  */
  *p_invert = ((GET_CODE (cmp1) == CONST_INT)
               ? p_info->invert_const
               : p_info->invert_reg);

  /* A comparison against a constant may require adding 1 to the
     constant to convert a strict test into a non-strict one.  A
     comparison between two registers may require swapping the
     operands.  */
  if (GET_CODE (cmp1) == CONST_INT)
    {
      if (p_info->const_add != 0)
        cmp1 = GEN_INT (INTVAL (cmp1) + p_info->const_add);

    }
  else if (p_info->reverse_regs)
    {
      rtx temp = cmp0;
      cmp0 = cmp1;
      cmp1 = temp;
    }

  return gen_rtx_fmt_ee (p_info->test_code, VOIDmode, cmp0, cmp1);
}


/* Generate the code to compare two float values.  The return value is
   the comparison expression.  */

static rtx
gen_float_relational (enum rtx_code test_code, /* relational test (EQ, etc) */
                      rtx cmp0, /* first operand to compare */
                      rtx cmp1 /* second operand to compare */)
{
  rtx (*gen_fn) (rtx, rtx, rtx);
  rtx brtmp;
  int reverse_regs, invert;

  switch (test_code)
    {
    case EQ: reverse_regs = 0; invert = 0; gen_fn = gen_seq_sf; break;
    case NE: reverse_regs = 0; invert = 1; gen_fn = gen_seq_sf; break;
    case LE: reverse_regs = 0; invert = 0; gen_fn = gen_sle_sf; break;
    case GT: reverse_regs = 1; invert = 0; gen_fn = gen_slt_sf; break;
    case LT: reverse_regs = 0; invert = 0; gen_fn = gen_slt_sf; break;
    case GE: reverse_regs = 1; invert = 0; gen_fn = gen_sle_sf; break;
    case UNEQ: reverse_regs = 0; invert = 0; gen_fn = gen_suneq_sf; break;
    case LTGT: reverse_regs = 0; invert = 1; gen_fn = gen_suneq_sf; break;
    case UNLE: reverse_regs = 0; invert = 0; gen_fn = gen_sunle_sf; break;
    case UNGT: reverse_regs = 1; invert = 0; gen_fn = gen_sunlt_sf; break;
    case UNLT: reverse_regs = 0; invert = 0; gen_fn = gen_sunlt_sf; break;
    case UNGE: reverse_regs = 1; invert = 0; gen_fn = gen_sunle_sf; break;
    case UNORDERED:
      reverse_regs = 0; invert = 0; gen_fn = gen_sunordered_sf; break;
    case ORDERED:
      reverse_regs = 0; invert = 1; gen_fn = gen_sunordered_sf; break;
    default:
      fatal_insn ("bad test", gen_rtx_fmt_ee (test_code, VOIDmode, cmp0, cmp1));
      reverse_regs = 0; invert = 0; gen_fn = 0; /* avoid compiler warnings */
    }

  if (reverse_regs)
    {
      rtx temp = cmp0;
      cmp0 = cmp1;
      cmp1 = temp;
    }

  brtmp = gen_rtx_REG (CCmode, FPCC_REGNUM);
  emit_insn (gen_fn (brtmp, cmp0, cmp1));

  return gen_rtx_fmt_ee (invert ? EQ : NE, VOIDmode, brtmp, const0_rtx);
}


void
xtensa_expand_conditional_branch (rtx *operands, enum machine_mode mode)
{
  enum rtx_code test_code = GET_CODE (operands[0]);
  rtx cmp0 = operands[1];
  rtx cmp1 = operands[2];
  rtx cmp;
  int invert;
  rtx label1, label2;

  switch (mode)
    {
    case DFmode:
    default:
      fatal_insn ("bad test", gen_rtx_fmt_ee (test_code, VOIDmode, cmp0, cmp1));

    case SImode:
      invert = FALSE;
      cmp = gen_int_relational (test_code, cmp0, cmp1, &invert);
      break;

    case SFmode:
      if (!TARGET_HARD_FLOAT)
        fatal_insn ("bad test", gen_rtx_fmt_ee (test_code, VOIDmode,
                                                cmp0, cmp1));
      invert = FALSE;
      cmp = gen_float_relational (test_code, cmp0, cmp1);
      break;
    }

  /* Generate the branch.  */

  label1 = gen_rtx_LABEL_REF (VOIDmode, operands[3]);
  label2 = pc_rtx;

  if (invert)
    {
      label2 = label1;
      label1 = pc_rtx;
    }

  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
                               gen_rtx_IF_THEN_ELSE (VOIDmode, cmp,
                                                     label1,
                                                     label2)));
}


static rtx
gen_conditional_move (enum rtx_code code, enum machine_mode mode,
                      rtx op0, rtx op1)
{
  if (mode == SImode)
    {
      rtx cmp;

      /* Jump optimization calls get_condition() which canonicalizes
         comparisons like (GE x <const>) to (GT x <const-1>).
         Transform those comparisons back to GE, since that is the
         comparison supported in Xtensa.  We shouldn't have to
         transform <LE x const> comparisons, because neither
         xtensa_expand_conditional_branch() nor get_condition() will
         produce them.  */

      if ((code == GT) && (op1 == constm1_rtx))
        {
          code = GE;
          op1 = const0_rtx;
        }
      cmp = gen_rtx_fmt_ee (code, VOIDmode, cc0_rtx, const0_rtx);

      if (boolean_operator (cmp, VOIDmode))
        {
          /* Swap the operands to make const0 second.  */
          if (op0 == const0_rtx)
            {
              op0 = op1;
              op1 = const0_rtx;
            }

          /* If not comparing against zero, emit a comparison (subtract).  */
          if (op1 != const0_rtx)
            {
              op0 = expand_binop (SImode, sub_optab, op0, op1,
                                  0, 0, OPTAB_LIB_WIDEN);
              op1 = const0_rtx;
            }
        }
      else if (branch_operator (cmp, VOIDmode))
        {
          /* Swap the operands to make const0 second.  */
          if (op0 == const0_rtx)
            {
              op0 = op1;
              op1 = const0_rtx;

              switch (code)
                {
                case LT: code = GE; break;
                case GE: code = LT; break;
                default: gcc_unreachable ();
                }
            }

          if (op1 != const0_rtx)
            return 0;
        }
      else
        return 0;

      return gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
    }

  if (TARGET_HARD_FLOAT && mode == SFmode)
    return gen_float_relational (code, op0, op1);

  return 0;
}


int
xtensa_expand_conditional_move (rtx *operands, int isflt)
{
  rtx dest = operands[0];
  rtx cmp = operands[1];
  enum machine_mode cmp_mode = GET_MODE (XEXP (cmp, 0));
  rtx (*gen_fn) (rtx, rtx, rtx, rtx, rtx);

  if (!(cmp = gen_conditional_move (GET_CODE (cmp), cmp_mode,
                                    XEXP (cmp, 0), XEXP (cmp, 1))))
    return 0;

  if (isflt)
    gen_fn = (cmp_mode == SImode
              ? gen_movsfcc_internal0
              : gen_movsfcc_internal1);
  else
    gen_fn = (cmp_mode == SImode
              ? gen_movsicc_internal0
              : gen_movsicc_internal1);

  emit_insn (gen_fn (dest, XEXP (cmp, 0), operands[2], operands[3], cmp));
  return 1;
}


int
xtensa_expand_scc (rtx operands[4], enum machine_mode cmp_mode)
{
  rtx dest = operands[0];
  rtx cmp;
  rtx one_tmp, zero_tmp;
  rtx (*gen_fn) (rtx, rtx, rtx, rtx, rtx);

  if (!(cmp = gen_conditional_move (GET_CODE (operands[1]), cmp_mode,
                                    operands[2], operands[3])))
    return 0;

  one_tmp = gen_reg_rtx (SImode);
  zero_tmp = gen_reg_rtx (SImode);
  emit_insn (gen_movsi (one_tmp, const_true_rtx));
  emit_insn (gen_movsi (zero_tmp, const0_rtx));

  gen_fn = (cmp_mode == SImode
            ? gen_movsicc_internal0
            : gen_movsicc_internal1);
  emit_insn (gen_fn (dest, XEXP (cmp, 0), one_tmp, zero_tmp, cmp));
  return 1;
}


/* Split OP[1] into OP[2,3] and likewise for OP[0] into OP[0,1].  MODE is
   for the output, i.e., the input operands are twice as big as MODE.  */

void
xtensa_split_operand_pair (rtx operands[4], enum machine_mode mode)
{
  switch (GET_CODE (operands[1]))
    {
    case REG:
      operands[3] = gen_rtx_REG (mode, REGNO (operands[1]) + 1);
      operands[2] = gen_rtx_REG (mode, REGNO (operands[1]));
      break;

    case MEM:
      operands[3] = adjust_address (operands[1], mode, GET_MODE_SIZE (mode));
      operands[2] = adjust_address (operands[1], mode, 0);
      break;

    case CONST_INT:
    case CONST_DOUBLE:
      split_double (operands[1], &operands[2], &operands[3]);
      break;

    default:
      gcc_unreachable ();
    }

  switch (GET_CODE (operands[0]))
    {
    case REG:
      operands[1] = gen_rtx_REG (mode, REGNO (operands[0]) + 1);
      operands[0] = gen_rtx_REG (mode, REGNO (operands[0]));
      break;

    case MEM:
      operands[1] = adjust_address (operands[0], mode, GET_MODE_SIZE (mode));
      operands[0] = adjust_address (operands[0], mode, 0);
      break;

    default:
      gcc_unreachable ();
    }
}


/* Emit insns to move operands[1] into operands[0].
   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.  */

int
xtensa_emit_move_sequence (rtx *operands, enum machine_mode mode)
{
  rtx src = operands[1];

  if (CONSTANT_P (src)
      && (GET_CODE (src) != CONST_INT || ! xtensa_simm12b (INTVAL (src))))
    {
      rtx dst = operands[0];

      if (xtensa_tls_referenced_p (src))
        {
          rtx addend = NULL;

          if (GET_CODE (src) == CONST && GET_CODE (XEXP (src, 0)) == PLUS)
            {
              addend = XEXP (XEXP (src, 0), 1);
              src = XEXP (XEXP (src, 0), 0);
            }

          src = xtensa_legitimize_tls_address (src);
          if (addend)
            {
              src = gen_rtx_PLUS (mode, src, addend);
              src = force_operand (src, dst);
            }
          emit_move_insn (dst, src);
          return 1;
        }

      if (! TARGET_CONST16)
        {
          src = force_const_mem (SImode, src);
          operands[1] = src;
        }

      /* PC-relative loads are always SImode, and CONST16 is only
         supported in the movsi pattern, so add a SUBREG for any other
         (smaller) mode.  */

      if (mode != SImode)
        {
          if (register_operand (dst, mode))
            {
              emit_move_insn (simplify_gen_subreg (SImode, dst, mode, 0), src);
              return 1;
            }
          else
            {
              src = force_reg (SImode, src);
              src = gen_lowpart_SUBREG (mode, src);
              operands[1] = src;
            }
        }
    }

  if (!(reload_in_progress | reload_completed)
      && !xtensa_valid_move (mode, operands))
    operands[1] = force_reg (mode, operands[1]);

  operands[1] = xtensa_copy_incoming_a7 (operands[1]);

  /* During reload we don't want to emit (subreg:X (mem:Y)) since that
     instruction won't be recognized after reload, so we remove the
     subreg and adjust mem accordingly.  */
  if (reload_in_progress)
    {
      operands[0] = fixup_subreg_mem (operands[0]);
      operands[1] = fixup_subreg_mem (operands[1]);
    }
  return 0;
}


static rtx
fixup_subreg_mem (rtx x)
{
  if (GET_CODE (x) == SUBREG
      && GET_CODE (SUBREG_REG (x)) == REG
      && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER)
    {
      rtx temp =
        gen_rtx_SUBREG (GET_MODE (x),
                        reg_equiv_mem (REGNO (SUBREG_REG (x))),
                        SUBREG_BYTE (x));
      x = alter_subreg (&temp, true);
    }
  return x;
}


/* Check if an incoming argument in a7 is expected to be used soon and
   if OPND is a register or register pair that includes a7.  If so,
   create a new pseudo and copy a7 into that pseudo at the very
   beginning of the function, followed by the special "set_frame_ptr"
   unspec_volatile insn.  The return value is either the original
   operand, if it is not a7, or the new pseudo containing a copy of
   the incoming argument.  This is necessary because the register
   allocator will ignore conflicts with a7 and may either assign some
   other pseudo to a7 or use a7 as the hard_frame_pointer, clobbering
   the incoming argument in a7.  By copying the argument out of a7 as
   the very first thing, and then immediately following that with an
   unspec_volatile to keep the scheduler away, we should avoid any
   problems.  Putting the set_frame_ptr insn at the beginning, with
   only the a7 copy before it, also makes it easier for the prologue
   expander to initialize the frame pointer after the a7 copy and to
   fix up the a7 copy to use the stack pointer instead of the frame
   pointer.  */

rtx
xtensa_copy_incoming_a7 (rtx opnd)
{
  rtx entry_insns = 0;
  rtx reg, tmp;
  enum machine_mode mode;

  if (!cfun->machine->need_a7_copy)
    return opnd;

  /* This function should never be called again once a7 has been copied.  */
  gcc_assert (!cfun->machine->set_frame_ptr_insn);

  mode = GET_MODE (opnd);

  /* The operand using a7 may come in a later instruction, so just return
     the original operand if it doesn't use a7.  */
  reg = opnd;
  if (GET_CODE (reg) == SUBREG)
    {
      gcc_assert (SUBREG_BYTE (reg) == 0);
      reg = SUBREG_REG (reg);
    }
  if (GET_CODE (reg) != REG
      || REGNO (reg) > A7_REG
      || REGNO (reg) + HARD_REGNO_NREGS (A7_REG, mode) <= A7_REG)
    return opnd;

  /* 1-word args will always be in a7; 2-word args in a6/a7.  */
  gcc_assert (REGNO (reg) + HARD_REGNO_NREGS (A7_REG, mode) - 1 == A7_REG);

  cfun->machine->need_a7_copy = false;

  /* Copy a7 to a new pseudo at the function entry.  Use gen_raw_REG to
     create the REG for a7 so that hard_frame_pointer_rtx is not used.  */

  start_sequence ();
  tmp = gen_reg_rtx (mode);

  switch (mode)
    {
    case DFmode:
    case DImode:
      /* Copy the value out of A7 here but keep the first word in A6 until
         after the set_frame_ptr insn.  Otherwise, the register allocator
         may decide to put "subreg (tmp, 0)" in A7 and clobber the incoming
         value.  */
      emit_insn (gen_movsi_internal (gen_rtx_SUBREG (SImode, tmp, 4),
                                     gen_raw_REG (SImode, A7_REG)));
      break;
    case SFmode:
      emit_insn (gen_movsf_internal (tmp, gen_raw_REG (mode, A7_REG)));
      break;
    case SImode:
      emit_insn (gen_movsi_internal (tmp, gen_raw_REG (mode, A7_REG)));
      break;
    case HImode:
      emit_insn (gen_movhi_internal (tmp, gen_raw_REG (mode, A7_REG)));
      break;
    case QImode:
      emit_insn (gen_movqi_internal (tmp, gen_raw_REG (mode, A7_REG)));
      break;
    default:
      gcc_unreachable ();
    }

  cfun->machine->set_frame_ptr_insn = emit_insn (gen_set_frame_ptr ());

  /* For DF and DI mode arguments, copy the incoming value in A6 now.  */
  if (mode == DFmode || mode == DImode)
    emit_insn (gen_movsi_internal (gen_rtx_SUBREG (SImode, tmp, 0),
                                   gen_rtx_REG (SImode, A7_REG - 1)));
  entry_insns = get_insns ();
  end_sequence ();

  if (cfun->machine->vararg_a7)
    {
      /* This is called from within builtin_saveregs, which will insert the
         saveregs code at the function entry, ahead of anything placed at
         the function entry now.  Instead, save the sequence to be inserted
         at the beginning of the saveregs code.  */
      cfun->machine->vararg_a7_copy = entry_insns;
    }
  else
    {
      /* Put entry_insns after the NOTE that starts the function.  If
         this is inside a start_sequence, make the outer-level insn
         chain current, so the code is placed at the start of the
         function.  */
      push_topmost_sequence ();
      /* Do not use entry_of_function() here.  This is called from within
         expand_function_start, when the CFG still holds GIMPLE.  */
      emit_insn_after (entry_insns, get_insns ());
      pop_topmost_sequence ();
    }

  return tmp;
}


/* Try to expand a block move operation to a sequence of RTL move
   instructions.  If not optimizing, or if the block size is not a
   constant, or if the block is too large, the expansion fails and GCC
   falls back to calling memcpy().

   operands[0] is the destination
   operands[1] is the source
   operands[2] is the length
   operands[3] is the alignment */

int
xtensa_expand_block_move (rtx *operands)
{
  static const enum machine_mode mode_from_align[] =
  {
    VOIDmode, QImode, HImode, VOIDmode, SImode,
  };

  rtx dst_mem = operands[0];
  rtx src_mem = operands[1];
  HOST_WIDE_INT bytes, align;
  int num_pieces, move_ratio;
  rtx temp[2];
  enum machine_mode mode[2];
  int amount[2];
  bool active[2];
  int phase = 0;
  int next;
  int offset_ld = 0;
  int offset_st = 0;
  rtx x;

  /* If this is not a fixed size move, just call memcpy.  */
  if (!optimize || (GET_CODE (operands[2]) != CONST_INT))
    return 0;

  bytes = INTVAL (operands[2]);
  align = INTVAL (operands[3]);

  /* Anything to move?  */
  if (bytes <= 0)
    return 0;

  if (align > MOVE_MAX)
    align = MOVE_MAX;

  /* Decide whether to expand inline based on the optimization level.  */
  move_ratio = 4;
  if (optimize > 2)
    move_ratio = LARGEST_MOVE_RATIO;
  num_pieces = (bytes / align) + (bytes % align); /* Close enough anyway.  */
  if (num_pieces > move_ratio)
    return 0;

  x = XEXP (dst_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      dst_mem = replace_equiv_address (dst_mem, x);
    }

  x = XEXP (src_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      src_mem = replace_equiv_address (src_mem, x);
    }

  active[0] = active[1] = false;

  do
    {
      next = phase;
      phase ^= 1;

      if (bytes > 0)
        {
          int next_amount;

          next_amount = (bytes >= 4 ? 4 : (bytes >= 2 ? 2 : 1));
          next_amount = MIN (next_amount, align);

          amount[next] = next_amount;
          mode[next] = mode_from_align[next_amount];
          temp[next] = gen_reg_rtx (mode[next]);

          x = adjust_address (src_mem, mode[next], offset_ld);
          emit_insn (gen_rtx_SET (VOIDmode, temp[next], x));

          offset_ld += next_amount;
          bytes -= next_amount;
          active[next] = true;
        }

      if (active[phase])
        {
          active[phase] = false;

          x = adjust_address (dst_mem, mode[phase], offset_st);
          emit_insn (gen_rtx_SET (VOIDmode, x, temp[phase]));

          offset_st += amount[phase];
        }
    }
  while (active[next]);

  return 1;
}
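
/* Note that the do/while loop above software-pipelines the copy: each
   iteration emits the load for piece N+1 together with the store for
   piece N, so every loaded value has a full iteration before it is
   stored.  */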


void
xtensa_expand_nonlocal_goto (rtx *operands)
{
  rtx goto_handler = operands[1];
  rtx containing_fp = operands[3];

  /* Generate a call to "__xtensa_nonlocal_goto" (in libgcc); the code
     is too big to generate in-line.  */

  if (GET_CODE (containing_fp) != REG)
    containing_fp = force_reg (Pmode, containing_fp);

  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__xtensa_nonlocal_goto"),
                     LCT_NORMAL, VOIDmode, 2,
                     containing_fp, Pmode,
                     goto_handler, Pmode);
}


static struct machine_function *
xtensa_init_machine_status (void)
{
  return ggc_alloc_cleared_machine_function ();
}


/* Shift VAL of mode MODE left by COUNT bits.  */

static inline rtx
xtensa_expand_mask_and_shift (rtx val, enum machine_mode mode, rtx count)
{
  val = expand_simple_binop (SImode, AND, val, GEN_INT (GET_MODE_MASK (mode)),
                             NULL_RTX, 1, OPTAB_DIRECT);
  return expand_simple_binop (SImode, ASHIFT, val, count,
                              NULL_RTX, 1, OPTAB_DIRECT);
}


/* Structure to hold the initial parameters for a compare_and_swap operation
   in HImode and QImode.  */

struct alignment_context
{
  rtx memsi;      /* SI aligned memory location.  */
  rtx shift;      /* Bit offset with regard to lsb.  */
  rtx modemask;   /* Mask of the HQImode shifted by SHIFT bits.  */
  rtx modemaski;  /* ~modemask */
};


/* Initialize structure AC for word access to HI and QI mode memory.  */

static void
init_alignment_context (struct alignment_context *ac, rtx mem)
{
  enum machine_mode mode = GET_MODE (mem);
  rtx byteoffset = NULL_RTX;
  bool aligned = (MEM_ALIGN (mem) >= GET_MODE_BITSIZE (SImode));

  if (aligned)
    ac->memsi = adjust_address (mem, SImode, 0); /* Memory is aligned.  */
  else
    {
      /* Alignment is unknown.  */
      rtx addr, align;

      /* Force the address into a register.  */
      addr = force_reg (Pmode, XEXP (mem, 0));

      /* Align it to SImode.  */
      align = expand_simple_binop (Pmode, AND, addr,
                                   GEN_INT (-GET_MODE_SIZE (SImode)),
                                   NULL_RTX, 1, OPTAB_DIRECT);
      /* Generate MEM.  */
      ac->memsi = gen_rtx_MEM (SImode, align);
      MEM_VOLATILE_P (ac->memsi) = MEM_VOLATILE_P (mem);
      set_mem_alias_set (ac->memsi, ALIAS_SET_MEMORY_BARRIER);
      set_mem_align (ac->memsi, GET_MODE_BITSIZE (SImode));

      byteoffset = expand_simple_binop (Pmode, AND, addr,
                                        GEN_INT (GET_MODE_SIZE (SImode) - 1),
                                        NULL_RTX, 1, OPTAB_DIRECT);
    }

  /* Calculate shiftcount.  */
  if (TARGET_BIG_ENDIAN)
    {
      ac->shift = GEN_INT (GET_MODE_SIZE (SImode) - GET_MODE_SIZE (mode));
      if (!aligned)
        ac->shift = expand_simple_binop (SImode, MINUS, ac->shift, byteoffset,
                                         NULL_RTX, 1, OPTAB_DIRECT);
    }
  else
    {
      if (aligned)
        ac->shift = NULL_RTX;
      else
        ac->shift = byteoffset;
    }

  if (ac->shift != NULL_RTX)
    {
      /* Shift is the byte count, but we need the bitcount.  */
      ac->shift = expand_simple_binop (SImode, MULT, ac->shift,
                                       GEN_INT (BITS_PER_UNIT),
                                       NULL_RTX, 1, OPTAB_DIRECT);
      ac->modemask = expand_simple_binop (SImode, ASHIFT,
                                          GEN_INT (GET_MODE_MASK (mode)),
                                          ac->shift,
                                          NULL_RTX, 1, OPTAB_DIRECT);
    }
  else
    ac->modemask = GEN_INT (GET_MODE_MASK (mode));

  ac->modemaski = expand_simple_unop (SImode, NOT, ac->modemask, NULL_RTX, 1);
}
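
/* As a worked example (little-endian, alignment unknown at compile
   time): for a HImode MEM whose address turns out to be 2 modulo 4,
   byteoffset is 2, ac->shift computes to 16, ac->modemask is
   0xffff << 16, and ac->modemaski is 0x0000ffff.  */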


/* Expand an atomic compare and swap operation for HImode and QImode.
   MEM is the memory location, CMP the old value to compare MEM with
   and NEW_RTX the value to set if CMP == MEM.  */

void
xtensa_expand_compare_and_swap (rtx target, rtx mem, rtx cmp, rtx new_rtx)
{
  enum machine_mode mode = GET_MODE (mem);
  struct alignment_context ac;
  rtx tmp, cmpv, newv, val;
  rtx oldval = gen_reg_rtx (SImode);
  rtx res = gen_reg_rtx (SImode);
  rtx csloop = gen_label_rtx ();
  rtx csend = gen_label_rtx ();

  init_alignment_context (&ac, mem);

  if (ac.shift != NULL_RTX)
    {
      cmp = xtensa_expand_mask_and_shift (cmp, mode, ac.shift);
      new_rtx = xtensa_expand_mask_and_shift (new_rtx, mode, ac.shift);
    }

  /* Load the surrounding word into VAL with the MEM value masked out.  */
  val = force_reg (SImode, expand_simple_binop (SImode, AND, ac.memsi,
                                                ac.modemaski, NULL_RTX, 1,
                                                OPTAB_DIRECT));
  emit_label (csloop);

  /* Patch CMP and NEW_RTX into VAL at correct position.  */
  cmpv = force_reg (SImode, expand_simple_binop (SImode, IOR, cmp, val,
                                                 NULL_RTX, 1, OPTAB_DIRECT));
  newv = force_reg (SImode, expand_simple_binop (SImode, IOR, new_rtx, val,
                                                 NULL_RTX, 1, OPTAB_DIRECT));

  /* Jump to end if we're done.  */
  emit_insn (gen_sync_compare_and_swapsi (res, ac.memsi, cmpv, newv));
  emit_cmp_and_jump_insns (res, cmpv, EQ, const0_rtx, SImode, true, csend);

  /* Check for changes outside mode.  */
  emit_move_insn (oldval, val);
  tmp = expand_simple_binop (SImode, AND, res, ac.modemaski,
                             val, 1, OPTAB_DIRECT);
  if (tmp != val)
    emit_move_insn (val, tmp);

  /* If the word changed outside the mode, loop back and retry.  */
  emit_cmp_and_jump_insns (oldval, val, NE, const0_rtx, SImode, true, csloop);

  emit_label (csend);

  /* Return the correct part of the bitfield.  */
  convert_move (target,
                (ac.shift == NULL_RTX ? res
                 : expand_simple_binop (SImode, LSHIFTRT, res, ac.shift,
                                        NULL_RTX, 1, OPTAB_DIRECT)),
                1);
}


/* Expand an atomic operation CODE of mode MODE (either HImode or QImode --
   the default expansion works fine for SImode).  MEM is the memory location
   and VAL the value to play with.  If AFTER is true then store the value
   MEM holds after the operation, if AFTER is false then store the value MEM
   holds before the operation.  If TARGET is zero then discard that value, else
   store it to TARGET.  */

void
xtensa_expand_atomic (enum rtx_code code, rtx target, rtx mem, rtx val,
                      bool after)
{
  enum machine_mode mode = GET_MODE (mem);
  struct alignment_context ac;
  rtx csloop = gen_label_rtx ();
  rtx cmp, tmp;
  rtx old = gen_reg_rtx (SImode);
  rtx new_rtx = gen_reg_rtx (SImode);
  rtx orig = NULL_RTX;

  init_alignment_context (&ac, mem);

  /* Prepare values before the compare-and-swap loop.  */
  if (ac.shift != NULL_RTX)
    val = xtensa_expand_mask_and_shift (val, mode, ac.shift);
  switch (code)
    {
    case PLUS:
    case MINUS:
      orig = gen_reg_rtx (SImode);
      convert_move (orig, val, 1);
      break;

    case SET:
    case IOR:
    case XOR:
      break;

    case MULT: /* NAND */
    case AND:
      /* val = "11..1<val>11..1" */
      val = expand_simple_binop (SImode, XOR, val, ac.modemaski,
                                 NULL_RTX, 1, OPTAB_DIRECT);
      break;

    default:
      gcc_unreachable ();
    }

  /* Load full word.  Subsequent loads are performed by S32C1I.  */
  cmp = force_reg (SImode, ac.memsi);

  emit_label (csloop);
  emit_move_insn (old, cmp);

  switch (code)
    {
    case PLUS:
    case MINUS:
      val = expand_simple_binop (SImode, code, old, orig,
                                 NULL_RTX, 1, OPTAB_DIRECT);
      val = expand_simple_binop (SImode, AND, val, ac.modemask,
                                 NULL_RTX, 1, OPTAB_DIRECT);
      /* FALLTHRU */
    case SET:
      tmp = expand_simple_binop (SImode, AND, old, ac.modemaski,
                                 NULL_RTX, 1, OPTAB_DIRECT);
      tmp = expand_simple_binop (SImode, IOR, tmp, val,
                                 new_rtx, 1, OPTAB_DIRECT);
      break;

    case AND:
    case IOR:
    case XOR:
      tmp = expand_simple_binop (SImode, code, old, val,
                                 new_rtx, 1, OPTAB_DIRECT);
      break;

    case MULT: /* NAND */
      tmp = expand_simple_binop (SImode, XOR, old, ac.modemask,
                                 NULL_RTX, 1, OPTAB_DIRECT);
      tmp = expand_simple_binop (SImode, AND, tmp, val,
                                 new_rtx, 1, OPTAB_DIRECT);
      break;

    default:
      gcc_unreachable ();
    }

  if (tmp != new_rtx)
    emit_move_insn (new_rtx, tmp);
  emit_insn (gen_sync_compare_and_swapsi (cmp, ac.memsi, old, new_rtx));
  emit_cmp_and_jump_insns (cmp, old, NE, const0_rtx, SImode, true, csloop);

  if (target)
    {
      tmp = (after ? new_rtx : cmp);
      convert_move (target,
                    (ac.shift == NULL_RTX ? tmp
                     : expand_simple_binop (SImode, LSHIFTRT, tmp, ac.shift,
                                            NULL_RTX, 1, OPTAB_DIRECT)),
                    1);
    }
}
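
/* Note how the AND case above first widens VAL with ones outside the
   field (the XOR with modemaski), so that the full-word AND inside
   the loop leaves the bytes surrounding the HI/QI value unchanged.  */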


void
xtensa_setup_frame_addresses (void)
{
  /* Set flag to cause TARGET_FRAME_POINTER_REQUIRED to return true.  */
  cfun->machine->accesses_prev_frame = 1;

  emit_library_call
    (gen_rtx_SYMBOL_REF (Pmode, "__xtensa_libgcc_window_spill"),
     LCT_NORMAL, VOIDmode, 0);
}


/* Emit the assembly for the end of a zero-cost loop.  Normally we just emit
   a comment showing where the end of the loop is.  However, if there is a
   label or a branch at the end of the loop then we need to place a nop
   there.  If the loop ends with a label we need the nop so that branches
   targeting that label will target the nop (and thus remain in the loop),
   instead of targeting the instruction after the loop (and thus exiting
   the loop).  If the loop ends with a branch, we need the nop in case the
   branch is targeting a location inside the loop.  When the branch
   executes it will cause the loop count to be decremented even if it is
   taken (because it is the last instruction in the loop), so we need to
   nop after the branch to prevent the loop count from being decremented
   when the branch is taken.  */

void
xtensa_emit_loop_end (rtx insn, rtx *operands)
{
  char done = 0;

  for (insn = PREV_INSN (insn); insn && !done; insn = PREV_INSN (insn))
    {
      switch (GET_CODE (insn))
        {
        case NOTE:
        case BARRIER:
          break;

        case CODE_LABEL:
          output_asm_insn (TARGET_DENSITY ? "nop.n" : "nop", operands);
          done = 1;
          break;

        default:
          {
            rtx body = PATTERN (insn);

            if (JUMP_P (insn))
              {
                output_asm_insn (TARGET_DENSITY ? "nop.n" : "nop", operands);
                done = 1;
              }
            else if ((GET_CODE (body) != USE)
                     && (GET_CODE (body) != CLOBBER))
              done = 1;
          }
          break;
        }
    }

  output_asm_insn ("# loop end for %0", operands);
}


char *
xtensa_emit_branch (bool inverted, bool immed, rtx *operands)
{
  static char result[64];
  enum rtx_code code;
  const char *op;

  code = GET_CODE (operands[3]);
  switch (code)
    {
    case EQ:  op = inverted ? "ne" : "eq"; break;
    case NE:  op = inverted ? "eq" : "ne"; break;
    case LT:  op = inverted ? "ge" : "lt"; break;
    case GE:  op = inverted ? "lt" : "ge"; break;
    case LTU: op = inverted ? "geu" : "ltu"; break;
    case GEU: op = inverted ? "ltu" : "geu"; break;
    default:  gcc_unreachable ();
    }

  if (immed)
    {
      if (INTVAL (operands[1]) == 0)
        sprintf (result, "b%sz%s\t%%0, %%2", op,
                 (TARGET_DENSITY && (code == EQ || code == NE)) ? ".n" : "");
      else
        sprintf (result, "b%si\t%%0, %%d1, %%2", op);
    }
  else
    sprintf (result, "b%s\t%%0, %%1, %%2", op);

  return result;
}
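
/* For example, a comparison (EQ a3 (const_int 0)) produces
   "beqz\t%0, %2" (or "beqz.n" with TARGET_DENSITY), while a
   register-register (LTU ...) produces "bltu\t%0, %1, %2".  */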
1703
1704
1705 char *
1706 xtensa_emit_bit_branch (bool inverted, bool immed, rtx *operands)
1707 {
1708 static char result[64];
1709 const char *op;
1710
1711 switch (GET_CODE (operands[3]))
1712 {
1713 case EQ: op = inverted ? "bs" : "bc"; break;
1714 case NE: op = inverted ? "bc" : "bs"; break;
1715 default: gcc_unreachable ();
1716 }
1717
1718 if (immed)
1719 {
1720 unsigned bitnum = INTVAL (operands[1]) & 0x1f;
1721 operands[1] = GEN_INT (bitnum);
1722 sprintf (result, "b%si\t%%0, %%d1, %%2", op);
1723 }
1724 else
1725 sprintf (result, "b%s\t%%0, %%1, %%2", op);
1726
1727 return result;
1728 }
1729
1730
1731 char *
1732 xtensa_emit_movcc (bool inverted, bool isfp, bool isbool, rtx *operands)
1733 {
1734 static char result[64];
1735 enum rtx_code code;
1736 const char *op;
1737
1738 code = GET_CODE (operands[4]);
1739 if (isbool)
1740 {
1741 switch (code)
1742 {
1743 case EQ: op = inverted ? "t" : "f"; break;
1744 case NE: op = inverted ? "f" : "t"; break;
1745 default: gcc_unreachable ();
1746 }
1747 }
1748 else
1749 {
1750 switch (code)
1751 {
1752 case EQ: op = inverted ? "nez" : "eqz"; break;
1753 case NE: op = inverted ? "eqz" : "nez"; break;
1754 case LT: op = inverted ? "gez" : "ltz"; break;
1755 case GE: op = inverted ? "ltz" : "gez"; break;
1756 default: gcc_unreachable ();
1757 }
1758 }
1759
1760 sprintf (result, "mov%s%s\t%%0, %%%d, %%1",
1761 op, isfp ? ".s" : "", inverted ? 3 : 2);
1762 return result;
1763 }
1764
1765
1766 char *
1767 xtensa_emit_call (int callop, rtx *operands)
1768 {
1769 static char result[64];
1770 rtx tgt = operands[callop];
1771
1772 if (GET_CODE (tgt) == CONST_INT)
1773 sprintf (result, "call8\t0x%lx", INTVAL (tgt));
1774 else if (register_operand (tgt, VOIDmode))
1775 sprintf (result, "callx8\t%%%d", callop);
1776 else
1777 sprintf (result, "call8\t%%%d", callop);
1778
1779 return result;
1780 }
1781
1782
1783 bool
1784 xtensa_legitimate_address_p (enum machine_mode mode, rtx addr, bool strict)
1785 {
1786 /* Allow constant pool addresses. */
1787 if (mode != BLKmode && GET_MODE_SIZE (mode) >= UNITS_PER_WORD
1788 && ! TARGET_CONST16 && constantpool_address_p (addr)
1789 && ! xtensa_tls_referenced_p (addr))
1790 return true;
1791
1792 while (GET_CODE (addr) == SUBREG)
1793 addr = SUBREG_REG (addr);
1794
1795 /* Allow base registers. */
1796 if (GET_CODE (addr) == REG && BASE_REG_P (addr, strict))
1797 return true;
1798
1799 /* Check for "register + offset" addressing. */
1800 if (GET_CODE (addr) == PLUS)
1801 {
1802 rtx xplus0 = XEXP (addr, 0);
1803 rtx xplus1 = XEXP (addr, 1);
1804 enum rtx_code code0;
1805 enum rtx_code code1;
1806
1807 while (GET_CODE (xplus0) == SUBREG)
1808 xplus0 = SUBREG_REG (xplus0);
1809 code0 = GET_CODE (xplus0);
1810
1811 while (GET_CODE (xplus1) == SUBREG)
1812 xplus1 = SUBREG_REG (xplus1);
1813 code1 = GET_CODE (xplus1);
1814
1815 /* Swap operands if necessary so the register is first. */
1816 if (code0 != REG && code1 == REG)
1817 {
1818 xplus0 = XEXP (addr, 1);
1819 xplus1 = XEXP (addr, 0);
1820 code0 = GET_CODE (xplus0);
1821 code1 = GET_CODE (xplus1);
1822 }
1823
1824 if (code0 == REG && BASE_REG_P (xplus0, strict)
1825 && code1 == CONST_INT
1826 && xtensa_mem_offset (INTVAL (xplus1), mode))
1827 return true;
1828 }
1829
1830 return false;
1831 }
1832
1833
1834 /* Construct the SYMBOL_REF for the _TLS_MODULE_BASE_ symbol. */
1835
1836 static GTY(()) rtx xtensa_tls_module_base_symbol;
1837
1838 static rtx
1839 xtensa_tls_module_base (void)
1840 {
1841 if (! xtensa_tls_module_base_symbol)
1842 {
1843 xtensa_tls_module_base_symbol =
1844 gen_rtx_SYMBOL_REF (Pmode, "_TLS_MODULE_BASE_");
1845 SYMBOL_REF_FLAGS (xtensa_tls_module_base_symbol)
1846 |= TLS_MODEL_GLOBAL_DYNAMIC << SYMBOL_FLAG_TLS_SHIFT;
1847 }
1848
1849 return xtensa_tls_module_base_symbol;
1850 }
1851
1852
1853 static rtx
1854 xtensa_call_tls_desc (rtx sym, rtx *retp)
1855 {
1856 rtx fn, arg, a10, call_insn, insns;
1857
1858 start_sequence ();
1859 fn = gen_reg_rtx (Pmode);
1860 arg = gen_reg_rtx (Pmode);
1861 a10 = gen_rtx_REG (Pmode, 10);
1862
1863 emit_insn (gen_tls_func (fn, sym));
1864 emit_insn (gen_tls_arg (arg, sym));
1865 emit_move_insn (a10, arg);
1866 call_insn = emit_call_insn (gen_tls_call (a10, fn, sym, const1_rtx));
1867 use_reg (&CALL_INSN_FUNCTION_USAGE (call_insn), a10);
1868 insns = get_insns ();
1869 end_sequence ();
1870
1871 *retp = a10;
1872 return insns;
1873 }
1874
1875
1876 static rtx
1877 xtensa_legitimize_tls_address (rtx x)
1878 {
1879 unsigned int model = SYMBOL_REF_TLS_MODEL (x);
1880 rtx dest, tp, ret, modbase, base, addend, insns;
1881
1882 dest = gen_reg_rtx (Pmode);
1883 switch (model)
1884 {
1885 case TLS_MODEL_GLOBAL_DYNAMIC:
1886 insns = xtensa_call_tls_desc (x, &ret);
1887 emit_libcall_block (insns, dest, ret, x);
1888 break;
1889
1890 case TLS_MODEL_LOCAL_DYNAMIC:
1891 base = gen_reg_rtx (Pmode);
1892 modbase = xtensa_tls_module_base ();
1893 insns = xtensa_call_tls_desc (modbase, &ret);
1894 emit_libcall_block (insns, base, ret, modbase);
1895 addend = force_reg (SImode, gen_sym_DTPOFF (x));
1896 emit_insn (gen_addsi3 (dest, base, addend));
1897 break;
1898
1899 case TLS_MODEL_INITIAL_EXEC:
1900 case TLS_MODEL_LOCAL_EXEC:
1901 tp = gen_reg_rtx (SImode);
1902 emit_insn (gen_get_thread_pointersi (tp));
1903 addend = force_reg (SImode, gen_sym_TPOFF (x));
1904 emit_insn (gen_addsi3 (dest, tp, addend));
1905 break;
1906
1907 default:
1908 gcc_unreachable ();
1909 }
1910
1911 return dest;
1912 }
1913
1914
1915 rtx
1916 xtensa_legitimize_address (rtx x,
1917 rtx oldx ATTRIBUTE_UNUSED,
1918 enum machine_mode mode)
1919 {
1920 if (xtensa_tls_symbol_p (x))
1921 return xtensa_legitimize_tls_address (x);
1922
1923 if (GET_CODE (x) == PLUS)
1924 {
1925 rtx plus0 = XEXP (x, 0);
1926 rtx plus1 = XEXP (x, 1);
1927
1928 if (GET_CODE (plus0) != REG && GET_CODE (plus1) == REG)
1929 {
1930 plus0 = XEXP (x, 1);
1931 plus1 = XEXP (x, 0);
1932 }
1933
1934 /* Try to split up the offset to use an ADDMI instruction. */
1935 if (GET_CODE (plus0) == REG
1936 && GET_CODE (plus1) == CONST_INT
1937 && !xtensa_mem_offset (INTVAL (plus1), mode)
1938 && !xtensa_simm8 (INTVAL (plus1))
1939 && xtensa_mem_offset (INTVAL (plus1) & 0xff, mode)
1940 && xtensa_simm8x256 (INTVAL (plus1) & ~0xff))
1941 {
1942 rtx temp = gen_reg_rtx (Pmode);
1943 rtx addmi_offset = GEN_INT (INTVAL (plus1) & ~0xff);
1944 emit_insn (gen_rtx_SET (Pmode, temp,
1945 gen_rtx_PLUS (Pmode, plus0, addmi_offset)));
1946 return gen_rtx_PLUS (Pmode, temp, GEN_INT (INTVAL (plus1) & 0xff));
1947 }
1948 }
1949
1950 return x;
1951 }
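
/* Editorial worked example of the ADDMI split above, for a hypothetical
   SImode access at offset 0x1234 from a2:

     0x1234 = 0x1200 + 0x34
     addmi a9, a2, 0x1200    ; 0x1200 is a valid simm8x256 immediate
     l32i  a3, a9, 0x34      ; 0x34 fits the l32i offset field

   Neither 0x1234 as a load/store offset nor a plain ADDI (0x1234
   exceeds simm8) would work directly; a9 stands in for the fresh
   pseudo created by gen_reg_rtx.  */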
1952
1953 /* Worker function for TARGET_MODE_DEPENDENT_ADDRESS_P.
1954
1955 Treat constant-pool references as "mode dependent" since they can
1956 only be accessed with SImode loads. This works around a bug in the
1957 combiner where a constant pool reference is temporarily converted
1958 to an HImode load, which is then assumed to zero-extend based on
1959 our definition of LOAD_EXTEND_OP. This is wrong because the high
1960 bits of a 16-bit value in the constant pool are now sign-extended
1961 by default. */
1962
1963 static bool
1964 xtensa_mode_dependent_address_p (const_rtx addr,
1965 addr_space_t as ATTRIBUTE_UNUSED)
1966 {
1967 return constantpool_address_p (addr);
1968 }
1969
1970 /* Helper for xtensa_tls_referenced_p. */
1971
1972 static int
1973 xtensa_tls_referenced_p_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1974 {
1975 if (GET_CODE (*x) == SYMBOL_REF)
1976 return SYMBOL_REF_TLS_MODEL (*x) != 0;
1977
1978 /* Ignore TLS references that have already been legitimized. */
1979 if (GET_CODE (*x) == UNSPEC)
1980 {
1981 switch (XINT (*x, 1))
1982 {
1983 case UNSPEC_TPOFF:
1984 case UNSPEC_DTPOFF:
1985 case UNSPEC_TLS_FUNC:
1986 case UNSPEC_TLS_ARG:
1987 case UNSPEC_TLS_CALL:
1988 return -1;
1989 default:
1990 break;
1991 }
1992 }
1993
1994 return 0;
1995 }
1996
1997
1998 /* Return TRUE if X contains any TLS symbol references. */
1999
2000 bool
2001 xtensa_tls_referenced_p (rtx x)
2002 {
2003 if (! TARGET_HAVE_TLS)
2004 return false;
2005
2006 return for_each_rtx (&x, xtensa_tls_referenced_p_1, NULL);
2007 }
2008
2009
2010 /* Implement TARGET_CANNOT_FORCE_CONST_MEM. */
2011
2012 static bool
2013 xtensa_cannot_force_const_mem (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
2014 {
2015 return xtensa_tls_referenced_p (x);
2016 }
2017
2018
2019 /* Return the debugger register number to use for 'regno'. */
2020
2021 int
2022 xtensa_dbx_register_number (int regno)
2023 {
2024 int first = -1;
2025
2026 if (GP_REG_P (regno))
2027 {
2028 regno -= GP_REG_FIRST;
2029 first = 0;
2030 }
2031 else if (BR_REG_P (regno))
2032 {
2033 regno -= BR_REG_FIRST;
2034 first = 16;
2035 }
2036 else if (FP_REG_P (regno))
2037 {
2038 regno -= FP_REG_FIRST;
2039 first = 48;
2040 }
2041 else if (ACC_REG_P (regno))
2042 {
2043 first = 0x200; /* Start of Xtensa special registers. */
2044 regno = 16; /* ACCLO is special register 16. */
2045 }
2046
2047 /* When optimizing, we sometimes get asked about pseudo-registers
2048 that don't represent hard registers. Return 0 for these. */
2049 if (first == -1)
2050 return 0;
2051
2052 return first + regno;
2053 }
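
/* Editorial examples of the mapping above: a5 -> 5, b3 -> 19, f2 -> 50,
   and the MAC16 accumulator ACCLO -> 0x210 (special register 16 within
   the 0x200 block).  */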
2054
2055
2056 /* Argument support functions. */
2057
2058 /* Initialize CUMULATIVE_ARGS for a function. */
2059
2060 void
2061 init_cumulative_args (CUMULATIVE_ARGS *cum, int incoming)
2062 {
2063 cum->arg_words = 0;
2064 cum->incoming = incoming;
2065 }
2066
2067
2068 /* Advance the argument to the next argument position. */
2069
2070 static void
2071 xtensa_function_arg_advance (cumulative_args_t cum, enum machine_mode mode,
2072 const_tree type, bool named ATTRIBUTE_UNUSED)
2073 {
2074 int words, max;
2075 int *arg_words;
2076
2077 arg_words = &get_cumulative_args (cum)->arg_words;
2078 max = MAX_ARGS_IN_REGISTERS;
2079
2080 words = (((mode != BLKmode)
2081 ? (int) GET_MODE_SIZE (mode)
2082 : int_size_in_bytes (type)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2083
2084 if (*arg_words < max
2085 && (targetm.calls.must_pass_in_stack (mode, type)
2086 || *arg_words + words > max))
2087 *arg_words = max;
2088
2089 *arg_words += words;
2090 }
2091
2092
2093 /* Return an RTL expression containing the register for the given mode,
2094 or NULL_RTX if the argument is to be passed on the stack. INCOMING_P is
2095 true if this is an incoming argument to the current function. */
2096
2097 static rtx
2098 xtensa_function_arg_1 (cumulative_args_t cum_v, enum machine_mode mode,
2099 const_tree type, bool incoming_p)
2100 {
2101 CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
2102 int regbase, words, max;
2103 int *arg_words;
2104 int regno;
2105
2106 arg_words = &cum->arg_words;
2107 regbase = (incoming_p ? GP_ARG_FIRST : GP_OUTGOING_ARG_FIRST);
2108 max = MAX_ARGS_IN_REGISTERS;
2109
2110 words = (((mode != BLKmode)
2111 ? (int) GET_MODE_SIZE (mode)
2112 : int_size_in_bytes (type)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
2113
2114 if (type && (TYPE_ALIGN (type) > BITS_PER_WORD))
2115 {
2116 int align = MIN (TYPE_ALIGN (type), STACK_BOUNDARY) / BITS_PER_WORD;
2117 *arg_words = (*arg_words + align - 1) & -align;
2118 }
2119
2120 if (*arg_words + words > max)
2121 return NULL_RTX;
2122
2123 regno = regbase + *arg_words;
2124
2125 if (cum->incoming && regno <= A7_REG && regno + words > A7_REG)
2126 cfun->machine->need_a7_copy = true;
2127
2128 return gen_rtx_REG (mode, regno);
2129 }
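
/* Editorial worked example for the layout logic above, assuming a
   hypothetical outgoing call f (int a, double b): "a" takes one word at
   index 0; "b" needs two words with 8-byte alignment, so *arg_words is
   rounded from 1 up to 2 and "b" occupies the register pair starting at
   GP_OUTGOING_ARG_FIRST + 2.  An argument that would extend past the
   MAX_ARGS_IN_REGISTERS words is passed entirely on the stack.  */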
2130
2131 /* Implement TARGET_FUNCTION_ARG. */
2132
2133 static rtx
2134 xtensa_function_arg (cumulative_args_t cum, enum machine_mode mode,
2135 const_tree type, bool named ATTRIBUTE_UNUSED)
2136 {
2137 return xtensa_function_arg_1 (cum, mode, type, false);
2138 }
2139
2140 /* Implement TARGET_FUNCTION_INCOMING_ARG. */
2141
2142 static rtx
2143 xtensa_function_incoming_arg (cumulative_args_t cum, enum machine_mode mode,
2144 const_tree type, bool named ATTRIBUTE_UNUSED)
2145 {
2146 return xtensa_function_arg_1 (cum, mode, type, true);
2147 }
2148
2149 static unsigned int
2150 xtensa_function_arg_boundary (enum machine_mode mode, const_tree type)
2151 {
2152 unsigned int alignment;
2153
2154 alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
2155 if (alignment < PARM_BOUNDARY)
2156 alignment = PARM_BOUNDARY;
2157 if (alignment > STACK_BOUNDARY)
2158 alignment = STACK_BOUNDARY;
2159 return alignment;
2160 }
2161
2162
2163 static bool
2164 xtensa_return_in_msb (const_tree valtype)
2165 {
2166 return (TARGET_BIG_ENDIAN
2167 && AGGREGATE_TYPE_P (valtype)
2168 && int_size_in_bytes (valtype) >= UNITS_PER_WORD);
2169 }
2170
2171
2172 static void
2173 xtensa_option_override (void)
2174 {
2175 int regno;
2176 enum machine_mode mode;
2177
2178 if (!TARGET_BOOLEANS && TARGET_HARD_FLOAT)
2179 error ("boolean registers required for the floating-point option");
2180
2181 /* Set up array giving whether a given register can hold a given mode. */
2182 for (mode = VOIDmode;
2183 mode != MAX_MACHINE_MODE;
2184 mode = (enum machine_mode) ((int) mode + 1))
2185 {
2186 int size = GET_MODE_SIZE (mode);
2187 enum mode_class mclass = GET_MODE_CLASS (mode);
2188
2189 for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
2190 {
2191 int temp;
2192
2193 if (ACC_REG_P (regno))
2194 temp = (TARGET_MAC16
2195 && (mclass == MODE_INT) && (size <= UNITS_PER_WORD));
2196 else if (GP_REG_P (regno))
2197 temp = ((regno & 1) == 0 || (size <= UNITS_PER_WORD));
2198 else if (FP_REG_P (regno))
2199 temp = (TARGET_HARD_FLOAT && (mode == SFmode));
2200 else if (BR_REG_P (regno))
2201 temp = (TARGET_BOOLEANS && (mode == CCmode));
2202 else
2203 temp = FALSE;
2204
2205 xtensa_hard_regno_mode_ok[(int) mode][regno] = temp;
2206 }
2207 }
2208
2209 init_machine_status = xtensa_init_machine_status;
2210
2211 /* Check PIC settings. PIC is only supported when using L32R
2212 instructions, and some targets need to always use PIC. */
2213 if (flag_pic && TARGET_CONST16)
2214 error ("-f%s is not supported with CONST16 instructions",
2215 (flag_pic > 1 ? "PIC" : "pic"));
2216 else if (TARGET_FORCE_NO_PIC)
2217 flag_pic = 0;
2218 else if (XTENSA_ALWAYS_PIC)
2219 {
2220 if (TARGET_CONST16)
2221 error ("PIC is required but not supported with CONST16 instructions");
2222 flag_pic = 1;
2223 }
2224 /* There's no need for -fPIC (as opposed to -fpic) on Xtensa. */
2225 if (flag_pic > 1)
2226 flag_pic = 1;
2227 if (flag_pic && !flag_pie)
2228 flag_shlib = 1;
2229
2230 /* Hot/cold partitioning does not work on this architecture, because of
2231 constant pools (the load instruction cannot necessarily reach that far).
2232 Therefore disable it on this architecture. */
2233 if (flag_reorder_blocks_and_partition)
2234 {
2235 flag_reorder_blocks_and_partition = 0;
2236 flag_reorder_blocks = 1;
2237 }
2238 }
2239
2240 /* A C compound statement to output to stdio stream STREAM the
2241 assembler syntax for an instruction operand X. X is an RTL
2242 expression.
2243
2244 CODE is a value that can be used to specify one of several ways
2245 of printing the operand. It is used when identical operands
2246 must be printed differently depending on the context. CODE
2247 comes from the '%' specification that was used to request
2248 printing of the operand. If the specification was just '%DIGIT'
2249 then CODE is 0; if the specification was '%LTR DIGIT' then CODE
2250 is the ASCII code for LTR.
2251
2252 If X is a register, this macro should print the register's name.
2253 The names can be found in an array 'reg_names' whose type is
2254 'char *[]'. 'reg_names' is initialized from 'REGISTER_NAMES'.
2255
2256 When the machine description has a specification '%PUNCT' (a '%'
2257 followed by a punctuation character), this macro is called with
2258 a null pointer for X and the punctuation character for CODE.
2259
2260 'a', 'c', 'l', and 'n' are reserved.
2261
2262 The Xtensa specific codes are:
2263
2264 'd' CONST_INT, print as signed decimal
2265 'x' CONST_INT, print as signed hexadecimal
2266 'K' CONST_INT, print number of bits in mask for EXTUI
2267 'R' CONST_INT, print (X & 0x1f)
2268 'L' CONST_INT, print ((32 - X) & 0x1f)
2269 'D' REG, print second register of double-word register operand
2270 'N' MEM, print address of next word following a memory operand
2271 'v' MEM, if memory reference is volatile, output a MEMW before it
2272 't' any constant, add "@h" suffix for top 16 bits
2273 'b' any constant, add "@l" suffix for bottom 16 bits
2274 */
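/* Editorial examples for a few of the codes above: for X = 0xff, '%K'
   prints 8 (an EXTUI mask of eight low bits); for X = 8, '%L' prints 24
   and '%R' prints 8; for the CONST_INT 0x1234, '%t' prints "0x1234@h"
   and '%b' prints "0x1234@l".  */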
2275
2276 static void
2277 printx (FILE *file, signed int val)
2278 {
2279 /* Print a hexadecimal value in a nice way. */
2280 if ((val > -0xa) && (val < 0xa))
2281 fprintf (file, "%d", val);
2282 else if (val < 0)
2283 fprintf (file, "-0x%x", -val);
2284 else
2285 fprintf (file, "0x%x", val);
2286 }
2287
2288
2289 void
2290 print_operand (FILE *file, rtx x, int letter)
2291 {
2292 if (!x)
2293 error ("PRINT_OPERAND null pointer");
2294
2295 switch (letter)
2296 {
2297 case 'D':
2298 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2299 fprintf (file, "%s", reg_names[xt_true_regnum (x) + 1]);
2300 else
2301 output_operand_lossage ("invalid %%D value");
2302 break;
2303
2304 case 'v':
2305 if (GET_CODE (x) == MEM)
2306 {
2307 /* For a volatile memory reference, emit a MEMW before the
2308 load or store. */
2309 if (MEM_VOLATILE_P (x) && TARGET_SERIALIZE_VOLATILE)
2310 fprintf (file, "memw\n\t");
2311 }
2312 else
2313 output_operand_lossage ("invalid %%v value");
2314 break;
2315
2316 case 'N':
2317 if (GET_CODE (x) == MEM
2318 && (GET_MODE (x) == DFmode || GET_MODE (x) == DImode))
2319 {
2320 x = adjust_address (x, GET_MODE (x) == DFmode ? SFmode : SImode, 4);
2321 output_address (XEXP (x, 0));
2322 }
2323 else
2324 output_operand_lossage ("invalid %%N value");
2325 break;
2326
2327 case 'K':
2328 if (GET_CODE (x) == CONST_INT)
2329 {
2330 int num_bits = 0;
2331 unsigned val = INTVAL (x);
2332 while (val & 1)
2333 {
2334 num_bits += 1;
2335 val = val >> 1;
2336 }
2337 if ((val != 0) || (num_bits == 0) || (num_bits > 16))
2338 fatal_insn ("invalid mask", x);
2339
2340 fprintf (file, "%d", num_bits);
2341 }
2342 else
2343 output_operand_lossage ("invalid %%K value");
2344 break;
2345
2346 case 'L':
2347 if (GET_CODE (x) == CONST_INT)
2348 fprintf (file, "%ld", (32 - INTVAL (x)) & 0x1f);
2349 else
2350 output_operand_lossage ("invalid %%L value");
2351 break;
2352
2353 case 'R':
2354 if (GET_CODE (x) == CONST_INT)
2355 fprintf (file, "%ld", INTVAL (x) & 0x1f);
2356 else
2357 output_operand_lossage ("invalid %%R value");
2358 break;
2359
2360 case 'x':
2361 if (GET_CODE (x) == CONST_INT)
2362 printx (file, INTVAL (x));
2363 else
2364 output_operand_lossage ("invalid %%x value");
2365 break;
2366
2367 case 'd':
2368 if (GET_CODE (x) == CONST_INT)
2369 fprintf (file, "%ld", INTVAL (x));
2370 else
2371 output_operand_lossage ("invalid %%d value");
2372 break;
2373
2374 case 't':
2375 case 'b':
2376 if (GET_CODE (x) == CONST_INT)
2377 {
2378 printx (file, INTVAL (x));
2379 fputs (letter == 't' ? "@h" : "@l", file);
2380 }
2381 else if (GET_CODE (x) == CONST_DOUBLE)
2382 {
2383 REAL_VALUE_TYPE r;
2384 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
2385 if (GET_MODE (x) == SFmode)
2386 {
2387 long l;
2388 REAL_VALUE_TO_TARGET_SINGLE (r, l);
2389 fprintf (file, "0x%08lx@%c", l, letter == 't' ? 'h' : 'l');
2390 }
2391 else
2392 output_operand_lossage ("invalid %%t/%%b value");
2393 }
2394 else if (GET_CODE (x) == CONST)
2395 {
2396 /* X must be a symbolic constant on ELF. Write an expression
2397 suitable for 'const16' that sets the high or low 16 bits. */
2398 if (GET_CODE (XEXP (x, 0)) != PLUS
2399 || (GET_CODE (XEXP (XEXP (x, 0), 0)) != SYMBOL_REF
2400 && GET_CODE (XEXP (XEXP (x, 0), 0)) != LABEL_REF)
2401 || GET_CODE (XEXP (XEXP (x, 0), 1)) != CONST_INT)
2402 output_operand_lossage ("invalid %%t/%%b value");
2403 print_operand (file, XEXP (XEXP (x, 0), 0), 0);
2404 fputs (letter == 't' ? "@h" : "@l", file);
2405 /* There must be a non-alphanumeric character between 'h' or 'l'
2406 and the number. The '-' is added by print_operand() already. */
2407 if (INTVAL (XEXP (XEXP (x, 0), 1)) >= 0)
2408 fputs ("+", file);
2409 print_operand (file, XEXP (XEXP (x, 0), 1), 0);
2410 }
2411 else
2412 {
2413 output_addr_const (file, x);
2414 fputs (letter == 't' ? "@h" : "@l", file);
2415 }
2416 break;
2417
2418 default:
2419 if (GET_CODE (x) == REG || GET_CODE (x) == SUBREG)
2420 fprintf (file, "%s", reg_names[xt_true_regnum (x)]);
2421 else if (GET_CODE (x) == MEM)
2422 output_address (XEXP (x, 0));
2423 else if (GET_CODE (x) == CONST_INT)
2424 fprintf (file, "%ld", INTVAL (x));
2425 else
2426 output_addr_const (file, x);
2427 }
2428 }
2429
2430
2431 /* A C compound statement to output to stdio stream STREAM the
2432 assembler syntax for an instruction operand that is a memory
2433 reference whose address is ADDR. ADDR is an RTL expression. */
2434
2435 void
2436 print_operand_address (FILE *file, rtx addr)
2437 {
2438 if (!addr)
2439 error ("PRINT_OPERAND_ADDRESS null pointer");
2440
2441 switch (GET_CODE (addr))
2442 {
2443 default:
2444 fatal_insn ("invalid address", addr);
2445 break;
2446
2447 case REG:
2448 fprintf (file, "%s, 0", reg_names [REGNO (addr)]);
2449 break;
2450
2451 case PLUS:
2452 {
2453 rtx reg = NULL_RTX;
2454 rtx offset = NULL_RTX;
2455 rtx arg0 = XEXP (addr, 0);
2456 rtx arg1 = XEXP (addr, 1);
2457
2458 if (GET_CODE (arg0) == REG)
2459 {
2460 reg = arg0;
2461 offset = arg1;
2462 }
2463 else if (GET_CODE (arg1) == REG)
2464 {
2465 reg = arg1;
2466 offset = arg0;
2467 }
2468 else
2469 fatal_insn ("no register in address", addr);
2470
2471 if (CONSTANT_P (offset))
2472 {
2473 fprintf (file, "%s, ", reg_names [REGNO (reg)]);
2474 output_addr_const (file, offset);
2475 }
2476 else
2477 fatal_insn ("address offset not a constant", addr);
2478 }
2479 break;
2480
2481 case LABEL_REF:
2482 case SYMBOL_REF:
2483 case CONST_INT:
2484 case CONST:
2485 output_addr_const (file, addr);
2486 break;
2487 }
2488 }
2489
2490 /* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA. */
2491
2492 static bool
2493 xtensa_output_addr_const_extra (FILE *fp, rtx x)
2494 {
2495 if (GET_CODE (x) == UNSPEC && XVECLEN (x, 0) == 1)
2496 {
2497 switch (XINT (x, 1))
2498 {
2499 case UNSPEC_TPOFF:
2500 output_addr_const (fp, XVECEXP (x, 0, 0));
2501 fputs ("@TPOFF", fp);
2502 return true;
2503 case UNSPEC_DTPOFF:
2504 output_addr_const (fp, XVECEXP (x, 0, 0));
2505 fputs ("@DTPOFF", fp);
2506 return true;
2507 case UNSPEC_PLT:
2508 if (flag_pic)
2509 {
2510 output_addr_const (fp, XVECEXP (x, 0, 0));
2511 fputs ("@PLT", fp);
2512 return true;
2513 }
2514 break;
2515 default:
2516 break;
2517 }
2518 }
2519 return false;
2520 }
2521
2522
2523 void
2524 xtensa_output_literal (FILE *file, rtx x, enum machine_mode mode, int labelno)
2525 {
2526 long value_long[2];
2527 REAL_VALUE_TYPE r;
2528 int size;
2529 rtx first, second;
2530
2531 fprintf (file, "\t.literal .LC%u, ", (unsigned) labelno);
2532
2533 switch (GET_MODE_CLASS (mode))
2534 {
2535 case MODE_FLOAT:
2536 gcc_assert (GET_CODE (x) == CONST_DOUBLE);
2537
2538 REAL_VALUE_FROM_CONST_DOUBLE (r, x);
2539 switch (mode)
2540 {
2541 case SFmode:
2542 REAL_VALUE_TO_TARGET_SINGLE (r, value_long[0]);
2543 if (HOST_BITS_PER_LONG > 32)
2544 value_long[0] &= 0xffffffff;
2545 fprintf (file, "0x%08lx\n", value_long[0]);
2546 break;
2547
2548 case DFmode:
2549 REAL_VALUE_TO_TARGET_DOUBLE (r, value_long);
2550 if (HOST_BITS_PER_LONG > 32)
2551 {
2552 value_long[0] &= 0xffffffff;
2553 value_long[1] &= 0xffffffff;
2554 }
2555 fprintf (file, "0x%08lx, 0x%08lx\n",
2556 value_long[0], value_long[1]);
2557 break;
2558
2559 default:
2560 gcc_unreachable ();
2561 }
2562
2563 break;
2564
2565 case MODE_INT:
2566 case MODE_PARTIAL_INT:
2567 size = GET_MODE_SIZE (mode);
2568 switch (size)
2569 {
2570 case 4:
2571 output_addr_const (file, x);
2572 fputs ("\n", file);
2573 break;
2574
2575 case 8:
2576 split_double (x, &first, &second);
2577 output_addr_const (file, first);
2578 fputs (", ", file);
2579 output_addr_const (file, second);
2580 fputs ("\n", file);
2581 break;
2582
2583 default:
2584 gcc_unreachable ();
2585 }
2586 break;
2587
2588 default:
2589 gcc_unreachable ();
2590 }
2591 }
2592
2593
2594 /* Compute the size of the current stack frame, i.e., the number of bytes
2595 needed to derive the frame pointer from the current stack pointer. */
2596
2597 #define STACK_BYTES (STACK_BOUNDARY / BITS_PER_UNIT)
2598 #define XTENSA_STACK_ALIGN(LOC) (((LOC) + STACK_BYTES-1) & ~(STACK_BYTES-1))
2599
2600 long
2601 compute_frame_size (int size)
2602 {
2603 /* Add space for the incoming static chain value. */
2604 if (cfun->static_chain_decl != NULL)
2605 size += (1 * UNITS_PER_WORD);
2606
2607 xtensa_current_frame_size =
2608 XTENSA_STACK_ALIGN (size
2609 + crtl->outgoing_args_size
2610 + (WINDOW_SIZE * UNITS_PER_WORD));
2611 return xtensa_current_frame_size;
2612 }
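
/* Editorial worked example, assuming 4-byte words, a 128-bit stack
   boundary, WINDOW_SIZE of 8, 20 bytes of locals, and no outgoing
   stack arguments:

     20 + 0 + 8 * 4 = 52  ->  XTENSA_STACK_ALIGN (52) = 64 bytes.  */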
2613
2614
2615 bool
2616 xtensa_frame_pointer_required (void)
2617 {
2618 /* The code to expand builtin_frame_addr and builtin_return_addr
2619 currently uses the hard_frame_pointer instead of frame_pointer.
2620 This seems wrong but maybe it's necessary for other architectures.
2621 This function is derived from the i386 code. */
2622
2623 if (cfun->machine->accesses_prev_frame)
2624 return true;
2625
2626 return false;
2627 }
2628
2629
2630 /* The minimum frame is the register save area (4 words) plus the static
2631 chain (1 word); the total size must be a multiple of 128 bits. */
2632 #define MIN_FRAME_SIZE (8 * UNITS_PER_WORD)
2633
2634 void
2635 xtensa_expand_prologue (void)
2636 {
2637 HOST_WIDE_INT total_size;
2638 rtx size_rtx;
2639 rtx insn, note_rtx;
2640
2641 total_size = compute_frame_size (get_frame_size ());
2642 size_rtx = GEN_INT (total_size);
2643
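/* The ENTRY instruction encodes its frame size as a 12-bit unsigned
   immediate scaled by 8 bytes, hence the 1 << (12+3) limit checked
   below; larger frames start with a MIN_FRAME_SIZE ENTRY and then
   adjust the stack pointer explicitly.  */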
2644 if (total_size < (1 << (12+3)))
2645 insn = emit_insn (gen_entry (size_rtx));
2646 else
2647 {
2648 /* Use a8 as a temporary since a0-a7 may be live. */
2649 rtx tmp_reg = gen_rtx_REG (Pmode, A8_REG);
2650 emit_insn (gen_entry (GEN_INT (MIN_FRAME_SIZE)));
2651 emit_move_insn (tmp_reg, GEN_INT (total_size - MIN_FRAME_SIZE));
2652 emit_insn (gen_subsi3 (tmp_reg, stack_pointer_rtx, tmp_reg));
2653 insn = emit_insn (gen_movsi (stack_pointer_rtx, tmp_reg));
2654 }
2655
2656 if (frame_pointer_needed)
2657 {
2658 if (cfun->machine->set_frame_ptr_insn)
2659 {
2660 rtx first;
2661
2662 push_topmost_sequence ();
2663 first = get_insns ();
2664 pop_topmost_sequence ();
2665
2666 /* For all instructions prior to set_frame_ptr_insn, replace
2667 hard_frame_pointer references with stack_pointer. */
2668 for (insn = first;
2669 insn != cfun->machine->set_frame_ptr_insn;
2670 insn = NEXT_INSN (insn))
2671 {
2672 if (INSN_P (insn))
2673 {
2674 PATTERN (insn) = replace_rtx (copy_rtx (PATTERN (insn)),
2675 hard_frame_pointer_rtx,
2676 stack_pointer_rtx);
2677 df_insn_rescan (insn);
2678 }
2679 }
2680 }
2681 else
2682 insn = emit_insn (gen_movsi (hard_frame_pointer_rtx,
2683 stack_pointer_rtx));
2684 }
2685
2686 /* Create a note to describe the CFA. Because this is only used to set
2687 DW_AT_frame_base for debug info, don't bother tracking changes through
2688 each instruction in the prologue. It just takes up space. */
2689 note_rtx = gen_rtx_SET (VOIDmode, (frame_pointer_needed
2690 ? hard_frame_pointer_rtx
2691 : stack_pointer_rtx),
2692 plus_constant (Pmode, stack_pointer_rtx,
2693 -total_size));
2694 RTX_FRAME_RELATED_P (insn) = 1;
2695 add_reg_note (insn, REG_FRAME_RELATED_EXPR, note_rtx);
2696 }
2697
2698
2699 /* Clear variables at function end. */
2700
2701 void
2702 xtensa_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
2703 HOST_WIDE_INT size ATTRIBUTE_UNUSED)
2704 {
2705 xtensa_current_frame_size = 0;
2706 }
2707
2708
2709 rtx
2710 xtensa_return_addr (int count, rtx frame)
2711 {
2712 rtx result, retaddr, curaddr, label;
2713
2714 if (count == -1)
2715 retaddr = gen_rtx_REG (Pmode, A0_REG);
2716 else
2717 {
2718 rtx addr = plus_constant (Pmode, frame, -4 * UNITS_PER_WORD);
2719 addr = memory_address (Pmode, addr);
2720 retaddr = gen_reg_rtx (Pmode);
2721 emit_move_insn (retaddr, gen_rtx_MEM (Pmode, addr));
2722 }
2723
2724 /* The 2 most-significant bits of the return address on Xtensa hold
2725 the register window size. To get the real return address, these
2726 bits must be replaced with the high bits from some address in the
2727 code. */
2728
2729 /* Get the 2 high bits of a local label in the code. */
2730 curaddr = gen_reg_rtx (Pmode);
2731 label = gen_label_rtx ();
2732 emit_label (label);
2733 LABEL_PRESERVE_P (label) = 1;
2734 emit_move_insn (curaddr, gen_rtx_LABEL_REF (Pmode, label));
2735 emit_insn (gen_lshrsi3 (curaddr, curaddr, GEN_INT (30)));
2736 emit_insn (gen_ashlsi3 (curaddr, curaddr, GEN_INT (30)));
2737
2738 /* Clear the 2 high bits of the return address. */
2739 result = gen_reg_rtx (Pmode);
2740 emit_insn (gen_ashlsi3 (result, retaddr, GEN_INT (2)));
2741 emit_insn (gen_lshrsi3 (result, result, GEN_INT (2)));
2742
2743 /* Combine them to get the result. */
2744 emit_insn (gen_iorsi3 (result, result, curaddr));
2745 return result;
2746 }
2747
2748 /* Disable the use of word-sized or smaller complex modes for structures,
2749 and for function arguments in particular, where they cause problems with
2750 register a7. The xtensa_copy_incoming_a7 function assumes that there is
2751 a single reference to an argument in a7, but with small complex modes the
2752 real and imaginary components may be extracted separately, leading to two
2753 uses of the register, only one of which would be replaced. */
2754
2755 static bool
2756 xtensa_member_type_forces_blk (const_tree, enum machine_mode mode)
2757 {
2758 return mode == CQImode || mode == CHImode;
2759 }
2760
2761 /* Create the va_list data type.
2762
2763 This structure is set up by __builtin_saveregs. The __va_reg field
2764 points to a stack-allocated region holding the contents of the
2765 incoming argument registers. The __va_ndx field is an index
2766 initialized to the position of the first unnamed (variable)
2767 argument. This same index is also used to address the arguments
2768 passed in memory. Thus, the __va_stk field is initialized to point
2769 to the position of the first argument in memory offset to account
2770 for the arguments passed in registers and to account for the size
2771 of the argument registers not being 16-byte aligned. E.G., there
2772 are 6 argument registers of 4 bytes each, but we want the __va_ndx
2773 for the first stack argument to have the maximal alignment of 16
2774 bytes, so we offset the __va_stk address by 32 bytes so that
2775 __va_stk[32] references the first argument on the stack. */
2776
2777 static tree
2778 xtensa_build_builtin_va_list (void)
2779 {
2780 tree f_stk, f_reg, f_ndx, record, type_decl;
2781
2782 record = (*lang_hooks.types.make_type) (RECORD_TYPE);
2783 type_decl = build_decl (BUILTINS_LOCATION,
2784 TYPE_DECL, get_identifier ("__va_list_tag"), record);
2785
2786 f_stk = build_decl (BUILTINS_LOCATION,
2787 FIELD_DECL, get_identifier ("__va_stk"),
2788 ptr_type_node);
2789 f_reg = build_decl (BUILTINS_LOCATION,
2790 FIELD_DECL, get_identifier ("__va_reg"),
2791 ptr_type_node);
2792 f_ndx = build_decl (BUILTINS_LOCATION,
2793 FIELD_DECL, get_identifier ("__va_ndx"),
2794 integer_type_node);
2795
2796 DECL_FIELD_CONTEXT (f_stk) = record;
2797 DECL_FIELD_CONTEXT (f_reg) = record;
2798 DECL_FIELD_CONTEXT (f_ndx) = record;
2799
2800 TYPE_STUB_DECL (record) = type_decl;
2801 TYPE_NAME (record) = type_decl;
2802 TYPE_FIELDS (record) = f_stk;
2803 DECL_CHAIN (f_stk) = f_reg;
2804 DECL_CHAIN (f_reg) = f_ndx;
2805
2806 layout_type (record);
2807 return record;
2808 }
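
/* For reference, the record built above corresponds to this C
   declaration (an editorial sketch, not part of the source):

     typedef struct __va_list_tag
     {
       void *__va_stk;   -- arguments passed in memory
       void *__va_reg;   -- spilled argument registers
       int __va_ndx;     -- byte index of the next argument
     } __va_list_tag;  */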
2809
2810
2811 /* Save the incoming argument registers on the stack. Returns the
2812 address of the saved registers. */
2813
2814 static rtx
2815 xtensa_builtin_saveregs (void)
2816 {
2817 rtx gp_regs;
2818 int arg_words = crtl->args.info.arg_words;
2819 int gp_left = MAX_ARGS_IN_REGISTERS - arg_words;
2820
2821 if (gp_left <= 0)
2822 return const0_rtx;
2823
2824 /* Allocate the general-purpose register space. */
2825 gp_regs = assign_stack_local
2826 (BLKmode, MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD, -1);
2827 set_mem_alias_set (gp_regs, get_varargs_alias_set ());
2828
2829 /* Now store the incoming registers. */
2830 cfun->machine->need_a7_copy = true;
2831 cfun->machine->vararg_a7 = true;
2832 move_block_from_reg (GP_ARG_FIRST + arg_words,
2833 adjust_address (gp_regs, BLKmode,
2834 arg_words * UNITS_PER_WORD),
2835 gp_left);
2836 gcc_assert (cfun->machine->vararg_a7_copy != 0);
2837 emit_insn_before (cfun->machine->vararg_a7_copy, get_insns ());
2838
2839 return XEXP (gp_regs, 0);
2840 }
2841
2842
2843 /* Implement `va_start' for varargs and stdarg. We look at the
2844 current function to fill in an initial va_list. */
2845
2846 static void
2847 xtensa_va_start (tree valist, rtx nextarg ATTRIBUTE_UNUSED)
2848 {
2849 tree f_stk, stk;
2850 tree f_reg, reg;
2851 tree f_ndx, ndx;
2852 tree t, u;
2853 int arg_words;
2854
2855 arg_words = crtl->args.info.arg_words;
2856
2857 f_stk = TYPE_FIELDS (va_list_type_node);
2858 f_reg = DECL_CHAIN (f_stk);
2859 f_ndx = DECL_CHAIN (f_reg);
2860
2861 stk = build3 (COMPONENT_REF, TREE_TYPE (f_stk), valist, f_stk, NULL_TREE);
2862 reg = build3 (COMPONENT_REF, TREE_TYPE (f_reg), unshare_expr (valist),
2863 f_reg, NULL_TREE);
2864 ndx = build3 (COMPONENT_REF, TREE_TYPE (f_ndx), unshare_expr (valist),
2865 f_ndx, NULL_TREE);
2866
2867 /* Call __builtin_saveregs; save the result in __va_reg */
2868 u = make_tree (sizetype, expand_builtin_saveregs ());
2869 u = fold_convert (ptr_type_node, u);
2870 t = build2 (MODIFY_EXPR, ptr_type_node, reg, u);
2871 TREE_SIDE_EFFECTS (t) = 1;
2872 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2873
2874 /* Set the __va_stk member to ($arg_ptr - 32). */
2875 u = make_tree (ptr_type_node, virtual_incoming_args_rtx);
2876 u = fold_build_pointer_plus_hwi (u, -32);
2877 t = build2 (MODIFY_EXPR, ptr_type_node, stk, u);
2878 TREE_SIDE_EFFECTS (t) = 1;
2879 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2880
2881 /* Set the __va_ndx member. If the first variable argument is on
2882 the stack, adjust __va_ndx by 2 words to account for the extra
2883 alignment offset for __va_stk. */
2884 if (arg_words >= MAX_ARGS_IN_REGISTERS)
2885 arg_words += 2;
2886 t = build2 (MODIFY_EXPR, integer_type_node, ndx,
2887 build_int_cst (integer_type_node, arg_words * UNITS_PER_WORD));
2888 TREE_SIDE_EFFECTS (t) = 1;
2889 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2890 }
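
/* Editorial worked example of the initialization above, assuming a
   hypothetical int f (int a, ...), i.e. arg_words == 1:

     ap.__va_reg = __builtin_saveregs ();
     ap.__va_stk = arg_ptr - 32;
     ap.__va_ndx = 1 * 4;    -- the next argument is word 1

   Had all six argument registers been consumed (arg_words >= 6 with
   MAX_ARGS_IN_REGISTERS == 6), __va_ndx would instead start at
   (arg_words + 2) * 4 to skip the pad built into __va_stk.  */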
2891
2892
2893 /* Implement `va_arg'. */
2894
2895 static tree
2896 xtensa_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
2897 gimple_seq *post_p ATTRIBUTE_UNUSED)
2898 {
2899 tree f_stk, stk;
2900 tree f_reg, reg;
2901 tree f_ndx, ndx;
2902 tree type_size, array, orig_ndx, addr, size, va_size, t;
2903 tree lab_false, lab_over, lab_false2;
2904 bool indirect;
2905
2906 indirect = pass_by_reference (NULL, TYPE_MODE (type), type, false);
2907 if (indirect)
2908 type = build_pointer_type (type);
2909
2910 /* Handle complex values as separate real and imaginary parts. */
2911 if (TREE_CODE (type) == COMPLEX_TYPE)
2912 {
2913 tree real_part, imag_part;
2914
2915 real_part = xtensa_gimplify_va_arg_expr (valist, TREE_TYPE (type),
2916 pre_p, NULL);
2917 real_part = get_initialized_tmp_var (real_part, pre_p, NULL);
2918
2919 imag_part = xtensa_gimplify_va_arg_expr (unshare_expr (valist),
2920 TREE_TYPE (type),
2921 pre_p, NULL);
2922 imag_part = get_initialized_tmp_var (imag_part, pre_p, NULL);
2923
2924 return build2 (COMPLEX_EXPR, type, real_part, imag_part);
2925 }
2926
2927 f_stk = TYPE_FIELDS (va_list_type_node);
2928 f_reg = DECL_CHAIN (f_stk);
2929 f_ndx = DECL_CHAIN (f_reg);
2930
2931 stk = build3 (COMPONENT_REF, TREE_TYPE (f_stk), valist,
2932 f_stk, NULL_TREE);
2933 reg = build3 (COMPONENT_REF, TREE_TYPE (f_reg), unshare_expr (valist),
2934 f_reg, NULL_TREE);
2935 ndx = build3 (COMPONENT_REF, TREE_TYPE (f_ndx), unshare_expr (valist),
2936 f_ndx, NULL_TREE);
2937
2938 type_size = size_in_bytes (type);
2939 va_size = round_up (type_size, UNITS_PER_WORD);
2940 gimplify_expr (&va_size, pre_p, NULL, is_gimple_val, fb_rvalue);
2941
2942
2943 /* First align __va_ndx if necessary for this arg:
2944
2945 orig_ndx = (AP).__va_ndx;
2946 if (__alignof__ (TYPE) > 4 )
2947 orig_ndx = ((orig_ndx + __alignof__ (TYPE) - 1)
2948 & -__alignof__ (TYPE)); */
2949
2950 orig_ndx = get_initialized_tmp_var (ndx, pre_p, NULL);
2951
2952 if (TYPE_ALIGN (type) > BITS_PER_WORD)
2953 {
2954 int align = MIN (TYPE_ALIGN (type), STACK_BOUNDARY) / BITS_PER_UNIT;
2955
2956 t = build2 (PLUS_EXPR, integer_type_node, unshare_expr (orig_ndx),
2957 build_int_cst (integer_type_node, align - 1));
2958 t = build2 (BIT_AND_EXPR, integer_type_node, t,
2959 build_int_cst (integer_type_node, -align));
2960 gimplify_assign (unshare_expr (orig_ndx), t, pre_p);
2961 }
2962
2963
2964 /* Increment __va_ndx to point past the argument:
2965
2966 (AP).__va_ndx = orig_ndx + __va_size (TYPE); */
2967
2968 t = fold_convert (integer_type_node, va_size);
2969 t = build2 (PLUS_EXPR, integer_type_node, orig_ndx, t);
2970 gimplify_assign (unshare_expr (ndx), t, pre_p);
2971
2972
2973 /* Check if the argument is in registers:
2974
2975 if ((AP).__va_ndx <= __MAX_ARGS_IN_REGISTERS * 4
2976 && !must_pass_in_stack (type))
2977 __array = (AP).__va_reg; */
2978
2979 array = create_tmp_var (ptr_type_node, NULL);
2980
2981 lab_over = NULL;
2982 if (!targetm.calls.must_pass_in_stack (TYPE_MODE (type), type))
2983 {
2984 lab_false = create_artificial_label (UNKNOWN_LOCATION);
2985 lab_over = create_artificial_label (UNKNOWN_LOCATION);
2986
2987 t = build2 (GT_EXPR, boolean_type_node, unshare_expr (ndx),
2988 build_int_cst (integer_type_node,
2989 MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD));
2990 t = build3 (COND_EXPR, void_type_node, t,
2991 build1 (GOTO_EXPR, void_type_node, lab_false),
2992 NULL_TREE);
2993 gimplify_and_add (t, pre_p);
2994
2995 gimplify_assign (unshare_expr (array), reg, pre_p);
2996
2997 t = build1 (GOTO_EXPR, void_type_node, lab_over);
2998 gimplify_and_add (t, pre_p);
2999
3000 t = build1 (LABEL_EXPR, void_type_node, lab_false);
3001 gimplify_and_add (t, pre_p);
3002 }
3003
3004
3005 /* ...otherwise, the argument is on the stack (never split between
3006 registers and the stack -- change __va_ndx if necessary):
3007
3008 else
3009 {
3010 if (orig_ndx <= __MAX_ARGS_IN_REGISTERS * 4)
3011 (AP).__va_ndx = 32 + __va_size (TYPE);
3012 __array = (AP).__va_stk;
3013 } */
3014
3015 lab_false2 = create_artificial_label (UNKNOWN_LOCATION);
3016
3017 t = build2 (GT_EXPR, boolean_type_node, unshare_expr (orig_ndx),
3018 build_int_cst (integer_type_node,
3019 MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD));
3020 t = build3 (COND_EXPR, void_type_node, t,
3021 build1 (GOTO_EXPR, void_type_node, lab_false2),
3022 NULL_TREE);
3023 gimplify_and_add (t, pre_p);
3024
3025 t = size_binop (PLUS_EXPR, unshare_expr (va_size), size_int (32));
3026 t = fold_convert (integer_type_node, t);
3027 gimplify_assign (unshare_expr (ndx), t, pre_p);
3028
3029 t = build1 (LABEL_EXPR, void_type_node, lab_false2);
3030 gimplify_and_add (t, pre_p);
3031
3032 gimplify_assign (array, stk, pre_p);
3033
3034 if (lab_over)
3035 {
3036 t = build1 (LABEL_EXPR, void_type_node, lab_over);
3037 gimplify_and_add (t, pre_p);
3038 }
3039
3040
3041 /* Given the base array pointer (__array) and index to the subsequent
3042 argument (__va_ndx), find the address:
3043
3044 __array + (AP).__va_ndx - (BYTES_BIG_ENDIAN && sizeof (TYPE) < 4
3045 ? sizeof (TYPE)
3046 : __va_size (TYPE))
3047
3048 The results are endian-dependent because values smaller than one word
3049 are aligned differently. */
3050
3051
3052 if (BYTES_BIG_ENDIAN && TREE_CODE (type_size) == INTEGER_CST)
3053 {
3054 t = fold_build2 (GE_EXPR, boolean_type_node, unshare_expr (type_size),
3055 size_int (PARM_BOUNDARY / BITS_PER_UNIT));
3056 t = fold_build3 (COND_EXPR, sizetype, t, unshare_expr (va_size),
3057 unshare_expr (type_size));
3058 size = t;
3059 }
3060 else
3061 size = unshare_expr (va_size);
3062
3063 t = fold_convert (sizetype, unshare_expr (ndx));
3064 t = build2 (MINUS_EXPR, sizetype, t, size);
3065 addr = fold_build_pointer_plus (unshare_expr (array), t);
3066
3067 addr = fold_convert (build_pointer_type (type), addr);
3068 if (indirect)
3069 addr = build_va_arg_indirect_ref (addr);
3070 return build_va_arg_indirect_ref (addr);
3071 }
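
/* Editorial worked example of the address computation above, assuming
   little endian and a 4-byte int whose __va_ndx has been advanced to
   12: addr = __array + 12 - 4, i.e. the third argument word.  On a
   big-endian target a 2-byte short subtracts sizeof (short) rather
   than the padded word size, since small values sit at the
   high-addressed end of their slot.  */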
3072
3073
3074 /* Builtins. */
3075
3076 enum xtensa_builtin
3077 {
3078 XTENSA_BUILTIN_UMULSIDI3,
3079 XTENSA_BUILTIN_max
3080 };
3081
3082
3083 static void
3084 xtensa_init_builtins (void)
3085 {
3086 tree ftype, decl;
3087
3088 ftype = build_function_type_list (unsigned_intDI_type_node,
3089 unsigned_intSI_type_node,
3090 unsigned_intSI_type_node, NULL_TREE);
3091
3092 decl = add_builtin_function ("__builtin_umulsidi3", ftype,
3093 XTENSA_BUILTIN_UMULSIDI3, BUILT_IN_MD,
3094 "__umulsidi3", NULL_TREE);
3095 TREE_NOTHROW (decl) = 1;
3096 TREE_READONLY (decl) = 1;
3097 }
3098
3099
3100 static tree
3101 xtensa_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *args,
3102 bool ignore ATTRIBUTE_UNUSED)
3103 {
3104 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
3105 tree arg0, arg1;
3106
3107 switch (fcode)
3108 {
3109 case XTENSA_BUILTIN_UMULSIDI3:
3110 arg0 = args[0];
3111 arg1 = args[1];
3112 if ((TREE_CODE (arg0) == INTEGER_CST && TREE_CODE (arg1) == INTEGER_CST)
3113 || TARGET_MUL32_HIGH)
3114 return fold_build2 (MULT_EXPR, unsigned_intDI_type_node,
3115 fold_convert (unsigned_intDI_type_node, arg0),
3116 fold_convert (unsigned_intDI_type_node, arg1));
3117 break;
3118
3119 default:
3120 internal_error ("bad builtin code");
3121 break;
3122 }
3123
3124 return NULL;
3125 }
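
/* Editorial usage sketch for the builtin folded above:

     unsigned long long r = __builtin_umulsidi3 (a, b);

   becomes a plain 64-bit multiply when both operands are constant or
   when TARGET_MUL32_HIGH is available; otherwise it survives to
   expansion, which emits a call to the real __umulsidi3.  */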
3126
3127
3128 static rtx
3129 xtensa_expand_builtin (tree exp, rtx target,
3130 rtx subtarget ATTRIBUTE_UNUSED,
3131 enum machine_mode mode ATTRIBUTE_UNUSED,
3132 int ignore)
3133 {
3134 tree fndecl = TREE_OPERAND (CALL_EXPR_FN (exp), 0);
3135 unsigned int fcode = DECL_FUNCTION_CODE (fndecl);
3136
3137 switch (fcode)
3138 {
3139 case XTENSA_BUILTIN_UMULSIDI3:
3140 /* The umulsidi3 builtin exists only to avoid calling the real
3141 __umulsidi3 when the configuration can implement the multiply directly;
3142 that case is folded away earlier, so expansion always emits the call. */
3143 return expand_call (exp, target, ignore);
3144
3145 default:
3146 internal_error ("bad builtin code");
3147 }
3148 return NULL_RTX;
3149 }
3150
3151 /* Worker function for TARGET_PREFERRED_RELOAD_CLASS. */
3152
3153 static reg_class_t
3154 xtensa_preferred_reload_class (rtx x, reg_class_t rclass)
3155 {
3156 if (CONSTANT_P (x) && CONST_DOUBLE_P (x))
3157 return NO_REGS;
3158
3159 /* Don't use the stack pointer or hard frame pointer for reloads!
3160 The hard frame pointer would normally be OK except that it may
3161 briefly hold an incoming argument in the prologue, and reload
3162 won't know that it is live because the hard frame pointer is
3163 treated specially. */
3164
3165 if (rclass == AR_REGS || rclass == GR_REGS)
3166 return RL_REGS;
3167
3168 return rclass;
3169 }
3170
3171 /* Worker function for TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
3172
3173 static reg_class_t
3174 xtensa_preferred_output_reload_class (rtx x ATTRIBUTE_UNUSED,
3175 reg_class_t rclass)
3176 {
3177 /* Don't use the stack pointer or hard frame pointer for reloads!
3178 The hard frame pointer would normally be OK except that it may
3179 briefly hold an incoming argument in the prologue, and reload
3180 won't know that it is live because the hard frame pointer is
3181 treated specially. */
3182
3183 if (rclass == AR_REGS || rclass == GR_REGS)
3184 return RL_REGS;
3185
3186 return rclass;
3187 }
3188
3189 /* Worker function for TARGET_SECONDARY_RELOAD. */
3190
3191 static reg_class_t
3192 xtensa_secondary_reload (bool in_p, rtx x, reg_class_t rclass,
3193 enum machine_mode mode, secondary_reload_info *sri)
3194 {
3195 int regno;
3196
3197 if (in_p && constantpool_mem_p (x))
3198 {
3199 if (rclass == FP_REGS)
3200 return RL_REGS;
3201
3202 if (mode == QImode)
3203 sri->icode = CODE_FOR_reloadqi_literal;
3204 else if (mode == HImode)
3205 sri->icode = CODE_FOR_reloadhi_literal;
3206 }
3207
3208 regno = xt_true_regnum (x);
3209 if (ACC_REG_P (regno))
3210 return ((rclass == GR_REGS || rclass == RL_REGS) ? NO_REGS : RL_REGS);
3211 if (rclass == ACC_REG)
3212 return (GP_REG_P (regno) ? NO_REGS : RL_REGS);
3213
3214 return NO_REGS;
3215 }
3216
3217
3218 void
3219 order_regs_for_local_alloc (void)
3220 {
3221 if (!leaf_function_p ())
3222 {
3223 memcpy (reg_alloc_order, reg_nonleaf_alloc_order,
3224 FIRST_PSEUDO_REGISTER * sizeof (int));
3225 }
3226 else
3227 {
3228 int i, num_arg_regs;
3229 int nxt = 0;
3230
3231 /* Use the AR registers in increasing order (skipping a0 and a1)
3232 but save the incoming argument registers for a last resort. */
3233 num_arg_regs = crtl->args.info.arg_words;
3234 if (num_arg_regs > MAX_ARGS_IN_REGISTERS)
3235 num_arg_regs = MAX_ARGS_IN_REGISTERS;
3236 for (i = GP_ARG_FIRST; i < 16 - num_arg_regs; i++)
3237 reg_alloc_order[nxt++] = i + num_arg_regs;
3238 for (i = 0; i < num_arg_regs; i++)
3239 reg_alloc_order[nxt++] = GP_ARG_FIRST + i;
3240
3241 /* List the coprocessor registers in order. */
3242 for (i = 0; i < BR_REG_NUM; i++)
3243 reg_alloc_order[nxt++] = BR_REG_FIRST + i;
3244
3245 /* List the FP registers in order for now. */
3246 for (i = 0; i < 16; i++)
3247 reg_alloc_order[nxt++] = FP_REG_FIRST + i;
3248
3249 /* GCC requires that we list *all* the registers.... */
3250 reg_alloc_order[nxt++] = 0; /* a0 = return address */
3251 reg_alloc_order[nxt++] = 1; /* a1 = stack pointer */
3252 reg_alloc_order[nxt++] = 16; /* pseudo frame pointer */
3253 reg_alloc_order[nxt++] = 17; /* pseudo arg pointer */
3254
3255 reg_alloc_order[nxt++] = ACC_REG_FIRST; /* MAC16 accumulator */
3256 }
3257 }
3258
3259
3260 /* Some Xtensa targets support multiple bss sections. If the section
3261 name ends with ".bss", add SECTION_BSS to the flags. */
3262
3263 static unsigned int
3264 xtensa_multibss_section_type_flags (tree decl, const char *name, int reloc)
3265 {
3266 unsigned int flags = default_section_type_flags (decl, name, reloc);
3267 const char *suffix;
3268
3269 suffix = strrchr (name, '.');
3270 if (suffix && strcmp (suffix, ".bss") == 0)
3271 {
3272 if (!decl || (TREE_CODE (decl) == VAR_DECL
3273 && DECL_INITIAL (decl) == NULL_TREE))
3274 flags |= SECTION_BSS; /* @nobits */
3275 else
3276 warning (0, "only uninitialized variables can be placed in a "
3277 ".bss section");
3278 }
3279
3280 return flags;
3281 }
3282
3283
3284 /* The literal pool stays with the function. */
3285
3286 static section *
3287 xtensa_select_rtx_section (enum machine_mode mode ATTRIBUTE_UNUSED,
3288 rtx x ATTRIBUTE_UNUSED,
3289 unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED)
3290 {
3291 return function_section (current_function_decl);
3292 }
3293
3294 /* Worker function for TARGET_REGISTER_MOVE_COST. */
3295
3296 static int
3297 xtensa_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
3298 reg_class_t from, reg_class_t to)
3299 {
3300 if (from == to && from != BR_REGS && to != BR_REGS)
3301 return 2;
3302 else if (reg_class_subset_p (from, AR_REGS)
3303 && reg_class_subset_p (to, AR_REGS))
3304 return 2;
3305 else if (reg_class_subset_p (from, AR_REGS) && to == ACC_REG)
3306 return 3;
3307 else if (from == ACC_REG && reg_class_subset_p (to, AR_REGS))
3308 return 3;
3309 else
3310 return 10;
3311 }
3312
3313 /* Worker function for TARGET_MEMORY_MOVE_COST. */
3314
3315 static int
3316 xtensa_memory_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
3317 reg_class_t rclass ATTRIBUTE_UNUSED,
3318 bool in ATTRIBUTE_UNUSED)
3319 {
3320 return 4;
3321 }
3322
3323 /* Compute a (partial) cost for rtx X. Return true if the complete
3324 cost has been computed, and false if subexpressions should be
3325 scanned. In either case, *TOTAL contains the cost result. */
3326
3327 static bool
3328 xtensa_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
3329 int *total, bool speed ATTRIBUTE_UNUSED)
3330 {
3331 switch (code)
3332 {
3333 case CONST_INT:
3334 switch (outer_code)
3335 {
3336 case SET:
3337 if (xtensa_simm12b (INTVAL (x)))
3338 {
3339 *total = 4;
3340 return true;
3341 }
3342 break;
3343 case PLUS:
3344 if (xtensa_simm8 (INTVAL (x))
3345 || xtensa_simm8x256 (INTVAL (x)))
3346 {
3347 *total = 0;
3348 return true;
3349 }
3350 break;
3351 case AND:
3352 if (xtensa_mask_immediate (INTVAL (x)))
3353 {
3354 *total = 0;
3355 return true;
3356 }
3357 break;
3358 case COMPARE:
3359 if ((INTVAL (x) == 0) || xtensa_b4const (INTVAL (x)))
3360 {
3361 *total = 0;
3362 return true;
3363 }
3364 break;
3365 case ASHIFT:
3366 case ASHIFTRT:
3367 case LSHIFTRT:
3368 case ROTATE:
3369 case ROTATERT:
3370 /* No way to tell if X is the 2nd operand so be conservative. */
3371 default: break;
3372 }
3373 if (xtensa_simm12b (INTVAL (x)))
3374 *total = 5;
3375 else if (TARGET_CONST16)
3376 *total = COSTS_N_INSNS (2);
3377 else
3378 *total = 6;
3379 return true;
3380
3381 case CONST:
3382 case LABEL_REF:
3383 case SYMBOL_REF:
3384 if (TARGET_CONST16)
3385 *total = COSTS_N_INSNS (2);
3386 else
3387 *total = 5;
3388 return true;
3389
3390 case CONST_DOUBLE:
3391 if (TARGET_CONST16)
3392 *total = COSTS_N_INSNS (4);
3393 else
3394 *total = 7;
3395 return true;
3396
3397 case MEM:
3398 {
3399 int num_words =
3400 (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD) ? 2 : 1;
3401
3402 if (memory_address_p (GET_MODE (x), XEXP ((x), 0)))
3403 *total = COSTS_N_INSNS (num_words);
3404 else
3405 *total = COSTS_N_INSNS (2*num_words);
3406 return true;
3407 }
3408
3409 case FFS:
3410 case CTZ:
3411 *total = COSTS_N_INSNS (TARGET_NSA ? 5 : 50);
3412 return true;
3413
3414 case CLZ:
3415 *total = COSTS_N_INSNS (TARGET_NSA ? 1 : 50);
3416 return true;
3417
3418 case NOT:
3419 *total = COSTS_N_INSNS ((GET_MODE (x) == DImode) ? 3 : 2);
3420 return true;
3421
3422 case AND:
3423 case IOR:
3424 case XOR:
3425 if (GET_MODE (x) == DImode)
3426 *total = COSTS_N_INSNS (2);
3427 else
3428 *total = COSTS_N_INSNS (1);
3429 return true;
3430
3431 case ASHIFT:
3432 case ASHIFTRT:
3433 case LSHIFTRT:
3434 if (GET_MODE (x) == DImode)
3435 *total = COSTS_N_INSNS (50);
3436 else
3437 *total = COSTS_N_INSNS (1);
3438 return true;
3439
3440 case ABS:
3441 {
3442 enum machine_mode xmode = GET_MODE (x);
3443 if (xmode == SFmode)
3444 *total = COSTS_N_INSNS (TARGET_HARD_FLOAT ? 1 : 50);
3445 else if (xmode == DFmode)
3446 *total = COSTS_N_INSNS (50);
3447 else
3448 *total = COSTS_N_INSNS (4);
3449 return true;
3450 }
3451
3452 case PLUS:
3453 case MINUS:
3454 {
3455 enum machine_mode xmode = GET_MODE (x);
3456 if (xmode == SFmode)
3457 *total = COSTS_N_INSNS (TARGET_HARD_FLOAT ? 1 : 50);
3458 else if (xmode == DFmode || xmode == DImode)
3459 *total = COSTS_N_INSNS (50);
3460 else
3461 *total = COSTS_N_INSNS (1);
3462 return true;
3463 }
3464
3465 case NEG:
3466 *total = COSTS_N_INSNS ((GET_MODE (x) == DImode) ? 4 : 2);
3467 return true;
3468
3469 case MULT:
3470 {
3471 enum machine_mode xmode = GET_MODE (x);
3472 if (xmode == SFmode)
3473 *total = COSTS_N_INSNS (TARGET_HARD_FLOAT ? 4 : 50);
3474 else if (xmode == DFmode)
3475 *total = COSTS_N_INSNS (50);
3476 else if (xmode == DImode)
3477 *total = COSTS_N_INSNS (TARGET_MUL32_HIGH ? 10 : 50);
3478 else if (TARGET_MUL32)
3479 *total = COSTS_N_INSNS (4);
3480 else if (TARGET_MAC16)
3481 *total = COSTS_N_INSNS (16);
3482 else if (TARGET_MUL16)
3483 *total = COSTS_N_INSNS (12);
3484 else
3485 *total = COSTS_N_INSNS (50);
3486 return true;
3487 }
3488
3489 case DIV:
3490 case MOD:
3491 {
3492 enum machine_mode xmode = GET_MODE (x);
3493 if (xmode == SFmode)
3494 {
3495 *total = COSTS_N_INSNS (TARGET_HARD_FLOAT_DIV ? 8 : 50);
3496 return true;
3497 }
3498 else if (xmode == DFmode)
3499 {
3500 *total = COSTS_N_INSNS (50);
3501 return true;
3502 }
3503 }
3504 /* Fall through. */
3505
3506 case UDIV:
3507 case UMOD:
3508 {
3509 enum machine_mode xmode = GET_MODE (x);
3510 if (xmode == DImode)
3511 *total = COSTS_N_INSNS (50);
3512 else if (TARGET_DIV32)
3513 *total = COSTS_N_INSNS (32);
3514 else
3515 *total = COSTS_N_INSNS (50);
3516 return true;
3517 }
3518
3519 case SQRT:
3520 if (GET_MODE (x) == SFmode)
3521 *total = COSTS_N_INSNS (TARGET_HARD_FLOAT_SQRT ? 8 : 50);
3522 else
3523 *total = COSTS_N_INSNS (50);
3524 return true;
3525
3526 case SMIN:
3527 case UMIN:
3528 case SMAX:
3529 case UMAX:
3530 *total = COSTS_N_INSNS (TARGET_MINMAX ? 1 : 50);
3531 return true;
3532
3533 case SIGN_EXTRACT:
3534 case SIGN_EXTEND:
3535 *total = COSTS_N_INSNS (TARGET_SEXT ? 1 : 2);
3536 return true;
3537
3538 case ZERO_EXTRACT:
3539 case ZERO_EXTEND:
3540 *total = COSTS_N_INSNS (1);
3541 return true;
3542
3543 default:
3544 return false;
3545 }
3546 }
3547
3548 /* Worker function for TARGET_RETURN_IN_MEMORY. */
3549
3550 static bool
3551 xtensa_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
3552 {
3553 return ((unsigned HOST_WIDE_INT) int_size_in_bytes (type)
3554 > 4 * UNITS_PER_WORD);
3555 }
3556
3557 /* Worker function for TARGET_FUNCTION_VALUE. */
3558
3559 rtx
3560 xtensa_function_value (const_tree valtype, const_tree func ATTRIBUTE_UNUSED,
3561 bool outgoing)
3562 {
3563 return gen_rtx_REG ((INTEGRAL_TYPE_P (valtype)
3564 && TYPE_PRECISION (valtype) < BITS_PER_WORD)
3565 ? SImode : TYPE_MODE (valtype),
3566 outgoing ? GP_OUTGOING_RETURN : GP_RETURN);
3567 }
3568
3569 /* Worker function for TARGET_LIBCALL_VALUE. */
3570
3571 static rtx
3572 xtensa_libcall_value (enum machine_mode mode, const_rtx fun ATTRIBUTE_UNUSED)
3573 {
3574 return gen_rtx_REG ((GET_MODE_CLASS (mode) == MODE_INT
3575 && GET_MODE_SIZE (mode) < UNITS_PER_WORD)
3576 ? SImode : mode, GP_RETURN);
3577 }
3578
3579 /* Worker function TARGET_FUNCTION_VALUE_REGNO_P. */
3580
3581 static bool
3582 xtensa_function_value_regno_p (const unsigned int regno)
3583 {
3584 return (regno == GP_RETURN);
3585 }
3586
3587 /* The static chain is passed in memory. Provide an rtx that gives the
3588 'mem' expression denoting where it is stored. */
3589
3590 static rtx
3591 xtensa_static_chain (const_tree ARG_UNUSED (fndecl), bool incoming_p)
3592 {
3593 rtx base = incoming_p ? arg_pointer_rtx : stack_pointer_rtx;
3594 return gen_frame_mem (Pmode, plus_constant (Pmode, base,
3595 -5 * UNITS_PER_WORD));
3596 }
3597
3598
3599 /* TRAMPOLINE_TEMPLATE: For Xtensa, the trampoline must perform an ENTRY
3600 instruction with a minimal stack frame in order to get some free
3601 registers. Once the actual call target is known, the proper stack frame
3602 size is extracted from the ENTRY instruction at the target and the
3603 current frame is adjusted to match. The trampoline then transfers
3604 control to the instruction following the ENTRY at the target. Note:
3605 this assumes that the target begins with an ENTRY instruction. */
3606
3607 static void
3608 xtensa_asm_trampoline_template (FILE *stream)
3609 {
3610 bool use_call0 = (TARGET_CONST16 || TARGET_ABSOLUTE_LITERALS);
3611
3612 fprintf (stream, "\t.begin no-transform\n");
3613 fprintf (stream, "\tentry\tsp, %d\n", MIN_FRAME_SIZE);
3614
3615 if (use_call0)
3616 {
3617 /* Save the return address. */
3618 fprintf (stream, "\tmov\ta10, a0\n");
3619
3620 /* Use a CALL0 instruction to skip past the constants and in the
3621 process get the PC into A0. This allows PC-relative access to
3622 the constants without relying on L32R. */
3623 fprintf (stream, "\tcall0\t.Lskipconsts\n");
3624 }
3625 else
3626 fprintf (stream, "\tj\t.Lskipconsts\n");
3627
3628 fprintf (stream, "\t.align\t4\n");
3629 fprintf (stream, ".Lchainval:%s0\n", integer_asm_op (4, TRUE));
3630 fprintf (stream, ".Lfnaddr:%s0\n", integer_asm_op (4, TRUE));
3631 fprintf (stream, ".Lskipconsts:\n");
3632
3633 /* Load the static chain and function address from the trampoline. */
3634 if (use_call0)
3635 {
3636 fprintf (stream, "\taddi\ta0, a0, 3\n");
3637 fprintf (stream, "\tl32i\ta9, a0, 0\n");
3638 fprintf (stream, "\tl32i\ta8, a0, 4\n");
3639 }
3640 else
3641 {
3642 fprintf (stream, "\tl32r\ta9, .Lchainval\n");
3643 fprintf (stream, "\tl32r\ta8, .Lfnaddr\n");
3644 }
3645
3646 /* Store the static chain. */
3647 fprintf (stream, "\ts32i\ta9, sp, %d\n", MIN_FRAME_SIZE - 20);
3648
3649 /* Set the proper stack pointer value. */
3650 fprintf (stream, "\tl32i\ta9, a8, 0\n");
3651 fprintf (stream, "\textui\ta9, a9, %d, 12\n",
3652 TARGET_BIG_ENDIAN ? 8 : 12);
3653 fprintf (stream, "\tslli\ta9, a9, 3\n");
3654 fprintf (stream, "\taddi\ta9, a9, %d\n", -MIN_FRAME_SIZE);
3655 fprintf (stream, "\tsub\ta9, sp, a9\n");
3656 fprintf (stream, "\tmovsp\tsp, a9\n");
3657
3658 if (use_call0)
3659 /* Restore the return address. */
3660 fprintf (stream, "\tmov\ta0, a10\n");
3661
3662 /* Jump to the instruction following the ENTRY. */
3663 fprintf (stream, "\taddi\ta8, a8, 3\n");
3664 fprintf (stream, "\tjx\ta8\n");
3665
3666 /* Pad size to a multiple of TRAMPOLINE_ALIGNMENT. */
3667 if (use_call0)
3668 fprintf (stream, "\t.byte\t0\n");
3669 else
3670 fprintf (stream, "\tnop\n");
3671
3672 fprintf (stream, "\t.end no-transform\n");
3673 }
3674
3675 static void
3676 xtensa_trampoline_init (rtx m_tramp, tree fndecl, rtx chain)
3677 {
3678 rtx func = XEXP (DECL_RTL (fndecl), 0);
3679 bool use_call0 = (TARGET_CONST16 || TARGET_ABSOLUTE_LITERALS);
3680 int chain_off = use_call0 ? 12 : 8;
3681 int func_off = use_call0 ? 16 : 12;
3682
3683 emit_block_move (m_tramp, assemble_trampoline_template (),
3684 GEN_INT (TRAMPOLINE_SIZE), BLOCK_OP_NORMAL);
3685
3686 emit_move_insn (adjust_address (m_tramp, SImode, chain_off), chain);
3687 emit_move_insn (adjust_address (m_tramp, SImode, func_off), func);
3688 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__xtensa_sync_caches"),
3689 LCT_NORMAL, VOIDmode, 1, XEXP (m_tramp, 0), Pmode);
3690 }
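
/* Editorial sketch of the initialized trampoline layout in the CALL0
   (CONST16 or absolute-literals) variant, where the literal words
   follow the first instructions:

     offset 12: .Lchainval  <- static chain value   (chain_off)
     offset 16: .Lfnaddr    <- target function addr (func_off)

   The windowed variant uses offsets 8 and 12, and __xtensa_sync_caches
   is called so the stores become visible to instruction fetch.  */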
3691
3692 /* Implement TARGET_LEGITIMATE_CONSTANT_P. */
3693
3694 static bool
3695 xtensa_legitimate_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
3696 {
3697 return !xtensa_tls_referenced_p (x);
3698 }
3699
3700 #include "gt-xtensa.h"