]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/config/xtensa/xtensa.c
* config/xtensa/xtensa.c (order_regs_for_local_alloc): Order the
[thirdparty/gcc.git] / gcc / config / xtensa / xtensa.c
1 /* Subroutines for insn-output.c for Tensilica's Xtensa architecture.
2 Copyright 2001,2002 Free Software Foundation, Inc.
3 Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "coretypes.h"
25 #include "tm.h"
26 #include "rtl.h"
27 #include "regs.h"
28 #include "hard-reg-set.h"
29 #include "basic-block.h"
30 #include "real.h"
31 #include "insn-config.h"
32 #include "conditions.h"
33 #include "insn-flags.h"
34 #include "insn-attr.h"
35 #include "insn-codes.h"
36 #include "recog.h"
37 #include "output.h"
38 #include "tree.h"
39 #include "expr.h"
40 #include "flags.h"
41 #include "reload.h"
42 #include "tm_p.h"
43 #include "function.h"
44 #include "toplev.h"
45 #include "optabs.h"
46 #include "output.h"
47 #include "libfuncs.h"
48 #include "ggc.h"
49 #include "target.h"
50 #include "target-def.h"
51 #include "langhooks.h"
52
/* Enumeration for all of the relational tests, so that we can build
   arrays indexed by the test type, and not worry about the order
   of EQ, NE, etc.  ITEST_MAX doubles as the table size and as the
   "no match" sentinel returned by map_test_to_internal_test().  */

enum internal_test {
  ITEST_EQ,			/* signed/unsigned equality */
  ITEST_NE,			/* signed/unsigned inequality */
  ITEST_GT,			/* signed greater-than */
  ITEST_GE,			/* signed greater-or-equal */
  ITEST_LT,			/* signed less-than */
  ITEST_LE,			/* signed less-or-equal */
  ITEST_GTU,			/* unsigned greater-than */
  ITEST_GEU,			/* unsigned greater-or-equal */
  ITEST_LTU,			/* unsigned less-than */
  ITEST_LEU,			/* unsigned less-or-equal */
  ITEST_MAX			/* table size / sentinel, not a real test */
};
70
/* Cached operands, and operator to compare for use in set/branch on
   condition codes.  Consumed by xtensa_expand_conditional_branch()
   and gen_conditional_move() below.  */
rtx branch_cmp[2];

/* what type of branch to use (integer CMP_SI vs. float CMP_SF/CMP_DF) */
enum cmp_type branch_type;

/* Array giving truth value on whether or not a given hard register
   can support a given mode.  */
char xtensa_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];

/* Current frame size calculated by compute_frame_size. */
unsigned xtensa_current_frame_size;

/* Tables of ld/st opcode names for block moves */
const char *xtensa_ld_opcodes[(int) MAX_MACHINE_MODE];
const char *xtensa_st_opcodes[(int) MAX_MACHINE_MODE];
/* Largest number of pieces a block move may be expanded into inline;
   also used when checking worst-case BLKmode offsets.  */
#define LARGEST_MOVE_RATIO 15

/* Define the structure for the machine field in struct function. */
struct machine_function GTY(())
{
  /* nonzero if the function accesses the previous stack frame */
  int accesses_prev_frame;
  /* set once the incoming argument in a7 has been copied out
     (see xtensa_copy_incoming_a7)  */
  bool incoming_a7_copied;
};

/* Vector, indexed by hard register number, which contains 1 for a
   register that is allowable in a candidate for leaf function
   treatment. */

const char xtensa_leaf_regs[FIRST_PSEUDO_REGISTER] =
{
  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  1, 1, 1,
  1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
  1
};

/* Map hard register number to register class */
const enum reg_class xtensa_regno_to_class[FIRST_PSEUDO_REGISTER] =
{
  RL_REGS, SP_REG, RL_REGS, RL_REGS,
  RL_REGS, RL_REGS, RL_REGS, GR_REGS,
  RL_REGS, RL_REGS, RL_REGS, RL_REGS,
  RL_REGS, RL_REGS, RL_REGS, RL_REGS,
  AR_REGS, AR_REGS, BR_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  FP_REGS, FP_REGS, FP_REGS, FP_REGS,
  ACC_REG,
};
123
/* Map register constraint character to register class.  All 256
   entries start as NO_REGS; the real class for each constraint
   letter is installed at run time (by override_options, outside
   this chunk).  */
enum reg_class xtensa_char_to_class[256] =
{
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
  NO_REGS,	NO_REGS,	NO_REGS,	NO_REGS,
};
192
/* Forward declarations for the static helpers defined in this file.  */
static int b4const_or_zero PARAMS ((int));
static enum internal_test map_test_to_internal_test PARAMS ((enum rtx_code));
static rtx gen_int_relational PARAMS ((enum rtx_code, rtx, rtx, int *));
static rtx gen_float_relational PARAMS ((enum rtx_code, rtx, rtx));
static rtx gen_conditional_move PARAMS ((rtx));
static rtx fixup_subreg_mem PARAMS ((rtx x));
static enum machine_mode xtensa_find_mode_for_size PARAMS ((unsigned));
static struct machine_function * xtensa_init_machine_status PARAMS ((void));
static void printx PARAMS ((FILE *, signed int));
static unsigned int xtensa_multibss_section_type_flags
  PARAMS ((tree, const char *, int));
static void xtensa_select_rtx_section
  PARAMS ((enum machine_mode, rtx, unsigned HOST_WIDE_INT));
static void xtensa_encode_section_info PARAMS ((tree, int));
static bool xtensa_rtx_costs PARAMS ((rtx, int, int, int *));

/* RTX holding the current frame size, shared across prologue code.  */
static rtx frame_size_const;
/* Number of words of arguments seen so far by the argument scanner.  */
static int current_function_arg_words;
/* Register allocation order used for non-leaf functions; leaf
   functions get a different order (see order_regs_for_local_alloc,
   outside this chunk).  */
static const int reg_nonleaf_alloc_order[FIRST_PSEUDO_REGISTER] =
  REG_ALLOC_ORDER;
213 \f
/* This macro generates the assembly code for function entry.
   FILE is a stdio stream to output the code to.
   SIZE is an int: how many units of temporary storage to allocate.
   Refer to the array 'regs_ever_live' to determine which registers
   to save; 'regs_ever_live[I]' is nonzero if register number I
   is ever used in the function.  This macro is responsible for
   knowing which registers should not be saved even if used.  */

#undef TARGET_ASM_FUNCTION_PROLOGUE
#define TARGET_ASM_FUNCTION_PROLOGUE xtensa_function_prologue

/* This macro generates the assembly code for function exit,
   on machines that need it.  If FUNCTION_EPILOGUE is not defined
   then individual return instructions are generated for each
   return statement.  Args are same as for FUNCTION_PROLOGUE.  */

#undef TARGET_ASM_FUNCTION_EPILOGUE
#define TARGET_ASM_FUNCTION_EPILOGUE xtensa_function_epilogue

/* These hooks specify assembly directives for creating certain kinds
   of integer object.  */

#undef TARGET_ASM_ALIGNED_SI_OP
#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"

#undef TARGET_ASM_SELECT_RTX_SECTION
#define TARGET_ASM_SELECT_RTX_SECTION  xtensa_select_rtx_section
#undef TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO  xtensa_encode_section_info

#undef TARGET_RTX_COSTS
#define TARGET_RTX_COSTS xtensa_rtx_costs
#undef TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST hook_int_rtx_0

/* Instantiate the target hook vector from the macros defined above.  */
struct gcc_target targetm = TARGET_INITIALIZER;
250 \f
251
252 /*
253 * Functions to test Xtensa immediate operand validity.
254 */
255
/* Nonzero if V is one of the values encodable in the "b4constu" field
   of unsigned branch-immediate instructions.  */

int
xtensa_b4constu (v)
     int v;
{
  static const int b4constu_values[] = {
    32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
  };
  int i;

  for (i = 0; i < (int) (sizeof b4constu_values / sizeof b4constu_values[0]); i++)
    if (v == b4constu_values[i])
      return 1;
  return 0;
}
282
/* Nonzero if V is a multiple of 256 in the signed 8-bit-scaled range
   usable by ADDMI-style instructions.  */

int
xtensa_simm8x256 (v)
     int v;
{
  if (v < -32768 || v > 32512)
    return 0;
  return (v & 255) == 0;
}
289
/* Nonzero if V fits the ADDI.N immediate field: -1 or 1..15.  */

int
xtensa_ai4const (v)
     int v;
{
  if (v == -1)
    return 1;
  return v >= 1 && v <= 15;
}
296
/* Nonzero if V is in the 7-bit signed range -32..95 (MOVI.N range).  */

int
xtensa_simm7 (v)
     int v;
{
  return !(v < -32 || v > 95);
}
303
/* Nonzero if V is one of the values encodable in the "b4const" field
   of signed branch-immediate instructions.  */

int
xtensa_b4const (v)
     int v;
{
  static const int b4const_values[] = {
    -1, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256
  };
  int i;

  for (i = 0; i < (int) (sizeof b4const_values / sizeof b4const_values[0]); i++)
    if (v == b4const_values[i])
      return 1;
  return 0;
}
330
/* Nonzero if V fits in a signed 8-bit immediate field.  */

int
xtensa_simm8 (v)
     int v;
{
  return !(v < -128 || v > 127);
}
337
/* Nonzero if V is in the range 7..22 (SEXT field encoding).  */

int
xtensa_tp7 (v)
     int v;
{
  return !(v < 7 || v > 22);
}
344
/* Nonzero if V is a word-aligned offset 0..60 (L32I.N/S32I.N range).  */

int
xtensa_lsi4x4 (v)
     int v;
{
  if (v < 0 || v > 60)
    return 0;
  return (v & 3) == 0;
}
351
/* Nonzero if V fits in the signed 12-bit MOVI immediate field.  */

int
xtensa_simm12b (v)
     int v;
{
  return !(v < -2048 || v > 2047);
}
358
/* Nonzero if V is an unsigned 8-bit offset (byte load/store).  */

int
xtensa_uimm8 (v)
     int v;
{
  return !(v < 0 || v > 255);
}
365
/* Nonzero if V is an even offset 0..510 (halfword load/store).  */

int
xtensa_uimm8x2 (v)
     int v;
{
  if (v < 0 || v > 510)
    return 0;
  return (v & 1) == 0;
}
372
/* Nonzero if V is a word-aligned offset 0..1020 (word load/store).  */

int
xtensa_uimm8x4 (v)
     int v;
{
  if (v < 0 || v > 1020)
    return 0;
  return (v & 3) == 0;
}
379
380
/* This is just like the standard true_regnum() function except that it
   works even when reg_renumber is not initialized.

   Returns the hard register number for X (a REG or SUBREG), following
   reg_renumber when a pseudo has been assigned a hard reg, or -1 when
   no hard register can be determined.  */

int
xt_true_regnum (x)
     rtx x;
{
  if (GET_CODE (x) == REG)
    {
      /* Pseudo that has been allocated: report its hard register.  */
      if (reg_renumber
	  && REGNO (x) >= FIRST_PSEUDO_REGISTER
	  && reg_renumber[REGNO (x)] >= 0)
	return reg_renumber[REGNO (x)];
      return REGNO (x);
    }
  if (GET_CODE (x) == SUBREG)
    {
      /* Recurse on the inner reg; only hard registers get the
	 subreg byte offset folded in.  */
      int base = xt_true_regnum (SUBREG_REG (x));
      if (base >= 0 && base < FIRST_PSEUDO_REGISTER)
        return base + subreg_regno_offset (REGNO (SUBREG_REG (x)),
					   GET_MODE (SUBREG_REG (x)),
					   SUBREG_BYTE (x), GET_MODE (x));
    }
  return -1;
}
406
407
408 int
409 add_operand (op, mode)
410 rtx op;
411 enum machine_mode mode;
412 {
413 if (GET_CODE (op) == CONST_INT)
414 return (xtensa_simm8 (INTVAL (op)) ||
415 xtensa_simm8x256 (INTVAL (op)));
416
417 return register_operand (op, mode);
418 }
419
420
421 int
422 arith_operand (op, mode)
423 rtx op;
424 enum machine_mode mode;
425 {
426 if (GET_CODE (op) == CONST_INT)
427 return xtensa_simm8 (INTVAL (op));
428
429 return register_operand (op, mode);
430 }
431
432
433 int
434 nonimmed_operand (op, mode)
435 rtx op;
436 enum machine_mode mode;
437 {
438 /* We cannot use the standard nonimmediate_operand() predicate because
439 it includes constant pool memory operands. */
440
441 if (memory_operand (op, mode))
442 return !constantpool_address_p (XEXP (op, 0));
443
444 return register_operand (op, mode);
445 }
446
447
448 int
449 mem_operand (op, mode)
450 rtx op;
451 enum machine_mode mode;
452 {
453 /* We cannot use the standard memory_operand() predicate because
454 it includes constant pool memory operands. */
455
456 if (memory_operand (op, mode))
457 return !constantpool_address_p (XEXP (op, 0));
458
459 return FALSE;
460 }
461
462
/* Nonzero if a move between OPERANDS[0] and OPERANDS[1] in MODE can be
   implemented directly.  Either the destination or source must be a
   register, and the MAC16 accumulator doesn't count (moves to/from the
   accumulator always need a general register on the other side).  */

int
xtensa_valid_move (mode, operands)
     enum machine_mode mode;
     rtx *operands;
{
  /* Either the destination or source must be a register, and the
     MAC16 accumulator doesn't count.  */

  if (register_operand (operands[0], mode))
    {
      int dst_regnum = xt_true_regnum (operands[0]);

      /* The stack pointer can only be assigned with a MOVSP opcode. */
      if (dst_regnum == STACK_POINTER_REGNUM)
	/* MOVSP is SImode, register-to-register, and its source may
	   not be the accumulator.  */
	return (mode == SImode
		&& register_operand (operands[1], mode)
		&& !ACC_REG_P (xt_true_regnum (operands[1])));

      if (!ACC_REG_P (dst_regnum))
	return true;
    }
  if (register_operand (operands[1], mode))
    {
      int src_regnum = xt_true_regnum (operands[1]);
      if (!ACC_REG_P (src_regnum))
	return true;
    }
  return FALSE;
}
492
493
494 int
495 mask_operand (op, mode)
496 rtx op;
497 enum machine_mode mode;
498 {
499 if (GET_CODE (op) == CONST_INT)
500 return xtensa_mask_immediate (INTVAL (op));
501
502 return register_operand (op, mode);
503 }
504
505
506 int
507 extui_fldsz_operand (op, mode)
508 rtx op;
509 enum machine_mode mode ATTRIBUTE_UNUSED;
510 {
511 return ((GET_CODE (op) == CONST_INT)
512 && xtensa_mask_immediate ((1 << INTVAL (op)) - 1));
513 }
514
515
516 int
517 sext_operand (op, mode)
518 rtx op;
519 enum machine_mode mode;
520 {
521 if (TARGET_SEXT)
522 return nonimmed_operand (op, mode);
523 return mem_operand (op, mode);
524 }
525
526
527 int
528 sext_fldsz_operand (op, mode)
529 rtx op;
530 enum machine_mode mode ATTRIBUTE_UNUSED;
531 {
532 return ((GET_CODE (op) == CONST_INT) && xtensa_tp7 (INTVAL (op) - 1));
533 }
534
535
536 int
537 lsbitnum_operand (op, mode)
538 rtx op;
539 enum machine_mode mode ATTRIBUTE_UNUSED;
540 {
541 if (GET_CODE (op) == CONST_INT)
542 {
543 return (BITS_BIG_ENDIAN
544 ? (INTVAL (op) == BITS_PER_WORD-1)
545 : (INTVAL (op) == 0));
546 }
547 return FALSE;
548 }
549
550
/* Nonzero if V is zero or a valid b4const value.  */

static int
b4const_or_zero (v)
     int v;
{
  return v == 0 || xtensa_b4const (v);
}
559
560
561 int
562 branch_operand (op, mode)
563 rtx op;
564 enum machine_mode mode;
565 {
566 if (GET_CODE (op) == CONST_INT)
567 return b4const_or_zero (INTVAL (op));
568
569 return register_operand (op, mode);
570 }
571
572
573 int
574 ubranch_operand (op, mode)
575 rtx op;
576 enum machine_mode mode;
577 {
578 if (GET_CODE (op) == CONST_INT)
579 return xtensa_b4constu (INTVAL (op));
580
581 return register_operand (op, mode);
582 }
583
584
/* Predicate for the address operand of a call.  Accepts a register
   (other than the argument pointer and the virtual registers, which
   cannot survive to call time), or a constant address -- but with PIC,
   direct calls are only allowed to static functions (marked via
   SYMBOL_REF_FLAG by the encode_section_info hook).  */

int
call_insn_operand (op, mode)
     rtx op;
     enum machine_mode mode ATTRIBUTE_UNUSED;
{
  /* Exclude arg_pointer and the frame-pointer..virtual register range;
     any other register is fine.  */
  if ((GET_CODE (op) == REG)
      && (op != arg_pointer_rtx)
      && ((REGNO (op) < FRAME_POINTER_REGNUM)
	  || (REGNO (op) > LAST_VIRTUAL_REGISTER)))
    return TRUE;

  if (CONSTANT_ADDRESS_P (op))
    {
      /* Direct calls only allowed to static functions with PIC.  */
      return (!flag_pic || (GET_CODE (op) == SYMBOL_REF
			    && SYMBOL_REF_FLAG (op)));
    }

  return FALSE;
}
605
606
/* General move-source predicate: registers, CONSTANT_P_RTX, constants
   that fit the 12-bit MOVI field, and valid memory addresses.  */

int
move_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return TRUE;

  /* Accept CONSTANT_P_RTX, since it will be gone by CSE1 and
     result in 0/1.  */
  if (GET_CODE (op) == CONSTANT_P_RTX)
    return TRUE;

  if (GET_CODE (op) == CONST_INT)
    /* Only constants loadable with a single MOVI.  */
    return xtensa_simm12b (INTVAL (op));

  if (GET_CODE (op) == MEM)
    return memory_address_p (mode, XEXP (op, 0));

  return FALSE;
}
628
629
/* Nonzero if OP is a memory reference whose address is a base register
   or base-plus-small-offset usable by the narrow L32I.N/S32I.N forms
   (offset a multiple of 4 in 0..60).  */

int
smalloffset_mem_p (op)
     rtx op;
{
  if (GET_CODE (op) == MEM)
    {
      rtx addr = XEXP (op, 0);
      if (GET_CODE (addr) == REG)
	return REG_OK_FOR_BASE_P (addr);
      if (GET_CODE (addr) == PLUS)
	{
	  /* The constant may appear as either operand of the PLUS;
	     try the first, then fall back to the second.  */
	  rtx offset = XEXP (addr, 0);
	  if (GET_CODE (offset) != CONST_INT)
	    offset = XEXP (addr, 1);
	  if (GET_CODE (offset) != CONST_INT)
	    return FALSE;
	  return xtensa_lsi4x4 (INTVAL (offset));
	}
    }
  return FALSE;
}
651
652
653 int
654 smalloffset_double_mem_p (op)
655 rtx op;
656 {
657 if (!smalloffset_mem_p (op))
658 return FALSE;
659 return smalloffset_mem_p (adjust_address (op, GET_MODE (op), 4));
660 }
661
662
/* Nonzero if ADDR refers into the constant pool: either a constant-pool
   SYMBOL_REF directly, or a CONST (PLUS sym offset) form with a
   word-aligned constant offset.  */

int
constantpool_address_p (addr)
     rtx addr;
{
  rtx sym = addr;

  if (GET_CODE (addr) == CONST)
    {
      rtx offset;

      /* only handle (PLUS (SYM, OFFSET)) form */
      addr = XEXP (addr, 0);
      if (GET_CODE (addr) != PLUS)
	return FALSE;

      /* make sure the address is word aligned */
      offset = XEXP (addr, 1);
      if ((GET_CODE (offset) != CONST_INT)
	  || ((INTVAL (offset) & 3) != 0))
	return FALSE;

      sym = XEXP (addr, 0);
    }

  if ((GET_CODE (sym) == SYMBOL_REF)
      && CONSTANT_POOL_ADDRESS_P (sym))
    return TRUE;
  return FALSE;
}
692
693
694 int
695 constantpool_mem_p (op)
696 rtx op;
697 {
698 if (GET_CODE (op) == MEM)
699 return constantpool_address_p (XEXP (op, 0));
700 return FALSE;
701 }
702
703
/* Move-operand predicate that rejects all constants: accepts registers
   and valid memory references (looking through a SUBREG of MEM).  */

int
non_const_move_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  if (register_operand (op, mode))
    return 1;
  if (GET_CODE (op) == SUBREG)
    op = SUBREG_REG (op);
  if (GET_CODE (op) == MEM)
    return memory_address_p (mode, XEXP (op, 0));
  return FALSE;
}
717
718
/* Accept the floating point constant 1 in the appropriate mode.
   The SFmode/DFmode representations of 1.0 are computed once and
   cached in function-static variables.  */

int
const_float_1_operand (op, mode)
     rtx op;
     enum machine_mode mode;
{
  REAL_VALUE_TYPE d;
  static REAL_VALUE_TYPE onedf;
  static REAL_VALUE_TYPE onesf;
  static int one_initialized;

  if ((GET_CODE (op) != CONST_DOUBLE)
      || (mode != GET_MODE (op))
      || (mode != DFmode && mode != SFmode))
    return FALSE;

  REAL_VALUE_FROM_CONST_DOUBLE (d, op);

  /* Lazily initialize the cached 1.0 values on first use.  */
  if (! one_initialized)
    {
      onedf = REAL_VALUE_ATOF ("1.0", DFmode);
      onesf = REAL_VALUE_ATOF ("1.0", SFmode);
      one_initialized = TRUE;
    }

  if (mode == DFmode)
    return REAL_VALUES_EQUAL (d, onedf);
  else
    return REAL_VALUES_EQUAL (d, onesf);
}
750
751
752 int
753 fpmem_offset_operand (op, mode)
754 rtx op;
755 enum machine_mode mode ATTRIBUTE_UNUSED;
756 {
757 if (GET_CODE (op) == CONST_INT)
758 return xtensa_mem_offset (INTVAL (op), SFmode);
759 return 0;
760 }
761
762
/* Emit a sign-extension of SRC into DST using a shift-left/shift-right
   pair (for targets or modes without a direct sign-extend insn).  */

void
xtensa_extend_reg (dst, src)
     rtx dst;
     rtx src;
{
  rtx temp = gen_reg_rtx (SImode);
  /* Shift amount that moves SRC's sign bit to the top of the word.  */
  rtx shift = GEN_INT (BITS_PER_WORD - GET_MODE_BITSIZE (GET_MODE (src)));

  /* generate paradoxical subregs as needed so that the modes match */
  src = simplify_gen_subreg (SImode, src, GET_MODE (src), 0);
  dst = simplify_gen_subreg (SImode, dst, GET_MODE (dst), 0);

  emit_insn (gen_ashlsi3 (temp, src, shift));
  emit_insn (gen_ashrsi3 (dst, temp, shift));
}
778
779
/* Load constant SRC into DST by spilling it to the constant pool and
   emitting a (PC-relative) load.  */

void
xtensa_load_constant (dst, src)
     rtx dst;
     rtx src;
{
  enum machine_mode mode = GET_MODE (dst);
  src = force_const_mem (SImode, src);

  /* PC-relative loads are always SImode so we have to add a SUBREG if that
     is not the desired mode */

  if (mode != SImode)
    {
      if (register_operand (dst, mode))
	/* Load into the destination viewed as SImode.  */
	dst = simplify_gen_subreg (SImode, dst, mode, 0);
      else
	{
	  /* Destination is not a register: load into a temporary
	     register first, then view it in the destination mode.  */
	  src = force_reg (SImode, src);
	  src = gen_lowpart_SUBREG (mode, src);
	}
    }

  emit_move_insn (dst, src);
}
804
805
806 int
807 branch_operator (x, mode)
808 rtx x;
809 enum machine_mode mode;
810 {
811 if (GET_MODE (x) != mode)
812 return FALSE;
813
814 switch (GET_CODE (x))
815 {
816 case EQ:
817 case NE:
818 case LT:
819 case GE:
820 return TRUE;
821 default:
822 break;
823 }
824 return FALSE;
825 }
826
827
828 int
829 ubranch_operator (x, mode)
830 rtx x;
831 enum machine_mode mode;
832 {
833 if (GET_MODE (x) != mode)
834 return FALSE;
835
836 switch (GET_CODE (x))
837 {
838 case LTU:
839 case GEU:
840 return TRUE;
841 default:
842 break;
843 }
844 return FALSE;
845 }
846
847
848 int
849 boolean_operator (x, mode)
850 rtx x;
851 enum machine_mode mode;
852 {
853 if (GET_MODE (x) != mode)
854 return FALSE;
855
856 switch (GET_CODE (x))
857 {
858 case EQ:
859 case NE:
860 return TRUE;
861 default:
862 break;
863 }
864 return FALSE;
865 }
866
867
/* Nonzero if V is a valid EXTUI mask: between 1 and MAX_MASK_SIZE
   consecutive one bits starting at bit 0 and nothing above them,
   i.e. V == 2^k - 1 for some k in 1..16.  Computed in unsigned
   arithmetic to avoid shifting negative values.  */

int
xtensa_mask_immediate (v)
     int v;
{
#define MAX_MASK_SIZE 16
  unsigned int u = (unsigned int) v;

  return u != 0
	 && (u & (u + 1)) == 0
	 && u <= (1u << MAX_MASK_SIZE) - 1;
}
886
887
/* Nonzero if V is a valid load/store offset for MODE, matching the
   alignment and range of the corresponding uimm8 variants.  */

int
xtensa_mem_offset (v, mode)
     unsigned v;
     enum machine_mode mode;
{
  switch (mode)
    {
    case BLKmode:
      /* Handle the worst case for block moves.  See xtensa_expand_block_move
	 where we emit an optimized block move operation if the block can be
	 moved in < "move_ratio" pieces.  The worst case is when the block is
	 aligned but has a size of (3 mod 4) (does this happen?) so that the
	 last piece requires a byte load/store. */
      return (xtensa_uimm8 (v) &&
	      xtensa_uimm8 (v + MOVE_MAX * LARGEST_MOVE_RATIO));

    case QImode:
      return xtensa_uimm8 (v);

    case HImode:
      return xtensa_uimm8x2 (v);

    case DFmode:
      /* Doubles are accessed as two word loads; both halves must be
	 addressable.  */
      return (xtensa_uimm8x4 (v) && xtensa_uimm8x4 (v + 4));

    default:
      break;
    }

  /* All remaining modes use word-sized accesses.  */
  return xtensa_uimm8x4 (v);
}
919
920
921 /* Make normal rtx_code into something we can index from an array */
922
923 static enum internal_test
924 map_test_to_internal_test (test_code)
925 enum rtx_code test_code;
926 {
927 enum internal_test test = ITEST_MAX;
928
929 switch (test_code)
930 {
931 default: break;
932 case EQ: test = ITEST_EQ; break;
933 case NE: test = ITEST_NE; break;
934 case GT: test = ITEST_GT; break;
935 case GE: test = ITEST_GE; break;
936 case LT: test = ITEST_LT; break;
937 case LE: test = ITEST_LE; break;
938 case GTU: test = ITEST_GTU; break;
939 case GEU: test = ITEST_GEU; break;
940 case LTU: test = ITEST_LTU; break;
941 case LEU: test = ITEST_LEU; break;
942 }
943
944 return test;
945 }
946
947
/* Generate the code to compare two integer values.  The return value is
   the comparison expression.  Since the hardware only supports EQ, NE,
   LT, GE, LTU, GEU with branch immediates, other tests are rewritten:
   GT/LE become LT/GE with operands swapped and the constant bumped by
   one, and *P_INVERT tells the caller to reverse the branch sense.  */

static rtx
gen_int_relational (test_code, cmp0, cmp1, p_invert)
     enum rtx_code test_code;	/* relational test (EQ, etc) */
     rtx cmp0;			/* first operand to compare */
     rtx cmp1;			/* second operand to compare */
     int *p_invert;		/* whether branch needs to reverse its test */
{
  struct cmp_info {
    enum rtx_code test_code;	/* test code to use in insn */
    int (*const_range_p) PARAMS ((int)); /* predicate function to check range */
    int const_add;		/* constant to add (convert LE -> LT) */
    int reverse_regs;		/* reverse registers in test */
    int invert_const;		/* != 0 if invert value if cmp1 is constant */
    int invert_reg;		/* != 0 if invert value if cmp1 is register */
    int unsignedp;		/* != 0 for unsigned comparisons.  */
  };

  static struct cmp_info info[ (int)ITEST_MAX ] = {

    { EQ,	b4const_or_zero,	0, 0,	0, 0,	0 },	/* EQ  */
    { NE,	b4const_or_zero,	0, 0,	0, 0,	0 },	/* NE  */

    { LT,	b4const_or_zero,	1, 1,	1, 0,	0 },	/* GT  */
    { GE,	b4const_or_zero,	0, 0,	0, 0,	0 },	/* GE  */
    { LT,	b4const_or_zero,	0, 0,	0, 0,	0 },	/* LT  */
    { GE,	b4const_or_zero,	1, 1,	1, 0,	0 },	/* LE  */

    { LTU,	xtensa_b4constu,	1, 1,	1, 0,	1 },	/* GTU */
    { GEU,	xtensa_b4constu,	0, 0,	0, 0,	1 },	/* GEU */
    { LTU,	xtensa_b4constu,	0, 0,	0, 0,	1 },	/* LTU */
    { GEU,	xtensa_b4constu,	1, 1,	1, 0,	1 },	/* LEU */
  };

  enum internal_test test;
  enum machine_mode mode;
  struct cmp_info *p_info;

  test = map_test_to_internal_test (test_code);
  if (test == ITEST_MAX)
    abort ();

  p_info = &info[ (int)test ];

  mode = GET_MODE (cmp0);
  if (mode == VOIDmode)
    mode = GET_MODE (cmp1);

  /* Make sure we can handle any constants given to us.  */
  if (GET_CODE (cmp1) == CONST_INT)
    {
      HOST_WIDE_INT value = INTVAL (cmp1);
      unsigned HOST_WIDE_INT uvalue = (unsigned HOST_WIDE_INT)value;

      /* if the immediate overflows or does not fit in the immediate field,
	 spill it to a register */

      /* NOTE(review): the signed branch of this overflow test relies on
	 signed wraparound (value + const_add overflowing), which is
	 undefined behavior in strict C -- works under traditional
	 compilers but worth confirming/rewriting in unsigned math.  */
      if ((p_info->unsignedp ?
	   (uvalue + p_info->const_add > uvalue) :
	   (value + p_info->const_add > value)) != (p_info->const_add > 0))
	{
	  cmp1 = force_reg (mode, cmp1);
	}
      else if (!(p_info->const_range_p) (value + p_info->const_add))
	{
	  cmp1 = force_reg (mode, cmp1);
	}
    }
  else if ((GET_CODE (cmp1) != REG) && (GET_CODE (cmp1) != SUBREG))
    {
      cmp1 = force_reg (mode, cmp1);
    }

  /* See if we need to invert the result.  */
  *p_invert = ((GET_CODE (cmp1) == CONST_INT)
	       ? p_info->invert_const
	       : p_info->invert_reg);

  /* Comparison to constants, may involve adding 1 to change a LT into LE.
     Comparison between two registers, may involve switching operands.  */
  if (GET_CODE (cmp1) == CONST_INT)
    {
      if (p_info->const_add != 0)
	cmp1 = GEN_INT (INTVAL (cmp1) + p_info->const_add);

    }
  else if (p_info->reverse_regs)
    {
      rtx temp = cmp0;
      cmp0 = cmp1;
      cmp1 = temp;
    }

  return gen_rtx (p_info->test_code, VOIDmode, cmp0, cmp1);
}
1045
1046
/* Generate the code to compare two float values.  The return value is
   the comparison expression.  Emits the FP compare into the boolean
   register FPCC and returns an EQ/NE test against it; GT/GE are
   handled by swapping operands into LT/LE, NE by inverting EQ.  */

static rtx
gen_float_relational (test_code, cmp0, cmp1)
     enum rtx_code test_code;	/* relational test (EQ, etc) */
     rtx cmp0;			/* first operand to compare */
     rtx cmp1;			/* second operand to compare */
{
  rtx (*gen_fn) PARAMS ((rtx, rtx, rtx));
  rtx brtmp;
  int reverse_regs, invert;

  switch (test_code)
    {
    case EQ: reverse_regs = 0; invert = 0; gen_fn = gen_seq_sf; break;
    case NE: reverse_regs = 0; invert = 1; gen_fn = gen_seq_sf; break;
    case LE: reverse_regs = 0; invert = 0; gen_fn = gen_sle_sf; break;
    case GT: reverse_regs = 1; invert = 0; gen_fn = gen_slt_sf; break;
    case LT: reverse_regs = 0; invert = 0; gen_fn = gen_slt_sf; break;
    case GE: reverse_regs = 1; invert = 0; gen_fn = gen_sle_sf; break;
    default:
      fatal_insn ("bad test", gen_rtx (test_code, VOIDmode, cmp0, cmp1));
      reverse_regs = 0; invert = 0; gen_fn = 0; /* avoid compiler warnings */
    }

  if (reverse_regs)
    {
      rtx temp = cmp0;
      cmp0 = cmp1;
      cmp1 = temp;
    }

  /* Compare into the FP condition-code register, then test that.  */
  brtmp = gen_rtx_REG (CCmode, FPCC_REGNUM);
  emit_insn (gen_fn (brtmp, cmp0, cmp1));

  return gen_rtx (invert ? EQ : NE, VOIDmode, brtmp, const0_rtx);
}
1085
1086
/* Expand a conditional branch insn.  OPERANDS[0] is the branch target
   label; the comparison operands and type were cached earlier in
   branch_cmp[] / branch_type by the cmp expanders.  */

void
xtensa_expand_conditional_branch (operands, test_code)
     rtx *operands;
     enum rtx_code test_code;
{
  enum cmp_type type = branch_type;
  rtx cmp0 = branch_cmp[0];
  rtx cmp1 = branch_cmp[1];
  rtx cmp;
  int invert;
  rtx label1, label2;

  switch (type)
    {
    case CMP_DF:
    default:
      /* DFmode comparisons are not supported here.  */
      fatal_insn ("bad test", gen_rtx (test_code, VOIDmode, cmp0, cmp1));

    case CMP_SI:
      invert = FALSE;
      cmp = gen_int_relational (test_code, cmp0, cmp1, &invert);
      break;

    case CMP_SF:
      if (!TARGET_HARD_FLOAT)
	fatal_insn ("bad test", gen_rtx (test_code, VOIDmode, cmp0, cmp1));
      invert = FALSE;
      cmp = gen_float_relational (test_code, cmp0, cmp1);
      break;
    }

  /* Generate the branch.  */

  label1 = gen_rtx_LABEL_REF (VOIDmode, operands[0]);
  label2 = pc_rtx;

  /* An inverted test swaps the taken/fall-through arms.  */
  if (invert)
    {
      label2 = label1;
      label1 = pc_rtx;
    }

  emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
			       gen_rtx_IF_THEN_ELSE (VOIDmode, cmp,
						     label1,
						     label2)));
}
1134
1135
/* Build the condition rtx for a conditional move from the cached
   branch_cmp[] operands.  Returns 0 if the comparison cannot be
   expressed in a form the conditional-move patterns accept (the
   caller then falls back to the generic expansion).  */

static rtx
gen_conditional_move (cmp)
     rtx cmp;
{
  enum rtx_code code = GET_CODE (cmp);
  rtx op0 = branch_cmp[0];
  rtx op1 = branch_cmp[1];

  if (branch_type == CMP_SI)
    {
      /* Jump optimization calls get_condition() which canonicalizes
	 comparisons like (GE x <const>) to (GT x <const-1>).
	 Transform those comparisons back to GE, since that is the
	 comparison supported in Xtensa.  We shouldn't have to
	 transform <LE x const> comparisons, because neither
	 xtensa_expand_conditional_branch() nor get_condition() will
	 produce them.  */

      if ((code == GT) && (op1 == constm1_rtx))
	{
	  code = GE;
	  op1 = const0_rtx;
	}
      cmp = gen_rtx (code, VOIDmode, cc0_rtx, const0_rtx);

      if (boolean_operator (cmp, VOIDmode))
	{
	  /* swap the operands to make const0 second */
	  if (op0 == const0_rtx)
	    {
	      op0 = op1;
	      op1 = const0_rtx;
	    }

	  /* if not comparing against zero, emit a comparison (subtract) */
	  if (op1 != const0_rtx)
	    {
	      op0 = expand_binop (SImode, sub_optab, op0, op1,
				  0, 0, OPTAB_LIB_WIDEN);
	      op1 = const0_rtx;
	    }
	}
      else if (branch_operator (cmp, VOIDmode))
	{
	  /* swap the operands to make const0 second */
	  if (op0 == const0_rtx)
	    {
	      op0 = op1;
	      op1 = const0_rtx;

	      /* Swapping reverses the sense of LT/GE; other codes
		 cannot reach here.  */
	      switch (code)
		{
		case LT: code = GE; break;
		case GE: code = LT; break;
		default: abort ();
		}
	    }

	  if (op1 != const0_rtx)
	    return 0;
	}
      else
	return 0;

      return gen_rtx (code, VOIDmode, op0, op1);
    }

  if (TARGET_HARD_FLOAT && (branch_type == CMP_SF))
    return gen_float_relational (code, op0, op1);

  return 0;
}
1208
1209
/* Expand a conditional move.  OPERANDS[0] is the destination,
   OPERANDS[1] the condition, OPERANDS[2]/[3] the two source values;
   ISFLT is nonzero for a floating-point destination.  Returns 1 on
   success, 0 if the condition cannot be handled (caller falls back).  */

int
xtensa_expand_conditional_move (operands, isflt)
    rtx *operands;
    int isflt;
{
  rtx cmp;
  rtx (*gen_fn) PARAMS ((rtx, rtx, rtx, rtx, rtx));

  if (!(cmp = gen_conditional_move (operands[1])))
    return 0;

  /* Select the pattern by destination mode and by whether the
     condition is an integer (internal0) or FP (internal1) test.  */
  if (isflt)
    gen_fn = (branch_type == CMP_SI
	      ? gen_movsfcc_internal0
	      : gen_movsfcc_internal1);
  else
    gen_fn = (branch_type == CMP_SI
	      ? gen_movsicc_internal0
	      : gen_movsicc_internal1);

  emit_insn (gen_fn (operands[0], XEXP (cmp, 0),
		     operands[2], operands[3], cmp));
  return 1;
}
1234
1235
/* Expand a "store condition code" operation: set OPERANDS[0] to 1/0
   according to comparison OPERANDS[1], implemented as a conditional
   move between two temporaries.  Returns 1 on success, 0 if the
   comparison cannot be handled.  */

int
xtensa_expand_scc (operands)
     rtx *operands;
{
  rtx dest = operands[0];
  rtx cmp = operands[1];
  rtx one_tmp, zero_tmp;
  rtx (*gen_fn) PARAMS ((rtx, rtx, rtx, rtx, rtx));

  if (!(cmp = gen_conditional_move (cmp)))
    return 0;

  /* Materialize the two possible results, then select with a cmov.  */
  one_tmp = gen_reg_rtx (SImode);
  zero_tmp = gen_reg_rtx (SImode);
  emit_insn (gen_movsi (one_tmp, const_true_rtx));
  emit_insn (gen_movsi (zero_tmp, const0_rtx));

  gen_fn = (branch_type == CMP_SI
	    ? gen_movsicc_internal0
	    : gen_movsicc_internal1);
  emit_insn (gen_fn (dest, XEXP (cmp, 0), one_tmp, zero_tmp, cmp));
  return 1;
}
1259
1260
/* Emit insns to move operands[1] into operands[0].

   Return 1 if we have written out everything that needs to be done to
   do the move.  Otherwise, return 0 and the caller will emit the move
   normally.  */

int
xtensa_emit_move_sequence (operands, mode)
     rtx *operands;
     enum machine_mode mode;
{
  /* Constants that cannot be loaded with a single MOVI go through the
     constant pool (CONSTANT_P_RTX is left alone -- it folds away).  */
  if (CONSTANT_P (operands[1])
      && GET_CODE (operands[1]) != CONSTANT_P_RTX
      && (GET_CODE (operands[1]) != CONST_INT
	  || !xtensa_simm12b (INTVAL (operands[1]))))
    {
      xtensa_load_constant (operands[0], operands[1]);
      return 1;
    }

  if (!(reload_in_progress | reload_completed))
    {
      /* Force invalid source/destination combinations (e.g. MAC16
	 accumulator on both sides) through a register.  */
      if (!xtensa_valid_move (mode, operands))
	operands[1] = force_reg (mode, operands[1]);

      if (xtensa_copy_incoming_a7 (operands, mode))
	return 1;
    }

  /* During reload we don't want to emit (subreg:X (mem:Y)) since that
     instruction won't be recognized after reload.  So we remove the
     subreg and adjust mem accordingly.  */
  if (reload_in_progress)
    {
      operands[0] = fixup_subreg_mem (operands[0]);
      operands[1] = fixup_subreg_mem (operands[1]);
    }
  return 0;
}
1300
/* If X is a SUBREG of a pseudo register, substitute the pseudo's
   memory equivalent (reg_equiv_mem) and fold the subreg into a plain
   MEM via alter_subreg.  Otherwise return X unchanged.  Used only
   during reload (see xtensa_emit_move_sequence).  */
static rtx
fixup_subreg_mem (x)
    rtx x;
{
  if (GET_CODE (x) == SUBREG
      && GET_CODE (SUBREG_REG (x)) == REG
      && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER)
    {
      rtx temp =
	gen_rtx_SUBREG (GET_MODE (x),
			reg_equiv_mem [REGNO (SUBREG_REG (x))],
			SUBREG_BYTE (x));
      x = alter_subreg (&temp);
    }
  return x;
}
1317
1318
1319 /* Check if this move is copying an incoming argument in a7. If so,
1320 emit the move, followed by the special "set_frame_ptr"
1321 unspec_volatile insn, at the very beginning of the function. This
1322 is necessary because the register allocator will ignore conflicts
1323 with a7 and may assign some other pseudo to a7. If that pseudo was
1324 assigned prior to this move, it would clobber the incoming argument
1325 in a7. By copying the argument out of a7 as the very first thing,
1326 and then immediately following that with an unspec_volatile to keep
1327 the scheduler away, we should avoid any problems. */
1328
bool
xtensa_copy_incoming_a7 (operands, mode)
    rtx *operands;
    enum machine_mode mode;
{
  /* Only the first move that mentions a7 gets this treatment (see the
     long comment above for why).  */
  if (a7_overlap_mentioned_p (operands[1])
      && !cfun->machine->incoming_a7_copied)
    {
      rtx mov;
      /* Pick the move pattern matching the operand mode.  */
      switch (mode)
	{
	case DFmode:
	  mov = gen_movdf_internal (operands[0], operands[1]);
	  break;
	case SFmode:
	  mov = gen_movsf_internal (operands[0], operands[1]);
	  break;
	case DImode:
	  mov = gen_movdi_internal (operands[0], operands[1]);
	  break;
	case SImode:
	  mov = gen_movsi_internal (operands[0], operands[1]);
	  break;
	case HImode:
	  mov = gen_movhi_internal (operands[0], operands[1]);
	  break;
	case QImode:
	  mov = gen_movqi_internal (operands[0], operands[1]);
	  break;
	default:
	  abort ();
	}

      /* Insert the instructions before any other argument copies.
	 (The set_frame_ptr insn comes _after_ the move, so push it
	 out first.) */
      push_topmost_sequence ();
      emit_insn_after (gen_set_frame_ptr (), get_insns ());
      emit_insn_after (mov, get_insns ());
      pop_topmost_sequence ();

      /* Ideally the incoming argument in a7 would only be copied
	 once, since propagating a7 into the body of a function
	 will almost certainly lead to errors. However, there is
	 at least one harmless case (in GCSE) where the original
	 copy from a7 is changed to copy into a new pseudo. Thus,
	 we use a flag to only do this special treatment for the
	 first copy of a7. */

      cfun->machine->incoming_a7_copied = true;

      return 1;
    }

  return 0;
}
1385
1386
1387 /* Try to expand a block move operation to an RTL block move instruction.
1388 If not optimizing or if the block size is not a constant or if the
1389 block is small, the expansion fails and GCC falls back to calling
1390 memcpy().
1391
1392 operands[0] is the destination
1393 operands[1] is the source
1394 operands[2] is the length
1395 operands[3] is the alignment */
1396
int
xtensa_expand_block_move (operands)
    rtx *operands;
{
  rtx dest = operands[0];
  rtx src = operands[1];
  int bytes = INTVAL (operands[2]);
  int align = XINT (operands[3], 0);
  int num_pieces, move_ratio;

  /* If this is not a fixed size move, just call memcpy */
  if (!optimize || (GET_CODE (operands[2]) != CONST_INT))
    return 0;

  /* Anything to move? */
  if (bytes <= 0)
    return 1;

  if (align > MOVE_MAX)
    align = MOVE_MAX;

  /* decide whether to expand inline based on the optimization level */
  move_ratio = 4;
  if (optimize > 2)
    move_ratio = LARGEST_MOVE_RATIO;
  /* Estimate: one aligned piece per full unit plus one per leftover
     byte.  */
  num_pieces = (bytes / align) + (bytes % align); /* close enough anyway */
  if (num_pieces >= move_ratio)
    return 0;

  /* make sure the memory addresses are valid */
  operands[0] = validize_mem (dest);
  operands[1] = validize_mem (src);

  emit_insn (gen_movstrsi_internal (operands[0], operands[1],
				    operands[2], operands[3]));
  return 1;
}
1434
1435
1436 /* Emit a sequence of instructions to implement a block move, trying
1437 to hide load delay slots as much as possible. Load N values into
1438 temporary registers, store those N values, and repeat until the
1439 complete block has been moved. N=delay_slots+1 */
1440
/* One queued load or store: the formatted assembler template plus its
   two operands (temporary register and memory reference).  */
struct meminsnbuf {
  char template[30];
  rtx operands[2];
};
1445
void
xtensa_emit_block_move (operands, tmpregs, delay_slots)
    rtx *operands;
    rtx *tmpregs;
    int delay_slots;
{
  rtx dest = operands[0];
  rtx src = operands[1];
  int bytes = INTVAL (operands[2]);
  int align = XINT (operands[3], 0);
  rtx from_addr = XEXP (src, 0);
  rtx to_addr = XEXP (dest, 0);
  int from_struct = MEM_IN_STRUCT_P (src);
  int to_struct = MEM_IN_STRUCT_P (dest);
  int offset = 0;
  int chunk_size, item_size;
  struct meminsnbuf *ldinsns, *stinsns;
  const char *ldname, *stname;
  enum machine_mode mode;

  if (align > MOVE_MAX)
    align = MOVE_MAX;
  /* Each item is moved with one aligned load/store pair.  */
  item_size = align;
  /* N = delay_slots + 1 loads are issued back to back so the stores
     never wait on a load (see the comment above).  */
  chunk_size = delay_slots + 1;

  ldinsns = (struct meminsnbuf *)
    alloca (chunk_size * sizeof (struct meminsnbuf));
  stinsns = (struct meminsnbuf *)
    alloca (chunk_size * sizeof (struct meminsnbuf));

  mode = xtensa_find_mode_for_size (item_size);
  item_size = GET_MODE_SIZE (mode);
  ldname = xtensa_ld_opcodes[(int) mode];
  stname = xtensa_st_opcodes[(int) mode];

  while (bytes > 0)
    {
      int n;

      /* Queue up to chunk_size load/store pairs.  */
      for (n = 0; n < chunk_size; n++)
	{
	  rtx addr, mem;

	  if (bytes == 0)
	    {
	      chunk_size = n;
	      break;
	    }

	  if (bytes < item_size)
	    {
	      /* find a smaller item_size which we can load & store */
	      item_size = bytes;
	      mode = xtensa_find_mode_for_size (item_size);
	      item_size = GET_MODE_SIZE (mode);
	      ldname = xtensa_ld_opcodes[(int) mode];
	      stname = xtensa_st_opcodes[(int) mode];
	    }

	  /* record the load instruction opcode and operands */
	  addr = plus_constant (from_addr, offset);
	  mem = gen_rtx_MEM (mode, addr);
	  if (! memory_address_p (mode, addr))
	    abort ();
	  MEM_IN_STRUCT_P (mem) = from_struct;
	  ldinsns[n].operands[0] = tmpregs[n];
	  ldinsns[n].operands[1] = mem;
	  sprintf (ldinsns[n].template, "%s\t%%0, %%1", ldname);

	  /* record the store instruction opcode and operands */
	  addr = plus_constant (to_addr, offset);
	  mem = gen_rtx_MEM (mode, addr);
	  if (! memory_address_p (mode, addr))
	    abort ();
	  MEM_IN_STRUCT_P (mem) = to_struct;
	  stinsns[n].operands[0] = tmpregs[n];
	  stinsns[n].operands[1] = mem;
	  sprintf (stinsns[n].template, "%s\t%%0, %%1", stname);

	  offset += item_size;
	  bytes -= item_size;
	}

      /* now output the loads followed by the stores */
      for (n = 0; n < chunk_size; n++)
	output_asm_insn (ldinsns[n].template, ldinsns[n].operands);
      for (n = 0; n < chunk_size; n++)
	output_asm_insn (stinsns[n].template, stinsns[n].operands);
    }
}
1536
1537
1538 static enum machine_mode
1539 xtensa_find_mode_for_size (item_size)
1540 unsigned item_size;
1541 {
1542 enum machine_mode mode, tmode;
1543
1544 while (1)
1545 {
1546 mode = VOIDmode;
1547
1548 /* find mode closest to but not bigger than item_size */
1549 for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
1550 tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
1551 if (GET_MODE_SIZE (tmode) <= item_size)
1552 mode = tmode;
1553 if (mode == VOIDmode)
1554 abort ();
1555
1556 item_size = GET_MODE_SIZE (mode);
1557
1558 if (xtensa_ld_opcodes[(int) mode]
1559 && xtensa_st_opcodes[(int) mode])
1560 break;
1561
1562 /* cannot load & store this mode; try something smaller */
1563 item_size -= 1;
1564 }
1565
1566 return mode;
1567 }
1568
1569
/* Expand a nonlocal goto.  OPERANDS[1] is the handler address and
   OPERANDS[3] is the frame pointer of the function containing the
   handler.  */
void
xtensa_expand_nonlocal_goto (operands)
    rtx *operands;
{
  rtx goto_handler = operands[1];
  rtx containing_fp = operands[3];

  /* generate a call to "__xtensa_nonlocal_goto" (in libgcc); the code
     is too big to generate in-line */

  if (GET_CODE (containing_fp) != REG)
    containing_fp = force_reg (Pmode, containing_fp);

  /* Rewrite any reference to the virtual frame pointer in the handler
     address with the containing function's actual frame pointer.  */
  goto_handler = replace_rtx (copy_rtx (goto_handler),
			      virtual_stack_vars_rtx,
			      containing_fp);

  emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__xtensa_nonlocal_goto"),
		     0, VOIDmode, 2,
		     containing_fp, Pmode,
		     goto_handler, Pmode);
}
1592
1593
/* Allocate a zeroed, garbage-collected per-function machine_function
   record; installed as init_machine_status in override_options.  */
static struct machine_function *
xtensa_init_machine_status ()
{
  return ggc_alloc_cleared (sizeof (struct machine_function));
}
1599
1600
/* Called when the current function accesses an ancestor's frame
   (builtin frame/return address — presumably via the
   SETUP_FRAME_ADDRESSES hook; confirm in xtensa.h).  Forces a frame
   pointer and spills the register windows so parent frames are
   visible in memory.  */
void
xtensa_setup_frame_addresses ()
{
  /* Set flag to cause FRAME_POINTER_REQUIRED to be set. */
  cfun->machine->accesses_prev_frame = 1;

  emit_library_call
    (gen_rtx_SYMBOL_REF (Pmode, "__xtensa_libgcc_window_spill"),
     0, VOIDmode, 0);
}
1611
1612
/* Emit the assembly for the end of a zero-cost loop. Normally we just emit
   a comment showing where the end of the loop is. However, if there is a
   label or a branch at the end of the loop then we need to place a nop
   there. If the loop ends with a label we need the nop so that branches
   targeting that label will target the nop (and thus remain in the loop),
   instead of targeting the instruction after the loop (and thus exiting
   the loop). If the loop ends with a branch, we need the nop in case the
   branch is targeting a location inside the loop. When the branch
   executes it will cause the loop count to be decremented even if it is
   taken (because it is the last instruction in the loop), so we need to
   nop after the branch to prevent the loop count from being decremented
   when the branch is taken. */
1625
1626 void
1627 xtensa_emit_loop_end (insn, operands)
1628 rtx insn;
1629 rtx *operands;
1630 {
1631 char done = 0;
1632
1633 for (insn = PREV_INSN (insn); insn && !done; insn = PREV_INSN (insn))
1634 {
1635 switch (GET_CODE (insn))
1636 {
1637 case NOTE:
1638 case BARRIER:
1639 break;
1640
1641 case CODE_LABEL:
1642 output_asm_insn ("nop.n", operands);
1643 done = 1;
1644 break;
1645
1646 default:
1647 {
1648 rtx body = PATTERN (insn);
1649
1650 if (GET_CODE (body) == JUMP_INSN)
1651 {
1652 output_asm_insn ("nop.n", operands);
1653 done = 1;
1654 }
1655 else if ((GET_CODE (body) != USE)
1656 && (GET_CODE (body) != CLOBBER))
1657 done = 1;
1658 }
1659 break;
1660 }
1661 }
1662
1663 output_asm_insn ("# loop end for %0", operands);
1664 }
1665
1666
1667 char *
1668 xtensa_emit_call (callop, operands)
1669 int callop;
1670 rtx *operands;
1671 {
1672 static char result[64];
1673 rtx tgt = operands[callop];
1674
1675 if (GET_CODE (tgt) == CONST_INT)
1676 sprintf (result, "call8\t0x%lx", INTVAL (tgt));
1677 else if (register_operand (tgt, VOIDmode))
1678 sprintf (result, "callx8\t%%%d", callop);
1679 else
1680 sprintf (result, "call8\t%%%d", callop);
1681
1682 return result;
1683 }
1684
1685
1686 /* Return the stabs register number to use for 'regno'. */
1687
1688 int
1689 xtensa_dbx_register_number (regno)
1690 int regno;
1691 {
1692 int first = -1;
1693
1694 if (GP_REG_P (regno)) {
1695 regno -= GP_REG_FIRST;
1696 first = 0;
1697 }
1698 else if (BR_REG_P (regno)) {
1699 regno -= BR_REG_FIRST;
1700 first = 16;
1701 }
1702 else if (FP_REG_P (regno)) {
1703 regno -= FP_REG_FIRST;
1704 /* The current numbering convention is that TIE registers are
1705 numbered in libcc order beginning with 256. We can't guarantee
1706 that the FP registers will come first, so the following is just
1707 a guess. It seems like we should make a special case for FP
1708 registers and give them fixed numbers < 256. */
1709 first = 256;
1710 }
1711 else if (ACC_REG_P (regno))
1712 {
1713 first = 0;
1714 regno = -1;
1715 }
1716
1717 /* When optimizing, we sometimes get asked about pseudo-registers
1718 that don't represent hard registers. Return 0 for these. */
1719 if (first == -1)
1720 return 0;
1721
1722 return first + regno;
1723 }
1724
1725
1726 /* Argument support functions. */
1727
1728 /* Initialize CUMULATIVE_ARGS for a function. */
1729
void
init_cumulative_args (cum, fntype, libname)
    CUMULATIVE_ARGS *cum;		/* argument info to initialize */
    tree fntype ATTRIBUTE_UNUSED;	/* tree ptr for function decl */
    rtx libname ATTRIBUTE_UNUSED;	/* SYMBOL_REF of library name or 0 */
{
  /* No arguments processed yet: start the word counter at zero.  */
  cum->arg_words = 0;
}
1738
1739 /* Advance the argument to the next argument position. */
1740
1741 void
1742 function_arg_advance (cum, mode, type)
1743 CUMULATIVE_ARGS *cum; /* current arg information */
1744 enum machine_mode mode; /* current arg mode */
1745 tree type; /* type of the argument or 0 if lib support */
1746 {
1747 int words, max;
1748 int *arg_words;
1749
1750 arg_words = &cum->arg_words;
1751 max = MAX_ARGS_IN_REGISTERS;
1752
1753 words = (((mode != BLKmode)
1754 ? (int) GET_MODE_SIZE (mode)
1755 : int_size_in_bytes (type)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1756
1757 if ((*arg_words + words > max) && (*arg_words < max))
1758 *arg_words = max;
1759
1760 *arg_words += words;
1761 }
1762
1763
1764 /* Return an RTL expression containing the register for the given mode,
1765 or 0 if the argument is to be passed on the stack. */
1766
rtx
function_arg (cum, mode, type, incoming_p)
    CUMULATIVE_ARGS *cum;	/* current arg information */
    enum machine_mode mode;	/* current arg mode */
    tree type;			/* type of the argument or 0 if lib support */
    int incoming_p;		/* computing the incoming registers? */
{
  int regbase, words, max;
  int *arg_words;
  int regno;
  enum machine_mode result_mode;

  arg_words = &cum->arg_words;
  regbase = (incoming_p ? GP_ARG_FIRST : GP_OUTGOING_ARG_FIRST);
  max = MAX_ARGS_IN_REGISTERS;

  /* Size of the argument in words, rounded up.  */
  words = (((mode != BLKmode)
	    ? (int) GET_MODE_SIZE (mode)
	    : int_size_in_bytes (type)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  /* Arguments with more than word alignment start on an even word
     boundary.  */
  if (type && (TYPE_ALIGN (type) > BITS_PER_WORD))
    *arg_words += (*arg_words & 1);

  /* Arguments that don't fit in the remaining registers are passed on
     the stack.  */
  if (*arg_words + words > max)
    return (rtx)0;

  regno = regbase + *arg_words;
  result_mode = (mode == BLKmode ? TYPE_MODE (type) : mode);

  /* We need to make sure that references to a7 are represented with
     rtx that is not equal to hard_frame_pointer_rtx. For BLKmode and
     modes bigger than 2 words (because we only have patterns for
     modes of 2 words or smaller), we can't control the expansion
     unless we explicitly list the individual registers in a PARALLEL. */

  if ((mode == BLKmode || words > 2)
      && regno < A7_REG
      && regno + words > A7_REG)
    {
      rtx result;
      int n;

      result = gen_rtx_PARALLEL (result_mode, rtvec_alloc (words));
      for (n = 0; n < words; n++)
	{
	  XVECEXP (result, 0, n) =
	    gen_rtx_EXPR_LIST (VOIDmode,
			       gen_raw_REG (SImode, regno + n),
			       GEN_INT (n * UNITS_PER_WORD));
	}
      return result;
    }

  /* gen_raw_REG gives a REG distinct from hard_frame_pointer_rtx even
     when regno is A7_REG (see comment above).  */
  return gen_raw_REG (result_mode, regno);
}
1822
1823
/* Validate target option combinations and initialize the
   option-dependent tables used throughout this file (block-move
   opcode names, constraint-letter classes, and the
   register/mode-validity table).  */
void
override_options ()
{
  int regno;
  enum machine_mode mode;

  if (!TARGET_BOOLEANS && TARGET_HARD_FLOAT)
    error ("boolean registers required for the floating-point option");

  /* set up the tables of ld/st opcode names for block moves */
  xtensa_ld_opcodes[(int) SImode] = "l32i";
  xtensa_ld_opcodes[(int) HImode] = "l16ui";
  xtensa_ld_opcodes[(int) QImode] = "l8ui";
  xtensa_st_opcodes[(int) SImode] = "s32i";
  xtensa_st_opcodes[(int) HImode] = "s16i";
  xtensa_st_opcodes[(int) QImode] = "s8i";

  /* Map constraint letters to register classes; options disable the
     classes for features the configuration does not have.  */
  xtensa_char_to_class['q'] = SP_REG;
  xtensa_char_to_class['a'] = GR_REGS;
  xtensa_char_to_class['b'] = ((TARGET_BOOLEANS) ? BR_REGS : NO_REGS);
  xtensa_char_to_class['f'] = ((TARGET_HARD_FLOAT) ? FP_REGS : NO_REGS);
  xtensa_char_to_class['A'] = ((TARGET_MAC16) ? ACC_REG : NO_REGS);
  xtensa_char_to_class['B'] = ((TARGET_SEXT) ? GR_REGS : NO_REGS);
  xtensa_char_to_class['C'] = ((TARGET_MUL16) ? GR_REGS: NO_REGS);
  xtensa_char_to_class['D'] = ((TARGET_DENSITY) ? GR_REGS: NO_REGS);
  xtensa_char_to_class['d'] = ((TARGET_DENSITY) ? AR_REGS: NO_REGS);

  /* Set up array giving whether a given register can hold a given mode. */
  for (mode = VOIDmode;
       mode != MAX_MACHINE_MODE;
       mode = (enum machine_mode) ((int) mode + 1))
    {
      int size = GET_MODE_SIZE (mode);
      enum mode_class class = GET_MODE_CLASS (mode);

      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
	{
	  int temp;

	  if (ACC_REG_P (regno))
	    temp = (TARGET_MAC16 &&
		    (class == MODE_INT) && (size <= UNITS_PER_WORD));
	  else if (GP_REG_P (regno))
	    /* Multi-word values must start on an even register.  */
	    temp = ((regno & 1) == 0 || (size <= UNITS_PER_WORD));
	  else if (FP_REG_P (regno))
	    temp = (TARGET_HARD_FLOAT && (mode == SFmode));
	  else if (BR_REG_P (regno))
	    temp = (TARGET_BOOLEANS && (mode == CCmode));
	  else
	    temp = FALSE;

	  xtensa_hard_regno_mode_ok[(int) mode][regno] = temp;
	}
    }

  init_machine_status = xtensa_init_machine_status;

  /* Check PIC settings. There's no need for -fPIC on Xtensa and
     some targets need to always use PIC. */
  if (flag_pic > 1 || (XTENSA_ALWAYS_PIC))
    flag_pic = 1;
}
1886
1887
1888 /* A C compound statement to output to stdio stream STREAM the
1889 assembler syntax for an instruction operand X. X is an RTL
1890 expression.
1891
1892 CODE is a value that can be used to specify one of several ways
1893 of printing the operand. It is used when identical operands
1894 must be printed differently depending on the context. CODE
1895 comes from the '%' specification that was used to request
1896 printing of the operand. If the specification was just '%DIGIT'
1897 then CODE is 0; if the specification was '%LTR DIGIT' then CODE
1898 is the ASCII code for LTR.
1899
1900 If X is a register, this macro should print the register's name.
1901 The names can be found in an array 'reg_names' whose type is
1902 'char *[]'. 'reg_names' is initialized from 'REGISTER_NAMES'.
1903
1904 When the machine description has a specification '%PUNCT' (a '%'
1905 followed by a punctuation character), this macro is called with
1906 a null pointer for X and the punctuation character for CODE.
1907
1908 'a', 'c', 'l', and 'n' are reserved.
1909
1910 The Xtensa specific codes are:
1911
1912 'd' CONST_INT, print as signed decimal
1913 'x' CONST_INT, print as signed hexadecimal
1914 'K' CONST_INT, print number of bits in mask for EXTUI
1915 'R' CONST_INT, print (X & 0x1f)
1916 'L' CONST_INT, print ((32 - X) & 0x1f)
1917 'D' REG, print second register of double-word register operand
1918 'N' MEM, print address of next word following a memory operand
1919 'v' MEM, if memory reference is volatile, output a MEMW before it
1920 */
1921
/* Print VAL to FILE as a "nice" hexadecimal: single hex digits print
   in decimal, negative values print as a minus sign followed by the
   hex magnitude, and everything else as plain hex.  */
static void
printx (file, val)
    FILE *file;
    signed int val;
{
  /* print a hexadecimal value in a nice way */
  if ((val > -0xa) && (val < 0xa))
    fprintf (file, "%d", val);
  else if (val < 0)
    /* Negate in unsigned arithmetic: -val is undefined behavior when
       val == INT_MIN, and %x expects an unsigned argument anyway.  */
    fprintf (file, "-0x%x", - (unsigned int) val);
  else
    fprintf (file, "0x%x", val);
}
1935
1936
void
print_operand (file, op, letter)
    FILE *file;		/* file to write to */
    rtx op;		/* operand to print */
    int letter;		/* %<letter> or 0 */
{
  enum rtx_code code;

  if (! op)
    error ("PRINT_OPERAND null pointer");

  code = GET_CODE (op);
  switch (code)
    {
    case REG:
    case SUBREG:
      {
	int regnum = xt_true_regnum (op);
	/* 'D' selects the second register of a double-word operand.  */
	if (letter == 'D')
	  regnum++;
	fprintf (file, "%s", reg_names[regnum]);
	break;
      }

    case MEM:
      /* For a volatile memory reference, emit a MEMW before the
	 load or store. */
      if (letter == 'v')
	{
	  if (MEM_VOLATILE_P (op) && TARGET_SERIALIZE_VOLATILE)
	    fprintf (file, "memw\n\t");
	  break;
	}
      else if (letter == 'N')
	{
	  /* 'N' addresses the second word of a two-word operand.  */
	  enum machine_mode mode;
	  switch (GET_MODE (op))
	    {
	    case DFmode: mode = SFmode; break;
	    case DImode: mode = SImode; break;
	    default: abort ();
	    }
	  op = adjust_address (op, mode, 4);
	}

      output_address (XEXP (op, 0));
      break;

    case CONST_INT:
      switch (letter)
	{
	case 'K':
	  {
	    /* Count the consecutive low-order 1 bits; the value must
	       be a mask of 1..16 such bits (used for EXTUI).  */
	    int num_bits = 0;
	    unsigned val = INTVAL (op);
	    while (val & 1)
	      {
		num_bits += 1;
		val = val >> 1;
	      }
	    if ((val != 0) || (num_bits == 0) || (num_bits > 16))
	      fatal_insn ("invalid mask", op);

	    fprintf (file, "%d", num_bits);
	    break;
	  }

	case 'L':
	  fprintf (file, "%ld", (32 - INTVAL (op)) & 0x1f);
	  break;

	case 'R':
	  fprintf (file, "%ld", INTVAL (op) & 0x1f);
	  break;

	case 'x':
	  printx (file, INTVAL (op));
	  break;

	case 'd':
	default:
	  fprintf (file, "%ld", INTVAL (op));
	  break;

	}
      break;

    default:
      /* Symbols, labels, and other constants print as address
	 constants.  */
      output_addr_const (file, op);
    }
}
2028
2029
2030 /* A C compound statement to output to stdio stream STREAM the
2031 assembler syntax for an instruction operand that is a memory
2032 reference whose address is ADDR. ADDR is an RTL expression. */
2033
void
print_operand_address (file, addr)
    FILE *file;
    rtx addr;
{
  if (!addr)
    error ("PRINT_OPERAND_ADDRESS, null pointer");

  switch (GET_CODE (addr))
    {
    default:
      fatal_insn ("invalid address", addr);
      break;

    case REG:
      /* Plain register: base with a zero offset.  */
      fprintf (file, "%s, 0", reg_names [REGNO (addr)]);
      break;

    case PLUS:
      {
	/* Base register plus constant offset; the operands may appear
	   in either order.  */
	rtx reg = (rtx)0;
	rtx offset = (rtx)0;
	rtx arg0 = XEXP (addr, 0);
	rtx arg1 = XEXP (addr, 1);

	if (GET_CODE (arg0) == REG)
	  {
	    reg = arg0;
	    offset = arg1;
	  }
	else if (GET_CODE (arg1) == REG)
	  {
	    reg = arg1;
	    offset = arg0;
	  }
	else
	  fatal_insn ("no register in address", addr);

	if (CONSTANT_P (offset))
	  {
	    fprintf (file, "%s, ", reg_names [REGNO (reg)]);
	    output_addr_const (file, offset);
	  }
	else
	  fatal_insn ("address offset not a constant", addr);
      }
      break;

    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_INT:
    case CONST:
      output_addr_const (file, addr);
      break;
    }
}
2090
2091
2092 /* Emit either a label, .comm, or .lcomm directive. */
2093
void
xtensa_declare_object (file, name, init_string, final_string, size)
    FILE *file;
    char *name;			/* symbol to declare */
    char *init_string;		/* text emitted before the name */
    char *final_string;		/* printf format emitted after, given SIZE */
    int size;			/* object size in bytes */
{
  fputs (init_string, file);		/* "", "\t.comm\t", or "\t.lcomm\t" */
  assemble_name (file, name);
  fprintf (file, final_string, size);	/* ":\n", ",%u\n", ",%u\n" */
}
2106
2107
/* Output a ".literal" directive labeled .LC<LABELNO> for constant X of
   the given MODE.  Floating-point constants are emitted as their
   target bit patterns; 4- and 8-byte integer constants as address
   constants (8-byte values as two comma-separated words).  */
void
xtensa_output_literal (file, x, mode, labelno)
    FILE *file;
    rtx x;
    enum machine_mode mode;
    int labelno;
{
  long value_long[2];
  REAL_VALUE_TYPE r;
  int size;

  fprintf (file, "\t.literal .LC%u, ", (unsigned) labelno);

  switch (GET_MODE_CLASS (mode))
    {
    case MODE_FLOAT:
      if (GET_CODE (x) != CONST_DOUBLE)
	abort ();

      REAL_VALUE_FROM_CONST_DOUBLE (r, x);
      switch (mode)
	{
	case SFmode:
	  REAL_VALUE_TO_TARGET_SINGLE (r, value_long[0]);
	  fprintf (file, "0x%08lx\n", value_long[0]);
	  break;

	case DFmode:
	  REAL_VALUE_TO_TARGET_DOUBLE (r, value_long);
	  fprintf (file, "0x%08lx, 0x%08lx\n",
		   value_long[0], value_long[1]);
	  break;

	default:
	  abort ();
	}

      break;

    case MODE_INT:
    case MODE_PARTIAL_INT:
      size = GET_MODE_SIZE (mode);
      if (size == 4)
	{
	  output_addr_const (file, x);
	  fputs ("\n", file);
	}
      else if (size == 8)
	{
	  /* Emit the two halves as separate words.  */
	  output_addr_const (file, operand_subword (x, 0, 0, DImode));
	  fputs (", ", file);
	  output_addr_const (file, operand_subword (x, 1, 0, DImode));
	  fputs ("\n", file);
	}
      else
	abort ();
      break;

    default:
      abort ();
    }
}
2170
2171
2172 /* Return the bytes needed to compute the frame pointer from the current
2173 stack pointer. */
2174
2175 #define STACK_BYTES (STACK_BOUNDARY / BITS_PER_UNIT)
2176 #define XTENSA_STACK_ALIGN(LOC) (((LOC) + STACK_BYTES-1) & ~(STACK_BYTES-1))
2177
2178 long
2179 compute_frame_size (size)
2180 int size; /* # of var. bytes allocated */
2181 {
2182 /* add space for the incoming static chain value */
2183 if (current_function_needs_context)
2184 size += (1 * UNITS_PER_WORD);
2185
2186 xtensa_current_frame_size =
2187 XTENSA_STACK_ALIGN (size
2188 + current_function_outgoing_args_size
2189 + (WINDOW_SIZE * UNITS_PER_WORD));
2190 return xtensa_current_frame_size;
2191 }
2192
2193
2194 int
2195 xtensa_frame_pointer_required ()
2196 {
2197 /* The code to expand builtin_frame_addr and builtin_return_addr
2198 currently uses the hard_frame_pointer instead of frame_pointer.
2199 This seems wrong but maybe it's necessary for other architectures.
2200 This function is derived from the i386 code. */
2201
2202 if (cfun->machine->accesses_prev_frame)
2203 return 1;
2204
2205 return 0;
2206 }
2207
2208
2209 void
2210 xtensa_reorg (first)
2211 rtx first;
2212 {
2213 rtx insn, set_frame_ptr_insn = 0;
2214
2215 unsigned long tsize = compute_frame_size (get_frame_size ());
2216 if (tsize < (1 << (12+3)))
2217 frame_size_const = 0;
2218 else
2219 {
2220 frame_size_const = force_const_mem (SImode, GEN_INT (tsize - 16));;
2221
2222 /* make sure the constant is used so it doesn't get eliminated
2223 from the constant pool */
2224 emit_insn_before (gen_rtx_USE (SImode, frame_size_const), first);
2225 }
2226
2227 if (!frame_pointer_needed)
2228 return;
2229
2230 /* Search all instructions, looking for the insn that sets up the
2231 frame pointer. This search will fail if the function does not
2232 have an incoming argument in $a7, but in that case, we can just
2233 set up the frame pointer at the very beginning of the
2234 function. */
2235
2236 for (insn = first; insn; insn = NEXT_INSN (insn))
2237 {
2238 rtx pat;
2239
2240 if (!INSN_P (insn))
2241 continue;
2242
2243 pat = PATTERN (insn);
2244 if (GET_CODE (pat) == UNSPEC_VOLATILE
2245 && (XINT (pat, 1) == UNSPECV_SET_FP))
2246 {
2247 set_frame_ptr_insn = insn;
2248 break;
2249 }
2250 }
2251
2252 if (set_frame_ptr_insn)
2253 {
2254 /* for all instructions prior to set_frame_ptr_insn, replace
2255 hard_frame_pointer references with stack_pointer */
2256 for (insn = first; insn != set_frame_ptr_insn; insn = NEXT_INSN (insn))
2257 {
2258 if (INSN_P (insn))
2259 PATTERN (insn) = replace_rtx (copy_rtx (PATTERN (insn)),
2260 hard_frame_pointer_rtx,
2261 stack_pointer_rtx);
2262 }
2263 }
2264 else
2265 {
2266 /* emit the frame pointer move immediately after the NOTE that starts
2267 the function */
2268 emit_insn_after (gen_movsi (hard_frame_pointer_rtx,
2269 stack_pointer_rtx), first);
2270 }
2271 }
2272
2273
2274 /* Set up the stack and frame (if desired) for the function. */
2275
void
xtensa_function_prologue (file, size)
    FILE *file;
    HOST_WIDE_INT size ATTRIBUTE_UNUSED;
{
  unsigned long tsize = compute_frame_size (get_frame_size ());

  /* Describe the frame for the assembler/debugger.  */
  if (frame_pointer_needed)
    fprintf (file, "\t.frame\ta7, %ld\n", tsize);
  else
    fprintf (file, "\t.frame\tsp, %ld\n", tsize);


  if (tsize < (1 << (12+3)))
    {
      fprintf (file, "\tentry\tsp, %ld\n", tsize);
    }
  else
    {
      /* Frame too large for the entry instruction's immediate:
	 allocate a minimal 16-byte frame and grow the stack with
	 movsp, loading the size from the constant pinned by
	 xtensa_reorg.  */
      fprintf (file, "\tentry\tsp, 16\n");

      /* use a8 as a temporary since a0-a7 may be live */
      fprintf (file, "\tl32r\ta8, ");
      print_operand (file, frame_size_const, 0);
      fprintf (file, "\n\tsub\ta8, sp, a8\n");
      fprintf (file, "\tmovsp\tsp, a8\n");
    }
}
2304
2305
2306 /* Do any necessary cleanup after a function to restore
2307 stack, frame, and regs. */
2308
void
xtensa_function_epilogue (file, size)
    FILE *file;
    HOST_WIDE_INT size ATTRIBUTE_UNUSED;
{
  rtx insn = get_last_insn ();
  /* If the last insn was a BARRIER, we don't have to write anything. */
  if (GET_CODE (insn) == NOTE)
    insn = prev_nonnote_insn (insn);
  if (insn == 0 || GET_CODE (insn) != BARRIER)
    fprintf (file, TARGET_DENSITY ? "\tretw.n\n" : "\tretw\n");

  /* Reset the cached frame size for the next function.  */
  xtensa_current_frame_size = 0;
}
2323
2324
/* Return an rtx for the return address of the frame COUNT steps up
   from the current one.  COUNT == -1 asks for the address before the
   prologue, still in the return-address register (hard reg 0);
   otherwise it is loaded from memory just below FRAME (the window
   save area -- NOTE(review): offset -4*UNITS_PER_WORD assumes the
   windowed ABI layout; confirm against the ABI spec).  */
rtx
xtensa_return_addr (count, frame)
    int count;
    rtx frame;
{
  rtx result, retaddr;

  if (count == -1)
    retaddr = gen_rtx_REG (Pmode, 0);
  else
    {
      rtx addr = plus_constant (frame, -4 * UNITS_PER_WORD);
      addr = memory_address (Pmode, addr);
      retaddr = gen_reg_rtx (Pmode);
      emit_move_insn (retaddr, gen_rtx_MEM (Pmode, addr));
    }

  /* The 2 most-significant bits of the return address on Xtensa hold
     the register window size. To get the real return address, these
     bits must be replaced with the high bits from the current PC. */

  result = gen_reg_rtx (Pmode);
  emit_insn (gen_fix_return_addr (result, retaddr));
  return result;
}
2350
2351
2352 /* Create the va_list data type.
2353 This structure is set up by __builtin_saveregs. The __va_reg
2354 field points to a stack-allocated region holding the contents of the
2355 incoming argument registers. The __va_ndx field is an index initialized
2356 to the position of the first unnamed (variable) argument. This same index
2357 is also used to address the arguments passed in memory. Thus, the
2358 __va_stk field is initialized to point to the position of the first
2359 argument in memory offset to account for the arguments passed in
2360 registers. E.G., if there are 6 argument registers, and each register is
2361 4 bytes, then __va_stk is set to $sp - (6 * 4); then __va_reg[N*4]
2362 references argument word N for 0 <= N < 6, and __va_stk[N*4] references
2363 argument word N for N >= 6. */
2364
tree
xtensa_build_va_list ()
{
  tree f_stk, f_reg, f_ndx, record, type_decl;

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);

  /* The three fields described in the comment above: stack-argument
     base pointer, saved-register base pointer, and current index.  */
  f_stk = build_decl (FIELD_DECL, get_identifier ("__va_stk"),
		      ptr_type_node);
  f_reg = build_decl (FIELD_DECL, get_identifier ("__va_reg"),
		      ptr_type_node);
  f_ndx = build_decl (FIELD_DECL, get_identifier ("__va_ndx"),
		      integer_type_node);

  DECL_FIELD_CONTEXT (f_stk) = record;
  DECL_FIELD_CONTEXT (f_reg) = record;
  DECL_FIELD_CONTEXT (f_ndx) = record;

  /* Chain the fields into the record and lay it out.  */
  TREE_CHAIN (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_stk;
  TREE_CHAIN (f_stk) = f_reg;
  TREE_CHAIN (f_reg) = f_ndx;

  layout_type (record);
  return record;
}
2393
2394
2395 /* Save the incoming argument registers on the stack. Returns the
2396 address of the saved registers. */
2397
rtx
xtensa_builtin_saveregs ()
{
  rtx gp_regs, dest;
  int arg_words = current_function_arg_words;
  int gp_left = MAX_ARGS_IN_REGISTERS - arg_words;
  int i;

  /* All argument registers already consumed by named arguments:
     nothing to save.  */
  if (gp_left == 0)
    return const0_rtx;

  /* allocate the general-purpose register space */
  gp_regs = assign_stack_local
    (BLKmode, MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD, -1);
  set_mem_alias_set (gp_regs, get_varargs_alias_set ());

  /* Now store the incoming registers.  */
  dest = change_address (gp_regs, SImode,
			 plus_constant (XEXP (gp_regs, 0),
					arg_words * UNITS_PER_WORD));

  /* Note: Don't use move_block_from_reg() here because the incoming
     argument in a7 cannot be represented by hard_frame_pointer_rtx.
     Instead, call gen_raw_REG() directly so that we get a distinct
     instance of (REG:SI 7). */
  for (i = 0; i < gp_left; i++)
    {
      emit_move_insn (operand_subword (dest, i, 1, BLKmode),
		      gen_raw_REG (SImode, GP_ARG_FIRST + arg_words + i));
    }

  return XEXP (gp_regs, 0);
}
2431
2432
/* Implement `va_start' for varargs and stdarg.  We look at the
   current function to fill in an initial va_list.  */

void
xtensa_va_start (valist, nextarg)
     tree valist;
     rtx nextarg ATTRIBUTE_UNUSED;
{
  tree f_stk, stk;
  tree f_reg, reg;
  tree f_ndx, ndx;
  tree t, u;
  int arg_words;

  arg_words = current_function_args_info.arg_words;

  /* Walk the three fields of the va_list record built by
     xtensa_build_va_list, in declaration order.  */
  f_stk = TYPE_FIELDS (va_list_type_node);
  f_reg = TREE_CHAIN (f_stk);
  f_ndx = TREE_CHAIN (f_reg);

  /* COMPONENT_REFs for VALIST.__va_stk, .__va_reg, and .__va_ndx.  */
  stk = build (COMPONENT_REF, TREE_TYPE (f_stk), valist, f_stk);
  reg = build (COMPONENT_REF, TREE_TYPE (f_reg), valist, f_reg);
  ndx = build (COMPONENT_REF, TREE_TYPE (f_ndx), valist, f_ndx);

  /* Call __builtin_saveregs; save the result in __va_reg.  The global
     current_function_arg_words passes ARG_WORDS down to
     xtensa_builtin_saveregs.  */
  current_function_arg_words = arg_words;
  u = make_tree (ptr_type_node, expand_builtin_saveregs ());
  t = build (MODIFY_EXPR, ptr_type_node, reg, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  /* Set the __va_stk member to $arg_ptr - (size of __va_reg area),
     so that __va_stk[__va_ndx] addresses memory arguments once
     __va_ndx passes the register area.  */
  u = make_tree (ptr_type_node, virtual_incoming_args_rtx);
  u = fold (build (PLUS_EXPR, ptr_type_node, u,
		   build_int_2 (-MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD, -1)));
  t = build (MODIFY_EXPR, ptr_type_node, stk, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);

  /* Set the __va_ndx member: byte index of the first unnamed arg.  */
  u = build_int_2 (arg_words * UNITS_PER_WORD, 0);
  t = build (MODIFY_EXPR, integer_type_node, ndx, u);
  TREE_SIDE_EFFECTS (t) = 1;
  expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
}
2478
2479
/* Implement `va_arg'.  Expands to RTL that fetches the address of the
   next variable argument from the va_list VALIST and advances the
   index.  Returns an rtx holding that address.  */

rtx
xtensa_va_arg (valist, type)
     tree valist, type;
{
  tree f_stk, stk;
  tree f_reg, reg;
  tree f_ndx, ndx;
  tree tmp, addr_tree, type_size;
  rtx array, orig_ndx, r, addr, size, va_size;
  rtx lab_false, lab_over, lab_false2;

  /* Field decls of the va_list record, in declaration order.  */
  f_stk = TYPE_FIELDS (va_list_type_node);
  f_reg = TREE_CHAIN (f_stk);
  f_ndx = TREE_CHAIN (f_reg);

  stk = build (COMPONENT_REF, TREE_TYPE (f_stk), valist, f_stk);
  reg = build (COMPONENT_REF, TREE_TYPE (f_reg), valist, f_reg);
  ndx = build (COMPONENT_REF, TREE_TYPE (f_ndx), valist, f_ndx);

  type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));

  /* va_size = sizeof (TYPE) rounded up to a whole number of words.  */
  va_size = gen_reg_rtx (SImode);
  tmp = fold (build (MULT_EXPR, sizetype,
		     fold (build (TRUNC_DIV_EXPR, sizetype,
				  fold (build (PLUS_EXPR, sizetype,
					       type_size,
					       size_int (UNITS_PER_WORD - 1))),
				  size_int (UNITS_PER_WORD))),
		     size_int (UNITS_PER_WORD)));
  r = expand_expr (tmp, va_size, SImode, EXPAND_NORMAL);
  if (r != va_size)
    emit_move_insn (va_size, r);


  /* First align __va_ndx to a double word boundary if necessary for this arg:

     if (__alignof__ (TYPE) > 4)
       (AP).__va_ndx = (((AP).__va_ndx + 7) & -8)
  */

  if (TYPE_ALIGN (type) > BITS_PER_WORD)
    {
      tmp = build (PLUS_EXPR, integer_type_node, ndx,
		   build_int_2 ((2 * UNITS_PER_WORD) - 1, 0));
      tmp = build (BIT_AND_EXPR, integer_type_node, tmp,
		   build_int_2 (-2 * UNITS_PER_WORD, -1));
      tmp = build (MODIFY_EXPR, integer_type_node, ndx, tmp);
      TREE_SIDE_EFFECTS (tmp) = 1;
      expand_expr (tmp, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }


  /* Increment __va_ndx to point past the argument:

     orig_ndx = (AP).__va_ndx;
     (AP).__va_ndx += __va_size (TYPE);
  */

  orig_ndx = gen_reg_rtx (SImode);
  r = expand_expr (ndx, orig_ndx, SImode, EXPAND_NORMAL);
  if (r != orig_ndx)
    emit_move_insn (orig_ndx, r);

  tmp = build (PLUS_EXPR, integer_type_node, ndx,
	       make_tree (intSI_type_node, va_size));
  tmp = build (MODIFY_EXPR, integer_type_node, ndx, tmp);
  TREE_SIDE_EFFECTS (tmp) = 1;
  expand_expr (tmp, const0_rtx, VOIDmode, EXPAND_NORMAL);


  /* Check if the argument is in registers:

     if ((AP).__va_ndx <= __MAX_ARGS_IN_REGISTERS * 4
         && !MUST_PASS_IN_STACK (type))
        __array = (AP).__va_reg;
  */

  array = gen_reg_rtx (Pmode);

  /* lab_over stays NULL_RTX when the type can never be in registers,
     so the register branch below is skipped entirely.  */
  lab_over = NULL_RTX;
  if (!MUST_PASS_IN_STACK (VOIDmode, type))
    {
      lab_false = gen_label_rtx ();
      lab_over = gen_label_rtx ();

      emit_cmp_and_jump_insns (expand_expr (ndx, NULL_RTX, SImode,
					    EXPAND_NORMAL),
			       GEN_INT (MAX_ARGS_IN_REGISTERS
					* UNITS_PER_WORD),
			       GT, const1_rtx, SImode, 0, lab_false);

      r = expand_expr (reg, array, Pmode, EXPAND_NORMAL);
      if (r != array)
	emit_move_insn (array, r);

      emit_jump_insn (gen_jump (lab_over));
      emit_barrier ();
      emit_label (lab_false);
    }

  /* ...otherwise, the argument is on the stack (never split between
     registers and the stack -- change __va_ndx if necessary):

     else
       {
	 if (orig_ndx < __MAX_ARGS_IN_REGISTERS * 4)
	     (AP).__va_ndx = __MAX_ARGS_IN_REGISTERS * 4 + __va_size (TYPE);
	 __array = (AP).__va_stk;
       }
  */

  lab_false2 = gen_label_rtx ();
  emit_cmp_and_jump_insns (orig_ndx,
			   GEN_INT (MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD),
			   GE, const1_rtx, SImode, 0, lab_false2);

  tmp = build (PLUS_EXPR, sizetype, make_tree (intSI_type_node, va_size),
	       build_int_2 (MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD, 0));
  tmp = build (MODIFY_EXPR, integer_type_node, ndx, tmp);
  TREE_SIDE_EFFECTS (tmp) = 1;
  expand_expr (tmp, const0_rtx, VOIDmode, EXPAND_NORMAL);

  emit_label (lab_false2);

  r = expand_expr (stk, array, Pmode, EXPAND_NORMAL);
  if (r != array)
    emit_move_insn (array, r);

  if (lab_over != NULL_RTX)
    emit_label (lab_over);


  /* Given the base array pointer (__array) and index to the subsequent
     argument (__va_ndx), find the address:

     __array + (AP).__va_ndx - (BYTES_BIG_ENDIAN && sizeof (TYPE) < 4
				? sizeof (TYPE)
				: __va_size (TYPE))

     The results are endian-dependent because values smaller than one word
     are aligned differently.
  */

  size = gen_reg_rtx (SImode);
  emit_move_insn (size, va_size);

  if (BYTES_BIG_ENDIAN)
    {
      rtx lab_use_va_size = gen_label_rtx ();

      emit_cmp_and_jump_insns (expand_expr (type_size, NULL_RTX, SImode,
					    EXPAND_NORMAL),
			       GEN_INT (PARM_BOUNDARY / BITS_PER_UNIT),
			       GE, const1_rtx, SImode, 0, lab_use_va_size);

      /* Sub-word value: back up by its true size so the significant
	 bytes (at the high end of the slot) are addressed.  */
      r = expand_expr (type_size, size, SImode, EXPAND_NORMAL);
      if (r != size)
	emit_move_insn (size, r);

      emit_label (lab_use_va_size);
    }

  addr_tree = build (PLUS_EXPR, ptr_type_node,
		     make_tree (ptr_type_node, array),
		     ndx);
  addr_tree = build (MINUS_EXPR, ptr_type_node, addr_tree,
		     make_tree (intSI_type_node, size));
  addr = expand_expr (addr_tree, NULL_RTX, Pmode, EXPAND_NORMAL);
  addr = copy_to_reg (addr);
  return addr;
}
2653
2654
2655 enum reg_class
2656 xtensa_preferred_reload_class (x, class, isoutput)
2657 rtx x;
2658 enum reg_class class;
2659 int isoutput;
2660 {
2661 if (!isoutput && CONSTANT_P (x) && GET_CODE (x) == CONST_DOUBLE)
2662 return NO_REGS;
2663
2664 /* Don't use the stack pointer or hard frame pointer for reloads!
2665 The hard frame pointer would normally be OK except that it may
2666 briefly hold an incoming argument in the prologue, and reload
2667 won't know that it is live because the hard frame pointer is
2668 treated specially. */
2669
2670 if (class == AR_REGS || class == GR_REGS)
2671 return RL_REGS;
2672
2673 return class;
2674 }
2675
2676
2677 enum reg_class
2678 xtensa_secondary_reload_class (class, mode, x, isoutput)
2679 enum reg_class class;
2680 enum machine_mode mode ATTRIBUTE_UNUSED;
2681 rtx x;
2682 int isoutput;
2683 {
2684 int regno;
2685
2686 if (GET_CODE (x) == SIGN_EXTEND)
2687 x = XEXP (x, 0);
2688 regno = xt_true_regnum (x);
2689
2690 if (!isoutput)
2691 {
2692 if (class == FP_REGS && constantpool_mem_p (x))
2693 return RL_REGS;
2694 }
2695
2696 if (ACC_REG_P (regno))
2697 return ((class == GR_REGS || class == RL_REGS) ? NO_REGS : RL_REGS);
2698 if (class == ACC_REG)
2699 return (GP_REG_P (regno) ? NO_REGS : RL_REGS);
2700
2701 return NO_REGS;
2702 }
2703
2704
2705 void
2706 order_regs_for_local_alloc ()
2707 {
2708 if (!leaf_function_p ())
2709 {
2710 memcpy (reg_alloc_order, reg_nonleaf_alloc_order,
2711 FIRST_PSEUDO_REGISTER * sizeof (int));
2712 }
2713 else
2714 {
2715 int i, num_arg_regs;
2716 int nxt = 0;
2717
2718 /* use the AR registers in increasing order (skipping a0 and a1)
2719 but save the incoming argument registers for a last resort */
2720 num_arg_regs = current_function_args_info.arg_words;
2721 if (num_arg_regs > MAX_ARGS_IN_REGISTERS)
2722 num_arg_regs = MAX_ARGS_IN_REGISTERS;
2723 for (i = GP_ARG_FIRST; i < 16 - num_arg_regs; i++)
2724 reg_alloc_order[nxt++] = i + num_arg_regs;
2725 for (i = 0; i < num_arg_regs; i++)
2726 reg_alloc_order[nxt++] = GP_ARG_FIRST + i;
2727
2728 /* list the coprocessor registers in order */
2729 for (i = 0; i < BR_REG_NUM; i++)
2730 reg_alloc_order[nxt++] = BR_REG_FIRST + i;
2731
2732 /* list the FP registers in order for now */
2733 for (i = 0; i < 16; i++)
2734 reg_alloc_order[nxt++] = FP_REG_FIRST + i;
2735
2736 /* GCC requires that we list *all* the registers.... */
2737 reg_alloc_order[nxt++] = 0; /* a0 = return address */
2738 reg_alloc_order[nxt++] = 1; /* a1 = stack pointer */
2739 reg_alloc_order[nxt++] = 16; /* pseudo frame pointer */
2740 reg_alloc_order[nxt++] = 17; /* pseudo arg pointer */
2741
2742 reg_alloc_order[nxt++] = ACC_REG_FIRST; /* MAC16 accumulator */
2743 }
2744 }
2745
2746
2747 /* A customized version of reg_overlap_mentioned_p that only looks for
2748 references to a7 (as opposed to hard_frame_pointer_rtx). */
2749
2750 int
2751 a7_overlap_mentioned_p (x)
2752 rtx x;
2753 {
2754 int i, j;
2755 unsigned int x_regno;
2756 const char *fmt;
2757
2758 if (GET_CODE (x) == REG)
2759 {
2760 x_regno = REGNO (x);
2761 return (x != hard_frame_pointer_rtx
2762 && x_regno < A7_REG + 1
2763 && x_regno + HARD_REGNO_NREGS (A7_REG, GET_MODE (x)) > A7_REG);
2764 }
2765
2766 if (GET_CODE (x) == SUBREG
2767 && GET_CODE (SUBREG_REG (x)) == REG
2768 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
2769 {
2770 x_regno = subreg_regno (x);
2771 return (SUBREG_REG (x) != hard_frame_pointer_rtx
2772 && x_regno < A7_REG + 1
2773 && x_regno + HARD_REGNO_NREGS (A7_REG, GET_MODE (x)) > A7_REG);
2774 }
2775
2776 /* X does not match, so try its subexpressions. */
2777 fmt = GET_RTX_FORMAT (GET_CODE (x));
2778 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2779 {
2780 if (fmt[i] == 'e')
2781 {
2782 if (a7_overlap_mentioned_p (XEXP (x, i)))
2783 return 1;
2784 }
2785 else if (fmt[i] == 'E')
2786 {
2787 for (j = XVECLEN (x, i) - 1; j >=0; j--)
2788 if (a7_overlap_mentioned_p (XVECEXP (x, i, j)))
2789 return 1;
2790 }
2791 }
2792
2793 return 0;
2794 }
2795
2796
2797 /* Some Xtensa targets support multiple bss sections. If the section
2798 name ends with ".bss", add SECTION_BSS to the flags. */
2799
2800 static unsigned int
2801 xtensa_multibss_section_type_flags (decl, name, reloc)
2802 tree decl;
2803 const char *name;
2804 int reloc;
2805 {
2806 unsigned int flags = default_section_type_flags (decl, name, reloc);
2807 const char *suffix;
2808
2809 suffix = strrchr (name, '.');
2810 if (suffix && strcmp (suffix, ".bss") == 0)
2811 {
2812 if (!decl || (TREE_CODE (decl) == VAR_DECL
2813 && DECL_INITIAL (decl) == NULL_TREE))
2814 flags |= SECTION_BSS; /* @nobits */
2815 else
2816 warning ("only uninitialized variables can be placed in a "
2817 ".bss section");
2818 }
2819
2820 return flags;
2821 }
2822
2823
/* The literal pool stays with the function.  */

static void
xtensa_select_rtx_section (mode, x, align)
     enum machine_mode mode ATTRIBUTE_UNUSED;
     rtx x ATTRIBUTE_UNUSED;
     unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED;
{
  /* Always select the current function's section for constant-pool
     entries, regardless of mode, value, or alignment, so literals are
     emitted next to the code that uses them.  */
  function_section (current_function_decl);
}
2834
2835 /* If we are referencing a function that is static, make the SYMBOL_REF
2836 special so that we can generate direct calls to it even with -fpic. */
2837
2838 static void
2839 xtensa_encode_section_info (decl, first)
2840 tree decl;
2841 int first ATTRIBUTE_UNUSED;
2842 {
2843 if (TREE_CODE (decl) == FUNCTION_DECL && ! TREE_PUBLIC (decl))
2844 SYMBOL_REF_FLAG (XEXP (DECL_RTL (decl), 0)) = 1;
2845 }
2846
/* Compute a (partial) cost for rtx X.  Return true if the complete
   cost has been computed, and false if subexpressions should be
   scanned.  In either case, *TOTAL contains the cost result.  */

static bool
xtensa_rtx_costs (x, code, outer_code, total)
     rtx x;
     int code, outer_code;
     int *total;
{
  switch (code)
    {
    case CONST_INT:
      /* The cost of an integer constant depends on the context: many
	 instructions have immediate fields that make the constant
	 effectively free.  */
      switch (outer_code)
	{
	case SET:
	  /* Fits a 12-bit signed MOVI immediate.  */
	  if (xtensa_simm12b (INTVAL (x)))
	    {
	      *total = 4;
	      return true;
	    }
	  break;
	case PLUS:
	  /* Fits an ADDI or ADDMI immediate.  */
	  if (xtensa_simm8 (INTVAL (x))
	      || xtensa_simm8x256 (INTVAL (x)))
	    {
	      *total = 0;
	      return true;
	    }
	  break;
	case AND:
	  /* Fits an EXTUI-style mask.  */
	  if (xtensa_mask_immediate (INTVAL (x)))
	    {
	      *total = 0;
	      return true;
	    }
	  break;
	case COMPARE:
	  /* Zero or a b4const comparison immediate.  */
	  if ((INTVAL (x) == 0) || xtensa_b4const (INTVAL (x)))
	    {
	      *total = 0;
	      return true;
	    }
	  break;
	case ASHIFT:
	case ASHIFTRT:
	case LSHIFTRT:
	case ROTATE:
	case ROTATERT:
	  /* no way to tell if X is the 2nd operand so be conservative */
	default: break;
	}
      /* Constant does not fit an immediate field of the outer
	 operation: it takes a MOVI or a constant-pool load.  */
      if (xtensa_simm12b (INTVAL (x)))
	*total = 5;
      else
	*total = 6;
      return true;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 5;
      return true;

    case CONST_DOUBLE:
      *total = 7;
      return true;

    case MEM:
      {
	/* Multi-word accesses cost proportionally more; a non-canonical
	   address doubles the cost to account for address arithmetic.  */
	int num_words =
	  (GET_MODE_SIZE (GET_MODE (x)) > UNITS_PER_WORD) ?  2 : 1;

	if (memory_address_p (GET_MODE (x), XEXP ((x), 0)))
	  *total = COSTS_N_INSNS (num_words);
	else
	  *total = COSTS_N_INSNS (2*num_words);
	return true;
      }

    case FFS:
      /* Cheap with the NSA option; otherwise a library call.  */
      *total = COSTS_N_INSNS (TARGET_NSA ? 5 : 50);
      return true;

    case NOT:
      *total = COSTS_N_INSNS ((GET_MODE (x) == DImode) ? 3 : 2);
      return true;

    case AND:
    case IOR:
    case XOR:
      if (GET_MODE (x) == DImode)
	*total = COSTS_N_INSNS (2);
      else
	*total = COSTS_N_INSNS (1);
      return true;

    case ASHIFT:
    case ASHIFTRT:
    case LSHIFTRT:
      /* DImode shifts are done out of line.  */
      if (GET_MODE (x) == DImode)
	*total = COSTS_N_INSNS (50);
      else
	*total = COSTS_N_INSNS (1);
      return true;

    case ABS:
      {
	enum machine_mode xmode = GET_MODE (x);
	if (xmode == SFmode)
	  *total = COSTS_N_INSNS (TARGET_HARD_FLOAT ? 1 : 50);
	else if (xmode == DFmode)
	  *total = COSTS_N_INSNS (50);
	else
	  *total = COSTS_N_INSNS (4);
	return true;
      }

    case PLUS:
    case MINUS:
      {
	enum machine_mode xmode = GET_MODE (x);
	if (xmode == SFmode)
	  *total = COSTS_N_INSNS (TARGET_HARD_FLOAT ? 1 : 50);
	else if (xmode == DFmode || xmode == DImode)
	  *total = COSTS_N_INSNS (50);
	else
	  *total = COSTS_N_INSNS (1);
	return true;
      }

    case NEG:
      *total = COSTS_N_INSNS ((GET_MODE (x) == DImode) ? 4 : 2);
      return true;

    case MULT:
      {
	/* Pick the cheapest available multiply option.  */
	enum machine_mode xmode = GET_MODE (x);
	if (xmode == SFmode)
	  *total = COSTS_N_INSNS (TARGET_HARD_FLOAT ? 4 : 50);
	else if (xmode == DFmode || xmode == DImode)
	  *total = COSTS_N_INSNS (50);
	else if (TARGET_MUL32)
	  *total = COSTS_N_INSNS (4);
	else if (TARGET_MAC16)
	  *total = COSTS_N_INSNS (16);
	else if (TARGET_MUL16)
	  *total = COSTS_N_INSNS (12);
	else
	  *total = COSTS_N_INSNS (50);
	return true;
      }

    case DIV:
    case MOD:
      {
	enum machine_mode xmode = GET_MODE (x);
	if (xmode == SFmode)
	  {
	    *total = COSTS_N_INSNS (TARGET_HARD_FLOAT_DIV ? 8 : 50);
	    return true;
	  }
	else if (xmode == DFmode)
	  {
	    *total = COSTS_N_INSNS (50);
	    return true;
	  }
      }
      /* fall through */

    case UDIV:
    case UMOD:
      {
	enum machine_mode xmode = GET_MODE (x);
	if (xmode == DImode)
	  *total = COSTS_N_INSNS (50);
	else if (TARGET_DIV32)
	  *total = COSTS_N_INSNS (32);
	else
	  *total = COSTS_N_INSNS (50);
	return true;
      }

    case SQRT:
      if (GET_MODE (x) == SFmode)
	*total = COSTS_N_INSNS (TARGET_HARD_FLOAT_SQRT ? 8 : 50);
      else
	*total = COSTS_N_INSNS (50);
      return true;

    case SMIN:
    case UMIN:
    case SMAX:
    case UMAX:
      *total = COSTS_N_INSNS (TARGET_MINMAX ? 1 : 50);
      return true;

    case SIGN_EXTRACT:
    case SIGN_EXTEND:
      *total = COSTS_N_INSNS (TARGET_SEXT ? 1 : 2);
      return true;

    case ZERO_EXTRACT:
    case ZERO_EXTEND:
      *total = COSTS_N_INSNS (1);
      return true;

    default:
      /* Unknown code: let the caller scan subexpressions.  */
      return false;
    }
}
3058
3059 #include "gt-xtensa.h"