]> git.ipfire.org Git - thirdparty/gcc.git/blob - gcc/config/xtensa/xtensa.c
babb5b06a515be31fb1e85081e997ea5b919eeff
[thirdparty/gcc.git] / gcc / config / xtensa / xtensa.c
1 /* Subroutines for insn-output.c for Tensilica's Xtensa architecture.
2 Copyright 2001,2002 Free Software Foundation, Inc.
3 Contributed by Bob Wilson (bwilson@tensilica.com) at Tensilica.
4
5 This file is part of GCC.
6
7 GCC is free software; you can redistribute it and/or modify it under
8 the terms of the GNU General Public License as published by the Free
9 Software Foundation; either version 2, or (at your option) any later
10 version.
11
12 GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13 WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with GCC; see the file COPYING. If not, write to the Free
19 Software Foundation, 59 Temple Place - Suite 330, Boston, MA
20 02111-1307, USA. */
21
22 #include "config.h"
23 #include "system.h"
24 #include "rtl.h"
25 #include "regs.h"
26 #include "machmode.h"
27 #include "hard-reg-set.h"
28 #include "basic-block.h"
29 #include "real.h"
30 #include "insn-config.h"
31 #include "conditions.h"
32 #include "insn-flags.h"
33 #include "insn-attr.h"
34 #include "insn-codes.h"
35 #include "recog.h"
36 #include "output.h"
37 #include "tree.h"
38 #include "expr.h"
39 #include "flags.h"
40 #include "reload.h"
41 #include "tm_p.h"
42 #include "function.h"
43 #include "toplev.h"
44 #include "optabs.h"
45 #include "output.h"
46 #include "libfuncs.h"
47 #include "ggc.h"
48 #include "target.h"
49 #include "target-def.h"
50 #include "langhooks.h"
51
52 /* Enumeration for all of the relational tests, so that we can build
53 arrays indexed by the test type, and not worry about the order
54 of EQ, NE, etc. */
55
56 enum internal_test {
57 ITEST_EQ,
58 ITEST_NE,
59 ITEST_GT,
60 ITEST_GE,
61 ITEST_LT,
62 ITEST_LE,
63 ITEST_GTU,
64 ITEST_GEU,
65 ITEST_LTU,
66 ITEST_LEU,
67 ITEST_MAX
68 };
69
70 /* Cached operands, and operator to compare for use in set/branch on
71 condition codes. */
72 rtx branch_cmp[2];
73
74 /* what type of branch to use */
75 enum cmp_type branch_type;
76
77 /* Array giving truth value on whether or not a given hard register
78 can support a given mode. */
79 char xtensa_hard_regno_mode_ok[(int) MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
80
81 /* Current frame size calculated by compute_frame_size. */
82 unsigned xtensa_current_frame_size;
83
84 /* Tables of ld/st opcode names for block moves */
85 const char *xtensa_ld_opcodes[(int) MAX_MACHINE_MODE];
86 const char *xtensa_st_opcodes[(int) MAX_MACHINE_MODE];
87 #define LARGEST_MOVE_RATIO 15
88
89 /* Define the structure for the machine field in struct function. */
90 struct machine_function GTY(())
91 {
92 int accesses_prev_frame;
93 bool incoming_a7_copied;
94 };
95
96 /* Vector, indexed by hard register number, which contains 1 for a
97 register that is allowable in a candidate for leaf function
98 treatment. */
99
100 const char xtensa_leaf_regs[FIRST_PSEUDO_REGISTER] =
101 {
102 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
103 1, 1, 1,
104 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
105 1
106 };
107
108 /* Map hard register number to register class */
109 const enum reg_class xtensa_regno_to_class[FIRST_PSEUDO_REGISTER] =
110 {
111 RL_REGS, SP_REG, RL_REGS, RL_REGS,
112 RL_REGS, RL_REGS, RL_REGS, GR_REGS,
113 RL_REGS, RL_REGS, RL_REGS, RL_REGS,
114 RL_REGS, RL_REGS, RL_REGS, RL_REGS,
115 AR_REGS, AR_REGS, BR_REGS,
116 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
117 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
118 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
119 FP_REGS, FP_REGS, FP_REGS, FP_REGS,
120 ACC_REG,
121 };
122
123 /* Map register constraint character to register class. */
124 enum reg_class xtensa_char_to_class[256] =
125 {
126 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
127 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
128 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
129 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
130 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
131 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
132 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
133 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
134 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
135 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
136 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
137 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
138 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
139 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
140 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
141 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
142 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
143 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
144 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
145 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
146 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
147 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
148 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
149 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
150 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
151 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
152 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
153 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
154 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
155 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
156 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
157 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
158 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
159 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
160 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
161 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
162 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
163 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
164 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
165 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
166 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
167 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
168 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
169 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
170 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
171 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
172 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
173 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
174 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
175 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
176 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
177 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
178 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
179 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
180 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
181 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
182 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
183 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
184 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
185 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
186 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
187 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
188 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
189 NO_REGS, NO_REGS, NO_REGS, NO_REGS,
190 };
191
192 static int b4const_or_zero PARAMS ((int));
193 static enum internal_test map_test_to_internal_test PARAMS ((enum rtx_code));
194 static rtx gen_int_relational PARAMS ((enum rtx_code, rtx, rtx, int *));
195 static rtx gen_float_relational PARAMS ((enum rtx_code, rtx, rtx));
196 static rtx gen_conditional_move PARAMS ((rtx));
197 static rtx fixup_subreg_mem PARAMS ((rtx x));
198 static enum machine_mode xtensa_find_mode_for_size PARAMS ((unsigned));
199 static struct machine_function * xtensa_init_machine_status PARAMS ((void));
200 static void printx PARAMS ((FILE *, signed int));
201 static unsigned int xtensa_multibss_section_type_flags
202 PARAMS ((tree, const char *, int));
203 static void xtensa_select_rtx_section
204 PARAMS ((enum machine_mode, rtx, unsigned HOST_WIDE_INT));
205 static void xtensa_encode_section_info PARAMS ((tree, int));
206
207 static rtx frame_size_const;
208 static int current_function_arg_words;
209 static const int reg_nonleaf_alloc_order[FIRST_PSEUDO_REGISTER] =
210 REG_ALLOC_ORDER;
211 \f
212 /* This macro generates the assembly code for function entry.
213 FILE is a stdio stream to output the code to.
214 SIZE is an int: how many units of temporary storage to allocate.
215 Refer to the array 'regs_ever_live' to determine which registers
216 to save; 'regs_ever_live[I]' is nonzero if register number I
217 is ever used in the function. This macro is responsible for
218 knowing which registers should not be saved even if used. */
219
220 #undef TARGET_ASM_FUNCTION_PROLOGUE
221 #define TARGET_ASM_FUNCTION_PROLOGUE xtensa_function_prologue
222
223 /* This macro generates the assembly code for function exit,
224 on machines that need it. If FUNCTION_EPILOGUE is not defined
225 then individual return instructions are generated for each
226 return statement. Args are same as for FUNCTION_PROLOGUE. */
227
228 #undef TARGET_ASM_FUNCTION_EPILOGUE
229 #define TARGET_ASM_FUNCTION_EPILOGUE xtensa_function_epilogue
230
231 /* These hooks specify assembly directives for creating certain kinds
232 of integer object. */
233
234 #undef TARGET_ASM_ALIGNED_SI_OP
235 #define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
236
237 #undef TARGET_ASM_SELECT_RTX_SECTION
238 #define TARGET_ASM_SELECT_RTX_SECTION xtensa_select_rtx_section
239 #undef TARGET_ENCODE_SECTION_INFO
240 #define TARGET_ENCODE_SECTION_INFO xtensa_encode_section_info
241
242 struct gcc_target targetm = TARGET_INITIALIZER;
243 \f
244
245 /*
246 * Functions to test Xtensa immediate operand validity.
247 */
248
/* Return nonzero if V is one of the 16 values encodable in the
   unsigned B4CONSTU branch-immediate field.  */

int
xtensa_b4constu (int v)
{
  static const int encodable[] =
    { 32768, 65536, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256 };
  unsigned i;

  for (i = 0; i < sizeof encodable / sizeof encodable[0]; i++)
    if (v == encodable[i])
      return 1;
  return 0;
}
275
/* Return nonzero if V is a multiple of 256 in [-32768, 32512]
   (an 8-bit signed immediate scaled by 256).  */

int
xtensa_simm8x256 (int v)
{
  if ((v & 0xff) != 0)
    return 0;
  return -32768 <= v && v <= 32512;
}
282
/* Return nonzero if V is valid for the ADDI.N immediate field:
   -1 or 1 through 15.  */

int
xtensa_ai4const (int v)
{
  if (v == -1)
    return 1;
  return 1 <= v && v <= 15;
}
289
/* Return nonzero if V fits the MOVI.N 7-bit immediate range
   [-32, 95].  */

int
xtensa_simm7 (int v)
{
  if (v < -32)
    return 0;
  return v <= 95;
}
296
/* Return nonzero if V is one of the 16 values encodable in the
   signed B4CONST branch-immediate field.  */

int
xtensa_b4const (int v)
{
  static const int encodable[] =
    { -1, 1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 32, 64, 128, 256 };
  unsigned i;

  for (i = 0; i < sizeof encodable / sizeof encodable[0]; i++)
    if (v == encodable[i])
      return 1;
  return 0;
}
323
/* Return nonzero if V is a signed 8-bit immediate [-128, 127].  */

int
xtensa_simm8 (int v)
{
  if (v < -128)
    return 0;
  return v <= 127;
}
330
/* Return nonzero if V is in [7, 22] (the encodable SEXT field
   positions).  */

int
xtensa_tp7 (int v)
{
  return 7 <= v && v <= 22;
}
337
/* Return nonzero if V is a multiple of 4 in [0, 60] (the L32I.N /
   S32I.N scaled offset range).  */

int
xtensa_lsi4x4 (int v)
{
  if ((v & 3) != 0)
    return 0;
  return 0 <= v && v <= 60;
}
344
/* Return nonzero if V fits the MOVI 12-bit signed immediate
   [-2048, 2047].  */

int
xtensa_simm12b (int v)
{
  if (v < -2048)
    return 0;
  return v <= 2047;
}
351
/* Return nonzero if V is an unsigned 8-bit offset [0, 255].  */

int
xtensa_uimm8 (int v)
{
  return 0 <= v && v <= 255;
}
358
/* Return nonzero if V is an even offset in [0, 510] (8-bit unsigned
   immediate scaled by 2).  */

int
xtensa_uimm8x2 (int v)
{
  if ((v & 1) != 0)
    return 0;
  return 0 <= v && v <= 510;
}
365
/* Return nonzero if V is a multiple of 4 in [0, 1020] (8-bit unsigned
   immediate scaled by 4).  */

int
xtensa_uimm8x4 (int v)
{
  if ((v & 3) != 0)
    return 0;
  return 0 <= v && v <= 1020;
}
372
373
374 /* This is just like the standard true_regnum() function except that it
375 works even when reg_renumber is not initialized. */
376
377 int
378 xt_true_regnum (x)
379 rtx x;
380 {
381 if (GET_CODE (x) == REG)
382 {
383 if (reg_renumber
384 && REGNO (x) >= FIRST_PSEUDO_REGISTER
385 && reg_renumber[REGNO (x)] >= 0)
386 return reg_renumber[REGNO (x)];
387 return REGNO (x);
388 }
389 if (GET_CODE (x) == SUBREG)
390 {
391 int base = xt_true_regnum (SUBREG_REG (x));
392 if (base >= 0 && base < FIRST_PSEUDO_REGISTER)
393 return base + subreg_regno_offset (REGNO (SUBREG_REG (x)),
394 GET_MODE (SUBREG_REG (x)),
395 SUBREG_BYTE (x), GET_MODE (x));
396 }
397 return -1;
398 }
399
400
401 int
402 add_operand (op, mode)
403 rtx op;
404 enum machine_mode mode;
405 {
406 if (GET_CODE (op) == CONST_INT)
407 return (xtensa_simm8 (INTVAL (op)) ||
408 xtensa_simm8x256 (INTVAL (op)));
409
410 return register_operand (op, mode);
411 }
412
413
414 int
415 arith_operand (op, mode)
416 rtx op;
417 enum machine_mode mode;
418 {
419 if (GET_CODE (op) == CONST_INT)
420 return xtensa_simm8 (INTVAL (op));
421
422 return register_operand (op, mode);
423 }
424
425
426 int
427 nonimmed_operand (op, mode)
428 rtx op;
429 enum machine_mode mode;
430 {
431 /* We cannot use the standard nonimmediate_operand() predicate because
432 it includes constant pool memory operands. */
433
434 if (memory_operand (op, mode))
435 return !constantpool_address_p (XEXP (op, 0));
436
437 return register_operand (op, mode);
438 }
439
440
441 int
442 mem_operand (op, mode)
443 rtx op;
444 enum machine_mode mode;
445 {
446 /* We cannot use the standard memory_operand() predicate because
447 it includes constant pool memory operands. */
448
449 if (memory_operand (op, mode))
450 return !constantpool_address_p (XEXP (op, 0));
451
452 return FALSE;
453 }
454
455
456 int
457 xtensa_valid_move (mode, operands)
458 enum machine_mode mode;
459 rtx *operands;
460 {
461 /* Either the destination or source must be a register, and the
462 MAC16 accumulator doesn't count. */
463
464 if (register_operand (operands[0], mode))
465 {
466 int dst_regnum = xt_true_regnum (operands[0]);
467
468 /* The stack pointer can only be assigned with a MOVSP opcode. */
469 if (dst_regnum == STACK_POINTER_REGNUM)
470 return (mode == SImode
471 && register_operand (operands[1], mode)
472 && !ACC_REG_P (xt_true_regnum (operands[1])));
473
474 if (!ACC_REG_P (dst_regnum))
475 return true;
476 }
477 if (register_operand (operands[1], mode))
478 {
479 int src_regnum = xt_true_regnum (operands[1]);
480 if (!ACC_REG_P (src_regnum))
481 return true;
482 }
483 return FALSE;
484 }
485
486
487 int
488 mask_operand (op, mode)
489 rtx op;
490 enum machine_mode mode;
491 {
492 if (GET_CODE (op) == CONST_INT)
493 return xtensa_mask_immediate (INTVAL (op));
494
495 return register_operand (op, mode);
496 }
497
498
499 int
500 extui_fldsz_operand (op, mode)
501 rtx op;
502 enum machine_mode mode ATTRIBUTE_UNUSED;
503 {
504 return ((GET_CODE (op) == CONST_INT)
505 && xtensa_mask_immediate ((1 << INTVAL (op)) - 1));
506 }
507
508
509 int
510 sext_operand (op, mode)
511 rtx op;
512 enum machine_mode mode;
513 {
514 if (TARGET_SEXT)
515 return nonimmed_operand (op, mode);
516 return mem_operand (op, mode);
517 }
518
519
520 int
521 sext_fldsz_operand (op, mode)
522 rtx op;
523 enum machine_mode mode ATTRIBUTE_UNUSED;
524 {
525 return ((GET_CODE (op) == CONST_INT) && xtensa_tp7 (INTVAL (op) - 1));
526 }
527
528
529 int
530 lsbitnum_operand (op, mode)
531 rtx op;
532 enum machine_mode mode ATTRIBUTE_UNUSED;
533 {
534 if (GET_CODE (op) == CONST_INT)
535 {
536 return (BITS_BIG_ENDIAN
537 ? (INTVAL (op) == BITS_PER_WORD-1)
538 : (INTVAL (op) == 0));
539 }
540 return FALSE;
541 }
542
543
/* Like xtensa_b4const() but additionally accepting zero (branches
   against zero have their own encodings).  */

static int
b4const_or_zero (int v)
{
  return v == 0 || xtensa_b4const (v);
}
552
553
554 int
555 branch_operand (op, mode)
556 rtx op;
557 enum machine_mode mode;
558 {
559 if (GET_CODE (op) == CONST_INT)
560 return b4const_or_zero (INTVAL (op));
561
562 return register_operand (op, mode);
563 }
564
565
566 int
567 ubranch_operand (op, mode)
568 rtx op;
569 enum machine_mode mode;
570 {
571 if (GET_CODE (op) == CONST_INT)
572 return xtensa_b4constu (INTVAL (op));
573
574 return register_operand (op, mode);
575 }
576
577
578 int
579 call_insn_operand (op, mode)
580 rtx op;
581 enum machine_mode mode ATTRIBUTE_UNUSED;
582 {
583 if ((GET_CODE (op) == REG)
584 && (op != arg_pointer_rtx)
585 && ((REGNO (op) < FRAME_POINTER_REGNUM)
586 || (REGNO (op) > LAST_VIRTUAL_REGISTER)))
587 return TRUE;
588
589 if (CONSTANT_ADDRESS_P (op))
590 {
591 /* Direct calls only allowed to static functions with PIC. */
592 return (!flag_pic || (GET_CODE (op) == SYMBOL_REF
593 && SYMBOL_REF_FLAG (op)));
594 }
595
596 return FALSE;
597 }
598
599
600 int
601 move_operand (op, mode)
602 rtx op;
603 enum machine_mode mode;
604 {
605 if (register_operand (op, mode))
606 return TRUE;
607
608 /* Accept CONSTANT_P_RTX, since it will be gone by CSE1 and
609 result in 0/1. */
610 if (GET_CODE (op) == CONSTANT_P_RTX)
611 return TRUE;
612
613 if (GET_CODE (op) == CONST_INT)
614 return xtensa_simm12b (INTVAL (op));
615
616 if (GET_CODE (op) == MEM)
617 return memory_address_p (mode, XEXP (op, 0));
618
619 return FALSE;
620 }
621
622
623 int
624 smalloffset_mem_p (op)
625 rtx op;
626 {
627 if (GET_CODE (op) == MEM)
628 {
629 rtx addr = XEXP (op, 0);
630 if (GET_CODE (addr) == REG)
631 return REG_OK_FOR_BASE_P (addr);
632 if (GET_CODE (addr) == PLUS)
633 {
634 rtx offset = XEXP (addr, 0);
635 if (GET_CODE (offset) != CONST_INT)
636 offset = XEXP (addr, 1);
637 if (GET_CODE (offset) != CONST_INT)
638 return FALSE;
639 return xtensa_lsi4x4 (INTVAL (offset));
640 }
641 }
642 return FALSE;
643 }
644
645
646 int
647 smalloffset_double_mem_p (op)
648 rtx op;
649 {
650 if (!smalloffset_mem_p (op))
651 return FALSE;
652 return smalloffset_mem_p (adjust_address (op, GET_MODE (op), 4));
653 }
654
655
656 int
657 constantpool_address_p (addr)
658 rtx addr;
659 {
660 rtx sym = addr;
661
662 if (GET_CODE (addr) == CONST)
663 {
664 rtx offset;
665
666 /* only handle (PLUS (SYM, OFFSET)) form */
667 addr = XEXP (addr, 0);
668 if (GET_CODE (addr) != PLUS)
669 return FALSE;
670
671 /* make sure the address is word aligned */
672 offset = XEXP (addr, 1);
673 if ((GET_CODE (offset) != CONST_INT)
674 || ((INTVAL (offset) & 3) != 0))
675 return FALSE;
676
677 sym = XEXP (addr, 0);
678 }
679
680 if ((GET_CODE (sym) == SYMBOL_REF)
681 && CONSTANT_POOL_ADDRESS_P (sym))
682 return TRUE;
683 return FALSE;
684 }
685
686
687 int
688 constantpool_mem_p (op)
689 rtx op;
690 {
691 if (GET_CODE (op) == MEM)
692 return constantpool_address_p (XEXP (op, 0));
693 return FALSE;
694 }
695
696
697 int
698 non_const_move_operand (op, mode)
699 rtx op;
700 enum machine_mode mode;
701 {
702 if (register_operand (op, mode))
703 return 1;
704 if (GET_CODE (op) == SUBREG)
705 op = SUBREG_REG (op);
706 if (GET_CODE (op) == MEM)
707 return memory_address_p (mode, XEXP (op, 0));
708 return FALSE;
709 }
710
711
712 /* Accept the floating point constant 1 in the appropriate mode. */
713
714 int
715 const_float_1_operand (op, mode)
716 rtx op;
717 enum machine_mode mode;
718 {
719 REAL_VALUE_TYPE d;
720 static REAL_VALUE_TYPE onedf;
721 static REAL_VALUE_TYPE onesf;
722 static int one_initialized;
723
724 if ((GET_CODE (op) != CONST_DOUBLE)
725 || (mode != GET_MODE (op))
726 || (mode != DFmode && mode != SFmode))
727 return FALSE;
728
729 REAL_VALUE_FROM_CONST_DOUBLE (d, op);
730
731 if (! one_initialized)
732 {
733 onedf = REAL_VALUE_ATOF ("1.0", DFmode);
734 onesf = REAL_VALUE_ATOF ("1.0", SFmode);
735 one_initialized = TRUE;
736 }
737
738 if (mode == DFmode)
739 return REAL_VALUES_EQUAL (d, onedf);
740 else
741 return REAL_VALUES_EQUAL (d, onesf);
742 }
743
744
745 int
746 fpmem_offset_operand (op, mode)
747 rtx op;
748 enum machine_mode mode ATTRIBUTE_UNUSED;
749 {
750 if (GET_CODE (op) == CONST_INT)
751 return xtensa_mem_offset (INTVAL (op), SFmode);
752 return 0;
753 }
754
755
756 void
757 xtensa_extend_reg (dst, src)
758 rtx dst;
759 rtx src;
760 {
761 rtx temp = gen_reg_rtx (SImode);
762 rtx shift = GEN_INT (BITS_PER_WORD - GET_MODE_BITSIZE (GET_MODE (src)));
763
764 /* generate paradoxical subregs as needed so that the modes match */
765 src = simplify_gen_subreg (SImode, src, GET_MODE (src), 0);
766 dst = simplify_gen_subreg (SImode, dst, GET_MODE (dst), 0);
767
768 emit_insn (gen_ashlsi3 (temp, src, shift));
769 emit_insn (gen_ashrsi3 (dst, temp, shift));
770 }
771
772
773 void
774 xtensa_load_constant (dst, src)
775 rtx dst;
776 rtx src;
777 {
778 enum machine_mode mode = GET_MODE (dst);
779 src = force_const_mem (SImode, src);
780
781 /* PC-relative loads are always SImode so we have to add a SUBREG if that
782 is not the desired mode */
783
784 if (mode != SImode)
785 {
786 if (register_operand (dst, mode))
787 dst = simplify_gen_subreg (SImode, dst, mode, 0);
788 else
789 {
790 src = force_reg (SImode, src);
791 src = gen_lowpart_SUBREG (mode, src);
792 }
793 }
794
795 emit_move_insn (dst, src);
796 }
797
798
799 int
800 branch_operator (x, mode)
801 rtx x;
802 enum machine_mode mode;
803 {
804 if (GET_MODE (x) != mode)
805 return FALSE;
806
807 switch (GET_CODE (x))
808 {
809 case EQ:
810 case NE:
811 case LT:
812 case GE:
813 return TRUE;
814 default:
815 break;
816 }
817 return FALSE;
818 }
819
820
821 int
822 ubranch_operator (x, mode)
823 rtx x;
824 enum machine_mode mode;
825 {
826 if (GET_MODE (x) != mode)
827 return FALSE;
828
829 switch (GET_CODE (x))
830 {
831 case LTU:
832 case GEU:
833 return TRUE;
834 default:
835 break;
836 }
837 return FALSE;
838 }
839
840
841 int
842 boolean_operator (x, mode)
843 rtx x;
844 enum machine_mode mode;
845 {
846 if (GET_MODE (x) != mode)
847 return FALSE;
848
849 switch (GET_CODE (x))
850 {
851 case EQ:
852 case NE:
853 return TRUE;
854 default:
855 break;
856 }
857 return FALSE;
858 }
859
860
/* Return nonzero if V is a mask of 1 to MAX_MASK_SIZE consecutive
   low-order one bits, i.e. V == 2^k - 1 for some 1 <= k <= 16.  */

int
xtensa_mask_immediate (int v)
{
#define MAX_MASK_SIZE 16
  if (v <= 0 || v > (1 << MAX_MASK_SIZE) - 1)
    return 0;

  /* V + 1 is a power of two exactly when V is all low-order ones.  */
  return (v & (v + 1)) == 0;
}
879
880
881 int
882 xtensa_mem_offset (v, mode)
883 unsigned v;
884 enum machine_mode mode;
885 {
886 switch (mode)
887 {
888 case BLKmode:
889 /* Handle the worst case for block moves. See xtensa_expand_block_move
890 where we emit an optimized block move operation if the block can be
891 moved in < "move_ratio" pieces. The worst case is when the block is
892 aligned but has a size of (3 mod 4) (does this happen?) so that the
893 last piece requires a byte load/store. */
894 return (xtensa_uimm8 (v) &&
895 xtensa_uimm8 (v + MOVE_MAX * LARGEST_MOVE_RATIO));
896
897 case QImode:
898 return xtensa_uimm8 (v);
899
900 case HImode:
901 return xtensa_uimm8x2 (v);
902
903 case DFmode:
904 return (xtensa_uimm8x4 (v) && xtensa_uimm8x4 (v + 4));
905
906 default:
907 break;
908 }
909
910 return xtensa_uimm8x4 (v);
911 }
912
913
914 /* Make normal rtx_code into something we can index from an array */
915
916 static enum internal_test
917 map_test_to_internal_test (test_code)
918 enum rtx_code test_code;
919 {
920 enum internal_test test = ITEST_MAX;
921
922 switch (test_code)
923 {
924 default: break;
925 case EQ: test = ITEST_EQ; break;
926 case NE: test = ITEST_NE; break;
927 case GT: test = ITEST_GT; break;
928 case GE: test = ITEST_GE; break;
929 case LT: test = ITEST_LT; break;
930 case LE: test = ITEST_LE; break;
931 case GTU: test = ITEST_GTU; break;
932 case GEU: test = ITEST_GEU; break;
933 case LTU: test = ITEST_LTU; break;
934 case LEU: test = ITEST_LEU; break;
935 }
936
937 return test;
938 }
939
940
941 /* Generate the code to compare two integer values. The return value is
942 the comparison expression. */
943
944 static rtx
945 gen_int_relational (test_code, cmp0, cmp1, p_invert)
946 enum rtx_code test_code; /* relational test (EQ, etc) */
947 rtx cmp0; /* first operand to compare */
948 rtx cmp1; /* second operand to compare */
949 int *p_invert; /* whether branch needs to reverse its test */
950 {
951 struct cmp_info {
952 enum rtx_code test_code; /* test code to use in insn */
953 int (*const_range_p) PARAMS ((int)); /* predicate function to check range */
954 int const_add; /* constant to add (convert LE -> LT) */
955 int reverse_regs; /* reverse registers in test */
956 int invert_const; /* != 0 if invert value if cmp1 is constant */
957 int invert_reg; /* != 0 if invert value if cmp1 is register */
958 int unsignedp; /* != 0 for unsigned comparisons. */
959 };
960
961 static struct cmp_info info[ (int)ITEST_MAX ] = {
962
963 { EQ, b4const_or_zero, 0, 0, 0, 0, 0 }, /* EQ */
964 { NE, b4const_or_zero, 0, 0, 0, 0, 0 }, /* NE */
965
966 { LT, b4const_or_zero, 1, 1, 1, 0, 0 }, /* GT */
967 { GE, b4const_or_zero, 0, 0, 0, 0, 0 }, /* GE */
968 { LT, b4const_or_zero, 0, 0, 0, 0, 0 }, /* LT */
969 { GE, b4const_or_zero, 1, 1, 1, 0, 0 }, /* LE */
970
971 { LTU, xtensa_b4constu, 1, 1, 1, 0, 1 }, /* GTU */
972 { GEU, xtensa_b4constu, 0, 0, 0, 0, 1 }, /* GEU */
973 { LTU, xtensa_b4constu, 0, 0, 0, 0, 1 }, /* LTU */
974 { GEU, xtensa_b4constu, 1, 1, 1, 0, 1 }, /* LEU */
975 };
976
977 enum internal_test test;
978 enum machine_mode mode;
979 struct cmp_info *p_info;
980
981 test = map_test_to_internal_test (test_code);
982 if (test == ITEST_MAX)
983 abort ();
984
985 p_info = &info[ (int)test ];
986
987 mode = GET_MODE (cmp0);
988 if (mode == VOIDmode)
989 mode = GET_MODE (cmp1);
990
991 /* Make sure we can handle any constants given to us. */
992 if (GET_CODE (cmp1) == CONST_INT)
993 {
994 HOST_WIDE_INT value = INTVAL (cmp1);
995 unsigned HOST_WIDE_INT uvalue = (unsigned HOST_WIDE_INT)value;
996
997 /* if the immediate overflows or does not fit in the immediate field,
998 spill it to a register */
999
1000 if ((p_info->unsignedp ?
1001 (uvalue + p_info->const_add > uvalue) :
1002 (value + p_info->const_add > value)) != (p_info->const_add > 0))
1003 {
1004 cmp1 = force_reg (mode, cmp1);
1005 }
1006 else if (!(p_info->const_range_p) (value + p_info->const_add))
1007 {
1008 cmp1 = force_reg (mode, cmp1);
1009 }
1010 }
1011 else if ((GET_CODE (cmp1) != REG) && (GET_CODE (cmp1) != SUBREG))
1012 {
1013 cmp1 = force_reg (mode, cmp1);
1014 }
1015
1016 /* See if we need to invert the result. */
1017 *p_invert = ((GET_CODE (cmp1) == CONST_INT)
1018 ? p_info->invert_const
1019 : p_info->invert_reg);
1020
1021 /* Comparison to constants, may involve adding 1 to change a LT into LE.
1022 Comparison between two registers, may involve switching operands. */
1023 if (GET_CODE (cmp1) == CONST_INT)
1024 {
1025 if (p_info->const_add != 0)
1026 cmp1 = GEN_INT (INTVAL (cmp1) + p_info->const_add);
1027
1028 }
1029 else if (p_info->reverse_regs)
1030 {
1031 rtx temp = cmp0;
1032 cmp0 = cmp1;
1033 cmp1 = temp;
1034 }
1035
1036 return gen_rtx (p_info->test_code, VOIDmode, cmp0, cmp1);
1037 }
1038
1039
1040 /* Generate the code to compare two float values. The return value is
1041 the comparison expression. */
1042
1043 static rtx
1044 gen_float_relational (test_code, cmp0, cmp1)
1045 enum rtx_code test_code; /* relational test (EQ, etc) */
1046 rtx cmp0; /* first operand to compare */
1047 rtx cmp1; /* second operand to compare */
1048 {
1049 rtx (*gen_fn) PARAMS ((rtx, rtx, rtx));
1050 rtx brtmp;
1051 int reverse_regs, invert;
1052
1053 switch (test_code)
1054 {
1055 case EQ: reverse_regs = 0; invert = 0; gen_fn = gen_seq_sf; break;
1056 case NE: reverse_regs = 0; invert = 1; gen_fn = gen_seq_sf; break;
1057 case LE: reverse_regs = 0; invert = 0; gen_fn = gen_sle_sf; break;
1058 case GT: reverse_regs = 1; invert = 0; gen_fn = gen_slt_sf; break;
1059 case LT: reverse_regs = 0; invert = 0; gen_fn = gen_slt_sf; break;
1060 case GE: reverse_regs = 1; invert = 0; gen_fn = gen_sle_sf; break;
1061 default:
1062 fatal_insn ("bad test", gen_rtx (test_code, VOIDmode, cmp0, cmp1));
1063 reverse_regs = 0; invert = 0; gen_fn = 0; /* avoid compiler warnings */
1064 }
1065
1066 if (reverse_regs)
1067 {
1068 rtx temp = cmp0;
1069 cmp0 = cmp1;
1070 cmp1 = temp;
1071 }
1072
1073 brtmp = gen_rtx_REG (CCmode, FPCC_REGNUM);
1074 emit_insn (gen_fn (brtmp, cmp0, cmp1));
1075
1076 return gen_rtx (invert ? EQ : NE, VOIDmode, brtmp, const0_rtx);
1077 }
1078
1079
1080 void
1081 xtensa_expand_conditional_branch (operands, test_code)
1082 rtx *operands;
1083 enum rtx_code test_code;
1084 {
1085 enum cmp_type type = branch_type;
1086 rtx cmp0 = branch_cmp[0];
1087 rtx cmp1 = branch_cmp[1];
1088 rtx cmp;
1089 int invert;
1090 rtx label1, label2;
1091
1092 switch (type)
1093 {
1094 case CMP_DF:
1095 default:
1096 fatal_insn ("bad test", gen_rtx (test_code, VOIDmode, cmp0, cmp1));
1097
1098 case CMP_SI:
1099 invert = FALSE;
1100 cmp = gen_int_relational (test_code, cmp0, cmp1, &invert);
1101 break;
1102
1103 case CMP_SF:
1104 if (!TARGET_HARD_FLOAT)
1105 fatal_insn ("bad test", gen_rtx (test_code, VOIDmode, cmp0, cmp1));
1106 invert = FALSE;
1107 cmp = gen_float_relational (test_code, cmp0, cmp1);
1108 break;
1109 }
1110
1111 /* Generate the branch. */
1112
1113 label1 = gen_rtx_LABEL_REF (VOIDmode, operands[0]);
1114 label2 = pc_rtx;
1115
1116 if (invert)
1117 {
1118 label2 = label1;
1119 label1 = pc_rtx;
1120 }
1121
1122 emit_jump_insn (gen_rtx_SET (VOIDmode, pc_rtx,
1123 gen_rtx_IF_THEN_ELSE (VOIDmode, cmp,
1124 label1,
1125 label2)));
1126 }
1127
1128
1129 static rtx
1130 gen_conditional_move (cmp)
1131 rtx cmp;
1132 {
1133 enum rtx_code code = GET_CODE (cmp);
1134 rtx op0 = branch_cmp[0];
1135 rtx op1 = branch_cmp[1];
1136
1137 if (branch_type == CMP_SI)
1138 {
1139 /* Jump optimization calls get_condition() which canonicalizes
1140 comparisons like (GE x <const>) to (GT x <const-1>).
1141 Transform those comparisons back to GE, since that is the
1142 comparison supported in Xtensa. We shouldn't have to
1143 transform <LE x const> comparisons, because neither
1144 xtensa_expand_conditional_branch() nor get_condition() will
1145 produce them. */
1146
1147 if ((code == GT) && (op1 == constm1_rtx))
1148 {
1149 code = GE;
1150 op1 = const0_rtx;
1151 }
1152 cmp = gen_rtx (code, VOIDmode, cc0_rtx, const0_rtx);
1153
1154 if (boolean_operator (cmp, VOIDmode))
1155 {
1156 /* swap the operands to make const0 second */
1157 if (op0 == const0_rtx)
1158 {
1159 op0 = op1;
1160 op1 = const0_rtx;
1161 }
1162
1163 /* if not comparing against zero, emit a comparison (subtract) */
1164 if (op1 != const0_rtx)
1165 {
1166 op0 = expand_binop (SImode, sub_optab, op0, op1,
1167 0, 0, OPTAB_LIB_WIDEN);
1168 op1 = const0_rtx;
1169 }
1170 }
1171 else if (branch_operator (cmp, VOIDmode))
1172 {
1173 /* swap the operands to make const0 second */
1174 if (op0 == const0_rtx)
1175 {
1176 op0 = op1;
1177 op1 = const0_rtx;
1178
1179 switch (code)
1180 {
1181 case LT: code = GE; break;
1182 case GE: code = LT; break;
1183 default: abort ();
1184 }
1185 }
1186
1187 if (op1 != const0_rtx)
1188 return 0;
1189 }
1190 else
1191 return 0;
1192
1193 return gen_rtx (code, VOIDmode, op0, op1);
1194 }
1195
1196 if (TARGET_HARD_FLOAT && (branch_type == CMP_SF))
1197 return gen_float_relational (code, op0, op1);
1198
1199 return 0;
1200 }
1201
1202
1203 int
1204 xtensa_expand_conditional_move (operands, isflt)
1205 rtx *operands;
1206 int isflt;
1207 {
1208 rtx cmp;
1209 rtx (*gen_fn) PARAMS ((rtx, rtx, rtx, rtx, rtx));
1210
1211 if (!(cmp = gen_conditional_move (operands[1])))
1212 return 0;
1213
1214 if (isflt)
1215 gen_fn = (branch_type == CMP_SI
1216 ? gen_movsfcc_internal0
1217 : gen_movsfcc_internal1);
1218 else
1219 gen_fn = (branch_type == CMP_SI
1220 ? gen_movsicc_internal0
1221 : gen_movsicc_internal1);
1222
1223 emit_insn (gen_fn (operands[0], XEXP (cmp, 0),
1224 operands[2], operands[3], cmp));
1225 return 1;
1226 }
1227
1228
1229 int
1230 xtensa_expand_scc (operands)
1231 rtx *operands;
1232 {
1233 rtx dest = operands[0];
1234 rtx cmp = operands[1];
1235 rtx one_tmp, zero_tmp;
1236 rtx (*gen_fn) PARAMS ((rtx, rtx, rtx, rtx, rtx));
1237
1238 if (!(cmp = gen_conditional_move (cmp)))
1239 return 0;
1240
1241 one_tmp = gen_reg_rtx (SImode);
1242 zero_tmp = gen_reg_rtx (SImode);
1243 emit_insn (gen_movsi (one_tmp, const_true_rtx));
1244 emit_insn (gen_movsi (zero_tmp, const0_rtx));
1245
1246 gen_fn = (branch_type == CMP_SI
1247 ? gen_movsicc_internal0
1248 : gen_movsicc_internal1);
1249 emit_insn (gen_fn (dest, XEXP (cmp, 0), one_tmp, zero_tmp, cmp));
1250 return 1;
1251 }
1252
1253
1254 /* Emit insns to move operands[1] into operands[0].
1255
1256 Return 1 if we have written out everything that needs to be done to
1257 do the move. Otherwise, return 0 and the caller will emit the move
1258 normally. */
1259
int
xtensa_emit_move_sequence (operands, mode)
     rtx *operands;
     enum machine_mode mode;
{
  /* Constants that cannot be loaded with a single "movi" (i.e., not a
     signed 12-bit immediate) must be loaded from the constant pool.  */
  if (CONSTANT_P (operands[1])
      && GET_CODE (operands[1]) != CONSTANT_P_RTX
      && (GET_CODE (operands[1]) != CONST_INT
	  || !xtensa_simm12b (INTVAL (operands[1]))))
    {
      xtensa_load_constant (operands[0], operands[1]);
      return 1;
    }

  if (!(reload_in_progress | reload_completed))
    {
      /* Before reload, force invalid operand combinations into a
	 register; reload itself must not create new pseudos.  */
      if (!xtensa_valid_move (mode, operands))
	operands[1] = force_reg (mode, operands[1]);

      /* Special-case a copy of the incoming argument in a7; see
	 xtensa_copy_incoming_a7 for why this must be emitted at the
	 very start of the function.  */
      if (xtensa_copy_incoming_a7 (operands, mode))
	return 1;
    }

  /* During reload we don't want to emit (subreg:X (mem:Y)) since that
     instruction won't be recognized after reload. So we remove the
     subreg and adjust mem accordingly. */
  if (reload_in_progress)
    {
      operands[0] = fixup_subreg_mem (operands[0]);
      operands[1] = fixup_subreg_mem (operands[1]);
    }
  return 0;
}
1293
1294 static rtx
1295 fixup_subreg_mem (x)
1296 rtx x;
1297 {
1298 if (GET_CODE (x) == SUBREG
1299 && GET_CODE (SUBREG_REG (x)) == REG
1300 && REGNO (SUBREG_REG (x)) >= FIRST_PSEUDO_REGISTER)
1301 {
1302 rtx temp =
1303 gen_rtx_SUBREG (GET_MODE (x),
1304 reg_equiv_mem [REGNO (SUBREG_REG (x))],
1305 SUBREG_BYTE (x));
1306 x = alter_subreg (&temp);
1307 }
1308 return x;
1309 }
1310
1311
1312 /* Check if this move is copying an incoming argument in a7. If so,
1313 emit the move, followed by the special "set_frame_ptr"
1314 unspec_volatile insn, at the very beginning of the function. This
1315 is necessary because the register allocator will ignore conflicts
1316 with a7 and may assign some other pseudo to a7. If that pseudo was
1317 assigned prior to this move, it would clobber the incoming argument
1318 in a7. By copying the argument out of a7 as the very first thing,
1319 and then immediately following that with an unspec_volatile to keep
1320 the scheduler away, we should avoid any problems. */
1321
bool
xtensa_copy_incoming_a7 (operands, mode)
     rtx *operands;
     enum machine_mode mode;
{
  /* Only the first copy out of a7 gets the special treatment; see the
     comment above for why subsequent copies are left alone.  */
  if (a7_overlap_mentioned_p (operands[1])
      && !cfun->machine->incoming_a7_copied)
    {
      rtx mov;
      /* Pick the move pattern matching the operand mode.  */
      switch (mode)
	{
	case DFmode:
	  mov = gen_movdf_internal (operands[0], operands[1]);
	  break;
	case SFmode:
	  mov = gen_movsf_internal (operands[0], operands[1]);
	  break;
	case DImode:
	  mov = gen_movdi_internal (operands[0], operands[1]);
	  break;
	case SImode:
	  mov = gen_movsi_internal (operands[0], operands[1]);
	  break;
	case HImode:
	  mov = gen_movhi_internal (operands[0], operands[1]);
	  break;
	case QImode:
	  mov = gen_movqi_internal (operands[0], operands[1]);
	  break;
	default:
	  abort ();
	}

      /* Insert the instructions before any other argument copies.
	 (The set_frame_ptr insn comes _after_ the move, so push it
	 out first.) */
      push_topmost_sequence ();
      emit_insn_after (gen_set_frame_ptr (), get_insns ());
      emit_insn_after (mov, get_insns ());
      pop_topmost_sequence ();

      /* Ideally the incoming argument in a7 would only be copied
	 once, since propagating a7 into the body of a function
	 will almost certainly lead to errors.  However, there is
	 at least one harmless case (in GCSE) where the original
	 copy from a7 is changed to copy into a new pseudo.  Thus,
	 we use a flag to only do this special treatment for the
	 first copy of a7.  */

      cfun->machine->incoming_a7_copied = true;

      return 1;
    }

  return 0;
}
1378
1379
1380 /* Try to expand a block move operation to an RTL block move instruction.
1381 If not optimizing or if the block size is not a constant or if the
1382 block is small, the expansion fails and GCC falls back to calling
1383 memcpy().
1384
1385 operands[0] is the destination
1386 operands[1] is the source
1387 operands[2] is the length
1388 operands[3] is the alignment */
1389
1390 int
1391 xtensa_expand_block_move (operands)
1392 rtx *operands;
1393 {
1394 rtx dest = operands[0];
1395 rtx src = operands[1];
1396 int bytes = INTVAL (operands[2]);
1397 int align = XINT (operands[3], 0);
1398 int num_pieces, move_ratio;
1399
1400 /* If this is not a fixed size move, just call memcpy */
1401 if (!optimize || (GET_CODE (operands[2]) != CONST_INT))
1402 return 0;
1403
1404 /* Anything to move? */
1405 if (bytes <= 0)
1406 return 1;
1407
1408 if (align > MOVE_MAX)
1409 align = MOVE_MAX;
1410
1411 /* decide whether to expand inline based on the optimization level */
1412 move_ratio = 4;
1413 if (optimize > 2)
1414 move_ratio = LARGEST_MOVE_RATIO;
1415 num_pieces = (bytes / align) + (bytes % align); /* close enough anyway */
1416 if (num_pieces >= move_ratio)
1417 return 0;
1418
1419 /* make sure the memory addresses are valid */
1420 operands[0] = validize_mem (dest);
1421 operands[1] = validize_mem (src);
1422
1423 emit_insn (gen_movstrsi_internal (operands[0], operands[1],
1424 operands[2], operands[3]));
1425 return 1;
1426 }
1427
1428
1429 /* Emit a sequence of instructions to implement a block move, trying
1430 to hide load delay slots as much as possible. Load N values into
1431 temporary registers, store those N values, and repeat until the
1432 complete block has been moved. N=delay_slots+1 */
1433
/* Assembler template and operands for one load or store used while
   expanding a block move: the opcode string (with %0/%1 operand
   placeholders) plus the register and memory operands.
   NOTE(review): "template" is a C++ keyword; this file must be
   compiled as C.  */
struct meminsnbuf {
  char template[30];
  rtx operands[2];
};
1438
void
xtensa_emit_block_move (operands, tmpregs, delay_slots)
     rtx *operands;
     rtx *tmpregs;
     int delay_slots;
{
  rtx dest = operands[0];
  rtx src = operands[1];
  int bytes = INTVAL (operands[2]);
  int align = XINT (operands[3], 0);
  rtx from_addr = XEXP (src, 0);
  rtx to_addr = XEXP (dest, 0);
  int from_struct = MEM_IN_STRUCT_P (src);
  int to_struct = MEM_IN_STRUCT_P (dest);
  int offset = 0;
  int chunk_size, item_size;
  struct meminsnbuf *ldinsns, *stinsns;
  const char *ldname, *stname;
  enum machine_mode mode;

  if (align > MOVE_MAX)
    align = MOVE_MAX;
  /* Move ALIGN bytes at a time; emit delay_slots+1 loads per chunk so
     each load's delay slot is hidden by the following loads.  */
  item_size = align;
  chunk_size = delay_slots + 1;

  ldinsns = (struct meminsnbuf *)
    alloca (chunk_size * sizeof (struct meminsnbuf));
  stinsns = (struct meminsnbuf *)
    alloca (chunk_size * sizeof (struct meminsnbuf));

  /* Widest integer mode (with usable ld/st opcodes) for this alignment.  */
  mode = xtensa_find_mode_for_size (item_size);
  item_size = GET_MODE_SIZE (mode);
  ldname = xtensa_ld_opcodes[(int) mode];
  stname = xtensa_st_opcodes[(int) mode];

  while (bytes > 0)
    {
      int n;

      for (n = 0; n < chunk_size; n++)
	{
	  rtx addr, mem;

	  if (bytes == 0)
	    {
	      /* Partial final chunk: only output the pieces recorded.  */
	      chunk_size = n;
	      break;
	    }

	  if (bytes < item_size)
	    {
	      /* find a smaller item_size which we can load & store */
	      item_size = bytes;
	      mode = xtensa_find_mode_for_size (item_size);
	      item_size = GET_MODE_SIZE (mode);
	      ldname = xtensa_ld_opcodes[(int) mode];
	      stname = xtensa_st_opcodes[(int) mode];
	    }

	  /* record the load instruction opcode and operands */
	  addr = plus_constant (from_addr, offset);
	  mem = gen_rtx_MEM (mode, addr);
	  if (! memory_address_p (mode, addr))
	    abort ();
	  MEM_IN_STRUCT_P (mem) = from_struct;
	  ldinsns[n].operands[0] = tmpregs[n];
	  ldinsns[n].operands[1] = mem;
	  sprintf (ldinsns[n].template, "%s\t%%0, %%1", ldname);

	  /* record the store instruction opcode and operands */
	  addr = plus_constant (to_addr, offset);
	  mem = gen_rtx_MEM (mode, addr);
	  if (! memory_address_p (mode, addr))
	    abort ();
	  MEM_IN_STRUCT_P (mem) = to_struct;
	  stinsns[n].operands[0] = tmpregs[n];
	  stinsns[n].operands[1] = mem;
	  sprintf (stinsns[n].template, "%s\t%%0, %%1", stname);

	  offset += item_size;
	  bytes -= item_size;
	}

      /* now output the loads followed by the stores */
      for (n = 0; n < chunk_size; n++)
	output_asm_insn (ldinsns[n].template, ldinsns[n].operands);
      for (n = 0; n < chunk_size; n++)
	output_asm_insn (stinsns[n].template, stinsns[n].operands);
    }
}
1529
1530
/* Return the widest integer mode not larger than ITEM_SIZE bytes for
   which both a load and a store opcode are available (see
   xtensa_ld_opcodes/xtensa_st_opcodes).  Aborts if no integer mode
   fits.  Used by the block-move code above.  */

static enum machine_mode
xtensa_find_mode_for_size (item_size)
     unsigned item_size;
{
  enum machine_mode mode, tmode;

  while (1)
    {
      mode = VOIDmode;

      /* find mode closest to but not bigger than item_size */
      for (tmode = GET_CLASS_NARROWEST_MODE (MODE_INT);
	   tmode != VOIDmode; tmode = GET_MODE_WIDER_MODE (tmode))
	if (GET_MODE_SIZE (tmode) <= item_size)
	  mode = tmode;
      if (mode == VOIDmode)
	abort ();

      item_size = GET_MODE_SIZE (mode);

      if (xtensa_ld_opcodes[(int) mode]
	  && xtensa_st_opcodes[(int) mode])
	break;

      /* cannot load & store this mode; try something smaller */
      item_size -= 1;
    }

  return mode;
}
1561
1562
1563 void
1564 xtensa_expand_nonlocal_goto (operands)
1565 rtx *operands;
1566 {
1567 rtx goto_handler = operands[1];
1568 rtx containing_fp = operands[3];
1569
1570 /* generate a call to "__xtensa_nonlocal_goto" (in libgcc); the code
1571 is too big to generate in-line */
1572
1573 if (GET_CODE (containing_fp) != REG)
1574 containing_fp = force_reg (Pmode, containing_fp);
1575
1576 goto_handler = replace_rtx (copy_rtx (goto_handler),
1577 virtual_stack_vars_rtx,
1578 containing_fp);
1579
1580 emit_library_call (gen_rtx_SYMBOL_REF (Pmode, "__xtensa_nonlocal_goto"),
1581 0, VOIDmode, 2,
1582 containing_fp, Pmode,
1583 goto_handler, Pmode);
1584 }
1585
1586
/* Allocate a zero-initialized, garbage-collected machine_function
   record; installed as init_machine_status in override_options.  */

static struct machine_function *
xtensa_init_machine_status ()
{
  return ggc_alloc_cleared (sizeof (struct machine_function));
}
1592
1593
/* Called when the current function needs access to previous stack
   frames (__builtin_frame_address/__builtin_return_address).  Emits a
   call to a libgcc helper that presumably spills the active register
   windows to the stack so earlier frames are visible in memory —
   NOTE(review): helper semantics inferred from its name; confirm in
   libgcc.  */

void
xtensa_setup_frame_addresses ()
{
  /* Set flag to cause FRAME_POINTER_REQUIRED to be set. */
  cfun->machine->accesses_prev_frame = 1;

  emit_library_call
    (gen_rtx_SYMBOL_REF (Pmode, "__xtensa_libgcc_window_spill"),
     0, VOIDmode, 0);
}
1604
1605
1606 /* Emit the assembly for the end of a zero-cost loop. Normally we just emit
1607 a comment showing where the end of the loop is. However, if there is a
1608 label or a branch at the end of the loop then we need to place a nop
1609 there. If the loop ends with a label we need the nop so that branches
1610 targetting that label will target the nop (and thus remain in the loop),
1611 instead of targetting the instruction after the loop (and thus exiting
1612 the loop). If the loop ends with a branch, we need the nop in case the
1613 branch is targetting a location inside the loop. When the branch
1614 executes it will cause the loop count to be decremented even if it is
1615 taken (because it is the last instruction in the loop), so we need to
1616 nop after the branch to prevent the loop count from being decremented
1617 when the branch is taken. */
1618
1619 void
1620 xtensa_emit_loop_end (insn, operands)
1621 rtx insn;
1622 rtx *operands;
1623 {
1624 char done = 0;
1625
1626 for (insn = PREV_INSN (insn); insn && !done; insn = PREV_INSN (insn))
1627 {
1628 switch (GET_CODE (insn))
1629 {
1630 case NOTE:
1631 case BARRIER:
1632 break;
1633
1634 case CODE_LABEL:
1635 output_asm_insn ("nop.n", operands);
1636 done = 1;
1637 break;
1638
1639 default:
1640 {
1641 rtx body = PATTERN (insn);
1642
1643 if (GET_CODE (body) == JUMP_INSN)
1644 {
1645 output_asm_insn ("nop.n", operands);
1646 done = 1;
1647 }
1648 else if ((GET_CODE (body) != USE)
1649 && (GET_CODE (body) != CLOBBER))
1650 done = 1;
1651 }
1652 break;
1653 }
1654 }
1655
1656 output_asm_insn ("# loop end for %0", operands);
1657 }
1658
1659
1660 char *
1661 xtensa_emit_call (callop, operands)
1662 int callop;
1663 rtx *operands;
1664 {
1665 static char result[64];
1666 rtx tgt = operands[callop];
1667
1668 if (GET_CODE (tgt) == CONST_INT)
1669 sprintf (result, "call8\t0x%x", INTVAL (tgt));
1670 else if (register_operand (tgt, VOIDmode))
1671 sprintf (result, "callx8\t%%%d", callop);
1672 else
1673 sprintf (result, "call8\t%%%d", callop);
1674
1675 return result;
1676 }
1677
1678
1679 /* Return the stabs register number to use for 'regno'. */
1680
1681 int
1682 xtensa_dbx_register_number (regno)
1683 int regno;
1684 {
1685 int first = -1;
1686
1687 if (GP_REG_P (regno)) {
1688 regno -= GP_REG_FIRST;
1689 first = 0;
1690 }
1691 else if (BR_REG_P (regno)) {
1692 regno -= BR_REG_FIRST;
1693 first = 16;
1694 }
1695 else if (FP_REG_P (regno)) {
1696 regno -= FP_REG_FIRST;
1697 /* The current numbering convention is that TIE registers are
1698 numbered in libcc order beginning with 256. We can't guarantee
1699 that the FP registers will come first, so the following is just
1700 a guess. It seems like we should make a special case for FP
1701 registers and give them fixed numbers < 256. */
1702 first = 256;
1703 }
1704 else if (ACC_REG_P (regno))
1705 {
1706 first = 0;
1707 regno = -1;
1708 }
1709
1710 /* When optimizing, we sometimes get asked about pseudo-registers
1711 that don't represent hard registers. Return 0 for these. */
1712 if (first == -1)
1713 return 0;
1714
1715 return first + regno;
1716 }
1717
1718
1719 /* Argument support functions. */
1720
1721 /* Initialize CUMULATIVE_ARGS for a function. */
1722
void
init_cumulative_args (cum, fntype, libname)
     CUMULATIVE_ARGS *cum;		/* argument info to initialize */
     tree fntype ATTRIBUTE_UNUSED;	/* tree ptr for function decl */
     rtx libname ATTRIBUTE_UNUSED;	/* SYMBOL_REF of library name or 0 */
{
  /* Start argument scanning at the first argument word.  */
  cum->arg_words = 0;
}
1731
1732 /* Advance the argument to the next argument position. */
1733
1734 void
1735 function_arg_advance (cum, mode, type)
1736 CUMULATIVE_ARGS *cum; /* current arg information */
1737 enum machine_mode mode; /* current arg mode */
1738 tree type; /* type of the argument or 0 if lib support */
1739 {
1740 int words, max;
1741 int *arg_words;
1742
1743 arg_words = &cum->arg_words;
1744 max = MAX_ARGS_IN_REGISTERS;
1745
1746 words = (((mode != BLKmode)
1747 ? (int) GET_MODE_SIZE (mode)
1748 : int_size_in_bytes (type)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1749
1750 if ((*arg_words + words > max) && (*arg_words < max))
1751 *arg_words = max;
1752
1753 *arg_words += words;
1754 }
1755
1756
1757 /* Return an RTL expression containing the register for the given mode,
1758 or 0 if the argument is to be passed on the stack. */
1759
rtx
function_arg (cum, mode, type, incoming_p)
     CUMULATIVE_ARGS *cum;	/* current arg information */
     enum machine_mode mode;	/* current arg mode */
     tree type;			/* type of the argument or 0 if lib support */
     int incoming_p;		/* computing the incoming registers? */
{
  int regbase, words, max;
  int *arg_words;
  int regno;
  enum machine_mode result_mode;

  arg_words = &cum->arg_words;
  regbase = (incoming_p ? GP_ARG_FIRST : GP_OUTGOING_ARG_FIRST);
  max = MAX_ARGS_IN_REGISTERS;

  /* Argument size rounded up to whole words.  */
  words = (((mode != BLKmode)
	    ? (int) GET_MODE_SIZE (mode)
	    : int_size_in_bytes (type)) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;

  /* Arguments with more than word alignment start on an even word.  */
  if (type && (TYPE_ALIGN (type) > BITS_PER_WORD))
    *arg_words += (*arg_words & 1);

  /* Arguments that do not fit in the remaining registers go on the
     stack (caller signals that by the 0 return).  */
  if (*arg_words + words > max)
    return (rtx)0;

  regno = regbase + *arg_words;
  result_mode = (mode == BLKmode ? TYPE_MODE (type) : mode);

  /* We need to make sure that references to a7 are represented with
     rtx that is not equal to hard_frame_pointer_rtx.  For BLKmode and
     modes bigger than 2 words (because we only have patterns for
     modes of 2 words or smaller), we can't control the expansion
     unless we explicitly list the individual registers in a PARALLEL. */

  if ((mode == BLKmode || words > 2)
      && regno < A7_REG
      && regno + words > A7_REG)
    {
      rtx result;
      int n;

      result = gen_rtx_PARALLEL (result_mode, rtvec_alloc (words));
      for (n = 0; n < words; n++)
	{
	  XVECEXP (result, 0, n) =
	    gen_rtx_EXPR_LIST (VOIDmode,
			       gen_raw_REG (SImode, regno + n),
			       GEN_INT (n * UNITS_PER_WORD));
	}
      return result;
    }

  /* gen_raw_REG (not gen_rtx_REG) so that a reference to a7 is
     distinct from hard_frame_pointer_rtx; see the comment above.  */
  return gen_raw_REG (result_mode, regno);
}
1815
1816
/* Validate option combinations and initialize derived tables: the
   ld/st opcode names used by the block-move expander, the constraint
   letter to register class map, the register/mode legitimacy table,
   the machine_function allocator, and the PIC default.  */

void
override_options ()
{
  int regno;
  enum machine_mode mode;

  if (!TARGET_BOOLEANS && TARGET_HARD_FLOAT)
    error ("boolean registers required for the floating-point option");

  /* set up the tables of ld/st opcode names for block moves */
  xtensa_ld_opcodes[(int) SImode] = "l32i";
  xtensa_ld_opcodes[(int) HImode] = "l16ui";
  xtensa_ld_opcodes[(int) QImode] = "l8ui";
  xtensa_st_opcodes[(int) SImode] = "s32i";
  xtensa_st_opcodes[(int) HImode] = "s16i";
  xtensa_st_opcodes[(int) QImode] = "s8i";

  /* Constraint letters; classes depend on the configured options.  */
  xtensa_char_to_class['q'] = SP_REG;
  xtensa_char_to_class['a'] = GR_REGS;
  xtensa_char_to_class['b'] = ((TARGET_BOOLEANS) ? BR_REGS : NO_REGS);
  xtensa_char_to_class['f'] = ((TARGET_HARD_FLOAT) ? FP_REGS : NO_REGS);
  xtensa_char_to_class['A'] = ((TARGET_MAC16) ? ACC_REG : NO_REGS);
  xtensa_char_to_class['B'] = ((TARGET_SEXT) ? GR_REGS : NO_REGS);
  xtensa_char_to_class['C'] = ((TARGET_MUL16) ? GR_REGS: NO_REGS);
  xtensa_char_to_class['D'] = ((TARGET_DENSITY) ? GR_REGS: NO_REGS);
  xtensa_char_to_class['d'] = ((TARGET_DENSITY) ? AR_REGS: NO_REGS);

  /* Set up array giving whether a given register can hold a given mode. */
  for (mode = VOIDmode;
       mode != MAX_MACHINE_MODE;
       mode = (enum machine_mode) ((int) mode + 1))
    {
      int size = GET_MODE_SIZE (mode);
      enum mode_class class = GET_MODE_CLASS (mode);

      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
	{
	  int temp;

	  if (ACC_REG_P (regno))
	    temp = (TARGET_MAC16 &&
		    (class == MODE_INT) && (size <= UNITS_PER_WORD));
	  else if (GP_REG_P (regno))
	    /* Multi-word values must start on an even register.  */
	    temp = ((regno & 1) == 0 || (size <= UNITS_PER_WORD));
	  else if (FP_REG_P (regno))
	    temp = (TARGET_HARD_FLOAT && (mode == SFmode));
	  else if (BR_REG_P (regno))
	    temp = (TARGET_BOOLEANS && (mode == CCmode));
	  else
	    temp = FALSE;

	  xtensa_hard_regno_mode_ok[(int) mode][regno] = temp;
	}
    }

  init_machine_status = xtensa_init_machine_status;

  /* Check PIC settings.  There's no need for -fPIC on Xtensa and
     some targets need to always use PIC.  */
  if (flag_pic > 1 || (XTENSA_ALWAYS_PIC))
    flag_pic = 1;
}
1879
1880
1881 /* A C compound statement to output to stdio stream STREAM the
1882 assembler syntax for an instruction operand X. X is an RTL
1883 expression.
1884
1885 CODE is a value that can be used to specify one of several ways
1886 of printing the operand. It is used when identical operands
1887 must be printed differently depending on the context. CODE
1888 comes from the '%' specification that was used to request
1889 printing of the operand. If the specification was just '%DIGIT'
1890 then CODE is 0; if the specification was '%LTR DIGIT' then CODE
1891 is the ASCII code for LTR.
1892
1893 If X is a register, this macro should print the register's name.
1894 The names can be found in an array 'reg_names' whose type is
1895 'char *[]'. 'reg_names' is initialized from 'REGISTER_NAMES'.
1896
1897 When the machine description has a specification '%PUNCT' (a '%'
1898 followed by a punctuation character), this macro is called with
1899 a null pointer for X and the punctuation character for CODE.
1900
1901 'a', 'c', 'l', and 'n' are reserved.
1902
1903 The Xtensa specific codes are:
1904
1905 'd' CONST_INT, print as signed decimal
1906 'x' CONST_INT, print as signed hexadecimal
1907 'K' CONST_INT, print number of bits in mask for EXTUI
1908 'R' CONST_INT, print (X & 0x1f)
1909 'L' CONST_INT, print ((32 - X) & 0x1f)
1910 'D' REG, print second register of double-word register operand
1911 'N' MEM, print address of next word following a memory operand
1912 'v' MEM, if memory reference is volatile, output a MEMW before it
1913 */
1914
/* Print VAL to FILE as a "nice" hexadecimal constant: small
   magnitudes (|val| < 10) print in decimal, negative values print as
   a minus sign followed by the hex magnitude, and everything else
   prints as plain hex.  */

static void
printx (file, val)
     FILE *file;
     signed int val;
{
  /* print a hexadecimal value in a nice way */
  if ((val > -0xa) && (val < 0xa))
    fprintf (file, "%d", val);
  else if (val < 0)
    /* Negate in unsigned arithmetic: -val overflows for INT_MIN.  */
    fprintf (file, "-0x%x", 0U - (unsigned int) val);
  else
    fprintf (file, "0x%x", val);
}
1928
1929
1930 void
1931 print_operand (file, op, letter)
1932 FILE *file; /* file to write to */
1933 rtx op; /* operand to print */
1934 int letter; /* %<letter> or 0 */
1935 {
1936 enum rtx_code code;
1937
1938 if (! op)
1939 error ("PRINT_OPERAND null pointer");
1940
1941 code = GET_CODE (op);
1942 switch (code)
1943 {
1944 case REG:
1945 case SUBREG:
1946 {
1947 int regnum = xt_true_regnum (op);
1948 if (letter == 'D')
1949 regnum++;
1950 fprintf (file, "%s", reg_names[regnum]);
1951 break;
1952 }
1953
1954 case MEM:
1955 /* For a volatile memory reference, emit a MEMW before the
1956 load or store. */
1957 if (letter == 'v')
1958 {
1959 if (MEM_VOLATILE_P (op) && TARGET_SERIALIZE_VOLATILE)
1960 fprintf (file, "memw\n\t");
1961 break;
1962 }
1963 else if (letter == 'N')
1964 {
1965 enum machine_mode mode;
1966 switch (GET_MODE (op))
1967 {
1968 case DFmode: mode = SFmode; break;
1969 case DImode: mode = SImode; break;
1970 default: abort ();
1971 }
1972 op = adjust_address (op, mode, 4);
1973 }
1974
1975 output_address (XEXP (op, 0));
1976 break;
1977
1978 case CONST_INT:
1979 switch (letter)
1980 {
1981 case 'K':
1982 {
1983 int num_bits = 0;
1984 unsigned val = INTVAL (op);
1985 while (val & 1)
1986 {
1987 num_bits += 1;
1988 val = val >> 1;
1989 }
1990 if ((val != 0) || (num_bits == 0) || (num_bits > 16))
1991 fatal_insn ("invalid mask", op);
1992
1993 fprintf (file, "%d", num_bits);
1994 break;
1995 }
1996
1997 case 'L':
1998 fprintf (file, "%d", (32 - INTVAL (op)) & 0x1f);
1999 break;
2000
2001 case 'R':
2002 fprintf (file, "%d", INTVAL (op) & 0x1f);
2003 break;
2004
2005 case 'x':
2006 printx (file, INTVAL (op));
2007 break;
2008
2009 case 'd':
2010 default:
2011 fprintf (file, "%d", INTVAL (op));
2012 break;
2013
2014 }
2015 break;
2016
2017 default:
2018 output_addr_const (file, op);
2019 }
2020 }
2021
2022
2023 /* A C compound statement to output to stdio stream STREAM the
2024 assembler syntax for an instruction operand that is a memory
2025 reference whose address is ADDR. ADDR is an RTL expression. */
2026
2027 void
2028 print_operand_address (file, addr)
2029 FILE *file;
2030 rtx addr;
2031 {
2032 if (!addr)
2033 error ("PRINT_OPERAND_ADDRESS, null pointer");
2034
2035 switch (GET_CODE (addr))
2036 {
2037 default:
2038 fatal_insn ("invalid address", addr);
2039 break;
2040
2041 case REG:
2042 fprintf (file, "%s, 0", reg_names [REGNO (addr)]);
2043 break;
2044
2045 case PLUS:
2046 {
2047 rtx reg = (rtx)0;
2048 rtx offset = (rtx)0;
2049 rtx arg0 = XEXP (addr, 0);
2050 rtx arg1 = XEXP (addr, 1);
2051
2052 if (GET_CODE (arg0) == REG)
2053 {
2054 reg = arg0;
2055 offset = arg1;
2056 }
2057 else if (GET_CODE (arg1) == REG)
2058 {
2059 reg = arg1;
2060 offset = arg0;
2061 }
2062 else
2063 fatal_insn ("no register in address", addr);
2064
2065 if (CONSTANT_P (offset))
2066 {
2067 fprintf (file, "%s, ", reg_names [REGNO (reg)]);
2068 output_addr_const (file, offset);
2069 }
2070 else
2071 fatal_insn ("address offset not a constant", addr);
2072 }
2073 break;
2074
2075 case LABEL_REF:
2076 case SYMBOL_REF:
2077 case CONST_INT:
2078 case CONST:
2079 output_addr_const (file, addr);
2080 break;
2081 }
2082 }
2083
2084
2085 /* Emit either a label, .comm, or .lcomm directive. */
2086
void
xtensa_declare_object (file, name, init_string, final_string, size)
     FILE *file;		/* assembly output stream */
     char *name;		/* symbol being declared */
     char *init_string;		/* directive text printed before the name */
     char *final_string;	/* printf format printed after the name;
				   may consume SIZE */
     int size;			/* object size in bytes */
{
  fputs (init_string, file);		/* "", "\t.comm\t", or "\t.lcomm\t" */
  assemble_name (file, name);
  fprintf (file, final_string, size);	/* ":\n", ",%u\n", ",%u\n" */
}
2099
2100
/* Emit a ".literal" directive for constant X of mode MODE, labeled
   .LC<LABELNO>.  Handles 4- and 8-byte integer constants and SF/DF
   floating constants; anything else aborts.  */

void
xtensa_output_literal (file, x, mode, labelno)
     FILE *file;
     rtx x;
     enum machine_mode mode;
     int labelno;
{
  long value_long[2];
  REAL_VALUE_TYPE r;
  int size;

  fprintf (file, "\t.literal .LC%u, ", (unsigned) labelno);

  switch (GET_MODE_CLASS (mode))
    {
    case MODE_FLOAT:
      if (GET_CODE (x) != CONST_DOUBLE)
	abort ();

      /* Convert to the target's floating-point bit pattern.  */
      REAL_VALUE_FROM_CONST_DOUBLE (r, x);
      switch (mode)
	{
	case SFmode:
	  REAL_VALUE_TO_TARGET_SINGLE (r, value_long[0]);
	  fprintf (file, "0x%08lx\n", value_long[0]);
	  break;

	case DFmode:
	  REAL_VALUE_TO_TARGET_DOUBLE (r, value_long);
	  fprintf (file, "0x%08lx, 0x%08lx\n",
		   value_long[0], value_long[1]);
	  break;

	default:
	  abort ();
	}

      break;

    case MODE_INT:
    case MODE_PARTIAL_INT:
      size = GET_MODE_SIZE (mode);
      if (size == 4)
	{
	  output_addr_const (file, x);
	  fputs ("\n", file);
	}
      else if (size == 8)
	{
	  /* Emit an 8-byte constant as two comma-separated words.  */
	  output_addr_const (file, operand_subword (x, 0, 0, DImode));
	  fputs (", ", file);
	  output_addr_const (file, operand_subword (x, 1, 0, DImode));
	  fputs ("\n", file);
	}
      else
	abort ();
      break;

    default:
      abort ();
    }
}
2163
2164
2165 /* Return the bytes needed to compute the frame pointer from the current
2166 stack pointer. */
2167
2168 #define STACK_BYTES (STACK_BOUNDARY / BITS_PER_UNIT)
2169 #define XTENSA_STACK_ALIGN(LOC) (((LOC) + STACK_BYTES-1) & ~(STACK_BYTES-1))
2170
long
compute_frame_size (size)
     int size;			/* # of var. bytes allocated */
{
  /* add space for the incoming static chain value */
  if (current_function_needs_context)
    size += (1 * UNITS_PER_WORD);

  /* Total frame = locals + outgoing argument area + register-window
     save area, rounded up to the stack alignment; also recorded in
     the global xtensa_current_frame_size.  */
  xtensa_current_frame_size =
    XTENSA_STACK_ALIGN (size
			+ current_function_outgoing_args_size
			+ (WINDOW_SIZE * UNITS_PER_WORD));
  return xtensa_current_frame_size;
}
2185
2186
2187 int
2188 xtensa_frame_pointer_required ()
2189 {
2190 /* The code to expand builtin_frame_addr and builtin_return_addr
2191 currently uses the hard_frame_pointer instead of frame_pointer.
2192 This seems wrong but maybe it's necessary for other architectures.
2193 This function is derived from the i386 code. */
2194
2195 if (cfun->machine->accesses_prev_frame)
2196 return 1;
2197
2198 return 0;
2199 }
2200
2201
2202 void
2203 xtensa_reorg (first)
2204 rtx first;
2205 {
2206 rtx insn, set_frame_ptr_insn = 0;
2207
2208 unsigned long tsize = compute_frame_size (get_frame_size ());
2209 if (tsize < (1 << (12+3)))
2210 frame_size_const = 0;
2211 else
2212 {
2213 frame_size_const = force_const_mem (SImode, GEN_INT (tsize - 16));;
2214
2215 /* make sure the constant is used so it doesn't get eliminated
2216 from the constant pool */
2217 emit_insn_before (gen_rtx_USE (SImode, frame_size_const), first);
2218 }
2219
2220 if (!frame_pointer_needed)
2221 return;
2222
2223 /* Search all instructions, looking for the insn that sets up the
2224 frame pointer. This search will fail if the function does not
2225 have an incoming argument in $a7, but in that case, we can just
2226 set up the frame pointer at the very beginning of the
2227 function. */
2228
2229 for (insn = first; insn; insn = NEXT_INSN (insn))
2230 {
2231 rtx pat;
2232
2233 if (!INSN_P (insn))
2234 continue;
2235
2236 pat = PATTERN (insn);
2237 if (GET_CODE (pat) == UNSPEC_VOLATILE
2238 && (XINT (pat, 1) == UNSPECV_SET_FP))
2239 {
2240 set_frame_ptr_insn = insn;
2241 break;
2242 }
2243 }
2244
2245 if (set_frame_ptr_insn)
2246 {
2247 /* for all instructions prior to set_frame_ptr_insn, replace
2248 hard_frame_pointer references with stack_pointer */
2249 for (insn = first; insn != set_frame_ptr_insn; insn = NEXT_INSN (insn))
2250 {
2251 if (INSN_P (insn))
2252 PATTERN (insn) = replace_rtx (copy_rtx (PATTERN (insn)),
2253 hard_frame_pointer_rtx,
2254 stack_pointer_rtx);
2255 }
2256 }
2257 else
2258 {
2259 /* emit the frame pointer move immediately after the NOTE that starts
2260 the function */
2261 emit_insn_after (gen_movsi (hard_frame_pointer_rtx,
2262 stack_pointer_rtx), first);
2263 }
2264 }
2265
2266
2267 /* Set up the stack and frame (if desired) for the function. */
2268
void
xtensa_function_prologue (file, size)
     FILE *file;
     int size ATTRIBUTE_UNUSED;
{
  unsigned long tsize = compute_frame_size (get_frame_size ());

  /* Tell the assembler/debugger which register anchors the frame.
     NOTE(review): tsize is unsigned long but printed with "%ld";
     harmless for realistic frame sizes, "%lu" would be exact.  */
  if (frame_pointer_needed)
    fprintf (file, "\t.frame\ta7, %ld\n", tsize);
  else
    fprintf (file, "\t.frame\tsp, %ld\n", tsize);


  if (tsize < (1 << (12+3)))
    {
      /* The frame size fits in the "entry" instruction's immediate
	 field.  */
      fprintf (file, "\tentry\tsp, %ld\n", tsize);
    }
  else
    {
      /* Frame too large for "entry": allocate a minimal 16-byte frame
	 and then adjust the stack pointer explicitly, loading the
	 frame size constant that xtensa_reorg placed in the literal
	 pool.  */
      fprintf (file, "\tentry\tsp, 16\n");

      /* use a8 as a temporary since a0-a7 may be live */
      fprintf (file, "\tl32r\ta8, ");
      print_operand (file, frame_size_const, 0);
      fprintf (file, "\n\tsub\ta8, sp, a8\n");
      fprintf (file, "\tmovsp\tsp, a8\n");
    }
}
2297
2298
2299 /* Do any necessary cleanup after a function to restore
2300 stack, frame, and regs. */
2301
void
xtensa_function_epilogue (file, size)
     FILE *file;
     int size ATTRIBUTE_UNUSED;
{
  rtx insn = get_last_insn ();
  /* Skip a trailing NOTE to find the last real insn; if that insn is
     a BARRIER (control cannot fall through), no return is needed.  */
  if (GET_CODE (insn) == NOTE)
    insn = prev_nonnote_insn (insn);
  if (insn == 0 || GET_CODE (insn) != BARRIER)
    fprintf (file, TARGET_DENSITY ? "\tretw.n\n" : "\tretw\n");

  /* Reset for the next function; see compute_frame_size.  */
  xtensa_current_frame_size = 0;
}
2316
2317
rtx
xtensa_return_addr (count, frame)
     int count;
     rtx frame;
{
  rtx result, retaddr;

  /* COUNT == -1 means the current function: the return address lives
     in a0 (hard register 0).  */
  if (count == -1)
    retaddr = gen_rtx_REG (Pmode, 0);
  else
    {
      /* For an outer frame, load the saved return address from the
	 register-save area below FRAME.  */
      rtx addr = plus_constant (frame, -4 * UNITS_PER_WORD);
      addr = memory_address (Pmode, addr);
      retaddr = gen_reg_rtx (Pmode);
      emit_move_insn (retaddr, gen_rtx_MEM (Pmode, addr));
    }

  /* The 2 most-significant bits of the return address on Xtensa hold
     the register window size.  To get the real return address, these
     bits must be replaced with the high bits from the current PC.  */

  result = gen_reg_rtx (Pmode);
  emit_insn (gen_fix_return_addr (result, retaddr));
  return result;
}
2343
2344
2345 /* Create the va_list data type.
2346 This structure is set up by __builtin_saveregs. The __va_reg
2347 field points to a stack-allocated region holding the contents of the
2348 incoming argument registers. The __va_ndx field is an index initialized
2349 to the position of the first unnamed (variable) argument. This same index
2350 is also used to address the arguments passed in memory. Thus, the
2351 __va_stk field is initialized to point to the position of the first
2352 argument in memory offset to account for the arguments passed in
2353 registers. E.G., if there are 6 argument registers, and each register is
2354 4 bytes, then __va_stk is set to $sp - (6 * 4); then __va_reg[N*4]
2355 references argument word N for 0 <= N < 6, and __va_stk[N*4] references
2356 argument word N for N >= 6. */
2357
tree
xtensa_build_va_list ()
{
  tree f_stk, f_reg, f_ndx, record, type_decl;

  record = (*lang_hooks.types.make_type) (RECORD_TYPE);
  type_decl = build_decl (TYPE_DECL, get_identifier ("__va_list_tag"), record);

  /* The three fields; see the layout description in the comment
     above.  */
  f_stk = build_decl (FIELD_DECL, get_identifier ("__va_stk"),
		      ptr_type_node);
  f_reg = build_decl (FIELD_DECL, get_identifier ("__va_reg"),
		      ptr_type_node);
  f_ndx = build_decl (FIELD_DECL, get_identifier ("__va_ndx"),
		      integer_type_node);

  DECL_FIELD_CONTEXT (f_stk) = record;
  DECL_FIELD_CONTEXT (f_reg) = record;
  DECL_FIELD_CONTEXT (f_ndx) = record;

  /* Chain the fields in declaration order and lay out the record.  */
  TREE_CHAIN (record) = type_decl;
  TYPE_NAME (record) = type_decl;
  TYPE_FIELDS (record) = f_stk;
  TREE_CHAIN (f_stk) = f_reg;
  TREE_CHAIN (f_reg) = f_ndx;

  layout_type (record);
  return record;
}
2386
2387
2388 /* Save the incoming argument registers on the stack. Returns the
2389 address of the saved registers. */
2390
rtx
xtensa_builtin_saveregs ()
{
  rtx gp_regs, dest;
  int arg_words = current_function_arg_words;
  int gp_left = MAX_ARGS_IN_REGISTERS - arg_words;
  int i;

  /* Nothing to save when all argument registers are used by named
     arguments.  */
  if (gp_left == 0)
    return const0_rtx;

  /* allocate the general-purpose register space */
  gp_regs = assign_stack_local
    (BLKmode, MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD, -1);
  set_mem_alias_set (gp_regs, get_varargs_alias_set ());

  /* Now store the incoming registers.  */
  dest = change_address (gp_regs, SImode,
			 plus_constant (XEXP (gp_regs, 0),
					arg_words * UNITS_PER_WORD));

  /* Note: Don't use move_block_from_reg() here because the incoming
     argument in a7 cannot be represented by hard_frame_pointer_rtx.
     Instead, call gen_raw_REG() directly so that we get a distinct
     instance of (REG:SI 7). */
  for (i = 0; i < gp_left; i++)
    {
      emit_move_insn (operand_subword (dest, i, 1, BLKmode),
		      gen_raw_REG (SImode, GP_ARG_FIRST + arg_words + i));
    }

  return XEXP (gp_regs, 0);
}
2424
2425
2426 /* Implement `va_start' for varargs and stdarg. We look at the
2427 current function to fill in an initial va_list. */
2428
2429 void
2430 xtensa_va_start (valist, nextarg)
2431 tree valist;
2432 rtx nextarg ATTRIBUTE_UNUSED;
2433 {
2434 tree f_stk, stk;
2435 tree f_reg, reg;
2436 tree f_ndx, ndx;
2437 tree t, u;
2438 int arg_words;
2439
2440 arg_words = current_function_args_info.arg_words;
2441
2442 f_stk = TYPE_FIELDS (va_list_type_node);
2443 f_reg = TREE_CHAIN (f_stk);
2444 f_ndx = TREE_CHAIN (f_reg);
2445
2446 stk = build (COMPONENT_REF, TREE_TYPE (f_stk), valist, f_stk);
2447 reg = build (COMPONENT_REF, TREE_TYPE (f_reg), valist, f_reg);
2448 ndx = build (COMPONENT_REF, TREE_TYPE (f_ndx), valist, f_ndx);
2449
2450 /* Call __builtin_saveregs; save the result in __va_reg */
2451 current_function_arg_words = arg_words;
2452 u = make_tree (ptr_type_node, expand_builtin_saveregs ());
2453 t = build (MODIFY_EXPR, ptr_type_node, reg, u);
2454 TREE_SIDE_EFFECTS (t) = 1;
2455 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2456
2457 /* Set the __va_stk member to $arg_ptr - (size of __va_reg area) */
2458 u = make_tree (ptr_type_node, virtual_incoming_args_rtx);
2459 u = fold (build (PLUS_EXPR, ptr_type_node, u,
2460 build_int_2 (-MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD, -1)));
2461 t = build (MODIFY_EXPR, ptr_type_node, stk, u);
2462 TREE_SIDE_EFFECTS (t) = 1;
2463 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2464
2465 /* Set the __va_ndx member. */
2466 u = build_int_2 (arg_words * UNITS_PER_WORD, 0);
2467 t = build (MODIFY_EXPR, integer_type_node, ndx, u);
2468 TREE_SIDE_EFFECTS (t) = 1;
2469 expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
2470 }
2471
2472
/* Implement `va_arg'.  Expand RTL to fetch the next argument of TYPE
   from the va_list VALIST and return its address in a new register.
   The argument may live either in the saved-register area (__va_reg)
   or in the memory overflow area (__va_stk); the byte index __va_ndx
   selects between them and is advanced past the argument.  */

rtx
xtensa_va_arg (valist, type)
     tree valist, type;
{
  tree f_stk, stk;
  tree f_reg, reg;
  tree f_ndx, ndx;
  tree tmp, addr_tree, type_size;
  rtx array, orig_ndx, r, addr, size, va_size;
  rtx lab_false, lab_over, lab_false2;

  /* The three fields of the va_list record, in declaration order
     (see xtensa_build_va_list).  */
  f_stk = TYPE_FIELDS (va_list_type_node);
  f_reg = TREE_CHAIN (f_stk);
  f_ndx = TREE_CHAIN (f_reg);

  stk = build (COMPONENT_REF, TREE_TYPE (f_stk), valist, f_stk);
  reg = build (COMPONENT_REF, TREE_TYPE (f_reg), valist, f_reg);
  ndx = build (COMPONENT_REF, TREE_TYPE (f_ndx), valist, f_ndx);

  type_size = TYPE_SIZE_UNIT (TYPE_MAIN_VARIANT (type));

  /* va_size = sizeof (TYPE) rounded up to a whole number of words.  */
  va_size = gen_reg_rtx (SImode);
  tmp = fold (build (MULT_EXPR, sizetype,
		     fold (build (TRUNC_DIV_EXPR, sizetype,
				  fold (build (PLUS_EXPR, sizetype,
					       type_size,
					       size_int (UNITS_PER_WORD - 1))),
				  size_int (UNITS_PER_WORD))),
		     size_int (UNITS_PER_WORD)));
  r = expand_expr (tmp, va_size, SImode, EXPAND_NORMAL);
  if (r != va_size)
    emit_move_insn (va_size, r);


  /* First align __va_ndx to a double word boundary if necessary for this arg:

     if (__alignof__ (TYPE) > 4)
       (AP).__va_ndx = (((AP).__va_ndx + 7) & -8)
  */

  if (TYPE_ALIGN (type) > BITS_PER_WORD)
    {
      tmp = build (PLUS_EXPR, integer_type_node, ndx,
		   build_int_2 ((2 * UNITS_PER_WORD) - 1, 0));
      tmp = build (BIT_AND_EXPR, integer_type_node, tmp,
		   build_int_2 (-2 * UNITS_PER_WORD, -1));
      tmp = build (MODIFY_EXPR, integer_type_node, ndx, tmp);
      TREE_SIDE_EFFECTS (tmp) = 1;
      expand_expr (tmp, const0_rtx, VOIDmode, EXPAND_NORMAL);
    }


  /* Increment __va_ndx to point past the argument:

     orig_ndx = (AP).__va_ndx;
     (AP).__va_ndx += __va_size (TYPE);
  */

  /* Remember the pre-increment index; it is tested again below to
     detect an argument that would straddle the register/stack split.  */
  orig_ndx = gen_reg_rtx (SImode);
  r = expand_expr (ndx, orig_ndx, SImode, EXPAND_NORMAL);
  if (r != orig_ndx)
    emit_move_insn (orig_ndx, r);

  tmp = build (PLUS_EXPR, integer_type_node, ndx,
	       make_tree (intSI_type_node, va_size));
  tmp = build (MODIFY_EXPR, integer_type_node, ndx, tmp);
  TREE_SIDE_EFFECTS (tmp) = 1;
  expand_expr (tmp, const0_rtx, VOIDmode, EXPAND_NORMAL);


  /* Check if the argument is in registers:

     if ((AP).__va_ndx <= __MAX_ARGS_IN_REGISTERS * 4
	 && !MUST_PASS_IN_STACK (type))
	__array = (AP).__va_reg;
  */

  array = gen_reg_rtx (Pmode);

  /* lab_over stays NULL_RTX when the type can never be in registers,
     so no register-area branch is emitted at all.  */
  lab_over = NULL_RTX;
  if (!MUST_PASS_IN_STACK (VOIDmode, type))
    {
      lab_false = gen_label_rtx ();
      lab_over = gen_label_rtx ();

      emit_cmp_and_jump_insns (expand_expr (ndx, NULL_RTX, SImode,
					    EXPAND_NORMAL),
			       GEN_INT (MAX_ARGS_IN_REGISTERS
					* UNITS_PER_WORD),
			       GT, const1_rtx, SImode, 0, lab_false);

      r = expand_expr (reg, array, Pmode, EXPAND_NORMAL);
      if (r != array)
	emit_move_insn (array, r);

      emit_jump_insn (gen_jump (lab_over));
      emit_barrier ();
      emit_label (lab_false);
    }

  /* ...otherwise, the argument is on the stack (never split between
     registers and the stack -- change __va_ndx if necessary):

     else
       {
	 if (orig_ndx < __MAX_ARGS_IN_REGISTERS * 4)
	     (AP).__va_ndx = __MAX_ARGS_IN_REGISTERS * 4 + __va_size (TYPE);
	 __array = (AP).__va_stk;
       }
  */

  lab_false2 = gen_label_rtx ();
  emit_cmp_and_jump_insns (orig_ndx,
			   GEN_INT (MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD),
			   GE, const1_rtx, SImode, 0, lab_false2);

  tmp = build (PLUS_EXPR, sizetype, make_tree (intSI_type_node, va_size),
	       build_int_2 (MAX_ARGS_IN_REGISTERS * UNITS_PER_WORD, 0));
  tmp = build (MODIFY_EXPR, integer_type_node, ndx, tmp);
  TREE_SIDE_EFFECTS (tmp) = 1;
  expand_expr (tmp, const0_rtx, VOIDmode, EXPAND_NORMAL);

  emit_label (lab_false2);

  r = expand_expr (stk, array, Pmode, EXPAND_NORMAL);
  if (r != array)
    emit_move_insn (array, r);

  if (lab_over != NULL_RTX)
    emit_label (lab_over);


  /* Given the base array pointer (__array) and index to the subsequent
     argument (__va_ndx), find the address:

     __array + (AP).__va_ndx - (BYTES_BIG_ENDIAN && sizeof (TYPE) < 4
				? sizeof (TYPE)
				: __va_size (TYPE))

     The results are endian-dependent because values smaller than one word
     are aligned differently.
  */

  /* SIZE defaults to the word-rounded size; on big-endian targets a
     sub-word value uses its exact size instead (see comment above).  */
  size = gen_reg_rtx (SImode);
  emit_move_insn (size, va_size);

  if (BYTES_BIG_ENDIAN)
    {
      rtx lab_use_va_size = gen_label_rtx ();

      emit_cmp_and_jump_insns (expand_expr (type_size, NULL_RTX, SImode,
					    EXPAND_NORMAL),
			       GEN_INT (PARM_BOUNDARY / BITS_PER_UNIT),
			       GE, const1_rtx, SImode, 0, lab_use_va_size);

      r = expand_expr (type_size, size, SImode, EXPAND_NORMAL);
      if (r != size)
	emit_move_insn (size, r);

      emit_label (lab_use_va_size);
    }

  /* addr = __array + __va_ndx - size, copied into a fresh register.  */
  addr_tree = build (PLUS_EXPR, ptr_type_node,
		     make_tree (ptr_type_node, array),
		     ndx);
  addr_tree = build (MINUS_EXPR, ptr_type_node, addr_tree,
		     make_tree (intSI_type_node, size));
  addr = expand_expr (addr_tree, NULL_RTX, Pmode, EXPAND_NORMAL);
  addr = copy_to_reg (addr);
  return addr;
}
2646
2647
2648 enum reg_class
2649 xtensa_preferred_reload_class (x, class, isoutput)
2650 rtx x;
2651 enum reg_class class;
2652 int isoutput;
2653 {
2654 if (!isoutput && CONSTANT_P (x) && GET_CODE (x) == CONST_DOUBLE)
2655 return NO_REGS;
2656
2657 /* Don't use the stack pointer or hard frame pointer for reloads!
2658 The hard frame pointer would normally be OK except that it may
2659 briefly hold an incoming argument in the prologue, and reload
2660 won't know that it is live because the hard frame pointer is
2661 treated specially. */
2662
2663 if (class == AR_REGS || class == GR_REGS)
2664 return RL_REGS;
2665
2666 return class;
2667 }
2668
2669
2670 enum reg_class
2671 xtensa_secondary_reload_class (class, mode, x, isoutput)
2672 enum reg_class class;
2673 enum machine_mode mode ATTRIBUTE_UNUSED;
2674 rtx x;
2675 int isoutput;
2676 {
2677 int regno;
2678
2679 if (GET_CODE (x) == SIGN_EXTEND)
2680 x = XEXP (x, 0);
2681 regno = xt_true_regnum (x);
2682
2683 if (!isoutput)
2684 {
2685 if (class == FP_REGS && constantpool_mem_p (x))
2686 return RL_REGS;
2687 }
2688
2689 if (ACC_REG_P (regno))
2690 return ((class == GR_REGS || class == RL_REGS) ? NO_REGS : RL_REGS);
2691 if (class == ACC_REG)
2692 return (GP_REG_P (regno) ? NO_REGS : RL_REGS);
2693
2694 return NO_REGS;
2695 }
2696
2697
2698 void
2699 order_regs_for_local_alloc ()
2700 {
2701 if (!leaf_function_p ())
2702 {
2703 memcpy (reg_alloc_order, reg_nonleaf_alloc_order,
2704 FIRST_PSEUDO_REGISTER * sizeof (int));
2705 }
2706 else
2707 {
2708 int i, num_arg_regs;
2709 int nxt = 0;
2710
2711 /* use the AR registers in increasing order (skipping a0 and a1)
2712 but save the incoming argument registers for a last resort */
2713 num_arg_regs = current_function_args_info.arg_words;
2714 if (num_arg_regs > MAX_ARGS_IN_REGISTERS)
2715 num_arg_regs = MAX_ARGS_IN_REGISTERS;
2716 for (i = GP_ARG_FIRST; i < 16 - num_arg_regs; i++)
2717 reg_alloc_order[nxt++] = i + num_arg_regs;
2718 for (i = 0; i < num_arg_regs; i++)
2719 reg_alloc_order[nxt++] = GP_ARG_FIRST + i;
2720
2721 /* list the FP registers in order for now */
2722 for (i = 0; i < 16; i++)
2723 reg_alloc_order[nxt++] = FP_REG_FIRST + i;
2724
2725 /* GCC requires that we list *all* the registers.... */
2726 reg_alloc_order[nxt++] = 0; /* a0 = return address */
2727 reg_alloc_order[nxt++] = 1; /* a1 = stack pointer */
2728 reg_alloc_order[nxt++] = 16; /* pseudo frame pointer */
2729 reg_alloc_order[nxt++] = 17; /* pseudo arg pointer */
2730
2731 /* list the coprocessor registers in order */
2732 for (i = 0; i < BR_REG_NUM; i++)
2733 reg_alloc_order[nxt++] = BR_REG_FIRST + i;
2734
2735 reg_alloc_order[nxt++] = ACC_REG_FIRST; /* MAC16 accumulator */
2736 }
2737 }
2738
2739
2740 /* A customized version of reg_overlap_mentioned_p that only looks for
2741 references to a7 (as opposed to hard_frame_pointer_rtx). */
2742
2743 int
2744 a7_overlap_mentioned_p (x)
2745 rtx x;
2746 {
2747 int i, j;
2748 unsigned int x_regno;
2749 const char *fmt;
2750
2751 if (GET_CODE (x) == REG)
2752 {
2753 x_regno = REGNO (x);
2754 return (x != hard_frame_pointer_rtx
2755 && x_regno < A7_REG + 1
2756 && x_regno + HARD_REGNO_NREGS (A7_REG, GET_MODE (x)) > A7_REG);
2757 }
2758
2759 if (GET_CODE (x) == SUBREG
2760 && GET_CODE (SUBREG_REG (x)) == REG
2761 && REGNO (SUBREG_REG (x)) < FIRST_PSEUDO_REGISTER)
2762 {
2763 x_regno = subreg_regno (x);
2764 return (SUBREG_REG (x) != hard_frame_pointer_rtx
2765 && x_regno < A7_REG + 1
2766 && x_regno + HARD_REGNO_NREGS (A7_REG, GET_MODE (x)) > A7_REG);
2767 }
2768
2769 /* X does not match, so try its subexpressions. */
2770 fmt = GET_RTX_FORMAT (GET_CODE (x));
2771 for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
2772 {
2773 if (fmt[i] == 'e')
2774 {
2775 if (a7_overlap_mentioned_p (XEXP (x, i)))
2776 return 1;
2777 }
2778 else if (fmt[i] == 'E')
2779 {
2780 for (j = XVECLEN (x, i) - 1; j >=0; j--)
2781 if (a7_overlap_mentioned_p (XVECEXP (x, i, j)))
2782 return 1;
2783 }
2784 }
2785
2786 return 0;
2787 }
2788
2789
2790 /* Some Xtensa targets support multiple bss sections. If the section
2791 name ends with ".bss", add SECTION_BSS to the flags. */
2792
2793 static unsigned int
2794 xtensa_multibss_section_type_flags (decl, name, reloc)
2795 tree decl;
2796 const char *name;
2797 int reloc;
2798 {
2799 unsigned int flags = default_section_type_flags (decl, name, reloc);
2800 const char *suffix;
2801
2802 suffix = strrchr (name, '.');
2803 if (suffix && strcmp (suffix, ".bss") == 0)
2804 {
2805 if (!decl || (TREE_CODE (decl) == VAR_DECL
2806 && DECL_INITIAL (decl) == NULL_TREE))
2807 flags |= SECTION_BSS; /* @nobits */
2808 else
2809 warning ("only uninitialized variables can be placed in a "
2810 ".bss section");
2811 }
2812
2813 return flags;
2814 }
2815
2816
/* The literal pool stays with the function.  Constant-pool entries are
   emitted into the current function's section rather than a shared
   constant section.  MODE, X, and ALIGN are ignored.  */

static void
xtensa_select_rtx_section (mode, x, align)
     enum machine_mode mode ATTRIBUTE_UNUSED;
     rtx x ATTRIBUTE_UNUSED;
     unsigned HOST_WIDE_INT align ATTRIBUTE_UNUSED;
{
  /* Switch output to the section of the function being compiled.  */
  function_section (current_function_decl);
}
2827
2828 /* If we are referencing a function that is static, make the SYMBOL_REF
2829 special so that we can generate direct calls to it even with -fpic. */
2830
2831 static void
2832 xtensa_encode_section_info (decl, first)
2833 tree decl;
2834 int first ATTRIBUTE_UNUSED;
2835 {
2836 if (TREE_CODE (decl) == FUNCTION_DECL && ! TREE_PUBLIC (decl))
2837 SYMBOL_REF_FLAG (XEXP (DECL_RTL (decl), 0)) = 1;
2838 }
2839
2840 #include "gt-xtensa.h"