/* Output routines for Motorola MCore processor
   Copyright (C) 1993, 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2007
   Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 2, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING.  If not, write to
   the Free Software Foundation, 51 Franklin Street, Fifth Floor,
   Boston, MA 02110-1301, USA.  */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "tree.h"
#include "tm_p.h"
#include "mcore.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "obstack.h"
#include "expr.h"
#include "reload.h"
#include "recog.h"
#include "function.h"
#include "ggc.h"
#include "toplev.h"
#include "target.h"
#include "target-def.h"
#include "df.h"
/* Maximum size we are allowed to grow the stack in a single operation.
   If we want more, we must do it in increments of at most this size.
   If this value is 0, we don't check at all.  */
int mcore_stack_increment = STACK_UNITS_MAXSTEP;
/* For dumping information about frame sizes.  */
char * mcore_current_function_name = 0;
long   mcore_current_compilation_timestamp = 0;
/* Global variables for machine-dependent things.  */

/* Saved operands from the last compare to use when we generate an scc
   or bcc insn.  */
rtx arch_compare_op0;
rtx arch_compare_op1;
/* Provides the class number of the smallest class containing
   reg number.  */
const int regno_reg_class[FIRST_PSEUDO_REGISTER] =
{
  GENERAL_REGS, ONLYR1_REGS,  LRW_REGS, LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS, LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS, LRW_REGS,
  LRW_REGS,     LRW_REGS,     LRW_REGS, GENERAL_REGS,
  GENERAL_REGS, C_REGS,       NO_REGS,  NO_REGS,
};
/* Provide reg_class from a letter such as appears in the machine
   description.  */
const enum reg_class reg_class_from_letter[] =
{
  /* a */ LRW_REGS, /* b */ ONLYR1_REGS,  /* c */ C_REGS,       /* d */ NO_REGS,
  /* e */ NO_REGS,  /* f */ NO_REGS,      /* g */ NO_REGS,      /* h */ NO_REGS,
  /* i */ NO_REGS,  /* j */ NO_REGS,      /* k */ NO_REGS,      /* l */ NO_REGS,
  /* m */ NO_REGS,  /* n */ NO_REGS,      /* o */ NO_REGS,      /* p */ NO_REGS,
  /* q */ NO_REGS,  /* r */ GENERAL_REGS, /* s */ NO_REGS,      /* t */ NO_REGS,
  /* u */ NO_REGS,  /* v */ NO_REGS,      /* w */ NO_REGS,      /* x */ ALL_REGS,
  /* y */ NO_REGS,  /* z */ NO_REGS
};
struct mcore_frame
{
  int arg_size;			/* Stdarg spills (bytes).  */
  int reg_size;			/* Non-volatile reg saves (bytes).  */
  int reg_mask;			/* Non-volatile reg saves.  */
  int local_size;		/* Locals.  */
  int outbound_size;		/* Arg overflow on calls out.  */
  int pad_outbound;
  int pad_local;
  int pad_reg;
  /* Describe the steps we'll use to grow it.  */
#define MAX_STACK_GROWS 4	/* Gives us some spare space.  */
  int growth[MAX_STACK_GROWS];
  int arg_offset;
  int reg_offset;
  int reg_growth;
  int local_growth;
};
static void       output_stack_adjust          (int, int);
static int        calc_live_regs               (int *);
static int        try_constant_tricks          (HOST_WIDE_INT, HOST_WIDE_INT *, HOST_WIDE_INT *);
static const char * output_inline_const        (enum machine_mode, rtx *);
static void       layout_mcore_frame           (struct mcore_frame *);
static void       mcore_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode, tree, int *, int);
static cond_type  is_cond_candidate            (rtx);
static rtx        emit_new_cond_insn           (rtx, int);
static rtx        conditionalize_block         (rtx);
static void       conditionalize_optimization  (void);
static void       mcore_reorg                  (void);
static rtx        handle_structs_in_regs       (enum machine_mode, tree, int);
static void       mcore_mark_dllexport         (tree);
static void       mcore_mark_dllimport         (tree);
static int        mcore_dllexport_p            (tree);
static int        mcore_dllimport_p            (tree);
const struct attribute_spec mcore_attribute_table[];
static tree       mcore_handle_naked_attribute (tree *, tree, tree, int, bool *);
#ifdef OBJECT_FORMAT_ELF
static void       mcore_asm_named_section      (const char *, unsigned int, tree);
#endif
static void       mcore_unique_section         (tree, int);
static void       mcore_encode_section_info    (tree, rtx, int);
static const char *mcore_strip_name_encoding   (const char *);
static int        mcore_const_costs            (rtx, RTX_CODE);
static int        mcore_and_cost               (rtx);
static int        mcore_ior_cost               (rtx);
static bool       mcore_rtx_costs              (rtx, int, int, int *);
static void       mcore_external_libcall       (rtx);
static bool       mcore_return_in_memory       (tree, tree);
static int        mcore_arg_partial_bytes      (CUMULATIVE_ARGS *,
						enum machine_mode,
						tree, bool);
/* Initialize the GCC target structure.  */
#undef  TARGET_ASM_EXTERNAL_LIBCALL
#define TARGET_ASM_EXTERNAL_LIBCALL	mcore_external_libcall

#if TARGET_DLLIMPORT_DECL_ATTRIBUTES
#undef  TARGET_MERGE_DECL_ATTRIBUTES
#define TARGET_MERGE_DECL_ATTRIBUTES	merge_dllimport_decl_attributes
#endif

#ifdef OBJECT_FORMAT_ELF
#undef  TARGET_ASM_UNALIGNED_HI_OP
#define TARGET_ASM_UNALIGNED_HI_OP "\t.short\t"
#undef  TARGET_ASM_UNALIGNED_SI_OP
#define TARGET_ASM_UNALIGNED_SI_OP "\t.long\t"
#endif

#undef  TARGET_ATTRIBUTE_TABLE
#define TARGET_ATTRIBUTE_TABLE		mcore_attribute_table
#undef  TARGET_ASM_UNIQUE_SECTION
#define TARGET_ASM_UNIQUE_SECTION	mcore_unique_section
#undef  TARGET_ASM_FUNCTION_RODATA_SECTION
#define TARGET_ASM_FUNCTION_RODATA_SECTION default_no_function_rodata_section
#undef  TARGET_DEFAULT_TARGET_FLAGS
#define TARGET_DEFAULT_TARGET_FLAGS	TARGET_DEFAULT
#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO	mcore_encode_section_info
#undef  TARGET_STRIP_NAME_ENCODING
#define TARGET_STRIP_NAME_ENCODING	mcore_strip_name_encoding
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS		mcore_rtx_costs
#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST		hook_int_rtx_0
#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG	mcore_reorg

#undef  TARGET_PROMOTE_FUNCTION_ARGS
#define TARGET_PROMOTE_FUNCTION_ARGS	hook_bool_tree_true
#undef  TARGET_PROMOTE_FUNCTION_RETURN
#define TARGET_PROMOTE_FUNCTION_RETURN	hook_bool_tree_true
#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES	hook_bool_tree_true

#undef  TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY		mcore_return_in_memory
#undef  TARGET_MUST_PASS_IN_STACK
#define TARGET_MUST_PASS_IN_STACK	must_pass_in_stack_var_size
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE	hook_pass_by_reference_must_pass_in_stack
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES	mcore_arg_partial_bytes

#undef  TARGET_SETUP_INCOMING_VARARGS
#define TARGET_SETUP_INCOMING_VARARGS	mcore_setup_incoming_varargs

struct gcc_target targetm = TARGET_INITIALIZER;
/* Adjust the stack and return the number of bytes taken to do it.  */
static void
output_stack_adjust (int direction, int size)
{
  /* If extending stack a lot, we do it incrementally.  */
  if (direction < 0 && size > mcore_stack_increment && mcore_stack_increment > 0)
    {
      rtx tmp = gen_rtx_REG (SImode, 1);
      rtx memref;

      emit_insn (gen_movsi (tmp, GEN_INT (mcore_stack_increment)));
      do
	{
	  emit_insn (gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, tmp));
	  memref = gen_rtx_MEM (SImode, stack_pointer_rtx);
	  MEM_VOLATILE_P (memref) = 1;
	  emit_insn (gen_movsi (memref, stack_pointer_rtx));
	  size -= mcore_stack_increment;
	}
      while (size > mcore_stack_increment);

      /* SIZE is now the residual for the last adjustment,
	 which doesn't require a probe.  */
    }

  if (size)
    {
      rtx insn;
      rtx val = GEN_INT (size);

      if (size > 32)
	{
	  rtx nval = gen_rtx_REG (SImode, 1);
	  emit_insn (gen_movsi (nval, val));
	  val = nval;
	}

      if (direction > 0)
	insn = gen_addsi3 (stack_pointer_rtx, stack_pointer_rtx, val);
      else
	insn = gen_subsi3 (stack_pointer_rtx, stack_pointer_rtx, val);

      emit_insn (insn);
    }
}
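/* For illustration (example values, not from the original sources):
   growing the stack by 5000 bytes when mcore_stack_increment is 4096
   emits one probed step -- load 4096 into r1, subtract it from sp,
   then a volatile store of sp through sp to touch the new page --
   followed by a single unprobed 904-byte adjustment.  */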
/* Work out the registers which need to be saved,
   both as a mask and a count.  */
static int
calc_live_regs (int * count)
{
  int reg;
  int live_regs_mask = 0;

  * count = 0;

  for (reg = 0; reg < FIRST_PSEUDO_REGISTER; reg++)
    {
      if (df_regs_ever_live_p (reg) && !call_used_regs[reg])
	{
	  (*count)++;
	  live_regs_mask |= (1 << reg);
	}
    }

  return live_regs_mask;
}
/* Print the operand address in x to the stream.  */
void
mcore_print_operand_address (FILE * stream, rtx x)
{
  switch (GET_CODE (x))
    {
    case REG:
      fprintf (stream, "(%s)", reg_names[REGNO (x)]);
      break;

    case PLUS:
      {
	rtx base = XEXP (x, 0);
	rtx index = XEXP (x, 1);

	if (GET_CODE (base) != REG)
	  {
	    /* Ensure that BASE is a register (one of them must be).  */
	    rtx temp = base;
	    base = index;
	    index = temp;
	  }

	switch (GET_CODE (index))
	  {
	  case CONST_INT:
	    fprintf (stream, "(%s," HOST_WIDE_INT_PRINT_DEC ")",
		     reg_names[REGNO(base)], INTVAL (index));
	    break;

	  default:
	    gcc_unreachable ();
	  }
      }
      break;

    default:
      output_addr_const (stream, x);
      break;
    }
}
/* Print operand x (an rtx) in assembler syntax to file stream
   according to modifier code.

   'R'  print the next register or memory location along, i.e. the lsw in
        a double word value
   'O'  print a constant without the #
   'M'  print a constant as its negative
   'P'  print log2 of a power of two
   'Q'  print log2 of an inverse of a power of two
   'U'  print register for ldm/stm instruction
   'X'  print byte number for xtrbN instruction.  */
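/* For illustration (examples constructed here): given the operand
   (const_int 8), '%P' prints 3 and '%M' prints -8; given
   (const_int 7), '%N' prints 3, since 7 == (1 << 3) - 1.  */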
void
mcore_print_operand (FILE * stream, rtx x, int code)
{
  switch (code)
    {
    case 'N':
      if (INTVAL (x) == -1)
	fprintf (asm_out_file, "32");
      else
	fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) + 1));
      break;
    case 'P':
      fprintf (asm_out_file, "%d", exact_log2 (INTVAL (x) & 0xffffffff));
      break;
    case 'Q':
      fprintf (asm_out_file, "%d", exact_log2 (~INTVAL (x)));
      break;
    case 'O':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, INTVAL (x));
      break;
    case 'M':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, - INTVAL (x));
      break;
    case 'R':
      /* Next location along in memory or register.  */
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x) + 1], (stream));
	  break;
	case MEM:
	  mcore_print_operand_address
	    (stream, XEXP (adjust_address (x, SImode, 4), 0));
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    case 'U':
      fprintf (asm_out_file, "%s-%s", reg_names[REGNO (x)],
	       reg_names[REGNO (x) + 3]);
      break;
    case 'x':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_HEX, INTVAL (x));
      break;
    case 'X':
      fprintf (asm_out_file, HOST_WIDE_INT_PRINT_DEC, 3 - INTVAL (x) / 8);
      break;

    default:
      switch (GET_CODE (x))
	{
	case REG:
	  fputs (reg_names[REGNO (x)], (stream));
	  break;
	case MEM:
	  output_address (XEXP (x, 0));
	  break;
	default:
	  output_addr_const (stream, x);
	  break;
	}
      break;
    }
}
/* What does a constant cost ?  */
static int
mcore_const_costs (rtx exp, enum rtx_code code)
{
  HOST_WIDE_INT val = INTVAL (exp);

  /* Easy constants.  */
  if (   CONST_OK_FOR_I (val)
      || CONST_OK_FOR_M (val)
      || CONST_OK_FOR_N (val)
      || (code == PLUS && CONST_OK_FOR_L (val)))
    return 1;
  else if (code == AND
	   && (   CONST_OK_FOR_M (~val)
	       || CONST_OK_FOR_N (~val)))
    return 2;
  else if (code == PLUS
	   && (   CONST_OK_FOR_I (-val)
	       || CONST_OK_FOR_M (-val)
	       || CONST_OK_FOR_N (-val)))
    return 2;

  return 5;
}
/* What does an and instruction cost - we do this because immediates may
   have been relaxed.  We want to ensure that cse will cse relaxed immeds
   out.  Otherwise we'll get bad code (multiple reloads of the same const).  */
static int
mcore_and_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly.  */
  if (CONST_OK_FOR_K (val) || CONST_OK_FOR_M (~val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}
/* What does an or cost - see and_cost().  */
static int
mcore_ior_cost (rtx x)
{
  HOST_WIDE_INT val;

  if (GET_CODE (XEXP (x, 1)) != CONST_INT)
    return 2;

  val = INTVAL (XEXP (x, 1));

  /* Do it directly with bseti.  */
  if (CONST_OK_FOR_M (val))
    return 2;
  /* Takes one instruction to load.  */
  else if (const_ok_for_mcore (val))
    return 3;
  /* Takes two instructions to load.  */
  else if (TARGET_HARDLIT && mcore_const_ok_for_inline (val))
    return 4;

  /* Takes a lrw to load.  */
  return 5;
}
static bool
mcore_rtx_costs (rtx x, int code, int outer_code, int * total)
{
  switch (code)
    {
    case CONST_INT:
      *total = mcore_const_costs (x, outer_code);
      return true;
    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
      *total = 5;
      return true;
    case CONST_DOUBLE:
      *total = 10;
      return true;

    case AND:
      *total = COSTS_N_INSNS (mcore_and_cost (x));
      return true;

    case IOR:
      *total = COSTS_N_INSNS (mcore_ior_cost (x));
      return true;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
    case FLOAT:
    case FIX:
      *total = COSTS_N_INSNS (100);
      return true;

    default:
      return false;
    }
}
/* Check to see if a comparison against a constant can be made more efficient
   by incrementing/decrementing the constant to get one that is more efficient
   to load.  */
int
mcore_modify_comparison (enum rtx_code code)
{
  rtx op1 = arch_compare_op1;

  if (GET_CODE (op1) == CONST_INT)
    {
      HOST_WIDE_INT val = INTVAL (op1);

      switch (code)
	{
	case LE:
	  if (CONST_OK_FOR_J (val + 1))
	    {
	      arch_compare_op1 = GEN_INT (val + 1);
	      return 1;
	    }
	  break;

	default:
	  break;
	}
    }

  return 0;
}
/* Prepare the operands for a comparison.  */
rtx
mcore_gen_compare_reg (enum rtx_code code)
{
  rtx op0 = arch_compare_op0;
  rtx op1 = arch_compare_op1;
  rtx cc_reg = gen_rtx_REG (CCmode, CC_REG);

  if (CONSTANT_P (op1) && GET_CODE (op1) != CONST_INT)
    op1 = force_reg (SImode, op1);

  /* cmpnei: 0-31 (K immediate)
     cmplti: 1-32 (J immediate, 0 using btsti x,31).  */
  switch (code)
    {
    case EQ:	/* Use inverted condition, cmpne.  */
      code = NE;
      /* Drop through.  */

    case NE:	/* Use normal condition, cmpne.  */
      if (GET_CODE (op1) == CONST_INT && ! CONST_OK_FOR_K (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case LE:	/* Use inverted condition, reversed cmplt.  */
      code = GT;
      /* Drop through.  */

    case GT:	/* Use normal condition, reversed cmplt.  */
      if (GET_CODE (op1) == CONST_INT)
	op1 = force_reg (SImode, op1);
      break;

    case GE:	/* Use inverted condition, cmplt.  */
      code = LT;
      /* Drop through.  */

    case LT:	/* Use normal condition, cmplt.  */
      if (GET_CODE (op1) == CONST_INT &&
	  /* covered by btsti x,31.  */
	  INTVAL (op1) != 0 &&
	  ! CONST_OK_FOR_J (INTVAL (op1)))
	op1 = force_reg (SImode, op1);
      break;

    case GTU:	/* Use inverted condition, cmple.  */
      /* Unsigned > 0 is the same as != 0, but we need to invert the
	 condition, so we want to set code = EQ.  This cannot be done
	 however, as the mcore does not support such a test.  Instead
	 we cope with this case in the "bgtu" pattern itself so we
	 should never reach this point.  */
      gcc_assert (GET_CODE (op1) != CONST_INT || INTVAL (op1) != 0);
      code = LEU;
      /* Drop through.  */

    case LEU:	/* Use normal condition, reversed cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    case LTU:	/* Use inverted condition, cmphs.  */
      code = GEU;
      /* Drop through.  */

    case GEU:	/* Use normal condition, cmphs.  */
      if (GET_CODE (op1) == CONST_INT && INTVAL (op1) != 0)
	op1 = force_reg (SImode, op1);
      break;

    default:
      break;
    }

  emit_insn (gen_rtx_SET (VOIDmode, cc_reg, gen_rtx_fmt_ee (code, CCmode, op0, op1)));

  return cc_reg;
}
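/* For illustration (example constructed here): an EQ test of r2
   against 5 is rewritten above to NE, so the comparison is emitted
   as "cmpnei r2,5" and the consuming branch or scc pattern tests the
   complemented sense of the condition bit.  */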
int
mcore_symbolic_address_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      x = XEXP (x, 0);
      return (   (GET_CODE (XEXP (x, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (x, 0)) == LABEL_REF)
	      && GET_CODE (XEXP (x, 1)) == CONST_INT);
    default:
      return 0;
    }
}
/* Functions to output assembly code for a function call.  */
const char *
mcore_output_call (rtx operands[], int index)
{
  static char buffer[20];
  rtx addr = operands [index];

  if (REG_P (addr))
    {
      if (TARGET_CG_DATA)
	{
	  gcc_assert (mcore_current_function_name);

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      "unknown", 1);
	}

      sprintf (buffer, "jsr\t%%%d", index);
    }
  else
    {
      if (TARGET_CG_DATA)
	{
	  gcc_assert (mcore_current_function_name);
	  gcc_assert (GET_CODE (addr) == SYMBOL_REF);

	  ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name,
			      XSTR (addr, 0), 0);
	}

      sprintf (buffer, "jbsr\t%%%d", index);
    }

  return buffer;
}
/* Can we load a constant with a single instruction ?  */
int
const_ok_for_mcore (HOST_WIDE_INT value)
{
  if (value >= 0 && value <= 127)
    return 1;

  /* Try exact power of two.  */
  if (CONST_OK_FOR_M (value))
    return 1;

  /* Try exact power of two - 1.  */
  if (CONST_OK_FOR_N (value) && value != -1)
    return 1;

  return 0;
}
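/* For illustration (examples constructed here): 100 loads with "movi"
   (0..127), 0x00800000 with "bgeni" (an exact power of two), and
   0x0000ffff with "bmaski" (an exact power of two minus one).  */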
/* Can we load a constant inline with up to 2 instructions ?  */
int
mcore_const_ok_for_inline (HOST_WIDE_INT value)
{
  HOST_WIDE_INT x, y;

  return try_constant_tricks (value, & x, & y) > 0;
}
/* Are we loading the constant using a not ?  */
int
mcore_const_trick_uses_not (HOST_WIDE_INT value)
{
  HOST_WIDE_INT x, y;

  return try_constant_tricks (value, & x, & y) == 2;
}
/* Try tricks to load a constant inline and return the trick number if
   success (0 is non-inlinable).

   0: failure
   1: single instruction (do the usual thing)
   2: single insn followed by a 'not'
   3: single insn followed by an addi
   4: single insn followed by a subi
   5: single insn followed by rsubi
   6: single insn followed by bseti
   7: single insn followed by bclri
   8: single insn followed by rotli
   9: single insn followed by lsli
   10: single insn followed by ixh
   11: single insn followed by ixw.  */
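/* For illustration (examples constructed here, not from the original
   sources): -128 is ~127, so trick 2 yields "movi rN,127; not rN";
   1000 is 1023 - 23, so trick 4 yields "bmaski rN,10; subi rN,23";
   and 896 is 112 rotated left by 3, so trick 8 yields
   "movi rN,112; rotli rN,3".  */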
static int
try_constant_tricks (HOST_WIDE_INT value, HOST_WIDE_INT * x, HOST_WIDE_INT * y)
{
  HOST_WIDE_INT i;
  unsigned HOST_WIDE_INT bit, shf, rot;

  if (const_ok_for_mcore (value))
    return 1;	/* Do the usual thing.  */

  if (! TARGET_HARDLIT)
    return 0;

  if (const_ok_for_mcore (~value))
    {
      *x = ~value;
      return 2;
    }

  for (i = 1; i <= 32; i++)
    {
      if (const_ok_for_mcore (value - i))
	{
	  *x = value - i;
	  *y = i;

	  return 3;
	}

      if (const_ok_for_mcore (value + i))
	{
	  *x = value + i;
	  *y = i;

	  return 4;
	}
    }

  bit = 0x80000000ULL;

  for (i = 0; i <= 31; i++)
    {
      if (const_ok_for_mcore (i - value))
	{
	  *x = i - value;
	  *y = i;

	  return 5;
	}

      if (const_ok_for_mcore (value & ~bit))
	{
	  *y = bit;
	  *x = value & ~bit;

	  return 6;
	}

      if (const_ok_for_mcore (value | bit))
	{
	  *y = ~bit;
	  *x = value | bit;

	  return 7;
	}

      bit >>= 1;
    }

  shf = value;
  rot = value;

  for (i = 1; i < 31; i++)
    {
      unsigned HOST_WIDE_INT c;

      /* MCore has rotate left.  */
      c = (rot << 31) & 0x80000000;
      rot >>= 1;
      rot &= 0x7FFFFFFF;
      rot |= c;			/* Simulate rotate.  */

      if (const_ok_for_mcore (rot))
	{
	  *y = i;
	  *x = rot;

	  return 8;
	}

      if (shf & 1)
	shf = 0;	/* Can't use logical shift, low order bit is one.  */

      shf >>= 1;

      if (shf != 0 && const_ok_for_mcore (shf))
	{
	  *y = i;
	  *x = shf;

	  return 9;
	}
    }

  if ((value % 3) == 0 && const_ok_for_mcore (value / 3))
    {
      *x = value / 3;

      return 10;
    }

  if ((value % 5) == 0 && const_ok_for_mcore (value / 5))
    {
      *x = value / 5;

      return 11;
    }

  return 0;
}
/* Check whether reg is dead at first.  This is done by searching ahead
   for either the next use (i.e., reg is live), a death note, or a set of
   reg.  Don't just use dead_or_set_p() since reload does not always mark
   deaths (especially if PRESERVE_DEATH_NOTES_REGNO_P is not defined).  We
   can ignore subregs by extracting the actual register.  BRC  */
int
mcore_is_dead (rtx first, rtx reg)
{
  rtx insn;

  /* For mcore, subregs can't live independently of their parent regs.  */
  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  /* Dies immediately.  */
  if (dead_or_set_p (first, reg))
    return 1;

  /* Look for conclusive evidence of live/death, otherwise we have
     to assume that it is live.  */
  for (insn = NEXT_INSN (first); insn; insn = NEXT_INSN (insn))
    {
      if (GET_CODE (insn) == JUMP_INSN)
	return 0;	/* We lose track, assume it is alive.  */

      else if (GET_CODE(insn) == CALL_INSN)
	{
	  /* Call's might use it for target or register parms.  */
	  if (reg_referenced_p (reg, PATTERN (insn))
	      || find_reg_fusage (insn, USE, reg))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
      else if (GET_CODE (insn) == INSN)
	{
	  if (reg_referenced_p (reg, PATTERN (insn)))
	    return 0;
	  else if (dead_or_set_p (insn, reg))
	    return 1;
	}
    }

  /* No conclusive evidence either way, we cannot take the chance
     that control flow hid the use from us -- "I'm not dead yet".  */
  return 0;
}
/* Count the number of ones in mask.  */
int
mcore_num_ones (HOST_WIDE_INT mask)
{
  /* A trick to count set bits recently posted on comp.compilers.  */
  mask =  (mask >> 1  & 0x55555555) + (mask & 0x55555555);
  mask = ((mask >> 2) & 0x33333333) + (mask & 0x33333333);
  mask = ((mask >> 4) + mask) & 0x0f0f0f0f;
  mask = ((mask >> 8) + mask);

  return (mask + (mask >> 16)) & 0xff;
}
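/* For illustration: mcore_num_ones (0xF0F0F0F0) is 16; each line
   above folds pair, nibble, byte and halfword sums in parallel, so
   the whole count costs a handful of ALU operations and no loop.  */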
/* Count the number of zeros in mask.  */
int
mcore_num_zeros (HOST_WIDE_INT mask)
{
  return 32 - mcore_num_ones (mask);
}
/* Determine byte being masked.  */
int
mcore_byte_offset (unsigned int mask)
{
  if (mask == 0x00ffffffL)
    return 0;
  else if (mask == 0xff00ffffL)
    return 1;
  else if (mask == 0xffff00ffL)
    return 2;
  else if (mask == 0xffffff00L)
    return 3;

  return -1;
}
/* Determine halfword being masked.  */
int
mcore_halfword_offset (unsigned int mask)
{
  if (mask == 0x0000ffffL)
    return 0;
  else if (mask == 0xffff0000L)
    return 1;

  return -1;
}
/* Output a series of bseti's corresponding to mask.  */
const char *
mcore_output_bseti (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x1)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bseti\t%0,%1", out_operands);
	}
      mask >>= 1;
    }

  return "";
}
/* Output a series of bclri's corresponding to mask.  */
const char *
mcore_output_bclri (rtx dst, int mask)
{
  rtx out_operands[2];
  int bit;

  out_operands[0] = dst;

  for (bit = 0; bit < 32; bit++)
    {
      if ((mask & 0x1) == 0x0)
	{
	  out_operands[1] = GEN_INT (bit);

	  output_asm_insn ("bclri\t%0,%1", out_operands);
	}
      mask >>= 1;
    }

  return "";
}
/* Output a conditional move of two constants that are +/- 1 within each
   other.  See the "movtK" patterns in mcore.md.   I'm not sure this is
   really worth the effort.  */
const char *
mcore_output_cmov (rtx operands[], int cmp_t, const char * test)
{
  HOST_WIDE_INT load_value;
  HOST_WIDE_INT adjust_value;
  rtx out_operands[4];

  out_operands[0] = operands[0];

  /* Check to see which constant is loadable.  */
  if (const_ok_for_mcore (INTVAL (operands[1])))
    {
      out_operands[1] = operands[1];
      out_operands[2] = operands[2];
    }
  else if (const_ok_for_mcore (INTVAL (operands[2])))
    {
      out_operands[1] = operands[2];
      out_operands[2] = operands[1];

      /* Complement test since constants are swapped.  */
      cmp_t = (cmp_t == 0);
    }

  load_value   = INTVAL (out_operands[1]);
  adjust_value = INTVAL (out_operands[2]);

  /* First output the test if folded into the pattern.  */

  if (test)
    output_asm_insn (test, operands);

  /* Load the constant - for now, only support constants that can be
     generated with a single instruction.  Maybe add general inlinable
     constants later (this will increase the # of patterns since the
     instruction sequence has a different length attribute).  */
  if (load_value >= 0 && load_value <= 127)
    output_asm_insn ("movi\t%0,%1", out_operands);
  else if (CONST_OK_FOR_M (load_value))
    output_asm_insn ("bgeni\t%0,%P1", out_operands);
  else if (CONST_OK_FOR_N (load_value))
    output_asm_insn ("bmaski\t%0,%N1", out_operands);

  /* Output the constant adjustment.  */
  if (load_value > adjust_value)
    {
      if (cmp_t)
	output_asm_insn ("decf\t%0", out_operands);
      else
	output_asm_insn ("dect\t%0", out_operands);
    }
  else
    {
      if (cmp_t)
	output_asm_insn ("incf\t%0", out_operands);
      else
	output_asm_insn ("inct\t%0", out_operands);
    }

  return "";
}
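/* For illustration (example constructed here): for constants 5 and 4
   with a folded test, the routine emits "movi rD,5" and then
   "decf rD" (or "dect" for the complemented sense), so rD ends up 5
   on one arm of the condition and 4 on the other.  */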
/* Outputs the peephole for moving a constant that gets not'ed followed
   by an and (i.e. combine the not and the and into andn).  BRC  */
const char *
mcore_output_andn (rtx insn ATTRIBUTE_UNUSED, rtx operands[])
{
  HOST_WIDE_INT x, y;
  rtx out_operands[3];
  const char * load_op;
  char buf[256];
  int trick_no;

  trick_no = try_constant_tricks (INTVAL (operands[1]), &x, &y);
  gcc_assert (trick_no == 2);

  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);
  out_operands[2] = operands[2];

  if (x >= 0 && x <= 127)
    load_op = "movi\t%0,%1";

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    load_op = "bgeni\t%0,%P1";

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    load_op = "bmaski\t%0,%N1";

  else
    {
      load_op = "BADMOVI-andn\t%0, %1";
      gcc_unreachable ();
    }

  sprintf (buf, "%s\n\tandn\t%%2,%%0", load_op);
  output_asm_insn (buf, out_operands);

  return "";
}
/* Output an inline constant.  */
static const char *
output_inline_const (enum machine_mode mode, rtx operands[])
{
  HOST_WIDE_INT x = 0, y = 0;
  int trick_no;
  rtx out_operands[3];
  char buf[256];
  char load_op[256];
  const char *dst_fmt;
  HOST_WIDE_INT value;

  value = INTVAL (operands[1]);

  trick_no = try_constant_tricks (value, &x, &y);

  /* lrw's are handled separately: Large inlinable constants never get
     turned into lrw's.  Our caller uses try_constant_tricks to back
     off to an lrw rather than calling this routine.  */
  gcc_assert (trick_no != 0);

  if (trick_no == 1)
    x = value;

  /* operands: 0 = dst, 1 = load immed., 2 = immed. adjustment.  */
  out_operands[0] = operands[0];
  out_operands[1] = GEN_INT (x);

  if (trick_no > 2)
    out_operands[2] = GEN_INT (y);

  /* Select dst format based on mode.  */
  if (mode == DImode && (! TARGET_LITTLE_END))
    dst_fmt = "%R0";
  else
    dst_fmt = "%0";

  if (x >= 0 && x <= 127)
    sprintf (load_op, "movi\t%s,%%1", dst_fmt);

  /* Try exact power of two.  */
  else if (CONST_OK_FOR_M (x))
    sprintf (load_op, "bgeni\t%s,%%P1", dst_fmt);

  /* Try exact power of two - 1.  */
  else if (CONST_OK_FOR_N (x))
    sprintf (load_op, "bmaski\t%s,%%N1", dst_fmt);

  else
    {
      sprintf (load_op, "BADMOVI-inline_const %s, %%1", dst_fmt);
      gcc_unreachable ();
    }

  switch (trick_no)
    {
    case 1:
      strcpy (buf, load_op);
      break;
    case 2:   /* not */
      sprintf (buf, "%s\n\tnot\t%s\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 3:   /* add */
      sprintf (buf, "%s\n\taddi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 4:   /* sub */
      sprintf (buf, "%s\n\tsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 5:   /* rsub */
      /* Never happens unless -mrsubi, see try_constant_tricks().  */
      sprintf (buf, "%s\n\trsubi\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 6:   /* bseti */
      sprintf (buf, "%s\n\tbseti\t%s,%%P2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 7:   /* bclri */
      sprintf (buf, "%s\n\tbclri\t%s,%%Q2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 8:   /* rotli */
      sprintf (buf, "%s\n\trotli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 9:   /* lsli */
      sprintf (buf, "%s\n\tlsli\t%s,%%2\t// %ld 0x%lx", load_op, dst_fmt, value, value);
      break;
    case 10:  /* ixh */
      sprintf (buf, "%s\n\tixh\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    case 11:  /* ixw */
      sprintf (buf, "%s\n\tixw\t%s,%s\t// %ld 0x%lx", load_op, dst_fmt, dst_fmt, value, value);
      break;
    default:
      return "";
    }

  output_asm_insn (buf, out_operands);

  return "";
}
/* Output a move of a word or less value.  */
const char *
mcore_output_move (rtx insn ATTRIBUTE_UNUSED, rtx operands[],
		   enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  if (REGNO (src) == CC_REG)            /* r-c */
	    return "mvc\t%0";
	  else
	    return "mov\t%0,%1";                /* r-r*/
	}
      else if (GET_CODE (src) == MEM)
	{
	  if (GET_CODE (XEXP (src, 0)) == LABEL_REF)
	    return "lrw\t%0,[%1]";              /* a-R */
	  else
	    switch (GET_MODE (src))		/* r-m */
	      {
	      case SImode:
		return "ldw\t%0,%1";
	      case HImode:
		return "ld.h\t%0,%1";
	      case QImode:
		return "ld.b\t%0,%1";
	      default:
		gcc_unreachable ();
	      }
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  HOST_WIDE_INT x, y;

	  if (CONST_OK_FOR_I (INTVAL (src)))       /* r-I */
	    return "movi\t%0,%1";
	  else if (CONST_OK_FOR_M (INTVAL (src)))  /* r-M */
	    return "bgeni\t%0,%P1\t// %1 %x1";
	  else if (CONST_OK_FOR_N (INTVAL (src)))  /* r-N */
	    return "bmaski\t%0,%N1\t// %1 %x1";
	  else if (try_constant_tricks (INTVAL (src), &x, &y))     /* R-P */
	    return output_inline_const (SImode, operands);  /* 1-2 insns */
	  else
	    return "lrw\t%0,%x1\t// %1";	/* Get it from literal pool.  */
	}
      else
	return "lrw\t%0, %1";                /* Into the literal pool.  */
    }
  else if (GET_CODE (dst) == MEM)               /* m-r */
    switch (GET_MODE (dst))
      {
      case SImode:
	return "stw\t%1,%0";
      case HImode:
	return "st.h\t%1,%0";
      case QImode:
	return "st.b\t%1,%0";
      default:
	gcc_unreachable ();
      }

  gcc_unreachable ();
}
/* Return a sequence of instructions to perform DI or DF move.
   Since the MCORE cannot move a DI or DF in one instruction, we have
   to take care when we see overlapping source and dest registers.  */
const char *
mcore_output_movedouble (rtx operands[], enum machine_mode mode ATTRIBUTE_UNUSED)
{
  rtx dst = operands[0];
  rtx src = operands[1];

  if (GET_CODE (dst) == REG)
    {
      if (GET_CODE (src) == REG)
	{
	  int dstreg = REGNO (dst);
	  int srcreg = REGNO (src);

	  /* Ensure the second source not overwritten.  */
	  if (srcreg + 1 == dstreg)
	    return "mov	%R0,%R1\n\tmov	%0,%1";
	  else
	    return "mov	%0,%1\n\tmov	%R0,%R1";
	}
      else if (GET_CODE (src) == MEM)
	{
	  rtx memexp = XEXP (src, 0);
	  int dstreg = REGNO (dst);
	  int basereg = -1;

	  if (GET_CODE (memexp) == LABEL_REF)
	    return "lrw\t%0,[%1]\n\tlrw\t%R0,[%R1]";
	  else if (GET_CODE (memexp) == REG)
	    basereg = REGNO (memexp);
	  else if (GET_CODE (memexp) == PLUS)
	    {
	      if (GET_CODE (XEXP (memexp, 0)) == REG)
		basereg = REGNO (XEXP (memexp, 0));
	      else if (GET_CODE (XEXP (memexp, 1)) == REG)
		basereg = REGNO (XEXP (memexp, 1));
	      else
		gcc_unreachable ();
	    }
	  else
	    gcc_unreachable ();

	  /* ??? length attribute is wrong here.  */
	  if (dstreg == basereg)
	    {
	      /* Just load them in reverse order.  */
	      return "ldw\t%R0,%R1\n\tldw\t%0,%1";

	      /* XXX: alternative: move basereg to basereg+1
		 and then fall through.  */
	    }
	  else
	    return "ldw\t%0,%1\n\tldw\t%R0,%R1";
	}
      else if (GET_CODE (src) == CONST_INT)
	{
	  if (TARGET_LITTLE_END)
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi	%0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni	%0,%P1", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski	%0,%N1", operands);
	      else
		gcc_unreachable ();

	      if (INTVAL (src) < 0)
		return "bmaski	%R0,32";
	      else
		return "movi	%R0,0";
	    }
	  else
	    {
	      if (CONST_OK_FOR_I (INTVAL (src)))
		output_asm_insn ("movi	%R0,%1", operands);
	      else if (CONST_OK_FOR_M (INTVAL (src)))
		output_asm_insn ("bgeni	%R0,%P1", operands);
	      else if (CONST_OK_FOR_N (INTVAL (src)))
		output_asm_insn ("bmaski	%R0,%N1", operands);
	      else
		gcc_unreachable ();

	      if (INTVAL (src) < 0)
		return "bmaski	%0,32";
	      else
		return "movi	%0,0";
	    }
	}
      else
	gcc_unreachable ();
    }
  else if (GET_CODE (dst) == MEM && GET_CODE (src) == REG)
    return "stw\t%1,%0\n\tstw\t%R1,%R0";
  else
    gcc_unreachable ();
}
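/* For illustration (example constructed here): copying the register
   pair r2/r3 into r3/r4 has srcreg + 1 == dstreg, so the "%R" halves
   are moved first ("mov r4,r3" then "mov r3,r2"); doing "mov r3,r2"
   first would clobber r3, the source of the second move.  */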
/* Predicates used by the templates.  */

int
mcore_arith_S_operand (rtx op)
{
  if (GET_CODE (op) == CONST_INT && CONST_OK_FOR_M (~INTVAL (op)))
    return 1;

  return 0;
}
/* Expand insert bit field.  BRC  */
int
mcore_expand_insv (rtx operands[])
{
  int width = INTVAL (operands[1]);
  int posn = INTVAL (operands[2]);
  int mask;
  rtx mreg, sreg, ereg;

  /* To get width 1 insv, the test in store_bit_field() (expmed.c, line 191)
     for width==1 must be removed.  Look around line 368.  This is something
     we really want the md part to do.  */
  if (width == 1 && GET_CODE (operands[3]) == CONST_INT)
    {
      /* Do directly with bseti or bclri.  */
      /* RBE: 2/97 consider only low bit of constant.  */
      if ((INTVAL (operands[3]) & 1) == 0)
	{
	  mask = ~(1 << posn);
	  emit_insn (gen_rtx_SET (SImode, operands[0],
			      gen_rtx_AND (SImode, operands[0], GEN_INT (mask))));
	}
      else
	{
	  mask = 1 << posn;
	  emit_insn (gen_rtx_SET (SImode, operands[0],
			      gen_rtx_IOR (SImode, operands[0], GEN_INT (mask))));
	}

      return 1;
    }

  /* Look at some bit-field placements that we aren't interested
     in handling ourselves, unless specifically directed to do so.  */
  if (! TARGET_W_FIELD)
    return 0;		/* Generally, give up about now.  */

  if (width == 8 && posn % 8 == 0)
    /* Byte sized and aligned; let caller break it up.  */
    return 0;

  if (width == 16 && posn % 16 == 0)
    /* Short sized and aligned; let caller break it up.  */
    return 0;

  /* The general case - we can do this a little bit better than what the
     machine independent part tries.  This will get rid of all the subregs
     that mess up constant folding in combine when working with relaxed
     immediates.  */

  /* If setting the entire field, do it directly.  */
  if (GET_CODE (operands[3]) == CONST_INT
      && INTVAL (operands[3]) == ((1 << width) - 1))
    {
      mreg = force_reg (SImode, GEN_INT (INTVAL (operands[3]) << posn));
      emit_insn (gen_rtx_SET (SImode, operands[0],
			  gen_rtx_IOR (SImode, operands[0], mreg)));
      return 1;
    }

  /* Generate the clear mask.  */
  mreg = force_reg (SImode, GEN_INT (~(((1 << width) - 1) << posn)));

  /* Clear the field, to overlay it later with the source.  */
  emit_insn (gen_rtx_SET (SImode, operands[0],
		      gen_rtx_AND (SImode, operands[0], mreg)));

  /* If the source is constant 0, we've nothing to add back.  */
  if (GET_CODE (operands[3]) == CONST_INT && INTVAL (operands[3]) == 0)
    return 1;

  /* XXX: Should we worry about more games with constant values?
     We've covered the high profile: set/clear single-bit and many-bit
     fields.  How often do we see "arbitrary bit pattern" constants?  */
  sreg = copy_to_mode_reg (SImode, operands[3]);

  /* Extract src as same width as dst (needed for signed values).  We
     always have to do this since we widen everything to SImode.
     We don't have to mask if we're shifting this up against the
     MSB of the register (e.g., the shift will push out any hi-order
     bits).  */
  if (width + posn != (int) GET_MODE_BITSIZE (SImode))
    {
      ereg = force_reg (SImode, GEN_INT ((1 << width) - 1));
      emit_insn (gen_rtx_SET (SImode, sreg,
			  gen_rtx_AND (SImode, sreg, ereg)));
    }

  /* Insert source value in dest.  */
  if (posn != 0)
    emit_insn (gen_rtx_SET (SImode, sreg,
			gen_rtx_ASHIFT (SImode, sreg, GEN_INT (posn))));

  emit_insn (gen_rtx_SET (SImode, operands[0],
		      gen_rtx_IOR (SImode, operands[0], sreg)));

  return 1;
}
/* ??? Block move stuff stolen from m88k.  This code has not been
   verified for correctness.  */

/* Emit code to perform a block move.  Choose the best method.

   OPERANDS[0] is the destination.
   OPERANDS[1] is the source.
   OPERANDS[2] is the size.
   OPERANDS[3] is the alignment safe to use.  */

/* Emit code to perform a block move with an offset sequence of ldw/st
   instructions (..., ldw 0, stw 1, ldw 1, stw 0, ...).  SIZE and ALIGN are
   known constants.  DEST and SRC are registers.  OFFSET is the known
   starting point for the output pattern.  */

static const enum machine_mode mode_from_align[] =
{
  VOIDmode, QImode, HImode, VOIDmode, SImode,
};
static void
block_move_sequence (rtx dst_mem, rtx src_mem, int size, int align)
{
  rtx temp[2];
  enum machine_mode mode[2];
  int amount[2];
  bool active[2];
  int phase = 0;
  int next;
  int offset_ld = 0;
  int offset_st = 0;
  rtx x;

  x = XEXP (dst_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      dst_mem = replace_equiv_address (dst_mem, x);
    }

  x = XEXP (src_mem, 0);
  if (!REG_P (x))
    {
      x = force_reg (Pmode, x);
      src_mem = replace_equiv_address (src_mem, x);
    }

  active[0] = active[1] = false;

  do
    {
      next = phase;
      phase ^= 1;

      if (size > 0)
	{
	  int next_amount;

	  next_amount = (size >= 4 ? 4 : (size >= 2 ? 2 : 1));
	  next_amount = MIN (next_amount, align);

	  amount[next] = next_amount;
	  mode[next] = mode_from_align[next_amount];
	  temp[next] = gen_reg_rtx (mode[next]);

	  x = adjust_address (src_mem, mode[next], offset_ld);
	  emit_insn (gen_rtx_SET (VOIDmode, temp[next], x));

	  offset_ld += next_amount;
	  size -= next_amount;
	  active[next] = true;
	}

      if (active[phase])
	{
	  active[phase] = false;

	  x = adjust_address (dst_mem, mode[phase], offset_st);
	  emit_insn (gen_rtx_SET (VOIDmode, x, temp[phase]));

	  offset_st += amount[phase];
	}
    }
  while (active[next]);
}
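/* For illustration (example constructed here): a 7-byte copy with
   4-byte alignment interleaves as ldw 0; ld.h 4; stw 0; ld.b 6;
   st.h 4; st.b 6 -- each store drains the temporary loaded on the
   previous iteration, keeping a load and a store in flight.  */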
bool
mcore_expand_block_move (rtx *operands)
{
  HOST_WIDE_INT align, bytes, max;

  if (GET_CODE (operands[2]) != CONST_INT)
    return false;

  bytes = INTVAL (operands[2]);
  align = INTVAL (operands[3]);

  if (bytes <= 0)
    return false;
  if (align > 4)
    align = 4;

  switch (align)
    {
    case 4:
      if (bytes & 1)
	max = 4*4;
      else if (bytes & 3)
	max = 8*4;
      else
	max = 16*4;
      break;
    case 2:
      max = 4*2;
      break;
    case 1:
      max = 4*1;
      break;
    default:
      return false;
    }

  if (bytes <= max)
    {
      block_move_sequence (operands[0], operands[1], bytes, align);
      return true;
    }

  return false;
}
/* Code to generate prologue and epilogue sequences.  */
static int number_of_regs_before_varargs;

/* Set by TARGET_SETUP_INCOMING_VARARGS to indicate to prolog that this is
   for a varargs function.  */
static int current_function_anonymous_args;

#define STACK_BYTES (STACK_BOUNDARY/BITS_PER_UNIT)
#define STORE_REACH (64)	/* Maximum displace of word store + 4.  */
#define ADDI_REACH  (32)	/* Maximum addi operand.  */
static void
layout_mcore_frame (struct mcore_frame * infp)
{
  int n;
  unsigned int i;
  int nbytes;
  int regarg;
  int localregarg;
  int localreg;
  int outbounds;
  unsigned int growths;
  int step;

  /* Might have to spill bytes to re-assemble a big argument that
     was passed partially in registers and partially on the stack.  */
  nbytes = current_function_pretend_args_size;

  /* Determine how much space for spilled anonymous args (e.g., stdarg).  */
  if (current_function_anonymous_args)
    nbytes += (NPARM_REGS - number_of_regs_before_varargs) * UNITS_PER_WORD;

  infp->arg_size = nbytes;

  /* How much space to save non-volatile registers we stomp.  */
  infp->reg_mask = calc_live_regs (& n);
  infp->reg_size = n * 4;

  /* And the rest of it... locals and space for overflowed outbounds.  */
  infp->local_size = get_frame_size ();
  infp->outbound_size = current_function_outgoing_args_size;

  /* Make sure we have a whole number of words for the locals.  */
  if (infp->local_size % STACK_BYTES)
    infp->local_size = (infp->local_size + STACK_BYTES - 1) & ~ (STACK_BYTES -1);

  /* Only thing we know we have to pad is the outbound space, since
     we've aligned our locals assuming that base of locals is aligned.  */
  infp->pad_local = 0;
  infp->pad_reg = 0;
  infp->pad_outbound = 0;
  if (infp->outbound_size % STACK_BYTES)
    infp->pad_outbound = STACK_BYTES - (infp->outbound_size % STACK_BYTES);

  /* Now we see how we want to stage the prologue so that it does
     the most appropriate stack growth and register saves to either:
     (1) run fast,
     (2) reduce instruction space, or
     (3) reduce stack space.  */
  for (i = 0; i < ARRAY_SIZE (infp->growth); i++)
    infp->growth[i] = 0;

  regarg      = infp->reg_size + infp->arg_size;
  localregarg = infp->local_size + regarg;
  localreg    = infp->local_size + infp->reg_size;
  outbounds   = infp->outbound_size + infp->pad_outbound;
  growths     = 0;

  /* XXX: Consider one where we consider localregarg + outbound too!  */

  /* Frame of <= 32 bytes and using stm would get <= 2 registers.
     use stw's with offsets and buy the frame in one shot.  */
  if (localregarg <= ADDI_REACH
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      /* Make sure we'll be aligned.  */
      if (localregarg % STACK_BYTES)
	infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      step = localregarg + infp->pad_reg;
      infp->reg_offset = infp->local_size;

      if (outbounds + step <= ADDI_REACH && !frame_pointer_needed)
	{
	  step += outbounds;
	  infp->reg_offset += outbounds;
	  outbounds = 0;
	}

      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->local_growth = growths;

      /* If we haven't already folded it in.  */
      if (outbounds)
	infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Frame can't be done with a single subi, but can be done with 2
     insns.  If the 'stm' is getting <= 2 registers, we use stw's and
     shift some of the stack purchase into the first subi, so both are
     single instructions.  */
  if (localregarg <= STORE_REACH
      && (infp->local_size > ADDI_REACH)
      && (infp->reg_size <= 8 || (infp->reg_mask & 0xc000) != 0xc000))
    {
      int all;

      /* Make sure we'll be aligned; use either pad_reg or pad_local.  */
      if (localregarg % STACK_BYTES)
	infp->pad_reg = STACK_BYTES - (localregarg % STACK_BYTES);

      all = localregarg + infp->pad_reg + infp->pad_local;
      step = ADDI_REACH;	/* As much up front as we can.  */
      if (step > all)
	step = all;

      /* XXX: Consider whether step will still be aligned; we believe so.  */
      infp->arg_offset = step - 4;
      infp->growth[growths++] = step;
      infp->reg_growth = growths;
      infp->reg_offset = step - infp->pad_reg - infp->reg_size;

      /* Can we fold in any space required for outbounds?  */
      if (outbounds + all <= ADDI_REACH && !frame_pointer_needed)
	{
	  all += outbounds;
	  outbounds = 0;
	}

      /* Get the rest of the locals in place.  */
      step = all - step;
      infp->growth[growths++] = step;
      infp->local_growth = growths;
      all -= step;

      /* Finish off if we need to do so.  */
      if (outbounds)
	infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* Registers + args is nicely aligned, so we'll buy that in one shot.
     Then we buy the rest of the frame in 1 or 2 steps depending on
     whether we need a frame pointer.  */
  if ((regarg % STACK_BYTES) == 0)
    {
      infp->growth[growths++] = regarg;
      infp->reg_growth = growths;
      infp->arg_offset = regarg - 4;
      infp->reg_offset = 0;

      if (infp->local_size % STACK_BYTES)
	infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      step = infp->local_size + infp->pad_local;

      if (!frame_pointer_needed)
	{
	  step += outbounds;
	  outbounds = 0;
	}

      infp->growth[growths++] = step;
      infp->local_growth = growths;

      /* If there's any left to be done.  */
      if (outbounds)
	infp->growth[growths++] = outbounds;

      goto finish;
    }

  /* XXX: optimizations that we'll want to play with....
     -- regarg is not aligned, but it's a small number of registers;
	use some of localsize so that regarg is aligned and then
	save the registers.  */

  /* Simple encoding; plods down the stack buying the pieces as it goes.
     -- does not optimize space consumption.
     -- does not attempt to optimize instruction counts.
     -- but it is safe for all alignments.  */
  if (regarg % STACK_BYTES != 0)
    infp->pad_reg = STACK_BYTES - (regarg % STACK_BYTES);

  infp->growth[growths++] = infp->arg_size + infp->reg_size + infp->pad_reg;
  infp->reg_growth = growths;
  infp->arg_offset = infp->growth[0] - 4;
  infp->reg_offset = 0;

  if (frame_pointer_needed)
    {
      if (infp->local_size % STACK_BYTES != 0)
	infp->pad_local = STACK_BYTES - (infp->local_size % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local;
      infp->local_growth = growths;

      infp->growth[growths++] = outbounds;
    }
  else
    {
      if ((infp->local_size + outbounds) % STACK_BYTES != 0)
	infp->pad_local = STACK_BYTES - ((infp->local_size + outbounds) % STACK_BYTES);

      infp->growth[growths++] = infp->local_size + infp->pad_local + outbounds;
      infp->local_growth = growths;
    }

  /* Anything else that we've forgotten?, plus a few consistency checks.  */
 finish:
  assert (infp->reg_offset >= 0);
  assert (growths <= MAX_STACK_GROWS);

  for (i = 0; i < growths; i++)
    gcc_assert (!(infp->growth[i] % STACK_BYTES));
}
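/* A worked illustration (hypothetical values, assuming STACK_BYTES is
   8): with local_size == 4, reg_size == 8 and no argument or outbound
   space, localregarg is 12, so pad_reg becomes 4 and the first case
   above buys the whole 16-byte frame in one step; the two saved
   registers then sit at offsets 4 and 8 above the new stack pointer
   (reg_offset == local_size == 4).  */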
/* Define the offset between two registers, one to be eliminated, and
   the other its replacement, at the start of a routine.  */
int
mcore_initial_elimination_offset (int from, int to)
{
  int above_frame;
  int below_frame;
  struct mcore_frame fi;

  layout_mcore_frame (& fi);

  /* fp to ap */
  above_frame = fi.local_size + fi.pad_local + fi.reg_size + fi.pad_reg;
  /* sp to fp */
  below_frame = fi.outbound_size + fi.pad_outbound;

  if (from == ARG_POINTER_REGNUM && to == FRAME_POINTER_REGNUM)
    return above_frame;

  if (from == ARG_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return above_frame + below_frame;

  if (from == FRAME_POINTER_REGNUM && to == STACK_POINTER_REGNUM)
    return below_frame;

  gcc_unreachable ();
}
/* Keep track of some information about varargs for the prolog.  */
static void
mcore_setup_incoming_varargs (CUMULATIVE_ARGS *args_so_far,
			      enum machine_mode mode, tree type,
			      int * ptr_pretend_size ATTRIBUTE_UNUSED,
			      int second_time ATTRIBUTE_UNUSED)
{
  current_function_anonymous_args = 1;

  /* We need to know how many argument registers are used before
     the varargs start, so that we can push the remaining argument
     registers during the prologue.  */
  number_of_regs_before_varargs = *args_so_far + mcore_num_arg_regs (mode, type);

  /* There is a bug somewhere in the arg handling code.
     Until I can find it this workaround always pushes the
     last named argument onto the stack.  */
  number_of_regs_before_varargs = *args_so_far;

  /* The last named argument may be split between argument registers
     and the stack.  Allow for this here.  */
  if (number_of_regs_before_varargs > NPARM_REGS)
    number_of_regs_before_varargs = NPARM_REGS;
}
void
mcore_expand_prolog (void)
{
  struct mcore_frame fi;
  int space_allocated = 0;
  int growth = 0;

  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);

  space_allocated = fi.arg_size + fi.reg_size + fi.local_size +
    fi.outbound_size + fi.pad_outbound + fi.pad_local + fi.pad_reg;

  if (TARGET_CG_DATA)
    {
      /* Emit a symbol for this routine's frame size.  */
      rtx x;

      x = DECL_RTL (current_function_decl);

      gcc_assert (GET_CODE (x) == MEM);

      x = XEXP (x, 0);

      gcc_assert (GET_CODE (x) == SYMBOL_REF);

      if (mcore_current_function_name)
	free (mcore_current_function_name);

      mcore_current_function_name = xstrdup (XSTR (x, 0));

      ASM_OUTPUT_CG_NODE (asm_out_file, mcore_current_function_name, space_allocated);

      if (current_function_calls_alloca)
	ASM_OUTPUT_CG_EDGE (asm_out_file, mcore_current_function_name, "alloca", 1);

      /* We're looking at how the 8byte alignment affects stack layout
         and where we had to pad things.  This emits information we can
         extract which tells us about frame sizes and the like.  */
      fprintf (asm_out_file,
	       "\t.equ\t__$frame$info$_%s_$_%d_%d_x%x_%d_%d_%d,0\n",
	       mcore_current_function_name,
	       fi.arg_size, fi.reg_size, fi.reg_mask,
	       fi.local_size, fi.outbound_size,
	       frame_pointer_needed);
    }

  if (mcore_naked_function_p ())
    return;

  /* Handle stdarg+regsaves in one shot: can't be more than 64 bytes.  */
  output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */

  /* If we have a parameter passed partially in regs and partially in memory,
     the registers will have been stored to memory already in function.c.  So
     we only need to do something here for varargs functions.  */
  if (fi.arg_size != 0 && current_function_pretend_args_size == 0)
    {
      int offset;
      int rn = FIRST_PARM_REG + NPARM_REGS - 1;
      int remaining = fi.arg_size;

      for (offset = fi.arg_offset; remaining >= 4; offset -= 4, rn--, remaining -= 4)
	{
	  emit_insn (gen_movsi
		     (gen_rtx_MEM (SImode,
			       plus_constant (stack_pointer_rtx, offset)),
		      gen_rtx_REG (SImode, rn)));
	}
    }

  /* Do we need another stack adjustment before we do the register saves?  */
  if (growth < fi.reg_growth)
    output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */

  if (fi.reg_size != 0)
    {
      int i;
      int offs = fi.reg_offset;

      for (i = 15; i >= 0; i--)
	{
	  if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
	    {
	      int first_reg = 15;

	      while (fi.reg_mask & (1 << first_reg))
		first_reg--;
	      first_reg++;

	      emit_insn (gen_store_multiple (gen_rtx_MEM (SImode, stack_pointer_rtx),
					     gen_rtx_REG (SImode, first_reg),
					     GEN_INT (16 - first_reg)));

	      i -= (15 - first_reg);
	      offs += (16 - first_reg) * 4;
	    }
	  else if (fi.reg_mask & (1 << i))
	    {
	      emit_insn (gen_movsi
			 (gen_rtx_MEM (SImode,
				   plus_constant (stack_pointer_rtx, offs)),
			  gen_rtx_REG (SImode, i)));
	      offs += 4;
	    }
	}
    }

  /* Figure the locals + outbounds.  */
  if (frame_pointer_needed)
    {
      /* If we haven't already purchased to 'fp'.  */
      if (growth < fi.local_growth)
	output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */

      emit_insn (gen_movsi (frame_pointer_rtx, stack_pointer_rtx));

      /* ... and then go any remaining distance for outbounds, etc.  */
      if (fi.growth[growth])
	output_stack_adjust (-1, fi.growth[growth++]);
    }
  else
    {
      if (growth < fi.local_growth)
	output_stack_adjust (-1, fi.growth[growth++]);	/* Grows it.  */
      if (fi.growth[growth])
	output_stack_adjust (-1, fi.growth[growth++]);
    }
}
void
mcore_expand_epilog (void)
{
  struct mcore_frame fi;
  int i;
  int offs;
  int growth = MAX_STACK_GROWS - 1;

  /* Find out what we're doing.  */
  layout_mcore_frame (&fi);

  if (mcore_naked_function_p ())
    return;

  /* If we had a frame pointer, restore the sp from that.  */
  if (frame_pointer_needed)
    {
      emit_insn (gen_movsi (stack_pointer_rtx, frame_pointer_rtx));
      growth = fi.local_growth - 1;
    }
  else
    {
      /* XXX: while loop should accumulate and do a single sell.  */
      while (growth >= fi.local_growth)
	{
	  if (fi.growth[growth] != 0)
	    output_stack_adjust (1, fi.growth[growth]);
	  growth--;
	}
    }

  /* Make sure we've shrunk stack back to the point where the registers
     were laid down.  This is typically 0/1 iterations.  Then pull the
     register save information back off the stack.  */
  while (growth >= fi.reg_growth)
    output_stack_adjust ( 1, fi.growth[growth--]);

  offs = fi.reg_offset;

  for (i = 15; i >= 0; i--)
    {
      if (offs == 0 && i == 15 && ((fi.reg_mask & 0xc000) == 0xc000))
	{
	  int first_reg;

	  /* Find the starting register.  */
	  first_reg = 15;

	  while (fi.reg_mask & (1 << first_reg))
	    first_reg--;

	  first_reg++;

	  emit_insn (gen_load_multiple (gen_rtx_REG (SImode, first_reg),
					gen_rtx_MEM (SImode, stack_pointer_rtx),
					GEN_INT (16 - first_reg)));

	  i -= (15 - first_reg);
	  offs += (16 - first_reg) * 4;
	}
      else if (fi.reg_mask & (1 << i))
	{
	  emit_insn (gen_movsi
		     (gen_rtx_REG (SImode, i),
		      gen_rtx_MEM (SImode,
			       plus_constant (stack_pointer_rtx, offs))));
	  offs += 4;
	}
    }

  /* Give back anything else.  */
  /* XXX: Should accumulate total and then give it back.  */
  while (growth >= 0)
    output_stack_adjust ( 1, fi.growth[growth--]);
}
/* This code is borrowed from the SH port.  */

/* The MCORE cannot load a large constant into a register, constants have to
   come from a pc relative load.  The reference of a pc relative load
   instruction must be less than 1k in front of the instruction.  This
   means that we often have to dump a constant inside a function, and
   generate code to branch around it.

   It is important to minimize this, since the branches will slow things
   down and make things bigger.

   Worst case code looks like:

   lrw   L1,r0
   br    L2
   align
   L1:   .long value
   L2:
   ..

   lrw   L3,r0
   br    L4
   align
   L3:   .long value
   L4:
   ..

   We fix this by performing a scan before scheduling, which notices which
   instructions need to have their operands fetched from the constant table
   and builds the table.

   The algorithm is:

   scan, find an instruction which needs a pcrel move.  Look forward, find the
   last barrier which is within MAX_COUNT bytes of the requirement.
   If there isn't one, make one.  Process all the instructions between
   the find and the barrier.

   In the above example, we can tell that L3 is within 1k of L1, so
   the first move can be shrunk from the 2 insn+constant sequence into
   just 1 insn, and the constant moved to L3 to make:

   lrw          L1,r0
   ..
   lrw          L3,r0
   br           L4
   align
   L3:.long value
   L4:.long value

   Then the second move becomes the target for the shortening process.  */

typedef struct
{
  rtx value;			/* Value in table.  */
  rtx label;			/* Label of value.  */
} pool_node;

/* The maximum number of constants that can fit into one pool, since
   the pc relative range is 0...1020 bytes and constants are at least 4
   bytes long.  We subtract 4 from the range to allow for the case where
   we need to add a branch/align before the constant pool.  */

#define MAX_COUNT 1016
#define MAX_POOL_SIZE (MAX_COUNT/4)
static pool_node pool_vector[MAX_POOL_SIZE];
static int pool_size;
/* Dump out any constants accumulated in the final pass.  These
   will only be labels.  */
const char *
mcore_output_jump_label_table (void)
{
  int i;

  if (pool_size)
    {
      fprintf (asm_out_file, "\t.align 2\n");

      for (i = 0; i < pool_size; i++)
	{
	  pool_node * p = pool_vector + i;

	  (*targetm.asm_out.internal_label) (asm_out_file, "L", CODE_LABEL_NUMBER (p->label));

	  output_asm_insn (".long	%0", &p->value);
	}

      pool_size = 0;
    }

  return "";
}
/* Check whether insn is a candidate for a conditional.  */
static cond_type
is_cond_candidate (rtx insn)
{
  /* The only things we conditionalize are those that can be directly
     changed into a conditional.  Only bother with SImode items.  If
     we wanted to be a little more aggressive, we could also do other
     modes such as DImode with reg-reg move or load 0.  */
  if (GET_CODE (insn) == INSN)
    {
      rtx pat = PATTERN (insn);
      rtx src, dst;

      if (GET_CODE (pat) != SET)
	return COND_NO;

      dst = XEXP (pat, 0);

      if ((GET_CODE (dst) != REG &&
	   GET_CODE (dst) != SUBREG) ||
	  GET_MODE (dst) != SImode)
	return COND_NO;

      src = XEXP (pat, 1);

      if ((GET_CODE (src) == REG ||
	   (GET_CODE (src) == SUBREG &&
	    GET_CODE (SUBREG_REG (src)) == REG)) &&
	  GET_MODE (src) == SImode)
	return COND_MOV_INSN;
      else if (GET_CODE (src) == CONST_INT &&
	       INTVAL (src) == 0)
	return COND_CLR_INSN;
      else if (GET_CODE (src) == PLUS &&
	       (GET_CODE (XEXP (src, 0)) == REG ||
		(GET_CODE (XEXP (src, 0)) == SUBREG &&
		 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
	       GET_MODE (XEXP (src, 0)) == SImode &&
	       GET_CODE (XEXP (src, 1)) == CONST_INT &&
	       INTVAL (XEXP (src, 1)) == 1)
	return COND_INC_INSN;
      else if (((GET_CODE (src) == MINUS &&
		 GET_CODE (XEXP (src, 1)) == CONST_INT &&
		 INTVAL( XEXP (src, 1)) == 1) ||
		(GET_CODE (src) == PLUS &&
		 GET_CODE (XEXP (src, 1)) == CONST_INT &&
		 INTVAL (XEXP (src, 1)) == -1)) &&
	       (GET_CODE (XEXP (src, 0)) == REG ||
		(GET_CODE (XEXP (src, 0)) == SUBREG &&
		 GET_CODE (SUBREG_REG (XEXP (src, 0))) == REG)) &&
	       GET_MODE (XEXP (src, 0)) == SImode)
	return COND_DEC_INSN;

      /* Some insns that we don't bother with:
	 (set (rx:DI) (ry:DI))
	 (set (rx:DI) (const_int 0))
      */
    }
  else if (GET_CODE (insn) == JUMP_INSN &&
	   GET_CODE (PATTERN (insn)) == SET &&
	   GET_CODE (XEXP (PATTERN (insn), 1)) == LABEL_REF)
    return COND_BRANCH_INSN;

  return COND_NO;
}
/* Emit a conditional version of insn and replace the old insn with the
   new one.  Return the new insn if emitted.  */
static rtx
emit_new_cond_insn (rtx insn, int cond)
{
  rtx c_insn = 0;
  rtx pat, dst, src;
  cond_type num;

  if ((num = is_cond_candidate (insn)) == COND_NO)
    return NULL;

  pat = PATTERN (insn);

  if (GET_CODE (insn) == INSN)
    {
      dst = SET_DEST (pat);
      src = SET_SRC (pat);
    }
  else
    {
      dst = JUMP_LABEL (insn);
      src = NULL_RTX;
    }

  switch (num)
    {
    case COND_MOV_INSN:
    case COND_CLR_INSN:
      if (cond)
	c_insn = gen_movt0 (dst, src, dst);
      else
	c_insn = gen_movt0 (dst, dst, src);
      break;

    case COND_INC_INSN:
      if (cond)
	c_insn = gen_incscc (dst, dst);
      else
	c_insn = gen_incscc_false (dst, dst);
      break;

    case COND_DEC_INSN:
      if (cond)
	c_insn = gen_decscc (dst, dst);
      else
	c_insn = gen_decscc_false (dst, dst);
      break;

    case COND_BRANCH_INSN:
      if (cond)
	c_insn = gen_branch_true (dst);
      else
	c_insn = gen_branch_false (dst);
      break;

    default:
      return NULL;
    }

  /* Only copy the notes if they exist.  */
  if (rtx_length [GET_CODE (c_insn)] >= 7 && rtx_length [GET_CODE (insn)] >= 7)
    {
      /* We really don't need to bother with the notes and links at this
	 point, but go ahead and save the notes.  This will help is_dead()
	 when applying peepholes (links don't matter since they are not
	 used any more beyond this point for the mcore).  */
      REG_NOTES (c_insn) = REG_NOTES (insn);
    }

  if (num == COND_BRANCH_INSN)
    {
      /* For jumps, we need to be a little bit careful and emit the new jump
	 before the old one and to update the use count for the target label.
	 This way, the barrier following the old (uncond) jump will get
	 deleted, but the label won't.  */
      c_insn = emit_jump_insn_before (c_insn, insn);

      ++ LABEL_NUSES (dst);

      JUMP_LABEL (c_insn) = dst;
    }
  else
    c_insn = emit_insn_after (c_insn, insn);

  delete_insn (insn);

  return c_insn;
}
2371 /* Attempt to change a basic block into a series of conditional insns. This
2372 works by taking the branch at the end of the 1st block and scanning for the
2373 end of the 2nd block. If all instructions in the 2nd block have cond.
2374 versions and the label at the start of block 3 is the same as the target
2375 from the branch at block 1, then conditionalize all insn in block 2 using
2376 the inverse condition of the branch at block 1. (Note I'm bending the
2377 definition of basic block here.)
2381 bt L2 <-- end of block 1 (delete)
2384 br L3 <-- end of block 2
2386 L2: ... <-- start of block 3 (NUSES==1)
2397 we can delete the L2 label if NUSES==1 and re-apply the optimization
2398 starting at the last instruction of block 2. This may allow an entire
2399 if-then-else statement to be conditionalized. BRC */
2401 conditionalize_block (rtx first
)
2405 rtx end_blk_1_br
= 0;
2406 rtx end_blk_2_insn
= 0;
2407 rtx start_blk_3_lab
= 0;
2413 /* Check that the first insn is a candidate conditional jump. This is
2414 the one that we'll eliminate. If not, advance to the next insn to
2416 if (GET_CODE (first
) != JUMP_INSN
||
2417 GET_CODE (PATTERN (first
)) != SET
||
2418 GET_CODE (XEXP (PATTERN (first
), 1)) != IF_THEN_ELSE
)
2419 return NEXT_INSN (first
);
2421 /* Extract some information we need. */
2422 end_blk_1_br
= first
;
2423 br_pat
= PATTERN (end_blk_1_br
);
2425 /* Complement the condition since we use the reverse cond. for the insns. */
2426 cond
= (GET_CODE (XEXP (XEXP (br_pat
, 1), 0)) == EQ
);
2428 /* Determine what kind of branch we have. */
2429 if (GET_CODE (XEXP (XEXP (br_pat
, 1), 1)) == LABEL_REF
)
2431 /* A normal branch, so extract label out of first arm. */
2432 br_lab_num
= CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat
, 1), 1), 0));
2436 /* An inverse branch, so extract the label out of the 2nd arm
2437 and complement the condition. */
2439 br_lab_num
= CODE_LABEL_NUMBER (XEXP (XEXP (XEXP (br_pat
, 1), 2), 0));
2442 /* Scan forward for the start of block 2: it must start with a
2443 label and that label must be the same as the branch target
2444 label from block 1. We don't care about whether block 2 actually
2445 ends with a branch or a label (an uncond. branch is
2446 conditionalizable). */
2447 for (insn
= NEXT_INSN (first
); insn
; insn
= NEXT_INSN (insn
))
2451 code
= GET_CODE (insn
);
2453 /* Look for the label at the start of block 3. */
2454 if (code
== CODE_LABEL
&& CODE_LABEL_NUMBER (insn
) == br_lab_num
)
2457 /* Skip barriers, notes, and conditionalizable insns. If the
2458 insn is not conditionalizable or makes this optimization fail,
2459 just return the next insn so we can start over from that point. */
2460 if (code
!= BARRIER
&& code
!= NOTE
&& !is_cond_candidate (insn
))
2461 return NEXT_INSN (insn
);
2463 /* Remember the last real insn before the label (i.e. end of block 2). */
2464 if (code
== JUMP_INSN
|| code
== INSN
)
2467 end_blk_2_insn
= insn
;
  /* It is possible for this optimization to slow performance if the blocks
     are long.  This really depends upon whether the branch is likely taken
     or not.  If the branch is taken, we slow performance in many cases.  But,
     if the branch is not taken, we always help performance (for a single
     block, but for a double block (i.e. when the optimization is re-applied)
     this is not true since the 'right thing' depends on the overall length of
     the collapsed block).  As a compromise, don't apply this optimization on
     blocks larger than size 2 (unlikely for the mcore) when speed is important.
     The best threshold depends on the latencies of the instructions (i.e.,
     the branch penalty).  */
  if (optimize > 1 && blk_size > 2)
    return NEXT_INSN (first);
  /* At this point, we've found the start of block 3 and we know that
     it is the destination of the branch from block 1.  Also, all
     instructions in block 2 are conditionalizable.  So, apply the
     conditionalization and delete the branch.  */
  start_blk_3_lab = insn;

  for (insn = NEXT_INSN (end_blk_1_br); insn != start_blk_3_lab;
       insn = NEXT_INSN (insn))
    {
      rtx newinsn;

      if (INSN_DELETED_P (insn))
	continue;

      /* Try to form a conditional variant of the instruction and emit it.  */
      if ((newinsn = emit_new_cond_insn (insn, cond)))
	{
	  if (end_blk_2_insn == insn)
	    end_blk_2_insn = newinsn;

	  insn = newinsn;
	}
    }
  /* Note whether we will delete the label starting blk 3 when the jump
     gets deleted.  If so, we want to re-apply this optimization at the
     last real instruction right before the label.  */
  if (LABEL_NUSES (start_blk_3_lab) == 1)
    {
      start_blk_3_lab = 0;
    }

  /* ??? We probably should redistribute the death notes for this insn, esp.
     the death of cc, but it doesn't really matter this late in the game.
     The peepholes all use is_dead() which will find the correct death
     regardless of whether there is a note.  */
  delete_insn (end_blk_1_br);

  if (! start_blk_3_lab)
    return end_blk_2_insn;

  /* Return the insn right after the label at the start of block 3.  */
  return NEXT_INSN (start_blk_3_lab);
}
/* Apply the conditionalization of blocks optimization.  This is the
   outer loop that traverses through the insns scanning for a branch
   that signifies an opportunity to apply the optimization.  Note that
   this optimization is applied late.  If we could apply it earlier,
   say before cse 2, it might expose more optimization opportunities.
   But the payback probably isn't really worth the effort (we'd have
   to update all reg/flow/notes/links/etc. to make it work, and stick
   it in before cse 2).  */

static void
conditionalize_optimization (void)
{
  rtx insn;

  for (insn = get_insns (); insn; insn = conditionalize_block (insn))
    continue;
}
static int saved_warn_return_type = -1;
static int saved_warn_return_type_count = 0;
/* This is to handle loads from the constant pool.  */

static void
mcore_reorg (void)
{
  /* Reset this variable.  */
  current_function_anonymous_args = 0;

  /* Restore the warn_return_type if it has been altered.  */
  if (saved_warn_return_type != -1)
    {
      /* Only restore the value if we have reached another function.
	 The test of warn_return_type occurs in final_function () in
	 c-decl.c a long time after the code for the function is generated,
	 so we need a counter to tell us when we have finished parsing that
	 function and can restore the flag.  */
      if (--saved_warn_return_type_count == 0)
	{
	  warn_return_type = saved_warn_return_type;
	  saved_warn_return_type = -1;
	}
    }

  if (optimize == 0)
    return;

  /* Conditionalize blocks where we can.  */
  conditionalize_optimization ();

  /* Literal pool generation is now pushed off until the assembler.  */
}
/* Return true if X is something that can be moved directly into r15.  */

bool
mcore_r15_operand_p (rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST_INT:
      return mcore_const_ok_for_inline (INTVAL (x));

    case REG:
    case SUBREG:
    case MEM:
      return 1;

    default:
      return 0;
    }
}
/* Implement SECONDARY_RELOAD_CLASS.  If CLASS contains r15, and we can't
   directly move X into it, use r1-r14 as a temporary.  */

enum reg_class
mcore_secondary_reload_class (enum reg_class class,
			      enum machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  if (TEST_HARD_REG_BIT (reg_class_contents[class], 15)
      && !mcore_r15_operand_p (x))
    return LRW_REGS;

  return NO_REGS;
}
/* Return the reg_class to use when reloading the rtx X into the class
   CLASS.  If X is too complex to move directly into r15, prefer to
   use LRW_REGS instead.  */

enum reg_class
mcore_reload_class (rtx x, enum reg_class class)
{
  if (reg_class_subset_p (LRW_REGS, class) && !mcore_r15_operand_p (x))
    return LRW_REGS;

  return class;
}
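/* Hypothetical example (not in the original source): a constant such as
   0x12345678 fails mcore_const_ok_for_inline, so a reload of it into a
   class containing r15 is steered through LRW_REGS; reload can then load
   the constant from the literal pool with an lrw into some r1-r14 register
   and copy that into r15, instead of trying to synthesize it in r15
   directly.  */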
/* Tell me if a pair of reg/subreg rtx's actually refer to the same
   register.  Note that the current version doesn't worry about whether
   they are the same mode or not (e.g., a QImode in r2 matches an HImode
   in r2, which matches an SImode in r2).  Might think in the future about
   whether we want to be able to say something about modes.  */

int
mcore_is_same_reg (rtx x, rtx y)
{
  /* Strip any and all of the subreg wrappers.  */
  while (GET_CODE (x) == SUBREG)
    x = SUBREG_REG (x);

  while (GET_CODE (y) == SUBREG)
    y = SUBREG_REG (y);

  if (GET_CODE(x) == REG && GET_CODE(y) == REG && REGNO(x) == REGNO(y))
    return 1;

  return 0;
}
void
mcore_override_options (void)
{
  /* Only the m340 supports little endian code.  */
  if (TARGET_LITTLE_END && ! TARGET_M340)
    target_flags |= MASK_M340;
}
/* Compute the number of word sized registers needed to
   hold a function argument of mode MODE and type TYPE.  */

int
mcore_num_arg_regs (enum machine_mode mode, tree type)
{
  int size;

  if (targetm.calls.must_pass_in_stack (mode, type))
    return 0;

  if (type && mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  return ROUND_ADVANCE (size);
}
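/* Worked example (illustrative): with UNITS_PER_WORD == 4, an SImode
   argument needs one register and a DImode argument two, while a 6-byte
   BLKmode structure needs ROUND_ADVANCE (6) == 2 registers, the byte
   count being rounded up to whole words.  */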
static rtx
handle_structs_in_regs (enum machine_mode mode, tree type, int reg)
{
  int size;

  /* The MCore ABI defines that a structure whose size is not a whole multiple
     of words is passed packed into registers (or spilled onto the stack if
     not enough registers are available) with the last few bytes of the
     structure being packed, left-justified, into the last register/stack slot.
     GCC handles this correctly if the last word is in a stack slot, but we
     have to generate a special, PARALLEL RTX if the last word is in an
     argument register.  */
  if (type
      && TYPE_MODE (type) == BLKmode
      && TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST
      && (size = int_size_in_bytes (type)) > UNITS_PER_WORD
      && (size % UNITS_PER_WORD != 0)
      && (reg + mcore_num_arg_regs (mode, type) <= (FIRST_PARM_REG + NPARM_REGS)))
    {
      rtx arg_regs [NPARM_REGS];
      int nregs;
      rtx result;
      rtvec rtvec;

      for (nregs = 0; size > 0; size -= UNITS_PER_WORD)
	{
	  arg_regs [nregs] =
	    gen_rtx_EXPR_LIST (SImode, gen_rtx_REG (SImode, reg ++),
			       GEN_INT (nregs * UNITS_PER_WORD));
	  nregs ++;
	}

      /* We assume here that NPARM_REGS == 6.  The assert checks this.  */
      assert (ARRAY_SIZE (arg_regs) == 6);
      rtvec = gen_rtvec (nregs, arg_regs[0], arg_regs[1], arg_regs[2],
			 arg_regs[3], arg_regs[4], arg_regs[5]);

      result = gen_rtx_PARALLEL (mode, rtvec);
      return result;
    }

  return gen_rtx_REG (mode, reg);
}
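/* Worked example (illustrative, register numbers assumed): a 6-byte BLKmode
   structure starting in hardware register 2 takes the PARALLEL path above
   and yields roughly

	(parallel [(expr_list (reg:SI 2) (const_int 0))
		   (expr_list (reg:SI 3) (const_int 4))])

   i.e. bytes 0-3 in r2 and the remaining two bytes, left-justified, in r3.  */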
/* Return the rtx describing where a function's return value of type
   VALTYPE lives; structures are routed through handle_structs_in_regs.  */

rtx
mcore_function_value (tree valtype, tree func ATTRIBUTE_UNUSED)
{
  enum machine_mode mode;
  int unsigned_p;

  mode = TYPE_MODE (valtype);

  PROMOTE_MODE (mode, unsigned_p, NULL);

  return handle_structs_in_regs (mode, valtype, FIRST_RET_REG);
}
/* Define where to put the arguments to a function.
   Value is zero to push the argument on the stack,
   or a hard register in which to store the argument.

   MODE is the argument's machine mode.
   TYPE is the data type of the argument (as a tree).
    This is null for libcalls where that information may
    not be available.
   CUM is a variable of type CUMULATIVE_ARGS which gives info about
    the preceding args and about the function being called.
   NAMED is nonzero if this argument is a named parameter
    (otherwise it is an extra parameter matching an ellipsis).

   On MCore the first args are normally in registers
   and the rest are pushed.  Any arg that starts within the first
   NPARM_REGS words is at least partially passed in a register unless
   its data type forbids.  */

rtx
mcore_function_arg (CUMULATIVE_ARGS cum, enum machine_mode mode,
		    tree type, int named)
{
  int arg_reg;

  if (! named || mode == VOIDmode)
    return 0;

  if (targetm.calls.must_pass_in_stack (mode, type))
    return 0;

  arg_reg = ROUND_REG (cum, mode);

  if (arg_reg < NPARM_REGS)
    return handle_structs_in_regs (mode, type, FIRST_PARM_REG + arg_reg);

  return 0;
}
/* Returns the number of bytes of argument registers required to hold *part*
   of a parameter of machine mode MODE and type TYPE (which may be NULL if
   the type is not known).  If the argument fits entirely in the argument
   registers, or entirely on the stack, then 0 is returned.  CUM is the
   number of argument registers already used by earlier parameters to
   the function.  */

static int
mcore_arg_partial_bytes (CUMULATIVE_ARGS *cum, enum machine_mode mode,
			 tree type, bool named)
{
  int reg = ROUND_REG (*cum, mode);

  if (named == 0)
    return 0;

  if (targetm.calls.must_pass_in_stack (mode, type))
    return 0;

  /* REG is not the *hardware* register number of the register that holds
     the argument, it is the *argument* register number.  So for example,
     the first argument to a function goes in argument register 0, which
     translates (for the MCore) into hardware register 2.  The second
     argument goes into argument register 1, which translates into hardware
     register 3, and so on.  NPARM_REGS is the number of argument registers
     supported by the target, not the maximum hardware register number of
     the target.  */
  if (reg >= NPARM_REGS)
    return 0;

  /* If the argument fits entirely in registers, return 0.  */
  if (reg + mcore_num_arg_regs (mode, type) <= NPARM_REGS)
    return 0;

  /* The argument overflows the number of available argument registers.
     Compute how many argument registers have not yet been assigned to
     hold an argument.  */
  reg = NPARM_REGS - reg;

  /* Return partially in registers and partially on the stack.  */
  return reg * UNITS_PER_WORD;
}
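/* Worked example (illustrative): with NPARM_REGS == 6 and UNITS_PER_WORD == 4,
   a 12-byte structure whose first word lands in argument register 4 needs
   three registers but only two remain, so the function returns
   (6 - 4) * 4 == 8: eight bytes travel in registers and the final four
   spill to the stack.  */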
/* Return nonzero if SYMBOL is marked as being dllexport'd.  */

int
mcore_dllexport_name_p (const char * symbol)
{
  return symbol[0] == '@' && symbol[1] == 'e' && symbol[2] == '.';
}

/* Return nonzero if SYMBOL is marked as being dllimport'd.  */

int
mcore_dllimport_name_p (const char * symbol)
{
  return symbol[0] == '@' && symbol[1] == 'i' && symbol[2] == '.';
}
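/* For illustration: the marking routines below rename a symbol "foo" to
   "@e.foo" when it is dllexport'd and to "@i.__imp_foo" when it is
   dllimport'd, which is why the two predicates above only need to inspect
   the first three characters.  */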
/* Mark a DECL as being dllexport'd.  */

static void
mcore_mark_dllexport (tree decl)
{
  const char * oldname;
  char * newname;
  rtx rtlname;
  tree idp;

  rtlname = XEXP (DECL_RTL (decl), 0);

  if (GET_CODE (rtlname) == MEM)
    rtlname = XEXP (rtlname, 0);
  gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
  oldname = XSTR (rtlname, 0);

  if (mcore_dllexport_name_p (oldname))
    return;  /* Already done.  */

  newname = alloca (strlen (oldname) + 4);
  sprintf (newname, "@e.%s", oldname);

  /* We pass newname through get_identifier to ensure it has a unique
     address.  RTL processing can sometimes peek inside the symbol ref
     and compare the string's addresses to see if two symbols are
     identical.  */
  /* ??? At least I think that's why we do this.  */
  idp = get_identifier (newname);

  XEXP (DECL_RTL (decl), 0) =
    gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));
}
/* Mark a DECL as being dllimport'd.  */

static void
mcore_mark_dllimport (tree decl)
{
  const char * oldname;
  char * newname;
  tree idp;
  rtx rtlname;
  rtx newrtl;

  rtlname = XEXP (DECL_RTL (decl), 0);

  if (GET_CODE (rtlname) == MEM)
    rtlname = XEXP (rtlname, 0);
  gcc_assert (GET_CODE (rtlname) == SYMBOL_REF);
  oldname = XSTR (rtlname, 0);

  gcc_assert (!mcore_dllexport_name_p (oldname));
  if (mcore_dllimport_name_p (oldname))
    return; /* Already done.  */

  /* ??? One can well ask why we're making these checks here,
     and that would be a good question.  */

  /* Imported variables can't be initialized.  */
  if (TREE_CODE (decl) == VAR_DECL
      && !DECL_VIRTUAL_P (decl)
      && DECL_INITIAL (decl))
    {
      error ("initialized variable %q+D is marked dllimport", decl);
      return;
    }

  /* `extern' needn't be specified with dllimport.
     Specify `extern' now and hope for the best.  Sigh.  */
  if (TREE_CODE (decl) == VAR_DECL
      /* ??? Is this test for vtables needed?  */
      && !DECL_VIRTUAL_P (decl))
    {
      DECL_EXTERNAL (decl) = 1;
      TREE_PUBLIC (decl) = 1;
    }

  newname = alloca (strlen (oldname) + 11);
  sprintf (newname, "@i.__imp_%s", oldname);

  /* We pass newname through get_identifier to ensure it has a unique
     address.  RTL processing can sometimes peek inside the symbol ref
     and compare the string's addresses to see if two symbols are
     identical.  */
  /* ??? At least I think that's why we do this.  */
  idp = get_identifier (newname);

  newrtl = gen_rtx_MEM (Pmode,
			gen_rtx_SYMBOL_REF (Pmode,
					    IDENTIFIER_POINTER (idp)));
  XEXP (DECL_RTL (decl), 0) = newrtl;
}
static int
mcore_dllexport_p (tree decl)
{
  if (   TREE_CODE (decl) != VAR_DECL
      && TREE_CODE (decl) != FUNCTION_DECL)
    return 0;

  return lookup_attribute ("dllexport", DECL_ATTRIBUTES (decl)) != 0;
}

static int
mcore_dllimport_p (tree decl)
{
  if (   TREE_CODE (decl) != VAR_DECL
      && TREE_CODE (decl) != FUNCTION_DECL)
    return 0;

  return lookup_attribute ("dllimport", DECL_ATTRIBUTES (decl)) != 0;
}
/* We must mark dll symbols specially.  Definitions of dllexport'd objects
   install some info in the .drective (PE) or .exports (ELF) sections.  */

static void
mcore_encode_section_info (tree decl, rtx rtl ATTRIBUTE_UNUSED, int first ATTRIBUTE_UNUSED)
{
  /* Mark the decl so we can tell from the rtl whether the object is
     dllexport'd or dllimport'd.  */
  if (mcore_dllexport_p (decl))
    mcore_mark_dllexport (decl);
  else if (mcore_dllimport_p (decl))
    mcore_mark_dllimport (decl);

  /* It might be that DECL has already been marked as dllimport, but
     a subsequent definition nullified that.  The attribute is gone
     but DECL_RTL still has @i.__imp_foo.  We need to remove that.  */
  else if (   (TREE_CODE (decl) == FUNCTION_DECL
	       || TREE_CODE (decl) == VAR_DECL)
	   && DECL_RTL (decl) != NULL_RTX
	   && GET_CODE (DECL_RTL (decl)) == MEM
	   && GET_CODE (XEXP (DECL_RTL (decl), 0)) == MEM
	   && GET_CODE (XEXP (XEXP (DECL_RTL (decl), 0), 0)) == SYMBOL_REF
	   && mcore_dllimport_name_p (XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0)))
    {
      const char * oldname = XSTR (XEXP (XEXP (DECL_RTL (decl), 0), 0), 0);
      tree idp = get_identifier (oldname + 9);
      rtx newrtl = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (idp));

      XEXP (DECL_RTL (decl), 0) = newrtl;

      /* We previously set TREE_PUBLIC and DECL_EXTERNAL.
	 ??? We leave these alone for now.  */
    }
}
/* Undo the effects of the above.  */

static const char *
mcore_strip_name_encoding (const char * str)
{
  return str + (str[0] == '@' ? 3 : 0);
}
/* MCore specific attribute support.
   dllexport - for exporting a function/variable that will live in a dll
   dllimport - for importing a function/variable from a dll
   naked     - do not create a function prologue/epilogue.  */

const struct attribute_spec mcore_attribute_table[] =
{
  /* { name, min_len, max_len, decl_req, type_req, fn_type_req, handler } */
  { "dllexport", 0, 0, true,  false, false, NULL },
  { "dllimport", 0, 0, true,  false, false, NULL },
  { "naked",     0, 0, true,  false, false, mcore_handle_naked_attribute },
  { NULL,        0, 0, false, false, false, NULL }
};
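/* Illustrative user-side code (not part of this file) showing how the
   attributes above are attached; all three take no arguments (min_len ==
   max_len == 0) and must appear on declarations (decl_req == true):

	int table[4] __attribute__ ((dllexport));
	extern int get (void) __attribute__ ((dllimport));
	void isr (void) __attribute__ ((naked));
*/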
/* Handle a "naked" attribute; arguments as in
   struct attribute_spec.handler.  */

static tree
mcore_handle_naked_attribute (tree * node, tree name, tree args ATTRIBUTE_UNUSED,
			      int flags ATTRIBUTE_UNUSED, bool * no_add_attrs)
{
  if (TREE_CODE (*node) == FUNCTION_DECL)
    {
      /* PR14310 - don't complain about lack of return statement
	 in naked functions.  The solution here is a gross hack
	 but this is the only way to solve the problem without
	 adding a new feature to GCC.  I did try submitting a patch
	 that would add such a new feature, but it was (rightfully)
	 rejected on the grounds that it was creeping featurism,
	 so hence this code.  */
      if (warn_return_type)
	{
	  saved_warn_return_type = warn_return_type;
	  warn_return_type = 0;
	  saved_warn_return_type_count = 2;
	}
      else if (saved_warn_return_type_count)
	saved_warn_return_type_count = 2;
    }
  else
    {
      warning (OPT_Wattributes, "%qs attribute only applies to functions",
	       IDENTIFIER_POINTER (name));
      *no_add_attrs = true;
    }

  return NULL_TREE;
}
/* ??? It looks like this is PE specific?  Oh well, this is what the
   old code did as well.  */

static void
mcore_unique_section (tree decl, int reloc ATTRIBUTE_UNUSED)
{
  int len;
  const char * name;
  char * string;
  const char * prefix;

  name = IDENTIFIER_POINTER (DECL_ASSEMBLER_NAME (decl));

  /* Strip off any encoding in name.  */
  name = (* targetm.strip_name_encoding) (name);

  /* The object is put in, for example, section .text$foo.
     The linker will then ultimately place it in .text
     (everything from the $ on is stripped).  */
  if (TREE_CODE (decl) == FUNCTION_DECL)
    prefix = ".text$";
  /* For compatibility with EPOC, we ignore the fact that the
     section might have relocs against it.  */
  else if (decl_readonly_section (decl, 0))
    prefix = ".rdata$";
  else
    prefix = ".data$";

  len = strlen (name) + strlen (prefix);
  string = alloca (len + 1);

  sprintf (string, "%s%s", prefix, name);

  DECL_SECTION_NAME (decl) = build_string (len, string);
}
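/* For illustration: a function foo is placed in section ".text$foo" and a
   read-only datum bar in ".rdata$bar"; a PE-style linker merges these back
   into .text/.rdata, stripping everything from the '$' onwards.  */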
int
mcore_naked_function_p (void)
{
  return lookup_attribute ("naked", DECL_ATTRIBUTES (current_function_decl)) != NULL_TREE;
}
#ifdef OBJECT_FORMAT_ELF
static void
mcore_asm_named_section (const char *name,
			 unsigned int flags ATTRIBUTE_UNUSED,
			 tree decl ATTRIBUTE_UNUSED)
{
  fprintf (asm_out_file, "\t.section %s\n", name);
}
#endif /* OBJECT_FORMAT_ELF */
/* Worker function for TARGET_ASM_EXTERNAL_LIBCALL.  */

static void
mcore_external_libcall (rtx fun)
{
  fprintf (asm_out_file, "\t.import\t");
  assemble_name (asm_out_file, XSTR (fun, 0));
  fprintf (asm_out_file, "\n");
}
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
mcore_return_in_memory (tree type, tree fntype ATTRIBUTE_UNUSED)
{
  HOST_WIDE_INT size = int_size_in_bytes (type);

  return (size == -1 || size > 2 * UNITS_PER_WORD);
}
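/* For illustration: with UNITS_PER_WORD == 4 this keeps return values of up
   to eight bytes in registers (see handle_structs_in_regs) and forces larger
   or variable-sized types (int_size_in_bytes == -1) into memory.  */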