/* Subroutines for insn-output.c for Matsushita MN10300 series
   Copyright (C) 1996-2015 Free Software Foundation, Inc.
   Contributed by Jeff Law (law@cygnus.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#include "coretypes.h"
#include "double-int.h"
#include "stor-layout.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "insn-attr.h"
#include "statistics.h"
#include "fixed-value.h"
#include "insn-codes.h"
#include "diagnostic-core.h"
#include "tm-constrs.h"
#include "target-def.h"
#include "dominance.h"
#include "cfgcleanup.h"
#include "basic-block.h"
/* This is used in the am33_2.0-linux-gnu port, in which global symbol
   names are not prefixed by underscores, to tell whether to prefix a
   label with a plus sign or not, so that the assembler can tell
   symbol names from register names.  */
int mn10300_protect_label;

/* Selected processor type for tuning.  */
enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;

static int cc_flags_for_mode (machine_mode);
static int cc_flags_for_code (enum rtx_code);
/* Implement TARGET_OPTION_OVERRIDE.  */

mn10300_option_override (void)
  target_flags &= ~MASK_MULT_BUG;

  /* Disable scheduling for the MN10300 as we do
     not have timing information available for it.  */
  flag_schedule_insns = 0;
  flag_schedule_insns_after_reload = 0;

  /* Force enable splitting of wide types, as otherwise it is trivial
     to run out of registers.  Indeed, this works so well that register
     allocation problems are now more common *without* optimization,
     when this flag is not enabled by default.  */
  flag_split_wide_types = 1;

  if (mn10300_tune_string)
      if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
        mn10300_tune_cpu = PROCESSOR_MN10300;
      else if (strcasecmp (mn10300_tune_string, "am33") == 0)
        mn10300_tune_cpu = PROCESSOR_AM33;
      else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
        mn10300_tune_cpu = PROCESSOR_AM33_2;
      else if (strcasecmp (mn10300_tune_string, "am34") == 0)
        mn10300_tune_cpu = PROCESSOR_AM34;
      else
        error ("-mtune= expects mn10300, am33, am33-2, or am34");
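
  /* For example, -mtune=am34 selects PROCESSOR_AM34 above; the tuning
     choice can then be consulted by later cost and scheduling code in
     this file (e.g. mn10300_adjust_sched_cost).  */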
mn10300_file_start (void)
  default_file_start ();

    fprintf (asm_out_file, "\t.am33_2\n");
  else if (TARGET_AM33)
    fprintf (asm_out_file, "\t.am33\n");

/* Note: This list must match the liw_op attribute in mn10300.md.  */

static const char *liw_op_names[] =
  "add", "cmp", "sub", "mov",
/* Print operand X using operand code CODE to assembly language output file
   FILE.  */

mn10300_print_operand (FILE *file, rtx x, int code)
        unsigned int liw_op = UINTVAL (x);

        gcc_assert (TARGET_ALLOW_LIW);
        gcc_assert (liw_op < LIW_OP_MAX);
        fputs (liw_op_names[liw_op], file);

        enum rtx_code cmp = GET_CODE (x);
        machine_mode mode = GET_MODE (XEXP (x, 0));

          cmp = reverse_condition (cmp);
        have_flags = cc_flags_for_mode (mode);

        /* bge is smaller than bnc.  */
        str = (have_flags & CC_FLAG_V ? "ge" : "nc");
        str = (have_flags & CC_FLAG_V ? "lt" : "ns");

        gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
    /* This is used for the operand to a call instruction;
       if it's a REG, enclose it in parens, else output
       the operand normally.  */
        mn10300_print_operand (file, x, 0);
        mn10300_print_operand (file, x, 0);

      switch (GET_CODE (x))
          output_address (XEXP (x, 0));
          fprintf (file, "fd%d", REGNO (x) - 18);
    /* These are the least significant word in a 64bit value.  */
      switch (GET_CODE (x))
          output_address (XEXP (x, 0));
          fprintf (file, "%s", reg_names[REGNO (x)]);
          fprintf (file, "%s", reg_names[subreg_regno (x)]);
          switch (GET_MODE (x))
              REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
              REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
              fprintf (file, "0x%lx", val[0]);
              REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
              REAL_VALUE_TO_TARGET_SINGLE (rv, val[0]);
              fprintf (file, "0x%lx", val[0]);
          mn10300_print_operand_address (file,
                                         GEN_INT (CONST_DOUBLE_LOW (x)));
          split_double (x, &low, &high);
          fprintf (file, "%ld", (long)INTVAL (low));
    /* Similarly, but for the most significant word.  */
      switch (GET_CODE (x))
          x = adjust_address (x, SImode, 4);
          output_address (XEXP (x, 0));
          fprintf (file, "%s", reg_names[REGNO (x) + 1]);
          fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
          switch (GET_MODE (x))
              REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
              REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
              fprintf (file, "0x%lx", val[1]);
          mn10300_print_operand_address (file,
                                         GEN_INT (CONST_DOUBLE_HIGH (x)));
          split_double (x, &low, &high);
          fprintf (file, "%ld", (long)INTVAL (high));
      if (REG_P (XEXP (x, 0)))
        output_address (gen_rtx_PLUS (SImode, XEXP (x, 0), const0_rtx));
        output_address (XEXP (x, 0));

      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)((~INTVAL (x)) & 0xff));
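      /* For example, an operand of 5 is printed as 250 (~5 & 0xff) and
         an operand of -1 is printed as 0.  */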
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int)(INTVAL (x) & 0xff));

    /* For shift counts.  The hardware ignores the upper bits of
       any immediate, but the assembler will flag an out of range
       shift count as an error.  So we mask off the high bits
       of the immediate here.  */
      fprintf (file, "%d", (int)(INTVAL (x) & 0x1f));
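      /* For example, a shift count of 37 is printed as 5 (37 & 0x1f),
         matching what the hardware would do with the full value.  */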
      switch (GET_CODE (x))
          output_address (XEXP (x, 0));
          fprintf (file, "%s", reg_names[REGNO (x)]);
          fprintf (file, "%s", reg_names[subreg_regno (x)]);

          /* This will only be single precision....  */
          REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
          REAL_VALUE_TO_TARGET_SINGLE (rv, val);
          fprintf (file, "0x%lx", val);

          mn10300_print_operand_address (file, x);
/* Output assembly language for the address ADDR to FILE.  */

mn10300_print_operand_address (FILE *file, rtx addr)
  switch (GET_CODE (addr))
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      mn10300_print_operand (file, XEXP (addr, 1), 0);
      mn10300_print_operand (file, addr, 0);
        rtx base = XEXP (addr, 0);
        rtx index = XEXP (addr, 1);

        if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
        gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
        gcc_assert (REG_OK_FOR_BASE_P (base));

        mn10300_print_operand (file, index, 0);
        mn10300_print_operand (file, base, 0);
      output_addr_const (file, addr);
      output_addr_const (file, addr);
/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.

   Used for PIC-specific UNSPECs.  */

mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
  if (GET_CODE (x) == UNSPEC)
        /* GLOBAL_OFFSET_TABLE or local symbols, no suffix.  */
        output_addr_const (file, XVECEXP (x, 0, 0));
        output_addr_const (file, XVECEXP (x, 0, 0));
        fputs ("@GOT", file);
        output_addr_const (file, XVECEXP (x, 0, 0));
        fputs ("@GOTOFF", file);
        output_addr_const (file, XVECEXP (x, 0, 0));
        fputs ("@PLT", file);
      case UNSPEC_GOTSYM_OFF:
        assemble_name (file, GOT_SYMBOL_NAME);
        output_addr_const (file, XVECEXP (x, 0, 0));
/* Count the number of FP registers that have to be saved.  */

fp_regs_to_save (void)
  for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
/* Print a set of registers in the format required by "movm" and "ret".
   Register K is saved if bit K of MASK is set.  The data and address
   registers can be stored individually, but the extended registers cannot.
   We assume that the mask already takes that into account.  For instance,
   bits 14 to 17 must have the same value.  */

mn10300_print_reg_list (FILE *file, int mask)
  for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
    if ((mask & (1 << i)) != 0)
        fputs (reg_names[i], file);

  if ((mask & 0x3c000) != 0)
      gcc_assert ((mask & 0x3c000) == 0x3c000);
      fputs ("exreg1", file);
/* If the MDR register is never clobbered, we can use the RETF instruction
   which takes the address from the MDR register.  This is 3 cycles faster
   than having to load the address from the stack.  */

mn10300_can_use_retf_insn (void)
  /* Don't bother if we're not optimizing.  In this case we won't
     have proper access to df_regs_ever_live_p.  */

  /* EH returns alter the saved return address; MDR is not current.  */
  if (crtl->calls_eh_return)

  /* Obviously not if MDR is ever clobbered.  */
  if (df_regs_ever_live_p (MDR_REG))

  /* ??? Careful not to use this during expand_epilogue etc.  */
  gcc_assert (!in_sequence_p ());
  return leaf_function_p ();

mn10300_can_use_rets_insn (void)
  return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
/* Returns the set of live, callee-saved registers as a bitmask.  The
   callee-saved extended registers cannot be stored individually, so
   all of them will be included in the mask if any one of them is used.
   Also returns the number of bytes in the registers in the mask if
   BYTES_SAVED is not NULL.  */

mn10300_get_live_callee_saved_regs (unsigned int * bytes_saved)
  for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])

  if ((mask & 0x3c000) != 0)
      for (i = 0x04000; i < 0x40000; i <<= 1)

    * bytes_saved = count * UNITS_PER_WORD;
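
  /* UNITS_PER_WORD is 4 bytes on this 32-bit target, so the save area
     occupies one 4-byte slot per register counted in the mask.  */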
  RTX_FRAME_RELATED_P (r) = 1;
/* Generate an instruction that pushes several registers onto the stack.
   Register K will be saved if bit K in MASK is set.  The function does
   nothing if MASK is zero.

   To be compatible with the "movm" instruction, the lowest-numbered
   register must be stored in the lowest slot.  If MASK is the set
   { R1,...,RN }, where R1...RN are ordered least first, the generated
   instruction will have the form:

       (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
       (set (mem:SI (plus:SI (reg:SI 9)
       (set (mem:SI (plus:SI (reg:SI 9)  */

mn10300_gen_multiple_store (unsigned int mask)
  /* The order in which registers are stored, from SP-4 through SP-N*4.  */
  static const unsigned int store_order[8] = {
    /* e2, e3: never saved */
    FIRST_EXTENDED_REGNUM + 4,
    FIRST_EXTENDED_REGNUM + 5,
    FIRST_EXTENDED_REGNUM + 6,
    FIRST_EXTENDED_REGNUM + 7,
    /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved.  */
    FIRST_DATA_REGNUM + 2,
    FIRST_DATA_REGNUM + 3,
    FIRST_ADDRESS_REGNUM + 2,
    FIRST_ADDRESS_REGNUM + 3,
    /* d0, d1, a0, a1, mdr, lir, lar: never saved.  */

  for (i = count = 0; i < ARRAY_SIZE (store_order); ++i)
      unsigned regno = store_order[i];

      if (((mask >> regno) & 1) == 0)

      x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
      x = gen_frame_mem (SImode, x);
      x = gen_rtx_SET (x, gen_rtx_REG (SImode, regno));

      /* Remove the register from the mask so that...  */
      mask &= ~(1u << regno);

  /* ... we can make sure that we didn't try to use a register
     not listed in the store order.  */
  gcc_assert (mask == 0);

  /* Create the instruction that updates the stack pointer.  */
  x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
  x = gen_rtx_SET (stack_pointer_rtx, x);

  /* We need one PARALLEL element to update the stack pointer and
     an additional element for each register that is stored.  */
  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
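
  /* For example, if MASK selects only d2, d3, a2 and a3, COUNT is 4:
     the PARALLEL decrements sp by 16 and stores the four registers in
     the slots just allocated.  */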
static inline unsigned int
popcount (unsigned int mask)
  unsigned int count = 0;

      mask &= ~ (mask & - mask);
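      /* (mask & -mask) isolates the lowest set bit, so each loop
         iteration clears exactly one bit; count ends up holding the
         number of bits set in the original mask.  */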
mn10300_expand_prologue (void)
  HOST_WIDE_INT size = mn10300_frame_size ();

  mask = mn10300_get_live_callee_saved_regs (NULL);
  /* If we use any of the callee-saved registers, save them now.  */
  mn10300_gen_multiple_store (mask);

  if (flag_stack_usage_info)
    current_function_static_stack_size = size + popcount (mask) * 4;

  if (TARGET_AM33_2 && fp_regs_to_save ())
      int num_regs_to_save = fp_regs_to_save (), i;
             save_sp_partial_merge,
      unsigned int strategy_size = (unsigned)-1, this_strategy_size;

      if (flag_stack_usage_info)
        current_function_static_stack_size += num_regs_to_save * 4;

      /* We have several different strategies to save FP registers.
         We can store them using SP offsets, which is beneficial if
         there are just a few registers to save, or we can use `a0' in
         post-increment mode (`a0' is the only call-clobbered address
         register that is never used to pass information to a
         function).  Furthermore, if we don't need a frame pointer, we
         can merge the two SP adds into a single one, but this isn't
         always beneficial; sometimes we can just split the two adds
         so that we don't exceed a 16-bit constant size.  The code
         below will select which strategy to use, so as to generate
         smallest code.  Ties are broken in favor of shorter sequences
         (in terms of number of instructions).  */

#define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
                        : (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
#define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
                        : (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)

      /* We add 0 * (S) in two places to promote to the type of S,
         so that all arms of the conditional have the same type.  */
#define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
  (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
   : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
                               + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
#define SIZE_FMOV_SP_(S,N) \
  (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
                    SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
                                     (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
#define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT)(S), (N)))
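
/* As a worked example of the size macros above: SIZE_ADD_SP (-4)
   evaluates to 3, SIZE_ADD_SP (-200) to 4 and SIZE_ADD_SP (-40000) to 6,
   reflecting how the encoded "add imm,sp" grows as the immediate needs
   8, 16 or 32 bits.  */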
      /* Consider alternative save_sp_merge only if we don't need the
         frame pointer and size is nonzero.  */
      if (! frame_pointer_needed && size)
          /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
          this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
          /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
          this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);

          if (this_strategy_size < strategy_size)
              strategy = save_sp_merge;
              strategy_size = this_strategy_size;

      /* Consider alternative save_sp_no_merge unconditionally.  */
      /* Insn: add -4 * num_regs_to_save, sp.  */
      this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
      /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
      this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
          /* Insn: add -size, sp.  */
          this_strategy_size += SIZE_ADD_SP (-size);

      if (this_strategy_size < strategy_size)
          strategy = save_sp_no_merge;
          strategy_size = this_strategy_size;

      /* Consider alternative save_sp_partial_merge only if we don't
         need a frame pointer and size is reasonably large.  */
      if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
          /* Insn: add -128, sp.  */
          this_strategy_size = SIZE_ADD_SP (-128);
          /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
          this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
          /* Insn: add 128-size, sp.  */
          this_strategy_size += SIZE_ADD_SP (128 - size);

          if (this_strategy_size < strategy_size)
              strategy = save_sp_partial_merge;
              strategy_size = this_strategy_size;
      /* Consider alternative save_a0_merge only if we don't need a
         frame pointer, size is nonzero and the user hasn't
         changed the calling conventions of a0.  */
      if (! frame_pointer_needed && size
          && call_really_used_regs[FIRST_ADDRESS_REGNUM]
          && ! fixed_regs[FIRST_ADDRESS_REGNUM])
          /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
          this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
          /* Insn: mov sp, a0.  */
          this_strategy_size++;
          /* Insn: add size, a0.  */
          this_strategy_size += SIZE_ADD_AX (size);
          /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
          this_strategy_size += 3 * num_regs_to_save;

          if (this_strategy_size < strategy_size)
              strategy = save_a0_merge;
              strategy_size = this_strategy_size;

      /* Consider alternative save_a0_no_merge if the user hasn't
         changed the calling conventions of a0.  */
      if (call_really_used_regs[FIRST_ADDRESS_REGNUM]
          && ! fixed_regs[FIRST_ADDRESS_REGNUM])
          /* Insn: add -4 * num_regs_to_save, sp.  */
          this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
          /* Insn: mov sp, a0.  */
          this_strategy_size++;
          /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
          this_strategy_size += 3 * num_regs_to_save;
          /* Insn: add -size, sp.  */
          this_strategy_size += SIZE_ADD_SP (-size);

          if (this_strategy_size < strategy_size)
              strategy = save_a0_no_merge;
              strategy_size = this_strategy_size;
      /* Emit the initial SP add, common to all strategies.  */
        case save_sp_no_merge:
        case save_a0_no_merge:
          F (emit_insn (gen_addsi3 (stack_pointer_rtx,
                                    GEN_INT (-4 * num_regs_to_save))));

        case save_sp_partial_merge:
          F (emit_insn (gen_addsi3 (stack_pointer_rtx,
          xsize = 128 - 4 * num_regs_to_save;

          F (emit_insn (gen_addsi3 (stack_pointer_rtx,
                                    GEN_INT (-(size + 4 * num_regs_to_save)))));
          /* We'll have to adjust FP register saves according to the
          /* Since we've already created the stack frame, don't do it
             again at the end of the function.  */

      /* Now prepare register a0, if we have decided to use it.  */
        case save_sp_no_merge:
        case save_sp_partial_merge:
        case save_a0_no_merge:
          reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
          F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
          F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
          reg = gen_rtx_POST_INC (SImode, reg);

      /* Now actually save the FP registers.  */
      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
        if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
            /* If we aren't using `a0', use an SP offset.  */
                addr = gen_rtx_PLUS (SImode,
              addr = stack_pointer_rtx;

            F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
                                     gen_rtx_REG (SFmode, i))));

  /* Now put the frame pointer into the frame pointer register.  */
  if (frame_pointer_needed)
    F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));

  /* Allocate stack for this frame.  */
    F (emit_insn (gen_addsi3 (stack_pointer_rtx,

  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    emit_insn (gen_load_pic ());
1054 mn10300_expand_epilogue (void)
1056 HOST_WIDE_INT size
= mn10300_frame_size ();
1057 unsigned int reg_save_bytes
;
1059 mn10300_get_live_callee_saved_regs (& reg_save_bytes
);
1061 if (TARGET_AM33_2
&& fp_regs_to_save ())
1063 int num_regs_to_save
= fp_regs_to_save (), i
;
1066 /* We have several options to restore FP registers. We could
1067 load them from SP offsets, but, if there are enough FP
1068 registers to restore, we win if we use a post-increment
1071 /* If we have a frame pointer, it's the best option, because we
1072 already know it has the value we want. */
1073 if (frame_pointer_needed
)
1074 reg
= gen_rtx_REG (SImode
, FRAME_POINTER_REGNUM
);
1075 /* Otherwise, we may use `a1', since it's call-clobbered and
1076 it's never used for return values. But only do so if it's
1077 smaller than using SP offsets. */
1080 enum { restore_sp_post_adjust
,
1081 restore_sp_pre_adjust
,
1082 restore_sp_partial_adjust
,
1083 restore_a1
} strategy
;
1084 unsigned int this_strategy_size
, strategy_size
= (unsigned)-1;
1086 /* Consider using sp offsets before adjusting sp. */
1087 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1088 this_strategy_size
= SIZE_FMOV_SP (size
, num_regs_to_save
);
1089 /* If size is too large, we'll have to adjust SP with an
1091 if (size
+ 4 * num_regs_to_save
+ reg_save_bytes
> 255)
1093 /* Insn: add size + 4 * num_regs_to_save, sp. */
1094 this_strategy_size
+= SIZE_ADD_SP (size
+ 4 * num_regs_to_save
);
1096 /* If we don't have to restore any non-FP registers,
1097 we'll be able to save one byte by using rets. */
1098 if (! reg_save_bytes
)
1099 this_strategy_size
--;
1101 if (this_strategy_size
< strategy_size
)
1103 strategy
= restore_sp_post_adjust
;
1104 strategy_size
= this_strategy_size
;
1107 /* Consider using sp offsets after adjusting sp. */
1108 /* Insn: add size, sp. */
1109 this_strategy_size
= SIZE_ADD_SP (size
);
1110 /* Insn: fmov (##,sp),fs#, for each fs# to be restored. */
1111 this_strategy_size
+= SIZE_FMOV_SP (0, num_regs_to_save
);
1112 /* We're going to use ret to release the FP registers
1113 save area, so, no savings. */
1115 if (this_strategy_size
< strategy_size
)
1117 strategy
= restore_sp_pre_adjust
;
1118 strategy_size
= this_strategy_size
;
1121 /* Consider using sp offsets after partially adjusting sp.
1122 When size is close to 32Kb, we may be able to adjust SP
1123 with an imm16 add instruction while still using fmov
1125 if (size
+ 4 * num_regs_to_save
+ reg_save_bytes
> 255)
1127 /* Insn: add size + 4 * num_regs_to_save
1128 + reg_save_bytes - 252,sp. */
1129 this_strategy_size
= SIZE_ADD_SP (size
+ 4 * num_regs_to_save
1130 + (int) reg_save_bytes
- 252);
          /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
1132 this_strategy_size
+= SIZE_FMOV_SP (252 - reg_save_bytes
1133 - 4 * num_regs_to_save
,
1135 /* We're going to use ret to release the FP registers
1136 save area, so, no savings. */
1138 if (this_strategy_size
< strategy_size
)
1140 strategy
= restore_sp_partial_adjust
;
1141 strategy_size
= this_strategy_size
;
1145 /* Consider using a1 in post-increment mode, as long as the
1146 user hasn't changed the calling conventions of a1. */
1147 if (call_really_used_regs
[FIRST_ADDRESS_REGNUM
+ 1]
1148 && ! fixed_regs
[FIRST_ADDRESS_REGNUM
+1])
1150 /* Insn: mov sp,a1. */
1151 this_strategy_size
= 1;
1154 /* Insn: add size,a1. */
1155 this_strategy_size
+= SIZE_ADD_AX (size
);
1157 /* Insn: fmov (a1+),fs#, for each fs# to be restored. */
1158 this_strategy_size
+= 3 * num_regs_to_save
;
1159 /* If size is large enough, we may be able to save a
1161 if (size
+ 4 * num_regs_to_save
+ reg_save_bytes
> 255)
1163 /* Insn: mov a1,sp. */
1164 this_strategy_size
+= 2;
1166 /* If we don't have to restore any non-FP registers,
1167 we'll be able to save one byte by using rets. */
1168 if (! reg_save_bytes
)
1169 this_strategy_size
--;
1171 if (this_strategy_size
< strategy_size
)
1173 strategy
= restore_a1
;
1174 strategy_size
= this_strategy_size
;
1180 case restore_sp_post_adjust
:
1183 case restore_sp_pre_adjust
:
1184 emit_insn (gen_addsi3 (stack_pointer_rtx
,
1190 case restore_sp_partial_adjust
:
1191 emit_insn (gen_addsi3 (stack_pointer_rtx
,
1193 GEN_INT (size
+ 4 * num_regs_to_save
1194 + reg_save_bytes
- 252)));
1195 size
= 252 - reg_save_bytes
- 4 * num_regs_to_save
;
1199 reg
= gen_rtx_REG (SImode
, FIRST_ADDRESS_REGNUM
+ 1);
1200 emit_insn (gen_movsi (reg
, stack_pointer_rtx
));
1202 emit_insn (gen_addsi3 (reg
, reg
, GEN_INT (size
)));
1210 /* Adjust the selected register, if any, for post-increment. */
1212 reg
= gen_rtx_POST_INC (SImode
, reg
);
1214 for (i
= FIRST_FP_REGNUM
; i
<= LAST_FP_REGNUM
; ++i
)
1215 if (df_regs_ever_live_p (i
) && ! call_really_used_regs
[i
])
1223 /* If we aren't using a post-increment register, use an
1225 addr
= gen_rtx_PLUS (SImode
,
1230 addr
= stack_pointer_rtx
;
1234 emit_insn (gen_movsf (gen_rtx_REG (SFmode
, i
),
1235 gen_rtx_MEM (SFmode
, addr
)));
1238 /* If we were using the restore_a1 strategy and the number of
1239 bytes to be released won't fit in the `ret' byte, copy `a1'
1240 to `sp', to avoid having to use `add' to adjust it. */
1241 if (! frame_pointer_needed
&& reg
&& size
+ reg_save_bytes
> 255)
1243 emit_move_insn (stack_pointer_rtx
, XEXP (reg
, 0));
1248 /* Maybe cut back the stack, except for the register save area.
1250 If the frame pointer exists, then use the frame pointer to
1253 If the stack size + register save area is more than 255 bytes,
1254 then the stack must be cut back here since the size + register
1255 save size is too big for a ret/retf instruction.
1257 Else leave it alone, it will be cut back as part of the
1258 ret/retf instruction, or there wasn't any stack to begin with.
1260 Under no circumstances should the register save area be
1261 deallocated here, that would leave a window where an interrupt
1262 could occur and trash the register save area. */
1263 if (frame_pointer_needed
)
1265 emit_move_insn (stack_pointer_rtx
, frame_pointer_rtx
);
1268 else if (size
+ reg_save_bytes
> 255)
1270 emit_insn (gen_addsi3 (stack_pointer_rtx
,
1276 /* Adjust the stack and restore callee-saved registers, if any. */
1277 if (mn10300_can_use_rets_insn ())
1278 emit_jump_insn (ret_rtx
);
1280 emit_jump_insn (gen_return_ret (GEN_INT (size
+ reg_save_bytes
)));
1283 /* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
1284 This function is for MATCH_PARALLEL and so assumes OP is known to be
1285 parallel. If OP is a multiple store, return a mask indicating which
1286 registers it saves. Return 0 otherwise. */
1289 mn10300_store_multiple_regs (rtx op
)
1297 count
= XVECLEN (op
, 0);
1301 /* Check that first instruction has the form (set (sp) (plus A B)) */
1302 elt
= XVECEXP (op
, 0, 0);
1303 if (GET_CODE (elt
) != SET
1304 || (! REG_P (SET_DEST (elt
)))
1305 || REGNO (SET_DEST (elt
)) != STACK_POINTER_REGNUM
1306 || GET_CODE (SET_SRC (elt
)) != PLUS
)
1309 /* Check that A is the stack pointer and B is the expected stack size.
1310 For OP to match, each subsequent instruction should push a word onto
1311 the stack. We therefore expect the first instruction to create
1312 COUNT-1 stack slots. */
1313 elt
= SET_SRC (elt
);
1314 if ((! REG_P (XEXP (elt
, 0)))
1315 || REGNO (XEXP (elt
, 0)) != STACK_POINTER_REGNUM
1316 || (! CONST_INT_P (XEXP (elt
, 1)))
1317 || INTVAL (XEXP (elt
, 1)) != -(count
- 1) * 4)
1321 for (i
= 1; i
< count
; i
++)
1323 /* Check that element i is a (set (mem M) R). */
1324 /* ??? Validate the register order a-la mn10300_gen_multiple_store.
1325 Remember: the ordering is *not* monotonic. */
1326 elt
= XVECEXP (op
, 0, i
);
1327 if (GET_CODE (elt
) != SET
1328 || (! MEM_P (SET_DEST (elt
)))
1329 || (! REG_P (SET_SRC (elt
))))
1332 /* Remember which registers are to be saved. */
1333 last
= REGNO (SET_SRC (elt
));
1334 mask
|= (1 << last
);
1336 /* Check that M has the form (plus (sp) (const_int -I*4)) */
1337 elt
= XEXP (SET_DEST (elt
), 0);
1338 if (GET_CODE (elt
) != PLUS
1339 || (! REG_P (XEXP (elt
, 0)))
1340 || REGNO (XEXP (elt
, 0)) != STACK_POINTER_REGNUM
1341 || (! CONST_INT_P (XEXP (elt
, 1)))
1342 || INTVAL (XEXP (elt
, 1)) != -i
* 4)
1346 /* All or none of the callee-saved extended registers must be in the set. */
1347 if ((mask
& 0x3c000) != 0
1348 && (mask
& 0x3c000) != 0x3c000)
1354 /* Implement TARGET_PREFERRED_RELOAD_CLASS. */
1357 mn10300_preferred_reload_class (rtx x
, reg_class_t rclass
)
1359 if (x
== stack_pointer_rtx
&& rclass
!= SP_REGS
)
1360 return (TARGET_AM33
? GENERAL_REGS
: ADDRESS_REGS
);
1363 && !HARD_REGISTER_P (x
))
1364 || (GET_CODE (x
) == SUBREG
1365 && REG_P (SUBREG_REG (x
))
1366 && !HARD_REGISTER_P (SUBREG_REG (x
))))
1367 return LIMIT_RELOAD_CLASS (GET_MODE (x
), rclass
);
1372 /* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS. */
1375 mn10300_preferred_output_reload_class (rtx x
, reg_class_t rclass
)
1377 if (x
== stack_pointer_rtx
&& rclass
!= SP_REGS
)
1378 return (TARGET_AM33
? GENERAL_REGS
: ADDRESS_REGS
);
1382 /* Implement TARGET_SECONDARY_RELOAD. */
1385 mn10300_secondary_reload (bool in_p
, rtx x
, reg_class_t rclass_i
,
1386 machine_mode mode
, secondary_reload_info
*sri
)
1388 enum reg_class rclass
= (enum reg_class
) rclass_i
;
1389 enum reg_class xclass
= NO_REGS
;
1390 unsigned int xregno
= INVALID_REGNUM
;
1395 if (xregno
>= FIRST_PSEUDO_REGISTER
)
1396 xregno
= true_regnum (x
);
1397 if (xregno
!= INVALID_REGNUM
)
1398 xclass
= REGNO_REG_CLASS (xregno
);
1403 /* Memory load/stores less than a full word wide can't have an
1404 address or stack pointer destination. They must use a data
1405 register as an intermediate register. */
1406 if (rclass
!= DATA_REGS
1407 && (mode
== QImode
|| mode
== HImode
)
1408 && xclass
== NO_REGS
)
1411 /* We can only move SP to/from an address register. */
1413 && rclass
== SP_REGS
1414 && xclass
!= ADDRESS_REGS
)
1415 return ADDRESS_REGS
;
1417 && xclass
== SP_REGS
1418 && rclass
!= ADDRESS_REGS
1419 && rclass
!= SP_OR_ADDRESS_REGS
)
1420 return ADDRESS_REGS
;
  /* We can't directly load sp + const_int into a register;
     we must use an address register as a scratch.  */
1426 && rclass
!= SP_REGS
1427 && rclass
!= SP_OR_ADDRESS_REGS
1428 && rclass
!= SP_OR_GENERAL_REGS
1429 && GET_CODE (x
) == PLUS
1430 && (XEXP (x
, 0) == stack_pointer_rtx
1431 || XEXP (x
, 1) == stack_pointer_rtx
))
1433 sri
->icode
= CODE_FOR_reload_plus_sp_const
;
1437 /* We can only move MDR to/from a data register. */
1438 if (rclass
== MDR_REGS
&& xclass
!= DATA_REGS
)
1440 if (xclass
== MDR_REGS
&& rclass
!= DATA_REGS
)
1443 /* We can't load/store an FP register from a constant address. */
1445 && (rclass
== FP_REGS
|| xclass
== FP_REGS
)
1446 && (xclass
== NO_REGS
|| rclass
== NO_REGS
))
1450 if (xregno
>= FIRST_PSEUDO_REGISTER
&& xregno
!= INVALID_REGNUM
)
1452 addr
= reg_equiv_mem (xregno
);
1454 addr
= XEXP (addr
, 0);
1459 if (addr
&& CONSTANT_ADDRESS_P (addr
))
1460 return GENERAL_REGS
;
1462 /* Otherwise assume no secondary reloads are needed. */
1467 mn10300_frame_size (void)
1469 /* size includes the fixed stack space needed for function calls. */
1470 int size
= get_frame_size () + crtl
->outgoing_args_size
;
1472 /* And space for the return pointer. */
1473 size
+= crtl
->outgoing_args_size
? 4 : 0;
1479 mn10300_initial_offset (int from
, int to
)
1483 gcc_assert (from
== ARG_POINTER_REGNUM
|| from
== FRAME_POINTER_REGNUM
);
1484 gcc_assert (to
== FRAME_POINTER_REGNUM
|| to
== STACK_POINTER_REGNUM
);
1486 if (to
== STACK_POINTER_REGNUM
)
1487 diff
= mn10300_frame_size ();
1489 /* The difference between the argument pointer and the frame pointer
1490 is the size of the callee register save area. */
1491 if (from
== ARG_POINTER_REGNUM
)
1493 unsigned int reg_save_bytes
;
1495 mn10300_get_live_callee_saved_regs (& reg_save_bytes
);
1496 diff
+= reg_save_bytes
;
1497 diff
+= 4 * fp_regs_to_save ();
/* Worker function for TARGET_RETURN_IN_MEMORY.  */

mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
  /* Return values > 8 bytes in length in memory.  */
  return (int_size_in_bytes (type) > 8
          || int_size_in_bytes (type) == 0
          || TYPE_MODE (type) == BLKmode);
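
  /* For example, a plain int is returned in a register, while a 12-byte
     structure (or a zero-sized type) is returned in memory.  */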
1514 /* Flush the argument registers to the stack for a stdarg function;
1515 return the new argument pointer. */
1517 mn10300_builtin_saveregs (void)
1520 tree fntype
= TREE_TYPE (current_function_decl
);
1521 int argadj
= ((!stdarg_p (fntype
))
1522 ? UNITS_PER_WORD
: 0);
1523 alias_set_type set
= get_varargs_alias_set ();
1526 offset
= plus_constant (Pmode
, crtl
->args
.arg_offset_rtx
, argadj
);
1528 offset
= crtl
->args
.arg_offset_rtx
;
1530 mem
= gen_rtx_MEM (SImode
, crtl
->args
.internal_arg_pointer
);
1531 set_mem_alias_set (mem
, set
);
1532 emit_move_insn (mem
, gen_rtx_REG (SImode
, 0));
1534 mem
= gen_rtx_MEM (SImode
,
1535 plus_constant (Pmode
,
1536 crtl
->args
.internal_arg_pointer
, 4));
1537 set_mem_alias_set (mem
, set
);
1538 emit_move_insn (mem
, gen_rtx_REG (SImode
, 1));
1540 return copy_to_reg (expand_binop (Pmode
, add_optab
,
1541 crtl
->args
.internal_arg_pointer
,
1542 offset
, 0, 0, OPTAB_LIB_WIDEN
));
1546 mn10300_va_start (tree valist
, rtx nextarg
)
1548 nextarg
= expand_builtin_saveregs ();
1549 std_expand_builtin_va_start (valist
, nextarg
);
/* Return true when a parameter should be passed by reference.  */

mn10300_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
                           machine_mode mode, const_tree type,
                           bool named ATTRIBUTE_UNUSED)
  unsigned HOST_WIDE_INT size;

    size = int_size_in_bytes (type);
    size = GET_MODE_SIZE (mode);

  return (size > 8 || size == 0);
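
  /* So, for instance, a 16-byte structure argument is passed by
     reference, while a 4-byte int is passed by value.  */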
1569 /* Return an RTX to represent where a value with mode MODE will be returned
1570 from a function. If the result is NULL_RTX, the argument is pushed. */
1573 mn10300_function_arg (cumulative_args_t cum_v
, machine_mode mode
,
1574 const_tree type
, bool named ATTRIBUTE_UNUSED
)
1576 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
1577 rtx result
= NULL_RTX
;
1580 /* We only support using 2 data registers as argument registers. */
1583 /* Figure out the size of the object to be passed. */
1584 if (mode
== BLKmode
)
1585 size
= int_size_in_bytes (type
);
1587 size
= GET_MODE_SIZE (mode
);
1589 cum
->nbytes
= (cum
->nbytes
+ 3) & ~3;
1591 /* Don't pass this arg via a register if all the argument registers
1593 if (cum
->nbytes
> nregs
* UNITS_PER_WORD
)
1596 /* Don't pass this arg via a register if it would be split between
1597 registers and memory. */
1598 if (type
== NULL_TREE
1599 && cum
->nbytes
+ size
> nregs
* UNITS_PER_WORD
)
1602 switch (cum
->nbytes
/ UNITS_PER_WORD
)
1605 result
= gen_rtx_REG (mode
, FIRST_ARGUMENT_REGNUM
);
1608 result
= gen_rtx_REG (mode
, FIRST_ARGUMENT_REGNUM
+ 1);
1617 /* Update the data in CUM to advance over an argument
1618 of mode MODE and data type TYPE.
1619 (TYPE is null for libcalls where that information may not be available.) */
1622 mn10300_function_arg_advance (cumulative_args_t cum_v
, machine_mode mode
,
1623 const_tree type
, bool named ATTRIBUTE_UNUSED
)
1625 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
1627 cum
->nbytes
+= (mode
!= BLKmode
1628 ? (GET_MODE_SIZE (mode
) + 3) & ~3
1629 : (int_size_in_bytes (type
) + 3) & ~3);
1632 /* Return the number of bytes of registers to use for an argument passed
1633 partially in registers and partially in memory. */
1636 mn10300_arg_partial_bytes (cumulative_args_t cum_v
, machine_mode mode
,
1637 tree type
, bool named ATTRIBUTE_UNUSED
)
1639 CUMULATIVE_ARGS
*cum
= get_cumulative_args (cum_v
);
1642 /* We only support using 2 data registers as argument registers. */
1645 /* Figure out the size of the object to be passed. */
1646 if (mode
== BLKmode
)
1647 size
= int_size_in_bytes (type
);
1649 size
= GET_MODE_SIZE (mode
);
1651 cum
->nbytes
= (cum
->nbytes
+ 3) & ~3;
1653 /* Don't pass this arg via a register if all the argument registers
1655 if (cum
->nbytes
> nregs
* UNITS_PER_WORD
)
1658 if (cum
->nbytes
+ size
<= nregs
* UNITS_PER_WORD
)
1661 /* Don't pass this arg via a register if it would be split between
1662 registers and memory. */
1663 if (type
== NULL_TREE
1664 && cum
->nbytes
+ size
> nregs
* UNITS_PER_WORD
)
1667 return nregs
* UNITS_PER_WORD
- cum
->nbytes
;
/* Return the location of the function's value.  This will be either
   $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
   $d0 and $a0 if the -mreturn-pointer-on-d0 flag is set.  Note that
   we only return the PARALLEL for outgoing values; we do not want
   callers relying on this extra copy.  */
1677 mn10300_function_value (const_tree valtype
,
1678 const_tree fn_decl_or_type ATTRIBUTE_UNUSED
,
1682 machine_mode mode
= TYPE_MODE (valtype
);
1684 if (! POINTER_TYPE_P (valtype
))
1685 return gen_rtx_REG (mode
, FIRST_DATA_REGNUM
);
1686 else if (! TARGET_PTR_A0D0
|| ! outgoing
1687 || cfun
->returns_struct
)
1688 return gen_rtx_REG (mode
, FIRST_ADDRESS_REGNUM
);
1690 rv
= gen_rtx_PARALLEL (mode
, rtvec_alloc (2));
1692 = gen_rtx_EXPR_LIST (VOIDmode
,
1693 gen_rtx_REG (mode
, FIRST_ADDRESS_REGNUM
),
1697 = gen_rtx_EXPR_LIST (VOIDmode
,
1698 gen_rtx_REG (mode
, FIRST_DATA_REGNUM
),
1703 /* Implements TARGET_LIBCALL_VALUE. */
1706 mn10300_libcall_value (machine_mode mode
,
1707 const_rtx fun ATTRIBUTE_UNUSED
)
1709 return gen_rtx_REG (mode
, FIRST_DATA_REGNUM
);
1712 /* Implements FUNCTION_VALUE_REGNO_P. */
1715 mn10300_function_value_regno_p (const unsigned int regno
)
1717 return (regno
== FIRST_DATA_REGNUM
|| regno
== FIRST_ADDRESS_REGNUM
);
1720 /* Output an addition operation. */
1723 mn10300_output_add (rtx operands
[3], bool need_flags
)
1725 rtx dest
, src1
, src2
;
1726 unsigned int dest_regnum
, src1_regnum
, src2_regnum
;
1727 enum reg_class src1_class
, src2_class
, dest_class
;
1733 dest_regnum
= true_regnum (dest
);
1734 src1_regnum
= true_regnum (src1
);
1736 dest_class
= REGNO_REG_CLASS (dest_regnum
);
1737 src1_class
= REGNO_REG_CLASS (src1_regnum
);
1739 if (CONST_INT_P (src2
))
1741 gcc_assert (dest_regnum
== src1_regnum
);
1743 if (src2
== const1_rtx
&& !need_flags
)
1745 if (INTVAL (src2
) == 4 && !need_flags
&& dest_class
!= DATA_REGS
)
1748 gcc_assert (!need_flags
|| dest_class
!= SP_REGS
);
1751 else if (CONSTANT_P (src2
))
1754 src2_regnum
= true_regnum (src2
);
1755 src2_class
= REGNO_REG_CLASS (src2_regnum
);
1757 if (dest_regnum
== src1_regnum
)
1759 if (dest_regnum
== src2_regnum
)
1762 /* The rest of the cases are reg = reg+reg. For AM33, we can implement
1763 this directly, as below, but when optimizing for space we can sometimes
1764 do better by using a mov+add. For MN103, we claimed that we could
1765 implement a three-operand add because the various move and add insns
1766 change sizes across register classes, and we can often do better than
1767 reload in choosing which operand to move. */
1768 if (TARGET_AM33
&& optimize_insn_for_speed_p ())
1769 return "add %2,%1,%0";
1771 /* Catch cases where no extended register was used. */
1772 if (src1_class
!= EXTENDED_REGS
1773 && src2_class
!= EXTENDED_REGS
1774 && dest_class
!= EXTENDED_REGS
)
1776 /* We have to copy one of the sources into the destination, then
1777 add the other source to the destination.
1779 Carefully select which source to copy to the destination; a
1780 naive implementation will waste a byte when the source classes
1781 are different and the destination is an address register.
1782 Selecting the lowest cost register copy will optimize this
1784 if (src1_class
== dest_class
)
1785 return "mov %1,%0\n\tadd %2,%0";
1787 return "mov %2,%0\n\tadd %1,%0";
1790 /* At least one register is an extended register. */
1792 /* The three operand add instruction on the am33 is a win iff the
1793 output register is an extended register, or if both source
1794 registers are extended registers. */
1795 if (dest_class
== EXTENDED_REGS
|| src1_class
== src2_class
)
1796 return "add %2,%1,%0";
1798 /* It is better to copy one of the sources to the destination, then
1799 perform a 2 address add. The destination in this case must be
1800 an address or data register and one of the sources must be an
1801 extended register and the remaining source must not be an extended
1804 The best code for this case is to copy the extended reg to the
1805 destination, then emit a two address add. */
1806 if (src1_class
== EXTENDED_REGS
)
1807 return "mov %1,%0\n\tadd %2,%0";
1809 return "mov %2,%0\n\tadd %1,%0";
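  /* For instance, with dest = a0, src1 = d1 and src2 = e2 this emits
     "mov e2,a0" followed by "add d1,a0".  */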
1812 /* Return 1 if X contains a symbolic expression. We know these
1813 expressions will have one of a few well defined forms, so
1814 we need only check those forms. */
1817 mn10300_symbolic_operand (rtx op
,
1818 machine_mode mode ATTRIBUTE_UNUSED
)
1820 switch (GET_CODE (op
))
1827 return ((GET_CODE (XEXP (op
, 0)) == SYMBOL_REF
1828 || GET_CODE (XEXP (op
, 0)) == LABEL_REF
)
1829 && CONST_INT_P (XEXP (op
, 1)));
1835 /* Try machine dependent ways of modifying an illegitimate address
1836 to be legitimate. If we find one, return the new valid address.
1837 This macro is used in only one place: `memory_address' in explow.c.
1839 OLDX is the address as it was before break_out_memory_refs was called.
1840 In some cases it is useful to look at this to decide what needs to be done.
1842 Normally it is always safe for this macro to do nothing. It exists to
1843 recognize opportunities to optimize the output.
1845 But on a few ports with segmented architectures and indexed addressing
1846 (mn10300, hppa) it is used to rewrite certain problematical addresses. */
1849 mn10300_legitimize_address (rtx x
, rtx oldx ATTRIBUTE_UNUSED
,
1850 machine_mode mode ATTRIBUTE_UNUSED
)
1852 if (flag_pic
&& ! mn10300_legitimate_pic_operand_p (x
))
1853 x
= mn10300_legitimize_pic_address (oldx
, NULL_RTX
);
1855 /* Uh-oh. We might have an address for x[n-100000]. This needs
1856 special handling to avoid creating an indexed memory address
1857 with x-100000 as the base. */
1858 if (GET_CODE (x
) == PLUS
1859 && mn10300_symbolic_operand (XEXP (x
, 1), VOIDmode
))
1861 /* Ugly. We modify things here so that the address offset specified
1862 by the index expression is computed first, then added to x to form
1863 the entire address. */
1865 rtx regx1
, regy1
, regy2
, y
;
1867 /* Strip off any CONST. */
1869 if (GET_CODE (y
) == CONST
)
1872 if (GET_CODE (y
) == PLUS
|| GET_CODE (y
) == MINUS
)
1874 regx1
= force_reg (Pmode
, force_operand (XEXP (x
, 0), 0));
1875 regy1
= force_reg (Pmode
, force_operand (XEXP (y
, 0), 0));
1876 regy2
= force_reg (Pmode
, force_operand (XEXP (y
, 1), 0));
1877 regx1
= force_reg (Pmode
,
1878 gen_rtx_fmt_ee (GET_CODE (y
), Pmode
, regx1
,
1880 return force_reg (Pmode
, gen_rtx_PLUS (Pmode
, regx1
, regy1
));
1886 /* Convert a non-PIC address in `orig' to a PIC address using @GOT or
1887 @GOTOFF in `reg'. */
1890 mn10300_legitimize_pic_address (rtx orig
, rtx reg
)
1894 if (GET_CODE (orig
) == LABEL_REF
1895 || (GET_CODE (orig
) == SYMBOL_REF
1896 && (CONSTANT_POOL_ADDRESS_P (orig
)
1897 || ! MN10300_GLOBAL_P (orig
))))
1900 reg
= gen_reg_rtx (Pmode
);
1902 x
= gen_rtx_UNSPEC (SImode
, gen_rtvec (1, orig
), UNSPEC_GOTOFF
);
1903 x
= gen_rtx_CONST (SImode
, x
);
1904 emit_move_insn (reg
, x
);
1906 x
= emit_insn (gen_addsi3 (reg
, reg
, pic_offset_table_rtx
));
1908 else if (GET_CODE (orig
) == SYMBOL_REF
)
1911 reg
= gen_reg_rtx (Pmode
);
1913 x
= gen_rtx_UNSPEC (SImode
, gen_rtvec (1, orig
), UNSPEC_GOT
);
1914 x
= gen_rtx_CONST (SImode
, x
);
1915 x
= gen_rtx_PLUS (SImode
, pic_offset_table_rtx
, x
);
1916 x
= gen_const_mem (SImode
, x
);
1918 x
= emit_move_insn (reg
, x
);
1923 set_unique_reg_note (x
, REG_EQUAL
, orig
);
1927 /* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
1928 isn't protected by a PIC unspec; nonzero otherwise. */
1931 mn10300_legitimate_pic_operand_p (rtx x
)
1936 if (GET_CODE (x
) == SYMBOL_REF
|| GET_CODE (x
) == LABEL_REF
)
1939 if (GET_CODE (x
) == UNSPEC
1940 && (XINT (x
, 1) == UNSPEC_PIC
1941 || XINT (x
, 1) == UNSPEC_GOT
1942 || XINT (x
, 1) == UNSPEC_GOTOFF
1943 || XINT (x
, 1) == UNSPEC_PLT
1944 || XINT (x
, 1) == UNSPEC_GOTSYM_OFF
))
1947 fmt
= GET_RTX_FORMAT (GET_CODE (x
));
1948 for (i
= GET_RTX_LENGTH (GET_CODE (x
)) - 1; i
>= 0; i
--)
1954 for (j
= XVECLEN (x
, i
) - 1; j
>= 0; j
--)
1955 if (! mn10300_legitimate_pic_operand_p (XVECEXP (x
, i
, j
)))
1958 else if (fmt
[i
] == 'e'
1959 && ! mn10300_legitimate_pic_operand_p (XEXP (x
, i
)))
1966 /* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
1967 legitimate, and FALSE otherwise.
1969 On the mn10300, the value in the address register must be
1970 in the same memory space/segment as the effective address.
1972 This is problematical for reload since it does not understand
1973 that base+index != index+base in a memory reference.
1975 Note it is still possible to use reg+reg addressing modes,
1976 it's just much more difficult. For a discussion of a possible
1977 workaround and solution, see the comments in pa.c before the
1978 function record_unscaled_index_insn_codes. */
1981 mn10300_legitimate_address_p (machine_mode mode
, rtx x
, bool strict
)
1985 if (CONSTANT_ADDRESS_P (x
))
1986 return !flag_pic
|| mn10300_legitimate_pic_operand_p (x
);
1988 if (RTX_OK_FOR_BASE_P (x
, strict
))
1991 if (TARGET_AM33
&& (mode
== SImode
|| mode
== SFmode
|| mode
== HImode
))
1993 if (GET_CODE (x
) == POST_INC
)
1994 return RTX_OK_FOR_BASE_P (XEXP (x
, 0), strict
);
1995 if (GET_CODE (x
) == POST_MODIFY
)
1996 return (RTX_OK_FOR_BASE_P (XEXP (x
, 0), strict
)
1997 && CONSTANT_ADDRESS_P (XEXP (x
, 1)));
2000 if (GET_CODE (x
) != PLUS
)
2004 index
= XEXP (x
, 1);
2010 /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
2011 addressing is hard to satisfy. */
2015 return (REGNO_GENERAL_P (REGNO (base
), strict
)
2016 && REGNO_GENERAL_P (REGNO (index
), strict
));
2019 if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base
), strict
))
2022 if (CONST_INT_P (index
))
2023 return IN_RANGE (INTVAL (index
), -1 - 0x7fffffff, 0x7fffffff);
2025 if (CONSTANT_ADDRESS_P (index
))
2026 return !flag_pic
|| mn10300_legitimate_pic_operand_p (index
);
2032 mn10300_regno_in_class_p (unsigned regno
, int rclass
, bool strict
)
2034 if (regno
>= FIRST_PSEUDO_REGISTER
)
2040 regno
= reg_renumber
[regno
];
2041 if (regno
== INVALID_REGNUM
)
2044 return TEST_HARD_REG_BIT (reg_class_contents
[rclass
], regno
);
2048 mn10300_legitimize_reload_address (rtx x
,
2049 machine_mode mode ATTRIBUTE_UNUSED
,
2050 int opnum
, int type
,
2051 int ind_levels ATTRIBUTE_UNUSED
)
2053 bool any_change
= false;
2055 /* See above re disabling reg+reg addressing for MN103. */
2059 if (GET_CODE (x
) != PLUS
)
2062 if (XEXP (x
, 0) == stack_pointer_rtx
)
2064 push_reload (XEXP (x
, 0), NULL_RTX
, &XEXP (x
, 0), NULL
,
2065 GENERAL_REGS
, GET_MODE (x
), VOIDmode
, 0, 0,
2066 opnum
, (enum reload_type
) type
);
2069 if (XEXP (x
, 1) == stack_pointer_rtx
)
2071 push_reload (XEXP (x
, 1), NULL_RTX
, &XEXP (x
, 1), NULL
,
2072 GENERAL_REGS
, GET_MODE (x
), VOIDmode
, 0, 0,
2073 opnum
, (enum reload_type
) type
);
2077 return any_change
? x
: NULL_RTX
;
2080 /* Implement TARGET_LEGITIMATE_CONSTANT_P. Returns TRUE if X is a valid
2081 constant. Note that some "constants" aren't valid, such as TLS
2082 symbols and unconverted GOT-based references, so we eliminate
2086 mn10300_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED
, rtx x
)
2088 switch (GET_CODE (x
))
2093 if (GET_CODE (x
) == PLUS
)
2095 if (! CONST_INT_P (XEXP (x
, 1)))
2100 /* Only some unspecs are valid as "constants". */
2101 if (GET_CODE (x
) == UNSPEC
)
2103 switch (XINT (x
, 1))
2115 /* We must have drilled down to a symbol. */
2116 if (! mn10300_symbolic_operand (x
, Pmode
))
2127 /* Undo pic address legitimization for the benefit of debug info. */
2130 mn10300_delegitimize_address (rtx orig_x
)
2132 rtx x
= orig_x
, ret
, addend
= NULL
;
2137 if (GET_CODE (x
) != PLUS
|| GET_MODE (x
) != Pmode
)
2140 if (XEXP (x
, 0) == pic_offset_table_rtx
)
2142 /* With the REG+REG addressing of AM33, var-tracking can re-assemble
2143 some odd-looking "addresses" that were never valid in the first place.
2144 We need to look harder to avoid warnings being emitted. */
2145 else if (GET_CODE (XEXP (x
, 0)) == PLUS
)
2147 rtx x0
= XEXP (x
, 0);
2148 rtx x00
= XEXP (x0
, 0);
2149 rtx x01
= XEXP (x0
, 1);
2151 if (x00
== pic_offset_table_rtx
)
2153 else if (x01
== pic_offset_table_rtx
)
2163 if (GET_CODE (x
) != CONST
)
2166 if (GET_CODE (x
) != UNSPEC
)
2169 ret
= XVECEXP (x
, 0, 0);
2170 if (XINT (x
, 1) == UNSPEC_GOTOFF
)
2172 else if (XINT (x
, 1) == UNSPEC_GOT
)
2177 gcc_assert (GET_CODE (ret
) == SYMBOL_REF
);
2178 if (need_mem
!= MEM_P (orig_x
))
2180 if (need_mem
&& addend
)
2183 ret
= gen_rtx_PLUS (Pmode
, addend
, ret
);
2187 /* For addresses, costs are relative to "MOV (Rm),Rn". For AM33 this is
2188 the 3-byte fully general instruction; for MN103 this is the 2-byte form
2189 with an address register. */
2192 mn10300_address_cost (rtx x
, machine_mode mode ATTRIBUTE_UNUSED
,
2193 addr_space_t as ATTRIBUTE_UNUSED
, bool speed
)
2198 switch (GET_CODE (x
))
2203 /* We assume all of these require a 32-bit constant, even though
2204 some symbol and label references can be relaxed. */
2205 return speed
? 1 : 4;
2213 /* Assume any symbolic offset is a 32-bit constant. */
2214 i
= (CONST_INT_P (XEXP (x
, 1)) ? INTVAL (XEXP (x
, 1)) : 0x12345678);
2215 if (IN_RANGE (i
, -128, 127))
2216 return speed
? 0 : 1;
2219 if (IN_RANGE (i
, -0x800000, 0x7fffff))
2225 index
= XEXP (x
, 1);
2226 if (register_operand (index
, SImode
))
2228 /* Attempt to minimize the number of registers in the address.
2229 This is similar to what other ports do. */
2230 if (register_operand (base
, SImode
))
2234 index
= XEXP (x
, 0);
2237 /* Assume any symbolic offset is a 32-bit constant. */
2238 i
= (CONST_INT_P (XEXP (x
, 1)) ? INTVAL (XEXP (x
, 1)) : 0x12345678);
2239 if (IN_RANGE (i
, -128, 127))
2240 return speed
? 0 : 1;
2241 if (IN_RANGE (i
, -32768, 32767))
2242 return speed
? 0 : 2;
2243 return speed
? 2 : 6;
2246 return rtx_cost (x
, MEM
, 0, speed
);
2250 /* Implement the TARGET_REGISTER_MOVE_COST hook.
2252 Recall that the base value of 2 is required by assumptions elsewhere
2253 in the body of the compiler, and that cost 2 is special-cased as an
2254 early exit from reload meaning no work is required. */
2257 mn10300_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED
,
2258 reg_class_t ifrom
, reg_class_t ito
)
2260 enum reg_class from
= (enum reg_class
) ifrom
;
2261 enum reg_class to
= (enum reg_class
) ito
;
2262 enum reg_class scratch
, test
;
2264 /* Simplify the following code by unifying the fp register classes. */
2265 if (to
== FP_ACC_REGS
)
2267 if (from
== FP_ACC_REGS
)
2270 /* Diagnose invalid moves by costing them as two moves. */
2275 scratch
= (TARGET_AM33
? GENERAL_REGS
: ADDRESS_REGS
);
2276 else if (to
== MDR_REGS
)
2277 scratch
= DATA_REGS
;
2278 else if (to
== FP_REGS
&& to
!= from
)
2279 scratch
= GENERAL_REGS
;
2283 if (from
== SP_REGS
)
2284 scratch
= (TARGET_AM33
? GENERAL_REGS
: ADDRESS_REGS
);
2285 else if (from
== MDR_REGS
)
2286 scratch
= DATA_REGS
;
2287 else if (from
== FP_REGS
&& to
!= from
)
2288 scratch
= GENERAL_REGS
;
2290 if (scratch
!= NO_REGS
&& !reg_class_subset_p (test
, scratch
))
2291 return (mn10300_register_move_cost (VOIDmode
, from
, scratch
)
2292 + mn10300_register_move_cost (VOIDmode
, scratch
, to
));
2294 /* From here on, all we need consider are legal combinations. */
2298 /* The scale here is bytes * 2. */
2300 if (from
== to
&& (to
== ADDRESS_REGS
|| to
== DATA_REGS
))
2303 if (from
== SP_REGS
)
2304 return (to
== ADDRESS_REGS
? 2 : 6);
2306 /* For MN103, all remaining legal moves are two bytes. */
2311 return (from
== ADDRESS_REGS
? 4 : 6);
2313 if ((from
== ADDRESS_REGS
|| from
== DATA_REGS
)
2314 && (to
== ADDRESS_REGS
|| to
== DATA_REGS
))
2317 if (to
== EXTENDED_REGS
)
2318 return (to
== from
? 6 : 4);
2320 /* What's left are SP_REGS, FP_REGS, or combinations of the above. */
2325 /* The scale here is cycles * 2. */
2329 if (from
== FP_REGS
)
2332 /* All legal moves between integral registers are single cycle. */
2337 /* Implement the TARGET_MEMORY_MOVE_COST hook.
2339 Given lack of the form of the address, this must be speed-relative,
2340 though we should never be less expensive than a size-relative register
2341 move cost above. This is not a problem. */
2344 mn10300_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED
,
2345 reg_class_t iclass
, bool in ATTRIBUTE_UNUSED
)
2347 enum reg_class rclass
= (enum reg_class
) iclass
;
2349 if (rclass
== FP_REGS
)
2354 /* Implement the TARGET_RTX_COSTS hook.
2356 Speed-relative costs are relative to COSTS_N_INSNS, which is intended
2357 to represent cycles. Size-relative costs are in bytes. */
2360 mn10300_rtx_costs (rtx x
, int code
, int outer_code
, int opno ATTRIBUTE_UNUSED
,
2361 int *ptotal
, bool speed
)
2363 /* This value is used for SYMBOL_REF etc where we want to pretend
2364 we have a full 32-bit constant. */
2365 HOST_WIDE_INT i
= 0x12345678;
2375 if (outer_code
== SET
)
2377 /* 16-bit integer loads have latency 1, 32-bit loads 2. */
2378 if (IN_RANGE (i
, -32768, 32767))
2379 total
= COSTS_N_INSNS (1);
2381 total
= COSTS_N_INSNS (2);
2385 /* 16-bit integer operands don't affect latency;
2386 24-bit and 32-bit operands add a cycle. */
2387 if (IN_RANGE (i
, -32768, 32767))
2390 total
= COSTS_N_INSNS (1);
2395 if (outer_code
== SET
)
2399 else if (IN_RANGE (i
, -128, 127))
2401 else if (IN_RANGE (i
, -32768, 32767))
2408 /* Reference here is ADD An,Dn, vs ADD imm,Dn. */
2409 if (IN_RANGE (i
, -128, 127))
2411 else if (IN_RANGE (i
, -32768, 32767))
2413 else if (TARGET_AM33
&& IN_RANGE (i
, -0x01000000, 0x00ffffff))
2425 /* We assume all of these require a 32-bit constant, even though
2426 some symbol and label references can be relaxed. */
2430 switch (XINT (x
, 1))
2436 case UNSPEC_GOTSYM_OFF
:
2437 /* The PIC unspecs also resolve to a 32-bit constant. */
2441 /* Assume any non-listed unspec is some sort of arithmetic. */
2442 goto do_arith_costs
;
2446 /* Notice the size difference of INC and INC4. */
2447 if (!speed
&& outer_code
== SET
&& CONST_INT_P (XEXP (x
, 1)))
2449 i
= INTVAL (XEXP (x
, 1));
2450 if (i
== 1 || i
== 4)
2452 total
= 1 + rtx_cost (XEXP (x
, 0), PLUS
, 0, speed
);
2456 goto do_arith_costs
;
2470 total
= (speed
? COSTS_N_INSNS (1) : 2);
2474 /* Notice the size difference of ASL2 and variants. */
2475 if (!speed
&& CONST_INT_P (XEXP (x
, 1)))
2476 switch (INTVAL (XEXP (x
, 1)))
2491 total
= (speed
? COSTS_N_INSNS (1) : 3);
2495 total
= (speed
? COSTS_N_INSNS (3) : 2);
2502 total
= (speed
? COSTS_N_INSNS (39)
2503 /* Include space to load+retrieve MDR. */
2504 : code
== MOD
|| code
== UMOD
? 6 : 4);
2508 total
= mn10300_address_cost (XEXP (x
, 0), GET_MODE (x
),
2509 MEM_ADDR_SPACE (x
), speed
);
2511 total
= COSTS_N_INSNS (2 + total
);
2515 /* Probably not implemented. Assume external call. */
2516 total
= (speed
? COSTS_N_INSNS (10) : 7);
2528 /* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
2529 may access it using GOTOFF instead of GOT. */
2532 mn10300_encode_section_info (tree decl
, rtx rtl
, int first
)
2536 default_encode_section_info (decl
, rtl
, first
);
2541 symbol
= XEXP (rtl
, 0);
2542 if (GET_CODE (symbol
) != SYMBOL_REF
)
2546 SYMBOL_REF_FLAG (symbol
) = (*targetm
.binds_local_p
) (decl
);
2549 /* Dispatch tables on the mn10300 are extremely expensive in terms of code
2550 and readonly data size. So we crank up the case threshold value to
2551 encourage a series of if/else comparisons to implement many small switch
2552 statements. In theory, this value could be increased much more if we
2553 were solely optimizing for space, but we keep it "reasonable" to avoid
2554 serious code efficiency lossage. */
2557 mn10300_case_values_threshold (void)
2562 /* Worker function for TARGET_TRAMPOLINE_INIT. */
2565 mn10300_trampoline_init (rtx m_tramp
, tree fndecl
, rtx chain_value
)
2567 rtx mem
, disp
, fnaddr
= XEXP (DECL_RTL (fndecl
), 0);
2569 /* This is a strict alignment target, which means that we play
2570 some games to make sure that the locations at which we need
2571 to store <chain> and <disp> wind up at aligned addresses.
2574 0xfc 0xdd mov chain,a1
2576 0xf8 0xed 0x00 btst 0,d1
2580 Note that the two extra insns are effectively nops; they
2581 clobber the flags but do not affect the contents of D0 or D1. */
2583 disp
= expand_binop (SImode
, sub_optab
, fnaddr
,
2584 plus_constant (Pmode
, XEXP (m_tramp
, 0), 11),
2585 NULL_RTX
, 1, OPTAB_DIRECT
);
2587 mem
= adjust_address (m_tramp
, SImode
, 0);
2588 emit_move_insn (mem
, gen_int_mode (0xddfc0028, SImode
));
2589 mem
= adjust_address (m_tramp
, SImode
, 4);
2590 emit_move_insn (mem
, chain_value
);
2591 mem
= adjust_address (m_tramp
, SImode
, 8);
2592 emit_move_insn (mem
, gen_int_mode (0xdc00edf8, SImode
));
2593 mem
= adjust_address (m_tramp
, SImode
, 12);
2594 emit_move_insn (mem
, disp
);
/* Output the assembler code for a C++ thunk function.
   THUNK_DECL is the declaration for the thunk function itself, FUNCTION
   is the decl for the target function.  DELTA is an immediate constant
   offset to be added to the THIS parameter.  If VCALL_OFFSET is nonzero
   the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
   additionally added to THIS.  Finally jump to the entry point of
   FUNCTION.  */

static void
mn10300_asm_output_mi_thunk (FILE *        file,
			     tree          thunk_fndecl ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT delta,
			     HOST_WIDE_INT vcall_offset,
			     tree          function)
{
  const char * _this;

  /* Get the register holding the THIS parameter.  Handle the case
     where there is a hidden first argument for a returned structure.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
  else
    _this = reg_names [FIRST_ARGUMENT_REGNUM];

  fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);

  if (delta)
    fprintf (file, "\tadd %d, %s\n", (int) delta, _this);

  if (vcall_offset)
    {
      const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];

      fprintf (file, "\tmov %s, %s\n", _this, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %s, %s\n", scratch, _this);
    }

  fputs ("\tjmp ", file);
  assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
  fputs ("\n", file);
}
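/* For illustration, with delta == 4 and vcall_offset == 0 the thunk for a
   target function "_foo" comes out as something like:

	# Thunk Entry Point:
	add 4, d0
	jmp _foo

   (the exact register depends on FIRST_ARGUMENT_REGNUM and on whether the
   callee returns its value in memory).  */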
/* Return true if mn10300_asm_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */

static bool
mn10300_can_output_mi_thunk (const_tree    thunk_fndecl ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT delta        ATTRIBUTE_UNUSED,
			     HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
			     const_tree    function     ATTRIBUTE_UNUSED)
{
  return true;
}
/* Returns true if a value of mode MODE can be held in register REGNO.  */

bool
mn10300_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  if (REGNO_REG_CLASS (regno) == FP_REGS
      || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
    /* Do not store integer values in FP registers.  */
    return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);

  if (! TARGET_AM33 && REGNO_REG_CLASS (regno) == EXTENDED_REGS)
    return false;

  if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
    {
      if (REGNO_REG_CLASS (regno) == DATA_REGS
	  || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
	  || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
	return GET_MODE_SIZE (mode) <= 4;
    }

  return false;
}
/* Returns true if values of MODE1 and MODE2 may share a register.  */

bool
mn10300_modes_tieable (machine_mode mode1, machine_mode mode2)
{
  if (GET_MODE_CLASS (mode1) == MODE_FLOAT
      && GET_MODE_CLASS (mode2) != MODE_FLOAT)
    return false;

  if (GET_MODE_CLASS (mode2) == MODE_FLOAT
      && GET_MODE_CLASS (mode1) != MODE_FLOAT)
    return false;

  return (GET_MODE_SIZE (mode1) == GET_MODE_SIZE (mode2)
	  || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4));
}
/* Returns the set of condition code flags provided by MODE.  */

static int
cc_flags_for_mode (machine_mode mode)
{
  switch (mode)
    {
    case CCmode:
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
    case CCZNCmode:
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
    case CCZNmode:
      return CC_FLAG_Z | CC_FLAG_N;
    case CC_FLOATmode:
      return -1;
    default:
      gcc_unreachable ();
    }
}
/* Returns the set of condition code flags needed to test comparison CODE.  */

static int
cc_flags_for_code (enum rtx_code code)
{
  switch (code)
    {
    case EQ:	/* Z */
    case NE:	/* ~Z */
      return CC_FLAG_Z;

    case LT:	/* N */
    case GE:	/* ~N */
      return CC_FLAG_N;

    case GT:	/* ~(Z|(N^V)) */
    case LE:	/*   Z|(N^V) */
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;

    case GEU:	/* ~C */
    case LTU:	/*  C */
      return CC_FLAG_C;

    case GTU:	/* ~(C | Z) */
    case LEU:	/*  C | Z */
      return CC_FLAG_Z | CC_FLAG_C;

    default:
      gcc_unreachable ();
    }
}
machine_mode
mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
{
  int req;

  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    return CC_FLOATmode;

  req = cc_flags_for_code (code);

  if (req & CC_FLAG_V)
    return CCmode;
  if (req & CC_FLAG_C)
    return CCZNCmode;
  return CCZNmode;
}
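/* Thus an equality test, which only needs the Z flag, gets CCZNmode and can
   be satisfied by an ordinary flag-setting arithmetic insn; an unsigned
   comparison that needs the carry gets CCZNCmode; and signed GT/LE, which
   also need the overflow flag, force full CCmode.  */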
/* Returns true if the given SET rtx loads its value from memory.  */

static bool
set_is_load_p (rtx set)
{
  return MEM_P (SET_SRC (set));
}

/* Returns true if the given SET rtx stores its value to memory.  */

static bool
set_is_store_p (rtx set)
{
  return MEM_P (SET_DEST (set));
}
/* Update scheduling costs for situations that cannot be
   described using the attributes and DFA machinery.
   DEP is the insn being scheduled.
   INSN is the previous insn.
   COST is the current cycle cost for DEP.  */

static int
mn10300_adjust_sched_cost (rtx_insn *insn, rtx link, rtx_insn *dep, int cost)
{
  rtx insn_set;
  rtx dep_set;
  int timings;

  /* We are only interested in pairs of SET.  */
  insn_set = single_set (insn);
  if (!insn_set)
    return cost;

  dep_set = single_set (dep);
  if (!dep_set)
    return cost;

  /* For the AM34 a load instruction that follows a
     store instruction incurs an extra cycle of delay.  */
  if (mn10300_tune_cpu == PROCESSOR_AM34
      && set_is_load_p (dep_set)
      && set_is_store_p (insn_set))
    cost += 1;

  /* For the AM34 a non-store, non-branch FPU insn that follows
     another FPU insn incurs a one cycle throughput increase.  */
  else if (mn10300_tune_cpu == PROCESSOR_AM34
	   && ! set_is_store_p (insn_set)
	   && GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) == MODE_FLOAT
	   && GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) == MODE_FLOAT)
    cost += 1;

  /* Resolve the conflict described in section 1-7-4 of
     Chapter 3 of the MN103E Series Instruction Manual:

     "When the preceding instruction is a CPU load or
     store instruction, a following FPU instruction
     cannot be executed until the CPU completes the
     latency period even though there are no register
     or flag dependencies between them."  */

  /* Only the AM33-2 (and later) CPUs have FPU instructions.  */
  if (! TARGET_AM33_2)
    return cost;

  /* If a data dependence already exists then the cost is correct.  */
  if (REG_NOTE_KIND (link) == 0)
    return cost;

  /* Check that the instruction about to be scheduled is an FPU instruction.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) != MODE_FLOAT)
    return cost;

  /* Now check to see if the previous instruction is a load or store.  */
  if (! set_is_load_p (insn_set) && ! set_is_store_p (insn_set))
    return cost;

  /* XXX: Verify: The text of 1-7-4 implies that the restriction
     only applies when an INTEGER load/store precedes an FPU
     instruction, but is this true ?  For now we assume that it is.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) != MODE_INT)
    return cost;

  /* Extract the latency value from the timings attribute.  */
  timings = get_attr_timings (insn);
  return timings < 100 ? (timings % 10) : (timings % 100);
}
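/* Note: judging from the return expression above, the timings attribute
   (defined in mn10300.md) encodes the latency in its low decimal digit(s):
   values below 100 carry a one-digit latency, larger values a two-digit one.
   For example a value of 47 would yield a latency of 7 cycles here.  */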
/* Worker function for TARGET_CONDITIONAL_REGISTER_USAGE.  */

static void
mn10300_conditional_register_usage (void)
{
  unsigned int i;

  if (!TARGET_AM33)
    {
      for (i = FIRST_EXTENDED_REGNUM;
	   i <= LAST_EXTENDED_REGNUM; i++)
	fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (!TARGET_AM33_2)
    {
      for (i = FIRST_FP_REGNUM;
	   i <= LAST_FP_REGNUM; i++)
	fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (flag_pic)
    fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
      call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
}
/* Worker function for TARGET_MD_ASM_CLOBBERS.
   We do this in the mn10300 backend to maintain source compatibility
   with the old cc0-based compiler.  */

static tree
mn10300_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
			 tree inputs ATTRIBUTE_UNUSED,
			 tree clobbers)
{
  clobbers = tree_cons (NULL_TREE, build_string (5, "EPSW"),
			clobbers);
  return clobbers;
}
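/* In effect every inline asm statement is treated as if it had explicitly
   listed the EPSW flags register in its clobber list, even something as
   simple as asm volatile ("nop").  */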
/* A helper function for splitting cbranch patterns after reload.  */

void
mn10300_split_cbranch (machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
{
  rtx flags, x;

  flags = gen_rtx_REG (cmp_mode, CC_REG);
  x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
  x = gen_rtx_SET (flags, x);
  emit_insn (x);

  x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
  x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
  x = gen_rtx_SET (pc_rtx, x);
  emit_jump_insn (x);
}
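/* For example, a combined compare-and-branch of the form

     (set (pc) (if_then_else (lt (reg:SI 0) (reg:SI 1))
			     (label_ref 23) (pc)))

   is rewritten, roughly, as a compare that sets the flags register followed
   by a branch that tests it:

     (set (reg:CC CC_REG) (compare:CC (reg:SI 0) (reg:SI 1)))
     (set (pc) (if_then_else (lt (reg:CC CC_REG) (const_int 0))
			     (label_ref 23) (pc)))  */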
/* A helper function for matching parallels that set the flags.  */

bool
mn10300_match_ccmode (rtx insn, machine_mode cc_mode)
{
  rtx op1, flags;
  machine_mode flags_mode;

  gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);

  op1 = XVECEXP (PATTERN (insn), 0, 1);
  gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);

  flags = SET_DEST (op1);
  flags_mode = GET_MODE (flags);

  if (GET_MODE (SET_SRC (op1)) != flags_mode)
    return false;
  if (GET_MODE_CLASS (flags_mode) != MODE_CC)
    return false;

  /* Ensure that the mode of FLAGS is compatible with CC_MODE.  */
  if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
    return false;

  return true;
}
/* This function is used to help split:

     (set (reg) (and (reg) (int)))

   into:

     (set (reg) (shift (reg) (int))
     (set (reg) (shift (reg) (int))

   where the shifts will be shorter than the "and" insn.

   It returns the number of bits that should be shifted.  A positive
   value means that the low bits are to be cleared (and hence the
   shifts should be right followed by left) whereas a negative value
   means that the high bits are to be cleared (left followed by right).
   Zero is returned when it would not be economical to split the AND.  */

int
mn10300_split_and_operand_count (rtx op)
{
  HOST_WIDE_INT val = INTVAL (op);
  int count;

  if (val < 0)
    {
      /* High bit is set, look for bits clear at the bottom.  */
      count = exact_log2 (-val);
      if (count < 0)
	return 0;
      /* This is only a size win if we can use the asl2 insn.  Otherwise we
	 would be replacing 1 6-byte insn with 2 3-byte insns.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
	return 0;
      return count;
    }
  else
    {
      /* High bit is clear, look for bits set at the bottom.  */
      count = exact_log2 (val + 1);
      count = 32 - count;
      /* Again, this is only a size win with asl2.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
	return 0;
      return -count;
    }
}
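/* For example, a mask of -4 (0xfffffffc) would give 2: clear the low two
   bits with a right shift by 2 followed by a left shift by 2.  A mask of
   0x3fffffff would give -2: clear the top two bits with a left shift by 2
   followed by a right shift by 2.  A mask such as 0xff gives 0, since the
   24-bit shifts required would cost more than the AND.  */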
/* A helper structure used to hold the operands and LIW attributes of a
   single insn that is a candidate for LIW bundling.  */

struct liw_data
{
  enum attr_liw slot;
  enum attr_liw_op op;
  rtx dest;
  rtx src;
};
/* Decide if the given insn is a candidate for LIW bundling.  If it is then
   extract the operands and LIW attributes from the insn and use them to fill
   in the liw_data structure.  Return true upon success or false if the insn
   cannot be bundled.  */

static bool
extract_bundle (rtx_insn *insn, struct liw_data * pdata)
{
  bool allow_consts = true;
  rtx p;

  gcc_assert (pdata != NULL);

  if (insn == NULL)
    return false;
  /* Make sure that we are dealing with a simple SET insn.  */
  p = single_set (insn);
  if (p == NULL_RTX)
    return false;

  /* Make sure that it could go into one of the LIW pipelines.  */
  pdata->slot = get_attr_liw (insn);
  if (pdata->slot == LIW_BOTH)
    return false;

  pdata->op = get_attr_liw_op (insn);

  switch (pdata->op)
    {
    case LIW_OP_MOV:
      pdata->dest = SET_DEST (p);
      pdata->src = SET_SRC (p);
      break;
    case LIW_OP_CMP:
      pdata->dest = XEXP (SET_SRC (p), 0);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    case LIW_OP_NONE:
      return false;
    case LIW_OP_AND:
    case LIW_OP_OR:
    case LIW_OP_XOR:
      /* The AND, OR and XOR long instruction words only accept register
	 arguments.  */
      allow_consts = false;
      /* Fall through.  */
    default:
      pdata->dest = SET_DEST (p);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    }

  if (! REG_P (pdata->dest))
    return false;

  if (REG_P (pdata->src))
    return true;

  return allow_consts && satisfies_constraint_O (pdata->src);
}
/* Make sure that it is OK to execute LIW1 and LIW2 in parallel.  GCC generated
   the instructions with the assumption that LIW1 would be executed before LIW2
   so we must check for overlaps between their sources and destinations.  */

static bool
check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
{
  /* Check for slot conflicts.  */
  if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
    return false;

  /* If either operation is a compare, then "dest" is really an input; the real
     destination is CC_REG.  So these instructions need different checks.  */

  /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
     check its values prior to any changes made by OP.  */
  if (pliw1->op == LIW_OP_CMP)
    {
      /* Two sequential comparisons means dead code, which ought to
	 have been eliminated given that bundling only happens with
	 optimization.  We cannot bundle them in any case.  */
      gcc_assert (pliw1->op != pliw2->op);
      return true;
    }

  /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being compared
     is the destination of OP, as the CMP will look at the old value, not the new
     one.  */
  if (pliw2->op == LIW_OP_CMP)
    {
      if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
	return false;

      if (REG_P (pliw2->src))
	return REGNO (pliw2->src) != REGNO (pliw1->dest);

      return true;
    }

  /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both write to the
     same destination register.  */
  if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
    return false;

  /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the destination
     of OP1 is the source of OP2.  The exception is when OP1 is a MOVE instruction when
     we can replace the source in OP2 with the source of OP1.  */
  if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
    {
      if (pliw1->op == LIW_OP_MOV && REG_P (pliw1->src))
	{
	  if (! REG_P (pliw1->src)
	      && (pliw2->op == LIW_OP_AND
		  || pliw2->op == LIW_OP_OR
		  || pliw2->op == LIW_OP_XOR))
	    return false;

	  pliw2->src = pliw1->src;
	  return true;
	}

      return false;
    }

  /* Everything else is OK.  */
  return true;
}
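/* For illustration, with -mliw two adjacent, independent insns such as

	mov d1, d0
	add d3, d2

   can be combined into a single LIW (long instruction word) insn which the
   AM33 core can issue to both of its pipelines in the same cycle.  */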
/* Combine pairs of insns into LIW bundles.  */

static void
mn10300_bundle_liw (void)
{
  rtx_insn *r;

  for (r = get_insns (); r != NULL; r = next_nonnote_nondebug_insn (r))
    {
      rtx_insn *insn1, *insn2;
      struct liw_data liw1, liw2;

      insn1 = r;
      if (! extract_bundle (insn1, & liw1))
	continue;

      insn2 = next_nonnote_nondebug_insn (insn1);
      if (! extract_bundle (insn2, & liw2))
	continue;

      /* Check for source/destination overlap.  */
      if (! check_liw_constraints (& liw1, & liw2))
	continue;

      /* If LIW1 can only occupy the second slot, or LIW2 only the first,
	 swap them over so that LIW1 fills the first slot.  */
      if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
	{
	  struct liw_data temp;

	  temp = liw1;
	  liw1 = liw2;
	  liw2 = temp;
	}

      delete_insn (insn2);

      rtx insn2_pat;
      if (liw1.op == LIW_OP_CMP)
	insn2_pat = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
				 GEN_INT (liw2.op));
      else if (liw2.op == LIW_OP_CMP)
	insn2_pat = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
				 GEN_INT (liw1.op));
      else
	insn2_pat = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
			     GEN_INT (liw1.op), GEN_INT (liw2.op));

      insn2 = emit_insn_after (insn2_pat, insn1);
      delete_insn (insn1);
      r = insn2;
    }
}
#define DUMP(reason, insn)				\
  do							\
    {							\
      if (dump_file)					\
	{						\
	  fprintf (dump_file, reason "\n");		\
	  if (insn != NULL_RTX)				\
	    print_rtl_single (dump_file, insn);		\
	  fprintf (dump_file, "\n");			\
	}						\
    }							\
  while (0)
/* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
   Insert a SETLB insn just before LABEL.  */

static void
mn10300_insert_setlb_lcc (rtx label, rtx branch)
{
  rtx lcc, comparison, cmp_reg;

  if (LABEL_NUSES (label) > 1)
    {
      rtx_insn *insn;

      /* This label is used both as an entry point to the loop
	 and as a loop-back point for the loop.  We need to separate
	 these two functions so that the SETLB happens upon entry,
	 but the loop-back does not go to the SETLB instruction.  */
      DUMP ("Inserting SETLB insn after:", label);
      insn = emit_insn_after (gen_setlb (), label);
      label = gen_label_rtx ();
      emit_label_after (label, insn);
      DUMP ("Created new loop-back label:", label);
    }
  else
    {
      DUMP ("Inserting SETLB insn before:", label);
      emit_insn_before (gen_setlb (), label);
    }

  comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
  cmp_reg = XEXP (comparison, 0);
  gcc_assert (REG_P (cmp_reg));

  /* If the comparison has not already been split out of the branch
     then do so now.  */
  gcc_assert (REGNO (cmp_reg) == CC_REG);

  if (GET_MODE (cmp_reg) == CC_FLOATmode)
    lcc = gen_FLcc (comparison, label);
  else
    lcc = gen_Lcc (comparison, label);

  rtx_insn *jump = emit_jump_insn_before (lcc, branch);
  mark_jump_label (XVECEXP (lcc, 0, 0), jump, 0);
  JUMP_LABEL (jump) = label;
  DUMP ("Replacing branch insn...", branch);
  DUMP ("... with Lcc insn:", jump);
  delete_insn (branch);
}
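/* For illustration, a simple single-block counted loop ending in

	L1:	...
		add -1, d0
		bne L1

   is rewritten so that a SETLB insn sits just in front of L1 and the
   conditional branch is replaced by the corresponding Lcc insn, which loops
   back using the loop buffer that SETLB set up rather than taking an
   ordinary branch.  */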
/* Returns true if the given basic block contains a CALL insn.  */

static bool
mn10300_block_contains_call (basic_block block)
{
  rtx_insn *insn;

  FOR_BB_INSNS (block, insn)
    if (CALL_P (insn))
      return true;

  return false;
}

/* Returns true if any of the blocks making up the given loop contain a
   CALL insn.  */

static bool
mn10300_loop_contains_call_insn (loop_p loop)
{
  basic_block * bbs;
  bool result = false;
  unsigned int i;

  bbs = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    if (mn10300_block_contains_call (bbs[i]))
      {
	result = true;
	break;
      }

  free (bbs);
  return result;
}
static void
mn10300_scan_for_setlb_lcc (void)
{
  struct loop *loop;

  DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);

  compute_bb_for_insn ();

  /* Find the loops.  */
  loop_optimizer_init (AVOID_CFG_MODIFICATIONS);

  /* FIXME: For now we only investigate innermost loops.  In practice however
     if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
     be the case that its parent loop is suitable.  Thus we should check all
     loops, but work from the innermost outwards.  */
  FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
    {
      const char * reason = NULL;

      /* Check to see if we can modify this loop.  If we cannot
	 then set 'reason' to describe why it could not be done.  */
      if (loop->latch == NULL)
	reason = "it contains multiple latches";
      else if (loop->header != loop->latch)
	/* FIXME: We could handle loops that span multiple blocks,
	   but this requires a lot more work tracking down the branches
	   that need altering, so for now keep things simple.  */
	reason = "the loop spans multiple blocks";
      else if (mn10300_loop_contains_call_insn (loop))
	reason = "it contains CALL insns";
      else
	{
	  rtx_insn *branch = BB_END (loop->latch);

	  gcc_assert (JUMP_P (branch));
	  if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
	    /* We cannot optimize tablejumps and the like.  */
	    /* FIXME: We could handle unconditional jumps.  */
	    reason = "it is not a simple loop";
	  else
	    {
	      rtx label;

	      if (dump_file)
		flow_loop_dump (loop, dump_file, NULL, 0);

	      label = BB_HEAD (loop->header);
	      gcc_assert (LABEL_P (label));

	      mn10300_insert_setlb_lcc (label, branch);
	    }
	}

      if (dump_file && reason != NULL)
	fprintf (dump_file, "Loop starting with insn %d is not suitable because %s\n",
		 INSN_UID (BB_HEAD (loop->header)),
		 reason);
    }

  loop_optimizer_finalize ();

  df_finish_pass (false);

  DUMP ("SETLB scan complete", NULL_RTX);
}
/* Implement TARGET_MACHINE_DEPENDENT_REORG.  */

static void
mn10300_reorg (void)
{
  /* These are optimizations, so only run them if optimizing.  */
  if (TARGET_AM33 && (optimize > 0 || optimize_size))
    {
      if (TARGET_ALLOW_SETLB)
	mn10300_scan_for_setlb_lcc ();

      if (TARGET_ALLOW_LIW)
	mn10300_bundle_liw ();
    }
}
/* Initialize the GCC target structure.  */

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg

#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"

#undef  TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address

#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST mn10300_address_cost
#undef  TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST mn10300_register_move_cost
#undef  TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST mn10300_memory_move_cost
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mn10300_rtx_costs

#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START mn10300_file_start
#undef  TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef  TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra

#undef  TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mn10300_option_override

#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info

#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef  TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
#undef  TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
#undef  TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG mn10300_function_arg
#undef  TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance

#undef  TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
#undef  TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start

#undef  TARGET_CASE_VALUES_THRESHOLD
#define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold

#undef  TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P mn10300_legitimate_address_p
#undef  TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS mn10300_delegitimize_address
#undef  TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P mn10300_legitimate_constant_p

#undef  TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
#undef  TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
  mn10300_preferred_output_reload_class
#undef  TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD mn10300_secondary_reload

#undef  TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init

#undef  TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE mn10300_function_value
#undef  TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE mn10300_libcall_value

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK mn10300_asm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK mn10300_can_output_mi_thunk

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost

#undef  TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage

#undef  TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS mn10300_md_asm_clobbers

#undef  TARGET_FLAGS_REGNUM
#define TARGET_FLAGS_REGNUM CC_REG

struct gcc_target targetm = TARGET_INITIALIZER;